// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Video for Linux Two
 *
 * A generic video device interface for the LINUX operating system
 * using a set of device structures/vectors for low level operations.
 *
 * This file replaces the videodev.c file that comes with the
 * regular kernel distribution.
 *
 * Author: Bill Dirks <bill@thedirks.org>
 * based on code by Alan Cox, <alan@cymru.net>
 */

/*
 * Video capture interface for Linux
 *
 * A generic video device interface for the LINUX operating system
 * using a set of device structures/vectors for low level operations.
 *
 * Author: Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 */

/*
 * Video4linux 1/2 integration by Justin Schoeman
 * <justin@suntiger.ee.up.ac.za>
 * 2.4 PROCFS support ported from 2.4 kernels by
 * Iñaki García Etxebarria <garetxe@euskalnet.net>
 * Makefile fix by "W. Michael Petullo" <mike@flyn.org>
 * 2.4 devfs support ported from 2.4 kernels by
 * Dan Merillat <dan@merillat.org>
 * Added Gerd Knorrs v4l1 enhancements (Justin Schoeman)
 */
36
37 #include <linux/module.h>
38 #include <linux/types.h>
39 #include <linux/kernel.h>
40 #include <linux/mm.h>
41 #include <linux/string.h>
42 #include <linux/errno.h>
43 #include <linux/uaccess.h>
44 #include <asm/io.h>
45 #include <asm/div64.h>
46 #include <media/v4l2-common.h>
47 #include <media/v4l2-device.h>
48 #include <media/v4l2-ctrls.h>
49
50 #include <linux/videodev2.h>
51
/*
 *
 *	V 4 L 2   D R I V E R   H E L P E R   A P I
 *
 */

/*
 *  Video Standard Operations (contributed by Michael Schimek)
 */

/* Helper functions for control handling */

/* Fill in a struct v4l2_queryctrl */
int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 _min, s32 _max, s32 _step, s32 _def)
{
	/*
	 * v4l2_ctrl_fill() operates on 64-bit ranges, so widen the 32-bit
	 * arguments before handing them over.
	 */
	s64 min = _min;
	s64 max = _max;
	u64 step = _step;
	s64 def = _def;
	const char *name;

	v4l2_ctrl_fill(qctrl->id, &name, &qctrl->type,
		       &min, &max, &step, &def, &qctrl->flags);

	/* No name means the control ID is unknown. */
	if (!name)
		return -EINVAL;

	qctrl->minimum = min;
	qctrl->maximum = max;
	qctrl->step = step;
	qctrl->default_value = def;
	qctrl->reserved[0] = 0;
	qctrl->reserved[1] = 0;
	strscpy(qctrl->name, name, sizeof(qctrl->name));
	return 0;
}
EXPORT_SYMBOL(v4l2_ctrl_query_fill);
88
/*
 * Clamp x to [min, max], aligned to a multiple of 2^align. min and max
 * need not themselves be aligned, but at least one aligned value must lie
 * between them: e.g. min=17, max=31, align=4 is invalid since no multiple
 * of 16 lies between 17 and 31.
 */
static unsigned int clamp_align(unsigned int x, unsigned int min,
				unsigned int max, unsigned int align)
{
	/* Bits that must be zero in an aligned value */
	unsigned int mask = ~((1 << align) - 1);
	/* min rounded up / max rounded down to the alignment */
	unsigned int lo = (min + ~mask) & mask;
	unsigned int hi = max & mask;

	x = clamp(x, lo, hi);

	/* Round to the nearest aligned value, half-way cases up. */
	if (align)
		x = (x + (1 << (align - 1))) & mask;

	return x;
}
108
/* Clamp x to [min, max], then round it up to a multiple of alignment. */
static unsigned int clamp_roundup(unsigned int x, unsigned int min,
				  unsigned int max, unsigned int alignment)
{
	x = clamp(x, min, max);

	return alignment ? round_up(x, alignment) : x;
}
118
/**
 * v4l_bound_align_image - clamp and align an image's width and height
 * @w:      pointer to the width, updated in place
 * @wmin:   minimum allowed width
 * @wmax:   maximum allowed width
 * @walign: width alignment, in powers of two (width becomes a multiple of
 *          2^walign)
 * @h:      pointer to the height, updated in place
 * @hmin:   minimum allowed height
 * @hmax:   maximum allowed height
 * @halign: height alignment, in powers of two
 * @salign: image size alignment, in powers of two: on return the product
 *          width * height is a multiple of 2^salign, achieved by raising
 *          the width/height alignment as needed
 */
void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax,
			   unsigned int walign,
			   u32 *h, unsigned int hmin, unsigned int hmax,
			   unsigned int halign, unsigned int salign)
{
	*w = clamp_align(*w, wmin, wmax, walign);
	*h = clamp_align(*h, hmin, hmax, halign);

	/* Usually we don't need to align the size and are done now. */
	if (!salign)
		return;

	/* How much alignment do we have? (number of trailing zero bits) */
	walign = __ffs(*w);
	halign = __ffs(*h);
	/* Enough to satisfy the image alignment? */
	if (walign + halign < salign) {
		/* Max walign where there is still a valid width */
		unsigned int wmaxa = __fls(wmax ^ (wmin - 1));
		/* Max halign where there is still a valid height */
		unsigned int hmaxa = __fls(hmax ^ (hmin - 1));

		/* up the smaller alignment until we have enough */
		do {
			if (halign >= hmaxa ||
			    (walign <= halign && walign < wmaxa)) {
				*w = clamp_align(*w, wmin, wmax, walign + 1);
				walign = __ffs(*w);
			} else {
				*h = clamp_align(*h, hmin, hmax, halign + 1);
				halign = __ffs(*h);
			}
		} while (halign + walign < salign);
	}
}
EXPORT_SYMBOL_GPL(v4l_bound_align_image);
155
156 const void *
__v4l2_find_nearest_size(const void * array,size_t array_size,size_t entry_size,size_t width_offset,size_t height_offset,s32 width,s32 height)157 __v4l2_find_nearest_size(const void *array, size_t array_size,
158 size_t entry_size, size_t width_offset,
159 size_t height_offset, s32 width, s32 height)
160 {
161 u32 error, min_error = U32_MAX;
162 const void *best = NULL;
163 unsigned int i;
164
165 if (!array)
166 return NULL;
167
168 for (i = 0; i < array_size; i++, array += entry_size) {
169 const u32 *entry_width = array + width_offset;
170 const u32 *entry_height = array + height_offset;
171
172 error = abs(*entry_width - width) + abs(*entry_height - height);
173 if (error > min_error)
174 continue;
175
176 min_error = error;
177 best = array;
178 if (!error)
179 break;
180 }
181
182 return best;
183 }
184 EXPORT_SYMBOL_GPL(__v4l2_find_nearest_size);
185
v4l2_g_parm_cap(struct video_device * vdev,struct v4l2_subdev * sd,struct v4l2_streamparm * a)186 int v4l2_g_parm_cap(struct video_device *vdev,
187 struct v4l2_subdev *sd, struct v4l2_streamparm *a)
188 {
189 struct v4l2_subdev_frame_interval ival = { 0 };
190 int ret;
191
192 if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
193 a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
194 return -EINVAL;
195
196 if (vdev->device_caps & V4L2_CAP_READWRITE)
197 a->parm.capture.readbuffers = 2;
198 if (v4l2_subdev_has_op(sd, pad, get_frame_interval))
199 a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
200 ret = v4l2_subdev_call_state_active(sd, pad, get_frame_interval, &ival);
201 if (!ret)
202 a->parm.capture.timeperframe = ival.interval;
203 return ret;
204 }
205 EXPORT_SYMBOL_GPL(v4l2_g_parm_cap);
206
v4l2_s_parm_cap(struct video_device * vdev,struct v4l2_subdev * sd,struct v4l2_streamparm * a)207 int v4l2_s_parm_cap(struct video_device *vdev,
208 struct v4l2_subdev *sd, struct v4l2_streamparm *a)
209 {
210 struct v4l2_subdev_frame_interval ival = {
211 .interval = a->parm.capture.timeperframe
212 };
213 int ret;
214
215 if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
216 a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
217 return -EINVAL;
218
219 memset(&a->parm, 0, sizeof(a->parm));
220 if (vdev->device_caps & V4L2_CAP_READWRITE)
221 a->parm.capture.readbuffers = 2;
222 else
223 a->parm.capture.readbuffers = 0;
224
225 if (v4l2_subdev_has_op(sd, pad, get_frame_interval))
226 a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
227 ret = v4l2_subdev_call_state_active(sd, pad, set_frame_interval, &ival);
228 if (!ret)
229 a->parm.capture.timeperframe = ival.interval;
230 return ret;
231 }
232 EXPORT_SYMBOL_GPL(v4l2_s_parm_cap);
233
/**
 * v4l2_format_info - look up the format description for a pixel format
 * @format: a V4L2_PIX_FMT_* fourcc
 *
 * Returns a pointer to the static &struct v4l2_format_info entry describing
 * the format's encoding, plane layout, bytes per pixel and chroma
 * subsampling, or NULL if the format is not in the table.
 */
const struct v4l2_format_info *v4l2_format_info(u32 format)
{
	static const struct v4l2_format_info formats[] = {
		/* RGB formats */
		{ .format = V4L2_PIX_FMT_BGR24,   .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGB24,   .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_HSV24,   .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_BGR32,   .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_XBGR32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_BGRX32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGB32,   .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_XRGB32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGBX32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_HSV32,   .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_ARGB32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGBA32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_ABGR32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_BGRA32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGB565,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGB555,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_BGR666,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_BGR48_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_ABGR64_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGBA1010102, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGBX1010102, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_ARGB2101010, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },

		/* YUV packed formats */
		{ .format = V4L2_PIX_FMT_YUYV,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_YVYU,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_UYVY,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_VYUY,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_Y210,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_Y212,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_Y216,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_YUV48_12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_MT2110T, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2,
		  .block_w = { 16, 8, 0, 0 }, .block_h = { 32, 16, 0, 0 }},
		{ .format = V4L2_PIX_FMT_MT2110R, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2,
		  .block_w = { 16, 8, 0, 0 }, .block_h = { 32, 16, 0, 0 }},

		/* YUV planar formats */
		{ .format = V4L2_PIX_FMT_NV12,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_NV21,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_NV16,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_NV61,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_NV24,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_NV42,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_P010,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_P012,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },

		{ .format = V4L2_PIX_FMT_YUV410,  .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 4 },
		{ .format = V4L2_PIX_FMT_YVU410,  .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 4 },
		{ .format = V4L2_PIX_FMT_YUV411P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_YUV420,  .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_YVU420,  .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_YUV422P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_GREY,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },

		/* Tiled YUV formats */
		{ .format = V4L2_PIX_FMT_NV12_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_NV15_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2,
		  .block_w = { 4, 2, 0, 0 }, .block_h = { 1, 1, 0, 0 }},
		{ .format = V4L2_PIX_FMT_P010_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },

		/* YUV planar formats, non contiguous variant */
		{ .format = V4L2_PIX_FMT_YUV420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_YVU420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_YUV422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_YVU422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_YUV444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_YVU444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },

		{ .format = V4L2_PIX_FMT_NV12M,   .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_NV21M,   .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_NV16M,   .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_NV61M,   .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_P012M,   .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },

		/* Bayer RGB formats */
		{ .format = V4L2_PIX_FMT_SBGGR8,  .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGBRG8,  .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGRBG8,  .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SRGGB8,  .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SBGGR10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGBRG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGRBG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SRGGB10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SBGGR10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGBRG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGRBG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SRGGB10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SBGGR10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGBRG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGRBG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SRGGB10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SBGGR12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGBRG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGRBG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SRGGB12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
	};
	unsigned int i;

	/* Linear scan; the table is small and this is not a hot path. */
	for (i = 0; i < ARRAY_SIZE(formats); ++i)
		if (formats[i].format == format)
			return &formats[i];
	return NULL;
}
EXPORT_SYMBOL(v4l2_format_info);
343
v4l2_format_block_width(const struct v4l2_format_info * info,int plane)344 static inline unsigned int v4l2_format_block_width(const struct v4l2_format_info *info, int plane)
345 {
346 if (!info->block_w[plane])
347 return 1;
348 return info->block_w[plane];
349 }
350
v4l2_format_block_height(const struct v4l2_format_info * info,int plane)351 static inline unsigned int v4l2_format_block_height(const struct v4l2_format_info *info, int plane)
352 {
353 if (!info->block_h[plane])
354 return 1;
355 return info->block_h[plane];
356 }
357
/*
 * Constrain a frame size to a stepwise description: clamp width/height to
 * the min/max bounds and round them up to the step (e.g. macroblock)
 * alignment. A NULL description leaves the size untouched.
 */
void v4l2_apply_frmsize_constraints(u32 *width, u32 *height,
				    const struct v4l2_frmsize_stepwise *frmsize)
{
	if (frmsize) {
		*width = clamp_roundup(*width, frmsize->min_width,
				       frmsize->max_width,
				       frmsize->step_width);
		*height = clamp_roundup(*height, frmsize->min_height,
					frmsize->max_height,
					frmsize->step_height);
	}
}
EXPORT_SYMBOL_GPL(v4l2_apply_frmsize_constraints);
374
/*
 * Fill in the plane layout (bytesperline/sizeimage) of a multi-planar
 * pix format from its v4l2_format_info description. For single memory
 * plane formats all component sizes are accumulated into plane 0.
 * Returns 0 on success, -EINVAL for an unknown pixel format.
 */
int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt,
			u32 pixelformat, u32 width, u32 height)
{
	const struct v4l2_format_info *info = v4l2_format_info(pixelformat);
	int i;

	if (!info)
		return -EINVAL;

	pixfmt->width = width;
	pixfmt->height = height;
	pixfmt->pixelformat = pixelformat;
	pixfmt->num_planes = info->mem_planes;

	if (info->mem_planes == 1) {
		/* Everything lives in memory plane 0. */
		struct v4l2_plane_pix_format *plane = &pixfmt->plane_fmt[0];

		plane->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) *
				      info->bpp[0] / info->bpp_div[0];
		plane->sizeimage = 0;

		for (i = 0; i < info->comp_planes; i++) {
			/* Chroma subsampling only applies past plane 0. */
			unsigned int hdiv = i ? info->hdiv : 1;
			unsigned int vdiv = i ? info->vdiv : 1;
			unsigned int aw = ALIGN(width,
						v4l2_format_block_width(info, i));
			unsigned int ah = ALIGN(height,
						v4l2_format_block_height(info, i));

			plane->sizeimage += info->bpp[i] *
					    DIV_ROUND_UP(aw, hdiv) *
					    DIV_ROUND_UP(ah, vdiv) / info->bpp_div[i];
		}
	} else {
		/* One memory plane per component plane. */
		for (i = 0; i < info->comp_planes; i++) {
			unsigned int hdiv = i ? info->hdiv : 1;
			unsigned int vdiv = i ? info->vdiv : 1;
			unsigned int aw = ALIGN(width,
						v4l2_format_block_width(info, i));
			unsigned int ah = ALIGN(height,
						v4l2_format_block_height(info, i));
			struct v4l2_plane_pix_format *plane = &pixfmt->plane_fmt[i];

			plane->bytesperline = info->bpp[i] *
					      DIV_ROUND_UP(aw, hdiv) / info->bpp_div[i];
			plane->sizeimage = plane->bytesperline *
					   DIV_ROUND_UP(ah, vdiv);
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt_mp);
429
/*
 * Fill in bytesperline/sizeimage of a single-planar pix format from its
 * v4l2_format_info description. Returns 0 on success, -EINVAL for an
 * unknown pixel format or one that needs the multi-planar API.
 */
int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
		     u32 width, u32 height)
{
	const struct v4l2_format_info *info = v4l2_format_info(pixelformat);
	int i;

	if (!info)
		return -EINVAL;

	/* Single planar API cannot be used for multi plane formats. */
	if (info->mem_planes > 1)
		return -EINVAL;

	pixfmt->width = width;
	pixfmt->height = height;
	pixfmt->pixelformat = pixelformat;
	pixfmt->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) *
			       info->bpp[0] / info->bpp_div[0];
	pixfmt->sizeimage = 0;

	/* Accumulate the byte size of every component plane. */
	for (i = 0; i < info->comp_planes; i++) {
		/* Chroma subsampling only applies past plane 0. */
		unsigned int hdiv = i ? info->hdiv : 1;
		unsigned int vdiv = i ? info->vdiv : 1;
		unsigned int aw = ALIGN(width, v4l2_format_block_width(info, i));
		unsigned int ah = ALIGN(height, v4l2_format_block_height(info, i));

		pixfmt->sizeimage += info->bpp[i] *
				     DIV_ROUND_UP(aw, hdiv) *
				     DIV_ROUND_UP(ah, vdiv) / info->bpp_div[i];
	}
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt);
466
v4l2_get_link_freq(struct v4l2_ctrl_handler * handler,unsigned int mul,unsigned int div)467 s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
468 unsigned int div)
469 {
470 struct v4l2_ctrl *ctrl;
471 s64 freq;
472
473 ctrl = v4l2_ctrl_find(handler, V4L2_CID_LINK_FREQ);
474 if (ctrl) {
475 struct v4l2_querymenu qm = { .id = V4L2_CID_LINK_FREQ };
476 int ret;
477
478 qm.index = v4l2_ctrl_g_ctrl(ctrl);
479
480 ret = v4l2_querymenu(handler, &qm);
481 if (ret)
482 return -ENOENT;
483
484 freq = qm.value;
485 } else {
486 if (!mul || !div)
487 return -ENOENT;
488
489 ctrl = v4l2_ctrl_find(handler, V4L2_CID_PIXEL_RATE);
490 if (!ctrl)
491 return -ENOENT;
492
493 freq = div_u64(v4l2_ctrl_g_ctrl_int64(ctrl) * mul, div);
494
495 pr_warn("%s: Link frequency estimated using pixel rate: result might be inaccurate\n",
496 __func__);
497 pr_warn("%s: Consider implementing support for V4L2_CID_LINK_FREQ in the transmitter driver\n",
498 __func__);
499 }
500
501 return freq > 0 ? freq : -EINVAL;
502 }
503 EXPORT_SYMBOL_GPL(v4l2_get_link_freq);
504
505 /*
506 * Simplify a fraction using a simple continued fraction decomposition. The
507 * idea here is to convert fractions such as 333333/10000000 to 1/30 using
508 * 32 bit arithmetic only. The algorithm is not perfect and relies upon two
509 * arbitrary parameters to remove non-significative terms from the simple
510 * continued fraction decomposition. Using 8 and 333 for n_terms and threshold
511 * respectively seems to give nice results.
512 */
v4l2_simplify_fraction(u32 * numerator,u32 * denominator,unsigned int n_terms,unsigned int threshold)513 void v4l2_simplify_fraction(u32 *numerator, u32 *denominator,
514 unsigned int n_terms, unsigned int threshold)
515 {
516 u32 *an;
517 u32 x, y, r;
518 unsigned int i, n;
519
520 an = kmalloc_array(n_terms, sizeof(*an), GFP_KERNEL);
521 if (an == NULL)
522 return;
523
524 /*
525 * Convert the fraction to a simple continued fraction. See
526 * https://en.wikipedia.org/wiki/Continued_fraction
527 * Stop if the current term is bigger than or equal to the given
528 * threshold.
529 */
530 x = *numerator;
531 y = *denominator;
532
533 for (n = 0; n < n_terms && y != 0; ++n) {
534 an[n] = x / y;
535 if (an[n] >= threshold) {
536 if (n < 2)
537 n++;
538 break;
539 }
540
541 r = x - an[n] * y;
542 x = y;
543 y = r;
544 }
545
546 /* Expand the simple continued fraction back to an integer fraction. */
547 x = 0;
548 y = 1;
549
550 for (i = n; i > 0; --i) {
551 r = y;
552 y = an[i-1] * y + x;
553 x = r;
554 }
555
556 *numerator = y;
557 *denominator = x;
558 kfree(an);
559 }
560 EXPORT_SYMBOL_GPL(v4l2_simplify_fraction);
561
562 /*
563 * Convert a fraction to a frame interval in 100ns multiples. The idea here is
564 * to compute numerator / denominator * 10000000 using 32 bit fixed point
565 * arithmetic only.
566 */
v4l2_fraction_to_interval(u32 numerator,u32 denominator)567 u32 v4l2_fraction_to_interval(u32 numerator, u32 denominator)
568 {
569 u32 multiplier;
570
571 /* Saturate the result if the operation would overflow. */
572 if (denominator == 0 ||
573 numerator/denominator >= ((u32)-1)/10000000)
574 return (u32)-1;
575
576 /*
577 * Divide both the denominator and the multiplier by two until
578 * numerator * multiplier doesn't overflow. If anyone knows a better
579 * algorithm please let me know.
580 */
581 multiplier = 10000000;
582 while (numerator > ((u32)-1)/multiplier) {
583 multiplier /= 2;
584 denominator /= 2;
585 }
586
587 return denominator ? numerator * multiplier / denominator : 0;
588 }
589 EXPORT_SYMBOL_GPL(v4l2_fraction_to_interval);
590
v4l2_link_freq_to_bitmap(struct device * dev,const u64 * fw_link_freqs,unsigned int num_of_fw_link_freqs,const s64 * driver_link_freqs,unsigned int num_of_driver_link_freqs,unsigned long * bitmap)591 int v4l2_link_freq_to_bitmap(struct device *dev, const u64 *fw_link_freqs,
592 unsigned int num_of_fw_link_freqs,
593 const s64 *driver_link_freqs,
594 unsigned int num_of_driver_link_freqs,
595 unsigned long *bitmap)
596 {
597 unsigned int i;
598
599 *bitmap = 0;
600
601 if (!num_of_fw_link_freqs) {
602 dev_err(dev, "no link frequencies in firmware\n");
603 return -ENODATA;
604 }
605
606 for (i = 0; i < num_of_fw_link_freqs; i++) {
607 unsigned int j;
608
609 for (j = 0; j < num_of_driver_link_freqs; j++) {
610 if (fw_link_freqs[i] != driver_link_freqs[j])
611 continue;
612
613 dev_dbg(dev, "enabling link frequency %lld Hz\n",
614 driver_link_freqs[j]);
615 *bitmap |= BIT(j);
616 break;
617 }
618 }
619
620 if (!*bitmap) {
621 dev_err(dev, "no matching link frequencies found\n");
622
623 dev_dbg(dev, "specified in firmware:\n");
624 for (i = 0; i < num_of_fw_link_freqs; i++)
625 dev_dbg(dev, "\t%llu Hz\n", fw_link_freqs[i]);
626
627 dev_dbg(dev, "driver supported:\n");
628 for (i = 0; i < num_of_driver_link_freqs; i++)
629 dev_dbg(dev, "\t%lld Hz\n", driver_link_freqs[i]);
630
631 return -ENOENT;
632 }
633
634 return 0;
635 }
636 EXPORT_SYMBOL_GPL(v4l2_link_freq_to_bitmap);
637