// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Video for Linux Two
 *
 *	A generic video device interface for the LINUX operating system
 *	using a set of device structures/vectors for low level operations.
 *
 *	This file replaces the videodev.c file that comes with the
 *	regular kernel distribution.
 *
 * Author:	Bill Dirks <bill@thedirks.org>
 *		based on code by Alan Cox, <alan@cymru.net>
 */

/*
 * Video capture interface for Linux
 *
 *	A generic video device interface for the LINUX operating system
 *	using a set of device structures/vectors for low level operations.
 *
 * Author:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 */

/*
 * Video4linux 1/2 integration by Justin Schoeman
 * <justin@suntiger.ee.up.ac.za>
 * 2.4 PROCFS support ported from 2.4 kernels by
 *  Iñaki García Etxebarria <garetxe@euskalnet.net>
 * Makefile fix by "W. Michael Petullo" <mike@flyn.org>
 * 2.4 devfs support ported from 2.4 kernels by
 *  Dan Merillat <dan@merillat.org>
 * Added Gerd Knorr's v4l1 enhancements (Justin Schoeman)
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>

#include <linux/videodev2.h>

/*
 *
 *	V 4 L 2   D R I V E R   H E L P E R   A P I
 *
 */

/*
 *  Video Standard Operations (contributed by Michael Schimek)
 */

/* Helper functions for control handling			     */

/* Fill in a struct v4l2_queryctrl */
int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 _min, s32 _max, s32 _step, s32 _def)
{
	const char *name;
	s64 min = _min;
	s64 max = _max;
	u64 step = _step;
	s64 def = _def;

	v4l2_ctrl_fill(qctrl->id, &name, &qctrl->type,
		       &min, &max, &step, &def, &qctrl->flags);

	if (name == NULL)
		return -EINVAL;

	qctrl->minimum = min;
	qctrl->maximum = max;
	qctrl->step = step;
	qctrl->default_value = def;
	qctrl->reserved[0] = qctrl->reserved[1] = 0;
	strscpy(qctrl->name, name, sizeof(qctrl->name));
	return 0;
}
EXPORT_SYMBOL(v4l2_ctrl_query_fill);
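
/*
 * Example (hypothetical caller): a driver that still implements the legacy
 * VIDIOC_QUERYCTRL ioctl directly could fill in the range of a brightness
 * control like this; the ID, type, name and flags are supplied by
 * v4l2_ctrl_fill(), only the range comes from the caller:
 *
 *	struct v4l2_queryctrl qctrl = { .id = V4L2_CID_BRIGHTNESS };
 *
 *	if (v4l2_ctrl_query_fill(&qctrl, 0, 255, 1, 128))
 *		return -EINVAL;
 */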

/* Clamp x to be between min and max, aligned to a multiple of 2^align.  min
 * and max don't have to be aligned, but there must be at least one valid
 * value.  E.g., min=17,max=31,align=4 is not allowed as there are no multiples
 * of 16 between 17 and 31.  */
static unsigned int clamp_align(unsigned int x, unsigned int min,
				unsigned int max, unsigned int align)
{
	/* Bits that must be zero to be aligned */
	unsigned int mask = ~((1 << align) - 1);

	/* Clamp to aligned min and max */
	x = clamp(x, (min + ~mask) & mask, max & mask);

	/* Round to nearest aligned value */
	if (align)
		x = (x + (1 << (align - 1))) & mask;

	return x;
}
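
/*
 * Example: clamp_align(60, 17, 100, 4) first clamps 60 to the aligned range
 * [32, 96] (multiples of 2^4 = 16), then rounds to the nearest multiple of
 * 16, giving 64. Note that "align" is a power-of-two exponent, not a byte
 * count.
 */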

static unsigned int clamp_roundup(unsigned int x, unsigned int min,
				   unsigned int max, unsigned int alignment)
{
	x = clamp(x, min, max);
	if (alignment)
		x = round_up(x, alignment);

	return x;
}

void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax,
			   unsigned int walign,
			   u32 *h, unsigned int hmin, unsigned int hmax,
			   unsigned int halign, unsigned int salign)
{
	*w = clamp_align(*w, wmin, wmax, walign);
	*h = clamp_align(*h, hmin, hmax, halign);

	/* Usually we don't need to align the size and are done now. */
	if (!salign)
		return;

	/* How much alignment do we have? */
	walign = __ffs(*w);
	halign = __ffs(*h);
	/* Enough to satisfy the image alignment? */
	if (walign + halign < salign) {
		/* Max walign where there is still a valid width */
		unsigned int wmaxa = __fls(wmax ^ (wmin - 1));
		/* Max halign where there is still a valid height */
		unsigned int hmaxa = __fls(hmax ^ (hmin - 1));

		/* up the smaller alignment until we have enough */
		do {
			if (halign >= hmaxa ||
			    (walign <= halign && walign < wmaxa)) {
				*w = clamp_align(*w, wmin, wmax, walign + 1);
				walign = __ffs(*w);
			} else {
				*h = clamp_align(*h, hmin, hmax, halign + 1);
				halign = __ffs(*h);
			}
		} while (halign + walign < salign);
	}
}
EXPORT_SYMBOL_GPL(v4l_bound_align_image);
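
/*
 * Example (hypothetical caller): a capture driver validating a resolution in
 * its try_fmt handler; the limits used here are made up for illustration:
 *
 *	v4l_bound_align_image(&pix->width, 48, 1920, 1,
 *			      &pix->height, 32, 1080, 1, 3);
 *
 * This clamps width to [48, 1920] and height to [32, 1080], aligns each to a
 * multiple of 2, and then re-aligns one of them until width * height is a
 * multiple of 2^3 = 8.
 */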

const void *
__v4l2_find_nearest_size(const void *array, size_t array_size,
			 size_t entry_size, size_t width_offset,
			 size_t height_offset, s32 width, s32 height)
{
	u32 error, min_error = U32_MAX;
	const void *best = NULL;
	unsigned int i;

	if (!array)
		return NULL;

	for (i = 0; i < array_size; i++, array += entry_size) {
		const u32 *entry_width = array + width_offset;
		const u32 *entry_height = array + height_offset;

		error = abs(*entry_width - width) + abs(*entry_height - height);
		if (error > min_error)
			continue;

		min_error = error;
		best = array;
		if (!error)
			break;
	}

	return best;
}
EXPORT_SYMBOL_GPL(__v4l2_find_nearest_size);
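
/*
 * Drivers normally call this through the v4l2_find_nearest_size() macro from
 * media/v4l2-common.h rather than directly. A sketch, assuming a hypothetical
 * driver table of supported sizes with width/height members:
 *
 *	static const struct my_size {
 *		u32 width, height;
 *	} sizes[] = { { 640, 480 }, { 1280, 720 }, { 1920, 1080 } };
 *
 *	const struct my_size *best =
 *		v4l2_find_nearest_size(sizes, ARRAY_SIZE(sizes),
 *				       width, height,
 *				       pix->width, pix->height);
 */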

int v4l2_g_parm_cap(struct video_device *vdev,
		    struct v4l2_subdev *sd, struct v4l2_streamparm *a)
{
	struct v4l2_subdev_frame_interval ival = { 0 };
	int ret;

	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
	    a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
		return -EINVAL;

	if (vdev->device_caps & V4L2_CAP_READWRITE)
		a->parm.capture.readbuffers = 2;
	if (v4l2_subdev_has_op(sd, pad, get_frame_interval))
		a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
	ret = v4l2_subdev_call_state_active(sd, pad, get_frame_interval, &ival);
	if (!ret)
		a->parm.capture.timeperframe = ival.interval;
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_g_parm_cap);

int v4l2_s_parm_cap(struct video_device *vdev,
		    struct v4l2_subdev *sd, struct v4l2_streamparm *a)
{
	struct v4l2_subdev_frame_interval ival = {
		.interval = a->parm.capture.timeperframe
	};
	int ret;

	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
	    a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
		return -EINVAL;

	memset(&a->parm, 0, sizeof(a->parm));
	if (vdev->device_caps & V4L2_CAP_READWRITE)
		a->parm.capture.readbuffers = 2;
	else
		a->parm.capture.readbuffers = 0;

	if (v4l2_subdev_has_op(sd, pad, get_frame_interval))
		a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
	ret = v4l2_subdev_call_state_active(sd, pad, set_frame_interval, &ival);
	if (!ret)
		a->parm.capture.timeperframe = ival.interval;
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_s_parm_cap);

const struct v4l2_format_info *v4l2_format_info(u32 format)
{
	static const struct v4l2_format_info formats[] = {
		/* RGB formats */
		{ .format = V4L2_PIX_FMT_BGR24,   .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGB24,   .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_HSV24,   .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_BGR32,   .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_XBGR32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_BGRX32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGB32,   .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_XRGB32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGBX32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_HSV32,   .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_ARGB32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGBA32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_ABGR32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_BGRA32,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGB565,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGB555,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_BGR666,  .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_BGR48_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_BGR48, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGB48, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_ABGR64_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGBA1010102, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_RGBX1010102, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_ARGB2101010, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },

		/* YUV packed formats */
		{ .format = V4L2_PIX_FMT_YUYV,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_YVYU,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_UYVY,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_VYUY,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_Y210,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_Y212,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_Y216,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_YUV48_12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_MT2110T, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2,
		  .block_w = { 16, 8, 0, 0 }, .block_h = { 32, 16, 0, 0 }},
		{ .format = V4L2_PIX_FMT_MT2110R, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2,
		  .block_w = { 16, 8, 0, 0 }, .block_h = { 32, 16, 0, 0 }},

		/* YUV planar formats */
		{ .format = V4L2_PIX_FMT_NV12,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_NV21,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_NV16,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_NV61,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_NV24,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_NV42,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_P010,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_P012,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },

		{ .format = V4L2_PIX_FMT_YUV410,  .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 4 },
		{ .format = V4L2_PIX_FMT_YVU410,  .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 4 },
		{ .format = V4L2_PIX_FMT_YUV411P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_YUV420,  .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_YVU420,  .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_YUV422P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_GREY,    .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },

		/* Tiled YUV formats */
		{ .format = V4L2_PIX_FMT_NV12_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_NV15_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2,
		  .block_w = { 4, 2, 0, 0 }, .block_h = { 1, 1, 0, 0 }},
		{ .format = V4L2_PIX_FMT_P010_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },

		/* YUV planar formats, non contiguous variant */
		{ .format = V4L2_PIX_FMT_YUV420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_YVU420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_YUV422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_YVU422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_YUV444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_YVU444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },

		{ .format = V4L2_PIX_FMT_NV12M,   .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_NV21M,   .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
		{ .format = V4L2_PIX_FMT_NV16M,   .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_NV61M,   .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_P012M,   .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },

		/* Bayer RGB formats */
		{ .format = V4L2_PIX_FMT_SBGGR8,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGBRG8,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGRBG8,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SRGGB8,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SBGGR10,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGBRG10,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGRBG10,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SRGGB10,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SBGGR10ALAW8,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGBRG10ALAW8,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGRBG10ALAW8,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SRGGB10ALAW8,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SBGGR10DPCM8,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGBRG10DPCM8,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGRBG10DPCM8,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SRGGB10DPCM8,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SBGGR12,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGBRG12,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SGRBG12,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
		{ .format = V4L2_PIX_FMT_SRGGB12,	.pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(formats); ++i)
		if (formats[i].format == format)
			return &formats[i];
	return NULL;
}
EXPORT_SYMBOL(v4l2_format_info);
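
/*
 * Example: looking up NV12 returns a descriptor with .mem_planes = 1,
 * .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2 and .vdiv = 2, i.e. a
 * single memory plane holding a full-resolution Y plane followed by a
 * half-resolution interleaved CbCr plane:
 *
 *	const struct v4l2_format_info *info;
 *
 *	info = v4l2_format_info(V4L2_PIX_FMT_NV12);
 *	if (!info)
 *		return -EINVAL;
 */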

static inline unsigned int v4l2_format_block_width(const struct v4l2_format_info *info, int plane)
{
	if (!info->block_w[plane])
		return 1;
	return info->block_w[plane];
}

static inline unsigned int v4l2_format_block_height(const struct v4l2_format_info *info, int plane)
{
	if (!info->block_h[plane])
		return 1;
	return info->block_h[plane];
}

void v4l2_apply_frmsize_constraints(u32 *width, u32 *height,
				    const struct v4l2_frmsize_stepwise *frmsize)
{
	if (!frmsize)
		return;

	/*
	 * Clamp width/height to meet min/max constraints and round it up to
	 * macroblock alignment.
	 */
	*width = clamp_roundup(*width, frmsize->min_width, frmsize->max_width,
			       frmsize->step_width);
	*height = clamp_roundup(*height, frmsize->min_height, frmsize->max_height,
				frmsize->step_height);
}
EXPORT_SYMBOL_GPL(v4l2_apply_frmsize_constraints);

int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt,
			u32 pixelformat, u32 width, u32 height)
{
	const struct v4l2_format_info *info;
	struct v4l2_plane_pix_format *plane;
	int i;

	info = v4l2_format_info(pixelformat);
	if (!info)
		return -EINVAL;

	pixfmt->width = width;
	pixfmt->height = height;
	pixfmt->pixelformat = pixelformat;
	pixfmt->num_planes = info->mem_planes;

	if (info->mem_planes == 1) {
		plane = &pixfmt->plane_fmt[0];
		plane->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0] / info->bpp_div[0];
		plane->sizeimage = 0;

		for (i = 0; i < info->comp_planes; i++) {
			unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
			unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
			unsigned int aligned_width;
			unsigned int aligned_height;

			aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
			aligned_height = ALIGN(height, v4l2_format_block_height(info, i));

			plane->sizeimage += info->bpp[i] *
				DIV_ROUND_UP(aligned_width, hdiv) *
				DIV_ROUND_UP(aligned_height, vdiv) / info->bpp_div[i];
		}
	} else {
		for (i = 0; i < info->comp_planes; i++) {
			unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
			unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
			unsigned int aligned_width;
			unsigned int aligned_height;

			aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
			aligned_height = ALIGN(height, v4l2_format_block_height(info, i));

			plane = &pixfmt->plane_fmt[i];
			plane->bytesperline =
				info->bpp[i] * DIV_ROUND_UP(aligned_width, hdiv) / info->bpp_div[i];
			plane->sizeimage =
				plane->bytesperline * DIV_ROUND_UP(aligned_height, vdiv);
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt_mp);

int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
		     u32 width, u32 height)
{
	const struct v4l2_format_info *info;
	int i;

	info = v4l2_format_info(pixelformat);
	if (!info)
		return -EINVAL;

	/* Single planar API cannot be used for multi plane formats. */
	if (info->mem_planes > 1)
		return -EINVAL;

	pixfmt->width = width;
	pixfmt->height = height;
	pixfmt->pixelformat = pixelformat;
	pixfmt->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0] / info->bpp_div[0];
	pixfmt->sizeimage = 0;

	for (i = 0; i < info->comp_planes; i++) {
		unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
		unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
		unsigned int aligned_width;
		unsigned int aligned_height;

		aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
		aligned_height = ALIGN(height, v4l2_format_block_height(info, i));

		pixfmt->sizeimage += info->bpp[i] *
			DIV_ROUND_UP(aligned_width, hdiv) *
			DIV_ROUND_UP(aligned_height, vdiv) / info->bpp_div[i];
	}
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt);
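
/*
 * Worked example for the single-planar helper above: 640x480 NV12
 * (bpp = { 1, 2 }, hdiv = vdiv = 2) gives bytesperline = 640 and
 * sizeimage = 640 * 480 + 2 * 320 * 240 = 460800 bytes, i.e. the usual
 * 1.5 bytes per pixel:
 *
 *	struct v4l2_pix_format pix;
 *	int ret;
 *
 *	ret = v4l2_fill_pixfmt(&pix, V4L2_PIX_FMT_NV12, 640, 480);
 *	if (ret)
 *		return ret;
 */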

s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
		       unsigned int div)
{
	struct v4l2_ctrl *ctrl;
	s64 freq;

	ctrl = v4l2_ctrl_find(handler, V4L2_CID_LINK_FREQ);
	if (ctrl) {
		struct v4l2_querymenu qm = { .id = V4L2_CID_LINK_FREQ };
		int ret;

		qm.index = v4l2_ctrl_g_ctrl(ctrl);

		ret = v4l2_querymenu(handler, &qm);
		if (ret)
			return -ENOENT;

		freq = qm.value;
	} else {
		if (!mul || !div)
			return -ENOENT;

		ctrl = v4l2_ctrl_find(handler, V4L2_CID_PIXEL_RATE);
		if (!ctrl)
			return -ENOENT;

		freq = div_u64(v4l2_ctrl_g_ctrl_int64(ctrl) * mul, div);

		pr_warn("%s: Link frequency estimated using pixel rate: result might be inaccurate\n",
			__func__);
		pr_warn("%s: Consider implementing support for V4L2_CID_LINK_FREQ in the transmitter driver\n",
			__func__);
	}

	return freq > 0 ? freq : -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_get_link_freq);
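
/*
 * Example (hypothetical caller): a CSI-2 receiver driver querying the link
 * frequency advertised by a connected sensor subdevice; here 16 would stand
 * for the bits per sample and 2 * 2 for twice the number of data lanes, used
 * only for the pixel-rate based fallback:
 *
 *	s64 freq;
 *
 *	freq = v4l2_get_link_freq(sensor_sd->ctrl_handler, 16, 2 * 2);
 *	if (freq < 0)
 *		return freq;
 */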

/*
 * Simplify a fraction using a simple continued fraction decomposition. The
 * idea here is to convert fractions such as 333333/10000000 to 1/30 using
 * 32 bit arithmetic only. The algorithm is not perfect and relies upon two
 * arbitrary parameters to remove non-significant terms from the simple
 * continued fraction decomposition. Using 8 and 333 for n_terms and threshold
 * respectively seems to give nice results.
 */
void v4l2_simplify_fraction(u32 *numerator, u32 *denominator,
		unsigned int n_terms, unsigned int threshold)
{
	u32 *an;
	u32 x, y, r;
	unsigned int i, n;

	an = kmalloc_array(n_terms, sizeof(*an), GFP_KERNEL);
	if (an == NULL)
		return;

	/*
	 * Convert the fraction to a simple continued fraction. See
	 * https://en.wikipedia.org/wiki/Continued_fraction
	 * Stop if the current term is bigger than or equal to the given
	 * threshold.
	 */
	x = *numerator;
	y = *denominator;

	for (n = 0; n < n_terms && y != 0; ++n) {
		an[n] = x / y;
		if (an[n] >= threshold) {
			if (n < 2)
				n++;
			break;
		}

		r = x - an[n] * y;
		x = y;
		y = r;
	}

	/* Expand the simple continued fraction back to an integer fraction. */
	x = 0;
	y = 1;

	for (i = n; i > 0; --i) {
		r = y;
		y = an[i-1] * y + x;
		x = r;
	}

	*numerator = y;
	*denominator = x;
	kfree(an);
}
EXPORT_SYMBOL_GPL(v4l2_simplify_fraction);
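
/*
 * Worked example: 333333/10000000 decomposes into the terms [0; 30, 33333].
 * The third term exceeds the suggested threshold of 333 and is dropped, and
 * expanding [0; 30] back yields 1/30:
 *
 *	u32 num = 333333, den = 10000000;
 *
 *	v4l2_simplify_fraction(&num, &den, 8, 333);
 *
 * after which num == 1 and den == 30.
 */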

/*
 * Convert a fraction to a frame interval in 100ns multiples. The idea here is
 * to compute numerator / denominator * 10000000 using 32 bit fixed point
 * arithmetic only.
 */
u32 v4l2_fraction_to_interval(u32 numerator, u32 denominator)
{
	u32 multiplier;

	/* Saturate the result if the operation would overflow. */
	if (denominator == 0 ||
	    numerator/denominator >= ((u32)-1)/10000000)
		return (u32)-1;

	/*
	 * Divide both the denominator and the multiplier by two until
	 * numerator * multiplier doesn't overflow. If anyone knows a better
	 * algorithm please let me know.
	 */
	multiplier = 10000000;
	while (numerator > ((u32)-1)/multiplier) {
		multiplier /= 2;
		denominator /= 2;
	}

	return denominator ? numerator * multiplier / denominator : 0;
}
EXPORT_SYMBOL_GPL(v4l2_fraction_to_interval);
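
/*
 * Example: v4l2_fraction_to_interval(1, 30) returns 333333, i.e. a 30 fps
 * frame interval expressed in 100 ns units, while a zero denominator
 * saturates the result to U32_MAX.
 */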

int v4l2_link_freq_to_bitmap(struct device *dev, const u64 *fw_link_freqs,
			     unsigned int num_of_fw_link_freqs,
			     const s64 *driver_link_freqs,
			     unsigned int num_of_driver_link_freqs,
			     unsigned long *bitmap)
{
	unsigned int i;

	*bitmap = 0;

	if (!num_of_fw_link_freqs) {
		dev_err(dev, "no link frequencies in firmware\n");
		return -ENODATA;
	}

	for (i = 0; i < num_of_fw_link_freqs; i++) {
		unsigned int j;

		for (j = 0; j < num_of_driver_link_freqs; j++) {
			if (fw_link_freqs[i] != driver_link_freqs[j])
				continue;

			dev_dbg(dev, "enabling link frequency %lld Hz\n",
				driver_link_freqs[j]);
			*bitmap |= BIT(j);
			break;
		}
	}

	if (!*bitmap) {
		dev_err(dev, "no matching link frequencies found\n");

		dev_dbg(dev, "specified in firmware:\n");
		for (i = 0; i < num_of_fw_link_freqs; i++)
			dev_dbg(dev, "\t%llu Hz\n", fw_link_freqs[i]);

		dev_dbg(dev, "driver supported:\n");
		for (i = 0; i < num_of_driver_link_freqs; i++)
			dev_dbg(dev, "\t%lld Hz\n", driver_link_freqs[i]);

		return -ENOENT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_link_freq_to_bitmap);
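
/*
 * Example (hypothetical caller): a sensor driver matching the frequencies
 * listed in the firmware "link-frequencies" endpoint property against the
 * frequencies it supports, assuming bus_cfg is a parsed
 * struct v4l2_fwnode_endpoint and my_link_freqs is the driver's own table:
 *
 *	static const s64 my_link_freqs[] = { 360000000, 480000000 };
 *	unsigned long freq_bitmap;
 *	int ret;
 *
 *	ret = v4l2_link_freq_to_bitmap(dev, bus_cfg.link_frequencies,
 *				       bus_cfg.nr_of_link_frequencies,
 *				       my_link_freqs,
 *				       ARRAY_SIZE(my_link_freqs),
 *				       &freq_bitmap);
 *	if (ret)
 *		return ret;
 */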
639