1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * V4L2 sub-device
4 *
5 * Copyright (C) 2010 Nokia Corporation
6 *
7 * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
8 * Sakari Ailus <sakari.ailus@iki.fi>
9 */
10
11 #include <linux/export.h>
12 #include <linux/ioctl.h>
13 #include <linux/leds.h>
14 #include <linux/mm.h>
15 #include <linux/module.h>
16 #include <linux/overflow.h>
17 #include <linux/slab.h>
18 #include <linux/string.h>
19 #include <linux/types.h>
20 #include <linux/version.h>
21 #include <linux/videodev2.h>
22
23 #include <media/v4l2-ctrls.h>
24 #include <media/v4l2-device.h>
25 #include <media/v4l2-event.h>
26 #include <media/v4l2-fh.h>
27 #include <media/v4l2-ioctl.h>
28
29 /**
30 * struct v4l2_subdev_stream_config - Used for storing stream configuration.
31 *
32 * @pad: pad number
33 * @stream: stream number
34 * @enabled: has the stream been enabled with v4l2_subdev_enable_streams()
35 * @fmt: &struct v4l2_mbus_framefmt
36 * @crop: &struct v4l2_rect to be used for crop
37 * @compose: &struct v4l2_rect to be used for compose
38 * @interval: frame interval
39 *
40 * This structure stores configuration for a stream.
41 */
42 struct v4l2_subdev_stream_config {
43 u32 pad;
44 u32 stream;
45 bool enabled;
46
47 struct v4l2_mbus_framefmt fmt;
48 struct v4l2_rect crop;
49 struct v4l2_rect compose;
50 struct v4l2_fract interval;
51 };
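/*
 * Usage sketch (an illustration, not code from this file): drivers normally
 * reach one of these per-(pad, stream) configurations through the state
 * accessors rather than by walking the configs array directly, e.g.:
 *
 *	struct v4l2_mbus_framefmt *fmt;
 *
 *	fmt = v4l2_subdev_state_get_format(state, pad, stream);
 *	if (!fmt)
 *		return -EINVAL;
 *
 * The (pad, stream) lookup itself is implemented in
 * __v4l2_subdev_state_get_format() further down in this file.
 */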
52
53 #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
54 /*
55 * The Streams API is an experimental feature. To use the Streams API, set
56 * 'v4l2_subdev_enable_streams_api' to 1 below.
57 */
58
59 static bool v4l2_subdev_enable_streams_api;
60 #endif
61
62 /*
63 * Maximum stream ID is 63 for now, as we use u64 bitmask to represent a set
64 * of streams.
65 *
66 * Note that V4L2_FRAME_DESC_ENTRY_MAX is related: V4L2_FRAME_DESC_ENTRY_MAX
67 * restricts the total number of streams in a pad, although the stream ID is
68 * not restricted.
69 */
70 #define V4L2_SUBDEV_MAX_STREAM_ID 63
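/*
 * Example of the bitmask representation: a set containing streams 0 and 2
 * is expressed as
 *
 *	u64 streams_mask = BIT_ULL(0) | BIT_ULL(2);
 *
 * and membership is tested with 'streams_mask & BIT_ULL(stream)', as done
 * in __v4l2_link_validate_get_streams() below. The u64 mask is what limits
 * stream IDs to 63.
 */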
71
72 #include "v4l2-subdev-priv.h"
73
74 #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
75 static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd)
76 {
77 struct v4l2_subdev_state *state;
78 static struct lock_class_key key;
79
80 state = __v4l2_subdev_state_alloc(sd, "fh->state->lock", &key);
81 if (IS_ERR(state))
82 return PTR_ERR(state);
83
84 fh->state = state;
85
86 return 0;
87 }
88
89 static void subdev_fh_free(struct v4l2_subdev_fh *fh)
90 {
91 __v4l2_subdev_state_free(fh->state);
92 fh->state = NULL;
93 }
94
95 static int subdev_open(struct file *file)
96 {
97 struct video_device *vdev = video_devdata(file);
98 struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
99 struct v4l2_subdev_fh *subdev_fh;
100 int ret;
101
102 subdev_fh = kzalloc(sizeof(*subdev_fh), GFP_KERNEL);
103 if (subdev_fh == NULL)
104 return -ENOMEM;
105
106 ret = subdev_fh_init(subdev_fh, sd);
107 if (ret) {
108 kfree(subdev_fh);
109 return ret;
110 }
111
112 v4l2_fh_init(&subdev_fh->vfh, vdev);
113 v4l2_fh_add(&subdev_fh->vfh, file);
114
115 if (sd->v4l2_dev->mdev && sd->entity.graph_obj.mdev->dev) {
116 struct module *owner;
117
118 owner = sd->entity.graph_obj.mdev->dev->driver->owner;
119 if (!try_module_get(owner)) {
120 ret = -EBUSY;
121 goto err;
122 }
123 subdev_fh->owner = owner;
124 }
125
126 if (sd->internal_ops && sd->internal_ops->open) {
127 ret = sd->internal_ops->open(sd, subdev_fh);
128 if (ret < 0)
129 goto err;
130 }
131
132 return 0;
133
134 err:
135 module_put(subdev_fh->owner);
136 v4l2_fh_del(&subdev_fh->vfh, file);
137 v4l2_fh_exit(&subdev_fh->vfh);
138 subdev_fh_free(subdev_fh);
139 kfree(subdev_fh);
140
141 return ret;
142 }
143
144 static int subdev_close(struct file *file)
145 {
146 struct video_device *vdev = video_devdata(file);
147 struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
148 struct v4l2_fh *vfh = file_to_v4l2_fh(file);
149 struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
150
151 if (sd->internal_ops && sd->internal_ops->close)
152 sd->internal_ops->close(sd, subdev_fh);
153 module_put(subdev_fh->owner);
154 v4l2_fh_del(vfh, file);
155 v4l2_fh_exit(vfh);
156 subdev_fh_free(subdev_fh);
157 kfree(subdev_fh);
158
159 return 0;
160 }
161 #else /* CONFIG_VIDEO_V4L2_SUBDEV_API */
162 static int subdev_open(struct file *file)
163 {
164 return -ENODEV;
165 }
166
167 static int subdev_close(struct file *file)
168 {
169 return -ENODEV;
170 }
171 #endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
172
173 static void v4l2_subdev_enable_privacy_led(struct v4l2_subdev *sd)
174 {
175 #if IS_REACHABLE(CONFIG_LEDS_CLASS)
176 if (!IS_ERR_OR_NULL(sd->privacy_led))
177 led_set_brightness(sd->privacy_led,
178 sd->privacy_led->max_brightness);
179 #endif
180 }
181
182 static void v4l2_subdev_disable_privacy_led(struct v4l2_subdev *sd)
183 {
184 #if IS_REACHABLE(CONFIG_LEDS_CLASS)
185 if (!IS_ERR_OR_NULL(sd->privacy_led))
186 led_set_brightness(sd->privacy_led, 0);
187 #endif
188 }
189
190 static inline int check_which(u32 which)
191 {
192 if (which != V4L2_SUBDEV_FORMAT_TRY &&
193 which != V4L2_SUBDEV_FORMAT_ACTIVE)
194 return -EINVAL;
195
196 return 0;
197 }
198
199 static inline int check_pad(struct v4l2_subdev *sd, u32 pad)
200 {
201 #if defined(CONFIG_MEDIA_CONTROLLER)
202 if (sd->entity.num_pads) {
203 if (pad >= sd->entity.num_pads)
204 return -EINVAL;
205 return 0;
206 }
207 #endif
208 /* allow pad 0 on subdevices not registered as media entities */
209 if (pad > 0)
210 return -EINVAL;
211 return 0;
212 }
213
214 static int check_state(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
215 u32 which, u32 pad, u32 stream)
216 {
217 if (sd->flags & V4L2_SUBDEV_FL_STREAMS) {
218 #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
219 if (!v4l2_subdev_state_get_format(state, pad, stream))
220 return -EINVAL;
221 return 0;
222 #else
223 return -EINVAL;
224 #endif
225 }
226
227 if (stream != 0)
228 return -EINVAL;
229
230 if (which == V4L2_SUBDEV_FORMAT_TRY && (!state || !state->pads))
231 return -EINVAL;
232
233 return 0;
234 }
235
236 static inline int check_format(struct v4l2_subdev *sd,
237 struct v4l2_subdev_state *state,
238 struct v4l2_subdev_format *format)
239 {
240 if (!format)
241 return -EINVAL;
242
243 return check_which(format->which) ? : check_pad(sd, format->pad) ? :
244 check_state(sd, state, format->which, format->pad, format->stream);
245 }
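/*
 * Note: 'a ? : b' in the helpers above and below is the GNU C conditional
 * with an omitted middle operand, equivalent to 'a ? a : b' with 'a'
 * evaluated only once. Each chain therefore returns the first non-zero
 * error code from the check helpers, or the result of the driver operation
 * once every check has returned 0.
 */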
246
247 static int call_get_fmt(struct v4l2_subdev *sd,
248 struct v4l2_subdev_state *state,
249 struct v4l2_subdev_format *format)
250 {
251 return check_format(sd, state, format) ? :
252 sd->ops->pad->get_fmt(sd, state, format);
253 }
254
255 static int call_set_fmt(struct v4l2_subdev *sd,
256 struct v4l2_subdev_state *state,
257 struct v4l2_subdev_format *format)
258 {
259 return check_format(sd, state, format) ? :
260 sd->ops->pad->set_fmt(sd, state, format);
261 }
262
263 static int call_enum_mbus_code(struct v4l2_subdev *sd,
264 struct v4l2_subdev_state *state,
265 struct v4l2_subdev_mbus_code_enum *code)
266 {
267 if (!code)
268 return -EINVAL;
269
270 return check_which(code->which) ? : check_pad(sd, code->pad) ? :
271 check_state(sd, state, code->which, code->pad, code->stream) ? :
272 sd->ops->pad->enum_mbus_code(sd, state, code);
273 }
274
275 static int call_enum_frame_size(struct v4l2_subdev *sd,
276 struct v4l2_subdev_state *state,
277 struct v4l2_subdev_frame_size_enum *fse)
278 {
279 if (!fse)
280 return -EINVAL;
281
282 return check_which(fse->which) ? : check_pad(sd, fse->pad) ? :
283 check_state(sd, state, fse->which, fse->pad, fse->stream) ? :
284 sd->ops->pad->enum_frame_size(sd, state, fse);
285 }
286
287 static int call_enum_frame_interval(struct v4l2_subdev *sd,
288 struct v4l2_subdev_state *state,
289 struct v4l2_subdev_frame_interval_enum *fie)
290 {
291 if (!fie)
292 return -EINVAL;
293
294 return check_which(fie->which) ? : check_pad(sd, fie->pad) ? :
295 check_state(sd, state, fie->which, fie->pad, fie->stream) ? :
296 sd->ops->pad->enum_frame_interval(sd, state, fie);
297 }
298
299 static inline int check_selection(struct v4l2_subdev *sd,
300 struct v4l2_subdev_state *state,
301 struct v4l2_subdev_selection *sel)
302 {
303 if (!sel)
304 return -EINVAL;
305
306 return check_which(sel->which) ? : check_pad(sd, sel->pad) ? :
307 check_state(sd, state, sel->which, sel->pad, sel->stream);
308 }
309
310 static int call_get_selection(struct v4l2_subdev *sd,
311 struct v4l2_subdev_state *state,
312 struct v4l2_subdev_selection *sel)
313 {
314 return check_selection(sd, state, sel) ? :
315 sd->ops->pad->get_selection(sd, state, sel);
316 }
317
318 static int call_set_selection(struct v4l2_subdev *sd,
319 struct v4l2_subdev_state *state,
320 struct v4l2_subdev_selection *sel)
321 {
322 return check_selection(sd, state, sel) ? :
323 sd->ops->pad->set_selection(sd, state, sel);
324 }
325
326 static inline int check_frame_interval(struct v4l2_subdev *sd,
327 struct v4l2_subdev_state *state,
328 struct v4l2_subdev_frame_interval *fi)
329 {
330 if (!fi)
331 return -EINVAL;
332
333 return check_which(fi->which) ? : check_pad(sd, fi->pad) ? :
334 check_state(sd, state, fi->which, fi->pad, fi->stream);
335 }
336
337 static int call_get_frame_interval(struct v4l2_subdev *sd,
338 struct v4l2_subdev_state *state,
339 struct v4l2_subdev_frame_interval *fi)
340 {
341 return check_frame_interval(sd, state, fi) ? :
342 sd->ops->pad->get_frame_interval(sd, state, fi);
343 }
344
345 static int call_set_frame_interval(struct v4l2_subdev *sd,
346 struct v4l2_subdev_state *state,
347 struct v4l2_subdev_frame_interval *fi)
348 {
349 return check_frame_interval(sd, state, fi) ? :
350 sd->ops->pad->set_frame_interval(sd, state, fi);
351 }
352
353 static int call_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
354 struct v4l2_mbus_frame_desc *fd)
355 {
356 unsigned int i;
357 int ret;
358
359 #if defined(CONFIG_MEDIA_CONTROLLER)
360 if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
361 return -EOPNOTSUPP;
362 #endif
363
364 memset(fd, 0, sizeof(*fd));
365
366 ret = sd->ops->pad->get_frame_desc(sd, pad, fd);
367 if (ret)
368 return ret;
369
370 dev_dbg(sd->dev, "Frame descriptor on pad %u, type %s\n", pad,
371 fd->type == V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL ? "parallel" :
372 fd->type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2 ? "CSI-2" :
373 "unknown");
374
375 for (i = 0; i < fd->num_entries; i++) {
376 struct v4l2_mbus_frame_desc_entry *entry = &fd->entry[i];
377 char buf[20] = "";
378
379 if (fd->type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2)
380 WARN_ON(snprintf(buf, sizeof(buf),
381 ", vc %u, dt 0x%02x",
382 entry->bus.csi2.vc,
383 entry->bus.csi2.dt) >= sizeof(buf));
384
385 dev_dbg(sd->dev,
386 "\tstream %u, code 0x%04x, length %u, flags 0x%04x%s\n",
387 entry->stream, entry->pixelcode, entry->length,
388 entry->flags, buf);
389 }
390
391 return 0;
392 }
393
394 static inline int check_edid(struct v4l2_subdev *sd,
395 struct v4l2_subdev_edid *edid)
396 {
397 if (!edid)
398 return -EINVAL;
399
400 if (edid->blocks && edid->edid == NULL)
401 return -EINVAL;
402
403 return check_pad(sd, edid->pad);
404 }
405
406 static int call_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
407 {
408 return check_edid(sd, edid) ? : sd->ops->pad->get_edid(sd, edid);
409 }
410
411 static int call_set_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
412 {
413 return check_edid(sd, edid) ? : sd->ops->pad->set_edid(sd, edid);
414 }
415
416 static int call_s_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
417 struct v4l2_dv_timings *timings)
418 {
419 if (!timings)
420 return -EINVAL;
421
422 return check_pad(sd, pad) ? :
423 sd->ops->pad->s_dv_timings(sd, pad, timings);
424 }
425
426 static int call_g_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
427 struct v4l2_dv_timings *timings)
428 {
429 if (!timings)
430 return -EINVAL;
431
432 return check_pad(sd, pad) ? :
433 sd->ops->pad->g_dv_timings(sd, pad, timings);
434 }
435
436 static int call_query_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
437 struct v4l2_dv_timings *timings)
438 {
439 if (!timings)
440 return -EINVAL;
441
442 return check_pad(sd, pad) ? :
443 sd->ops->pad->query_dv_timings(sd, pad, timings);
444 }
445
446 static int call_dv_timings_cap(struct v4l2_subdev *sd,
447 struct v4l2_dv_timings_cap *cap)
448 {
449 if (!cap)
450 return -EINVAL;
451
452 return check_pad(sd, cap->pad) ? :
453 sd->ops->pad->dv_timings_cap(sd, cap);
454 }
455
456 static int call_enum_dv_timings(struct v4l2_subdev *sd,
457 struct v4l2_enum_dv_timings *dvt)
458 {
459 if (!dvt)
460 return -EINVAL;
461
462 return check_pad(sd, dvt->pad) ? :
463 sd->ops->pad->enum_dv_timings(sd, dvt);
464 }
465
466 static int call_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad,
467 struct v4l2_mbus_config *config)
468 {
469 memset(config, 0, sizeof(*config));
470
471 return check_pad(sd, pad) ? :
472 sd->ops->pad->get_mbus_config(sd, pad, config);
473 }
474
475 static int call_s_stream(struct v4l2_subdev *sd, int enable)
476 {
477 int ret;
478
479 /*
480 * The .s_stream() operation must never be called to start or stop an
481 * already started or stopped subdev. Catch offenders but don't return
482 * an error yet to avoid regressions.
483 */
484 if (WARN_ON(sd->s_stream_enabled == !!enable))
485 return 0;
486
487 ret = sd->ops->video->s_stream(sd, enable);
488
489 if (!enable && ret < 0) {
490 dev_warn(sd->dev, "disabling streaming failed (%d)\n", ret);
491 ret = 0;
492 }
493
494 if (!ret) {
495 sd->s_stream_enabled = enable;
496
497 if (enable)
498 v4l2_subdev_enable_privacy_led(sd);
499 else
500 v4l2_subdev_disable_privacy_led(sd);
501 }
502
503 return ret;
504 }
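/*
 * Example of the expected call pattern (a sketch): a bridge driver starts
 * streaming with
 *
 *	v4l2_subdev_call(sd, video, s_stream, 1);
 *
 * and later stops it with
 *
 *	v4l2_subdev_call(sd, video, s_stream, 0);
 *
 * Calling s_stream with the same 'enable' value twice in a row trips the
 * WARN_ON() above.
 */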
505
506 #ifdef CONFIG_MEDIA_CONTROLLER
507 /*
508 * Create state-management wrapper for pad ops dealing with subdev state. The
509 * wrapper handles the case where the caller does not provide the called
510 * subdev's state. This should be removed when all the callers are fixed.
511 */
512 #define DEFINE_STATE_WRAPPER(f, arg_type) \
513 static int call_##f##_state(struct v4l2_subdev *sd, \
514 struct v4l2_subdev_state *_state, \
515 arg_type *arg) \
516 { \
517 struct v4l2_subdev_state *state = _state; \
518 int ret; \
519 if (!_state) \
520 state = v4l2_subdev_lock_and_get_active_state(sd); \
521 ret = call_##f(sd, state, arg); \
522 if (!_state && state) \
523 v4l2_subdev_unlock_state(state); \
524 return ret; \
525 }
526
527 #else /* CONFIG_MEDIA_CONTROLLER */
528
529 #define DEFINE_STATE_WRAPPER(f, arg_type) \
530 static int call_##f##_state(struct v4l2_subdev *sd, \
531 struct v4l2_subdev_state *state, \
532 arg_type *arg) \
533 { \
534 return call_##f(sd, state, arg); \
535 }
536
537 #endif /* CONFIG_MEDIA_CONTROLLER */
538
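/*
 * For illustration, DEFINE_STATE_WRAPPER(get_fmt, struct v4l2_subdev_format)
 * with CONFIG_MEDIA_CONTROLLER enabled expands to roughly:
 *
 *	static int call_get_fmt_state(struct v4l2_subdev *sd,
 *				      struct v4l2_subdev_state *_state,
 *				      struct v4l2_subdev_format *arg)
 *	{
 *		struct v4l2_subdev_state *state = _state;
 *		int ret;
 *
 *		if (!_state)
 *			state = v4l2_subdev_lock_and_get_active_state(sd);
 *		ret = call_get_fmt(sd, state, arg);
 *		if (!_state && state)
 *			v4l2_subdev_unlock_state(state);
 *		return ret;
 *	}
 */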
539 DEFINE_STATE_WRAPPER(get_fmt, struct v4l2_subdev_format);
540 DEFINE_STATE_WRAPPER(set_fmt, struct v4l2_subdev_format);
541 DEFINE_STATE_WRAPPER(enum_mbus_code, struct v4l2_subdev_mbus_code_enum);
542 DEFINE_STATE_WRAPPER(enum_frame_size, struct v4l2_subdev_frame_size_enum);
543 DEFINE_STATE_WRAPPER(enum_frame_interval, struct v4l2_subdev_frame_interval_enum);
544 DEFINE_STATE_WRAPPER(get_selection, struct v4l2_subdev_selection);
545 DEFINE_STATE_WRAPPER(set_selection, struct v4l2_subdev_selection);
546
547 static const struct v4l2_subdev_pad_ops v4l2_subdev_call_pad_wrappers = {
548 .get_fmt = call_get_fmt_state,
549 .set_fmt = call_set_fmt_state,
550 .enum_mbus_code = call_enum_mbus_code_state,
551 .enum_frame_size = call_enum_frame_size_state,
552 .enum_frame_interval = call_enum_frame_interval_state,
553 .get_selection = call_get_selection_state,
554 .set_selection = call_set_selection_state,
555 .get_frame_interval = call_get_frame_interval,
556 .set_frame_interval = call_set_frame_interval,
557 .get_edid = call_get_edid,
558 .set_edid = call_set_edid,
559 .s_dv_timings = call_s_dv_timings,
560 .g_dv_timings = call_g_dv_timings,
561 .query_dv_timings = call_query_dv_timings,
562 .dv_timings_cap = call_dv_timings_cap,
563 .enum_dv_timings = call_enum_dv_timings,
564 .get_frame_desc = call_get_frame_desc,
565 .get_mbus_config = call_get_mbus_config,
566 };
567
568 static const struct v4l2_subdev_video_ops v4l2_subdev_call_video_wrappers = {
569 .s_stream = call_s_stream,
570 };
571
572 const struct v4l2_subdev_ops v4l2_subdev_call_wrappers = {
573 .pad = &v4l2_subdev_call_pad_wrappers,
574 .video = &v4l2_subdev_call_video_wrappers,
575 };
576 EXPORT_SYMBOL(v4l2_subdev_call_wrappers);
577
578 #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
579
580 static struct v4l2_subdev_state *
581 subdev_ioctl_get_state(struct v4l2_subdev *sd, struct v4l2_subdev_fh *subdev_fh,
582 unsigned int cmd, void *arg)
583 {
584 u32 which;
585
586 switch (cmd) {
587 default:
588 return NULL;
589 case VIDIOC_SUBDEV_G_FMT:
590 case VIDIOC_SUBDEV_S_FMT:
591 which = ((struct v4l2_subdev_format *)arg)->which;
592 break;
593 case VIDIOC_SUBDEV_G_CROP:
594 case VIDIOC_SUBDEV_S_CROP:
595 which = ((struct v4l2_subdev_crop *)arg)->which;
596 break;
597 case VIDIOC_SUBDEV_ENUM_MBUS_CODE:
598 which = ((struct v4l2_subdev_mbus_code_enum *)arg)->which;
599 break;
600 case VIDIOC_SUBDEV_ENUM_FRAME_SIZE:
601 which = ((struct v4l2_subdev_frame_size_enum *)arg)->which;
602 break;
603 case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL:
604 which = ((struct v4l2_subdev_frame_interval_enum *)arg)->which;
605 break;
606 case VIDIOC_SUBDEV_G_SELECTION:
607 case VIDIOC_SUBDEV_S_SELECTION:
608 which = ((struct v4l2_subdev_selection *)arg)->which;
609 break;
610 case VIDIOC_SUBDEV_G_FRAME_INTERVAL:
611 case VIDIOC_SUBDEV_S_FRAME_INTERVAL: {
612 struct v4l2_subdev_frame_interval *fi = arg;
613
614 if (!(subdev_fh->client_caps &
615 V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH))
616 fi->which = V4L2_SUBDEV_FORMAT_ACTIVE;
617
618 which = fi->which;
619 break;
620 }
621 case VIDIOC_SUBDEV_G_ROUTING:
622 case VIDIOC_SUBDEV_S_ROUTING:
623 which = ((struct v4l2_subdev_routing *)arg)->which;
624 break;
625 }
626
627 return which == V4L2_SUBDEV_FORMAT_TRY ?
628 subdev_fh->state :
629 v4l2_subdev_get_unlocked_active_state(sd);
630 }
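/*
 * In short: TRY ioctls (which == V4L2_SUBDEV_FORMAT_TRY) operate on the
 * per-file-handle state allocated in subdev_fh_init(), while ACTIVE ioctls
 * operate on the sub-device's single active state. The caller,
 * subdev_do_ioctl_lock(), takes the returned state's lock around the ioctl.
 */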
631
632 static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
633 struct v4l2_subdev_state *state)
634 {
635 struct video_device *vdev = video_devdata(file);
636 struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
637 struct v4l2_fh *vfh = file_to_v4l2_fh(file);
638 struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
639 bool ro_subdev = test_bit(V4L2_FL_SUBDEV_RO_DEVNODE, &vdev->flags);
640 bool streams_subdev = sd->flags & V4L2_SUBDEV_FL_STREAMS;
641 bool client_supports_streams = subdev_fh->client_caps &
642 V4L2_SUBDEV_CLIENT_CAP_STREAMS;
643 int rval;
644
645 /*
646 * If the streams API is not enabled, remove V4L2_SUBDEV_CAP_STREAMS.
647 * Remove this when the API is no longer experimental.
648 */
649 if (!v4l2_subdev_enable_streams_api)
650 streams_subdev = false;
651
652 switch (cmd) {
653 case VIDIOC_SUBDEV_QUERYCAP: {
654 struct v4l2_subdev_capability *cap = arg;
655
656 memset(cap->reserved, 0, sizeof(cap->reserved));
657 cap->version = LINUX_VERSION_CODE;
658 cap->capabilities =
659 (ro_subdev ? V4L2_SUBDEV_CAP_RO_SUBDEV : 0) |
660 (streams_subdev ? V4L2_SUBDEV_CAP_STREAMS : 0);
661
662 return 0;
663 }
664
665 case VIDIOC_QUERYCTRL:
666 /*
667 * TODO: this really should be folded into v4l2_queryctrl (this
668 * currently returns -EINVAL for NULL control handlers).
669 * However, v4l2_queryctrl() is still called directly by
670 * drivers as well and until that has been addressed I believe
671 * it is safer to do the check here. The same is true for the
672 * other control ioctls below.
673 */
674 if (!vfh->ctrl_handler)
675 return -ENOTTY;
676 return v4l2_queryctrl(vfh->ctrl_handler, arg);
677
678 case VIDIOC_QUERY_EXT_CTRL:
679 if (!vfh->ctrl_handler)
680 return -ENOTTY;
681 return v4l2_query_ext_ctrl(vfh->ctrl_handler, arg);
682
683 case VIDIOC_QUERYMENU:
684 if (!vfh->ctrl_handler)
685 return -ENOTTY;
686 return v4l2_querymenu(vfh->ctrl_handler, arg);
687
688 case VIDIOC_G_CTRL:
689 if (!vfh->ctrl_handler)
690 return -ENOTTY;
691 return v4l2_g_ctrl(vfh->ctrl_handler, arg);
692
693 case VIDIOC_S_CTRL:
694 if (!vfh->ctrl_handler)
695 return -ENOTTY;
696 return v4l2_s_ctrl(vfh, vfh->ctrl_handler, arg);
697
698 case VIDIOC_G_EXT_CTRLS:
699 if (!vfh->ctrl_handler)
700 return -ENOTTY;
701 return v4l2_g_ext_ctrls(vfh->ctrl_handler,
702 vdev, sd->v4l2_dev->mdev, arg);
703
704 case VIDIOC_S_EXT_CTRLS:
705 if (!vfh->ctrl_handler)
706 return -ENOTTY;
707 return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler,
708 vdev, sd->v4l2_dev->mdev, arg);
709
710 case VIDIOC_TRY_EXT_CTRLS:
711 if (!vfh->ctrl_handler)
712 return -ENOTTY;
713 return v4l2_try_ext_ctrls(vfh->ctrl_handler,
714 vdev, sd->v4l2_dev->mdev, arg);
715
716 case VIDIOC_DQEVENT:
717 if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
718 return -ENOIOCTLCMD;
719
720 return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);
721
722 case VIDIOC_SUBSCRIBE_EVENT:
723 if (v4l2_subdev_has_op(sd, core, subscribe_event))
724 return v4l2_subdev_call(sd, core, subscribe_event,
725 vfh, arg);
726
727 if ((sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS) &&
728 vfh->ctrl_handler)
729 return v4l2_ctrl_subdev_subscribe_event(sd, vfh, arg);
730
731 return -ENOIOCTLCMD;
732
733 case VIDIOC_UNSUBSCRIBE_EVENT:
734 if (v4l2_subdev_has_op(sd, core, unsubscribe_event))
735 return v4l2_subdev_call(sd, core, unsubscribe_event,
736 vfh, arg);
737
738 if (sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)
739 return v4l2_event_subdev_unsubscribe(sd, vfh, arg);
740
741 return -ENOIOCTLCMD;
742
743 #ifdef CONFIG_VIDEO_ADV_DEBUG
744 case VIDIOC_DBG_G_REGISTER:
745 {
746 struct v4l2_dbg_register *p = arg;
747
748 if (!capable(CAP_SYS_ADMIN))
749 return -EPERM;
750 return v4l2_subdev_call(sd, core, g_register, p);
751 }
752 case VIDIOC_DBG_S_REGISTER:
753 {
754 struct v4l2_dbg_register *p = arg;
755
756 if (!capable(CAP_SYS_ADMIN))
757 return -EPERM;
758 return v4l2_subdev_call(sd, core, s_register, p);
759 }
760 case VIDIOC_DBG_G_CHIP_INFO:
761 {
762 struct v4l2_dbg_chip_info *p = arg;
763
764 if (p->match.type != V4L2_CHIP_MATCH_SUBDEV || p->match.addr)
765 return -EINVAL;
766 if (sd->ops->core && sd->ops->core->s_register)
767 p->flags |= V4L2_CHIP_FL_WRITABLE;
768 if (sd->ops->core && sd->ops->core->g_register)
769 p->flags |= V4L2_CHIP_FL_READABLE;
770 strscpy(p->name, sd->name, sizeof(p->name));
771 return 0;
772 }
773 #endif
774
775 case VIDIOC_LOG_STATUS: {
776 int ret;
777
778 pr_info("%s: ================= START STATUS =================\n",
779 sd->name);
780 ret = v4l2_subdev_call(sd, core, log_status);
781 pr_info("%s: ================== END STATUS ==================\n",
782 sd->name);
783 return ret;
784 }
785
786 case VIDIOC_SUBDEV_G_FMT: {
787 struct v4l2_subdev_format *format = arg;
788
789 if (!client_supports_streams)
790 format->stream = 0;
791
792 memset(format->reserved, 0, sizeof(format->reserved));
793 memset(format->format.reserved, 0, sizeof(format->format.reserved));
794 return v4l2_subdev_call(sd, pad, get_fmt, state, format);
795 }
796
797 case VIDIOC_SUBDEV_S_FMT: {
798 struct v4l2_subdev_format *format = arg;
799
800 if (format->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
801 return -EPERM;
802
803 if (!client_supports_streams)
804 format->stream = 0;
805
806 memset(format->reserved, 0, sizeof(format->reserved));
807 memset(format->format.reserved, 0, sizeof(format->format.reserved));
808 return v4l2_subdev_call(sd, pad, set_fmt, state, format);
809 }
810
811 case VIDIOC_SUBDEV_G_CROP: {
812 struct v4l2_subdev_crop *crop = arg;
813 struct v4l2_subdev_selection sel;
814
815 if (!client_supports_streams)
816 crop->stream = 0;
817
818 memset(crop->reserved, 0, sizeof(crop->reserved));
819 memset(&sel, 0, sizeof(sel));
820 sel.which = crop->which;
821 sel.pad = crop->pad;
822 sel.stream = crop->stream;
823 sel.target = V4L2_SEL_TGT_CROP;
824
825 rval = v4l2_subdev_call(
826 sd, pad, get_selection, state, &sel);
827
828 crop->rect = sel.r;
829
830 return rval;
831 }
832
833 case VIDIOC_SUBDEV_S_CROP: {
834 struct v4l2_subdev_crop *crop = arg;
835 struct v4l2_subdev_selection sel;
836
837 if (crop->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
838 return -EPERM;
839
840 if (!client_supports_streams)
841 crop->stream = 0;
842
843 memset(crop->reserved, 0, sizeof(crop->reserved));
844 memset(&sel, 0, sizeof(sel));
845 sel.which = crop->which;
846 sel.pad = crop->pad;
847 sel.stream = crop->stream;
848 sel.target = V4L2_SEL_TGT_CROP;
849 sel.r = crop->rect;
850
851 rval = v4l2_subdev_call(
852 sd, pad, set_selection, state, &sel);
853
854 crop->rect = sel.r;
855
856 return rval;
857 }
858
859 case VIDIOC_SUBDEV_ENUM_MBUS_CODE: {
860 struct v4l2_subdev_mbus_code_enum *code = arg;
861
862 if (!client_supports_streams)
863 code->stream = 0;
864
865 memset(code->reserved, 0, sizeof(code->reserved));
866 return v4l2_subdev_call(sd, pad, enum_mbus_code, state,
867 code);
868 }
869
870 case VIDIOC_SUBDEV_ENUM_FRAME_SIZE: {
871 struct v4l2_subdev_frame_size_enum *fse = arg;
872
873 if (!client_supports_streams)
874 fse->stream = 0;
875
876 memset(fse->reserved, 0, sizeof(fse->reserved));
877 return v4l2_subdev_call(sd, pad, enum_frame_size, state,
878 fse);
879 }
880
881 case VIDIOC_SUBDEV_G_FRAME_INTERVAL: {
882 struct v4l2_subdev_frame_interval *fi = arg;
883
884 if (!client_supports_streams)
885 fi->stream = 0;
886
887 memset(fi->reserved, 0, sizeof(fi->reserved));
888 return v4l2_subdev_call(sd, pad, get_frame_interval, state, fi);
889 }
890
891 case VIDIOC_SUBDEV_S_FRAME_INTERVAL: {
892 struct v4l2_subdev_frame_interval *fi = arg;
893
894 if (!client_supports_streams)
895 fi->stream = 0;
896
897 if (fi->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
898 return -EPERM;
899
900 memset(fi->reserved, 0, sizeof(fi->reserved));
901 return v4l2_subdev_call(sd, pad, set_frame_interval, state, fi);
902 }
903
904 case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: {
905 struct v4l2_subdev_frame_interval_enum *fie = arg;
906
907 if (!client_supports_streams)
908 fie->stream = 0;
909
910 memset(fie->reserved, 0, sizeof(fie->reserved));
911 return v4l2_subdev_call(sd, pad, enum_frame_interval, state,
912 fie);
913 }
914
915 case VIDIOC_SUBDEV_G_SELECTION: {
916 struct v4l2_subdev_selection *sel = arg;
917
918 if (!client_supports_streams)
919 sel->stream = 0;
920
921 memset(sel->reserved, 0, sizeof(sel->reserved));
922 return v4l2_subdev_call(
923 sd, pad, get_selection, state, sel);
924 }
925
926 case VIDIOC_SUBDEV_S_SELECTION: {
927 struct v4l2_subdev_selection *sel = arg;
928
929 if (sel->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
930 return -EPERM;
931
932 if (!client_supports_streams)
933 sel->stream = 0;
934
935 memset(sel->reserved, 0, sizeof(sel->reserved));
936 return v4l2_subdev_call(
937 sd, pad, set_selection, state, sel);
938 }
939
940 case VIDIOC_G_EDID: {
941 struct v4l2_subdev_edid *edid = arg;
942
943 return v4l2_subdev_call(sd, pad, get_edid, edid);
944 }
945
946 case VIDIOC_S_EDID: {
947 struct v4l2_subdev_edid *edid = arg;
948
949 return v4l2_subdev_call(sd, pad, set_edid, edid);
950 }
951
952 case VIDIOC_SUBDEV_DV_TIMINGS_CAP: {
953 struct v4l2_dv_timings_cap *cap = arg;
954
955 return v4l2_subdev_call(sd, pad, dv_timings_cap, cap);
956 }
957
958 case VIDIOC_SUBDEV_ENUM_DV_TIMINGS: {
959 struct v4l2_enum_dv_timings *dvt = arg;
960
961 return v4l2_subdev_call(sd, pad, enum_dv_timings, dvt);
962 }
963
964 case VIDIOC_SUBDEV_QUERY_DV_TIMINGS:
965 return v4l2_subdev_call(sd, pad, query_dv_timings, 0, arg);
966
967 case VIDIOC_SUBDEV_G_DV_TIMINGS:
968 return v4l2_subdev_call(sd, pad, g_dv_timings, 0, arg);
969
970 case VIDIOC_SUBDEV_S_DV_TIMINGS:
971 if (ro_subdev)
972 return -EPERM;
973
974 return v4l2_subdev_call(sd, pad, s_dv_timings, 0, arg);
975
976 case VIDIOC_SUBDEV_G_STD:
977 return v4l2_subdev_call(sd, video, g_std, arg);
978
979 case VIDIOC_SUBDEV_S_STD: {
980 v4l2_std_id *std = arg;
981
982 if (ro_subdev)
983 return -EPERM;
984
985 return v4l2_subdev_call(sd, video, s_std, *std);
986 }
987
988 case VIDIOC_SUBDEV_ENUMSTD: {
989 struct v4l2_standard *p = arg;
990 v4l2_std_id id;
991
992 if (v4l2_subdev_call(sd, video, g_tvnorms, &id))
993 return -EINVAL;
994
995 return v4l_video_std_enumstd(p, id);
996 }
997
998 case VIDIOC_SUBDEV_QUERYSTD:
999 return v4l2_subdev_call(sd, video, querystd, arg);
1000
1001 case VIDIOC_SUBDEV_G_ROUTING: {
1002 struct v4l2_subdev_routing *routing = arg;
1003 struct v4l2_subdev_krouting *krouting;
1004
1005 if (!v4l2_subdev_enable_streams_api)
1006 return -ENOIOCTLCMD;
1007
1008 if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
1009 return -ENOIOCTLCMD;
1010
1011 memset(routing->reserved, 0, sizeof(routing->reserved));
1012
1013 krouting = &state->routing;
1014
1015 memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes,
1016 krouting->routes,
1017 min(krouting->num_routes, routing->len_routes) *
1018 sizeof(*krouting->routes));
1019 routing->num_routes = krouting->num_routes;
1020
1021 return 0;
1022 }
1023
1024 case VIDIOC_SUBDEV_S_ROUTING: {
1025 struct v4l2_subdev_routing *routing = arg;
1026 struct v4l2_subdev_route *routes =
1027 (struct v4l2_subdev_route *)(uintptr_t)routing->routes;
1028 struct v4l2_subdev_krouting krouting = {};
1029 unsigned int num_active_routes = 0;
1030 unsigned int i;
1031
1032 if (!v4l2_subdev_enable_streams_api)
1033 return -ENOIOCTLCMD;
1034
1035 if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
1036 return -ENOIOCTLCMD;
1037
1038 if (routing->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
1039 return -EPERM;
1040
1041 if (routing->num_routes > routing->len_routes)
1042 return -EINVAL;
1043
1044 memset(routing->reserved, 0, sizeof(routing->reserved));
1045
1046 for (i = 0; i < routing->num_routes; ++i) {
1047 const struct v4l2_subdev_route *route = &routes[i];
1048 const struct media_pad *pads = sd->entity.pads;
1049
1050 if (route->sink_stream > V4L2_SUBDEV_MAX_STREAM_ID ||
1051 route->source_stream > V4L2_SUBDEV_MAX_STREAM_ID)
1052 return -EINVAL;
1053
1054 if (route->sink_pad >= sd->entity.num_pads)
1055 return -EINVAL;
1056
1057 if (!(pads[route->sink_pad].flags &
1058 MEDIA_PAD_FL_SINK))
1059 return -EINVAL;
1060
1061 if (route->source_pad >= sd->entity.num_pads)
1062 return -EINVAL;
1063
1064 if (!(pads[route->source_pad].flags &
1065 MEDIA_PAD_FL_SOURCE))
1066 return -EINVAL;
1067
1068 if (route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)
1069 num_active_routes++;
1070 }
1071
1072 /*
1073 * Drivers that implement routing need to report a frame
1074 * descriptor accordingly, with up to one entry per route. Until
1075 * the frame descriptor entries are allocated dynamically,
1076 * limit the number of active routes to
1077 * V4L2_FRAME_DESC_ENTRY_MAX.
1078 */
1079 if (num_active_routes > V4L2_FRAME_DESC_ENTRY_MAX)
1080 return -E2BIG;
1081
1082 /*
1083 * If the driver doesn't support setting routing, just return
1084 * the routing table.
1085 */
1086 if (!v4l2_subdev_has_op(sd, pad, set_routing)) {
1087 memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes,
1088 state->routing.routes,
1089 min(state->routing.num_routes, routing->len_routes) *
1090 sizeof(*state->routing.routes));
1091 routing->num_routes = state->routing.num_routes;
1092
1093 return 0;
1094 }
1095
1096 krouting.num_routes = routing->num_routes;
1097 krouting.len_routes = routing->len_routes;
1098 krouting.routes = routes;
1099
1100 rval = v4l2_subdev_call(sd, pad, set_routing, state,
1101 routing->which, &krouting);
1102 if (rval < 0)
1103 return rval;
1104
1105 memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes,
1106 state->routing.routes,
1107 min(state->routing.num_routes, routing->len_routes) *
1108 sizeof(*state->routing.routes));
1109 routing->num_routes = state->routing.num_routes;
1110
1111 return 0;
1112 }
1113
1114 case VIDIOC_SUBDEV_G_CLIENT_CAP: {
1115 struct v4l2_subdev_client_capability *client_cap = arg;
1116
1117 client_cap->capabilities = subdev_fh->client_caps;
1118
1119 return 0;
1120 }
1121
1122 case VIDIOC_SUBDEV_S_CLIENT_CAP: {
1123 struct v4l2_subdev_client_capability *client_cap = arg;
1124
1125 /*
1126 * Clear V4L2_SUBDEV_CLIENT_CAP_STREAMS if streams API is not
1127 * enabled. Remove this when streams API is no longer
1128 * experimental.
1129 */
1130 if (!v4l2_subdev_enable_streams_api)
1131 client_cap->capabilities &= ~V4L2_SUBDEV_CLIENT_CAP_STREAMS;
1132
1133 /* Filter out unsupported capabilities */
1134 client_cap->capabilities &= (V4L2_SUBDEV_CLIENT_CAP_STREAMS |
1135 V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH);
1136
1137 subdev_fh->client_caps = client_cap->capabilities;
1138
1139 return 0;
1140 }
1141
1142 default:
1143 return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
1144 }
1145
1146 return 0;
1147 }
1148
1149 static long subdev_do_ioctl_lock(struct file *file, unsigned int cmd, void *arg)
1150 {
1151 struct video_device *vdev = video_devdata(file);
1152 struct mutex *lock = vdev->lock;
1153 long ret = -ENODEV;
1154
1155 if (lock && mutex_lock_interruptible(lock))
1156 return -ERESTARTSYS;
1157
1158 if (video_is_registered(vdev)) {
1159 struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
1160 struct v4l2_fh *vfh = file_to_v4l2_fh(file);
1161 struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
1162 struct v4l2_subdev_state *state;
1163
1164 state = subdev_ioctl_get_state(sd, subdev_fh, cmd, arg);
1165
1166 if (state)
1167 v4l2_subdev_lock_state(state);
1168
1169 ret = subdev_do_ioctl(file, cmd, arg, state);
1170
1171 if (state)
1172 v4l2_subdev_unlock_state(state);
1173 }
1174
1175 if (lock)
1176 mutex_unlock(lock);
1177 return ret;
1178 }
1179
1180 static long subdev_ioctl(struct file *file, unsigned int cmd,
1181 unsigned long arg)
1182 {
1183 return video_usercopy(file, cmd, arg, subdev_do_ioctl_lock);
1184 }
1185
1186 #ifdef CONFIG_COMPAT
1187 static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
1188 unsigned long arg)
1189 {
1190 struct video_device *vdev = video_devdata(file);
1191 struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
1192
1193 return v4l2_subdev_call(sd, core, compat_ioctl32, cmd, arg);
1194 }
1195 #endif
1196
1197 #else /* CONFIG_VIDEO_V4L2_SUBDEV_API */
1198 static long subdev_ioctl(struct file *file, unsigned int cmd,
1199 unsigned long arg)
1200 {
1201 return -ENODEV;
1202 }
1203
1204 #ifdef CONFIG_COMPAT
1205 static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
1206 unsigned long arg)
1207 {
1208 return -ENODEV;
1209 }
1210 #endif
1211 #endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
1212
1213 static __poll_t subdev_poll(struct file *file, poll_table *wait)
1214 {
1215 struct video_device *vdev = video_devdata(file);
1216 struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
1217 struct v4l2_fh *fh = file_to_v4l2_fh(file);
1218
1219 if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
1220 return EPOLLERR;
1221
1222 poll_wait(file, &fh->wait, wait);
1223
1224 if (v4l2_event_pending(fh))
1225 return EPOLLPRI;
1226
1227 return 0;
1228 }
1229
1230 const struct v4l2_file_operations v4l2_subdev_fops = {
1231 .owner = THIS_MODULE,
1232 .open = subdev_open,
1233 .unlocked_ioctl = subdev_ioctl,
1234 #ifdef CONFIG_COMPAT
1235 .compat_ioctl32 = subdev_compat_ioctl32,
1236 #endif
1237 .release = subdev_close,
1238 .poll = subdev_poll,
1239 };
1240
1241 #ifdef CONFIG_MEDIA_CONTROLLER
1242
1243 int v4l2_subdev_get_fwnode_pad_1_to_1(struct media_entity *entity,
1244 struct fwnode_endpoint *endpoint)
1245 {
1246 struct fwnode_handle *fwnode;
1247 struct v4l2_subdev *sd;
1248
1249 if (!is_media_entity_v4l2_subdev(entity))
1250 return -EINVAL;
1251
1252 sd = media_entity_to_v4l2_subdev(entity);
1253
1254 fwnode = fwnode_graph_get_port_parent(endpoint->local_fwnode);
1255 fwnode_handle_put(fwnode);
1256
1257 if (device_match_fwnode(sd->dev, fwnode))
1258 return endpoint->port;
1259
1260 return -ENXIO;
1261 }
1262 EXPORT_SYMBOL_GPL(v4l2_subdev_get_fwnode_pad_1_to_1);
1263
1264 int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
1265 struct media_link *link,
1266 struct v4l2_subdev_format *source_fmt,
1267 struct v4l2_subdev_format *sink_fmt)
1268 {
1269 bool pass = true;
1270
1271 /* The width, height and code must match. */
1272 if (source_fmt->format.width != sink_fmt->format.width) {
1273 dev_dbg(sd->entity.graph_obj.mdev->dev,
1274 "%s: width does not match (source %u, sink %u)\n",
1275 __func__,
1276 source_fmt->format.width, sink_fmt->format.width);
1277 pass = false;
1278 }
1279
1280 if (source_fmt->format.height != sink_fmt->format.height) {
1281 dev_dbg(sd->entity.graph_obj.mdev->dev,
1282 "%s: height does not match (source %u, sink %u)\n",
1283 __func__,
1284 source_fmt->format.height, sink_fmt->format.height);
1285 pass = false;
1286 }
1287
1288 if (source_fmt->format.code != sink_fmt->format.code) {
1289 dev_dbg(sd->entity.graph_obj.mdev->dev,
1290 "%s: media bus code does not match (source 0x%8.8x, sink 0x%8.8x)\n",
1291 __func__,
1292 source_fmt->format.code, sink_fmt->format.code);
1293 pass = false;
1294 }
1295
1296 /* The field order must match, or the sink field order must be NONE
1297 * to support interlaced hardware connected to bridges that support
1298 * progressive formats only.
1299 */
1300 if (source_fmt->format.field != sink_fmt->format.field &&
1301 sink_fmt->format.field != V4L2_FIELD_NONE) {
1302 dev_dbg(sd->entity.graph_obj.mdev->dev,
1303 "%s: field does not match (source %u, sink %u)\n",
1304 __func__,
1305 source_fmt->format.field, sink_fmt->format.field);
1306 pass = false;
1307 }
1308
1309 if (pass)
1310 return 0;
1311
1312 dev_dbg(sd->entity.graph_obj.mdev->dev,
1313 "%s: link was \"%s\":%u -> \"%s\":%u\n", __func__,
1314 link->source->entity->name, link->source->index,
1315 link->sink->entity->name, link->sink->index);
1316
1317 return -EPIPE;
1318 }
1319 EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate_default);
1320
1321 static int
1322 v4l2_subdev_link_validate_get_format(struct media_pad *pad, u32 stream,
1323 struct v4l2_subdev_format *fmt,
1324 bool states_locked)
1325 {
1326 struct v4l2_subdev_state *state;
1327 struct v4l2_subdev *sd;
1328 int ret;
1329
1330 sd = media_entity_to_v4l2_subdev(pad->entity);
1331
1332 fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
1333 fmt->pad = pad->index;
1334 fmt->stream = stream;
1335
1336 if (states_locked)
1337 state = v4l2_subdev_get_locked_active_state(sd);
1338 else
1339 state = v4l2_subdev_lock_and_get_active_state(sd);
1340
1341 ret = v4l2_subdev_call(sd, pad, get_fmt, state, fmt);
1342
1343 if (!states_locked && state)
1344 v4l2_subdev_unlock_state(state);
1345
1346 return ret;
1347 }
1348
1349 #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
1350
1351 static void __v4l2_link_validate_get_streams(struct media_pad *pad,
1352 u64 *streams_mask,
1353 bool states_locked)
1354 {
1355 struct v4l2_subdev_route *route;
1356 struct v4l2_subdev_state *state;
1357 struct v4l2_subdev *subdev;
1358
1359 subdev = media_entity_to_v4l2_subdev(pad->entity);
1360
1361 *streams_mask = 0;
1362
1363 if (states_locked)
1364 state = v4l2_subdev_get_locked_active_state(subdev);
1365 else
1366 state = v4l2_subdev_lock_and_get_active_state(subdev);
1367
1368 if (WARN_ON(!state))
1369 return;
1370
1371 for_each_active_route(&state->routing, route) {
1372 u32 route_pad;
1373 u32 route_stream;
1374
1375 if (pad->flags & MEDIA_PAD_FL_SOURCE) {
1376 route_pad = route->source_pad;
1377 route_stream = route->source_stream;
1378 } else {
1379 route_pad = route->sink_pad;
1380 route_stream = route->sink_stream;
1381 }
1382
1383 if (route_pad != pad->index)
1384 continue;
1385
1386 *streams_mask |= BIT_ULL(route_stream);
1387 }
1388
1389 if (!states_locked)
1390 v4l2_subdev_unlock_state(state);
1391 }
1392
1393 #endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
1394
1395 static void v4l2_link_validate_get_streams(struct media_pad *pad,
1396 u64 *streams_mask,
1397 bool states_locked)
1398 {
1399 struct v4l2_subdev *subdev = media_entity_to_v4l2_subdev(pad->entity);
1400
1401 if (!(subdev->flags & V4L2_SUBDEV_FL_STREAMS)) {
1402 /* Non-streams subdevs have an implicit stream 0 */
1403 *streams_mask = BIT_ULL(0);
1404 return;
1405 }
1406
1407 #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
1408 __v4l2_link_validate_get_streams(pad, streams_mask, states_locked);
1409 #else
1410 /* This shouldn't happen */
1411 *streams_mask = 0;
1412 #endif
1413 }
1414
1415 static int v4l2_subdev_link_validate_locked(struct media_link *link, bool states_locked)
1416 {
1417 struct v4l2_subdev *sink_subdev =
1418 media_entity_to_v4l2_subdev(link->sink->entity);
1419 struct device *dev = sink_subdev->entity.graph_obj.mdev->dev;
1420 u64 source_streams_mask;
1421 u64 sink_streams_mask;
1422 u64 dangling_sink_streams;
1423 u32 stream;
1424 int ret;
1425
1426 dev_dbg(dev, "validating link \"%s\":%u -> \"%s\":%u\n",
1427 link->source->entity->name, link->source->index,
1428 link->sink->entity->name, link->sink->index);
1429
1430 v4l2_link_validate_get_streams(link->source, &source_streams_mask, states_locked);
1431 v4l2_link_validate_get_streams(link->sink, &sink_streams_mask, states_locked);
1432
1433 /*
1434 * It is ok to have more source streams than sink streams as extra
1435 * source streams can just be ignored by the receiver, but having extra
1436 * sink streams is an error as streams must have a source.
1437 */
1438 dangling_sink_streams = (source_streams_mask ^ sink_streams_mask) &
1439 sink_streams_mask;
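/*
 * Equivalently, dangling_sink_streams = sink_streams_mask &
 * ~source_streams_mask. Worked example: source mask 0b0111 and sink mask
 * 0b1010 leave sink stream 3 dangling (0b1000), which is rejected below.
 */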
1440 if (dangling_sink_streams) {
1441 dev_err(dev, "Dangling sink streams: mask %#llx\n",
1442 dangling_sink_streams);
1443 return -EINVAL;
1444 }
1445
1446 /* Validate source and sink stream formats */
1447
1448 for (stream = 0; stream < sizeof(sink_streams_mask) * 8; ++stream) {
1449 struct v4l2_subdev_format sink_fmt, source_fmt;
1450
1451 if (!(sink_streams_mask & BIT_ULL(stream)))
1452 continue;
1453
1454 dev_dbg(dev, "validating stream \"%s\":%u:%u -> \"%s\":%u:%u\n",
1455 link->source->entity->name, link->source->index, stream,
1456 link->sink->entity->name, link->sink->index, stream);
1457
1458 ret = v4l2_subdev_link_validate_get_format(link->source, stream,
1459 &source_fmt, states_locked);
1460 if (ret < 0) {
1461 dev_dbg(dev,
1462 "Failed to get format for \"%s\":%u:%u (but that's ok)\n",
1463 link->source->entity->name, link->source->index,
1464 stream);
1465 continue;
1466 }
1467
1468 ret = v4l2_subdev_link_validate_get_format(link->sink, stream,
1469 &sink_fmt, states_locked);
1470 if (ret < 0) {
1471 dev_dbg(dev,
1472 "Failed to get format for \"%s\":%u:%u (but that's ok)\n",
1473 link->sink->entity->name, link->sink->index,
1474 stream);
1475 continue;
1476 }
1477
1478 /* TODO: add stream number to link_validate() */
1479 ret = v4l2_subdev_call(sink_subdev, pad, link_validate, link,
1480 &source_fmt, &sink_fmt);
1481 if (!ret)
1482 continue;
1483
1484 if (ret != -ENOIOCTLCMD)
1485 return ret;
1486
1487 ret = v4l2_subdev_link_validate_default(sink_subdev, link,
1488 &source_fmt, &sink_fmt);
1489
1490 if (ret)
1491 return ret;
1492 }
1493
1494 return 0;
1495 }
1496
1497 int v4l2_subdev_link_validate(struct media_link *link)
1498 {
1499 struct v4l2_subdev *source_sd, *sink_sd;
1500 struct v4l2_subdev_state *source_state, *sink_state;
1501 bool states_locked;
1502 int ret;
1503
1504 /*
1505 * Links are validated in the context of the sink entity. Usage of this
1506 * helper on a sink that is not a subdev is a clear driver bug.
1507 */
1508 if (WARN_ON_ONCE(!is_media_entity_v4l2_subdev(link->sink->entity)))
1509 return -EINVAL;
1510
1511 /*
1512 * If the source is a video device, delegate link validation to it. This
1513 * allows usage of this helper for a subdev connected to a video output
1514 * device, provided that the driver implements the video output device's
1515 * .link_validate() operation.
1516 */
1517 if (is_media_entity_v4l2_video_device(link->source->entity)) {
1518 struct media_entity *source = link->source->entity;
1519
1520 if (!source->ops || !source->ops->link_validate) {
1521 /*
1522 * Many existing drivers do not implement the required
1523 * .link_validate() operation for their video devices.
1524 * Print a warning to get the drivers fixed, and return
1525 * 0 to avoid breaking userspace. This should
1526 * eventually be turned into a WARN_ON() once all
1527 * drivers have been fixed.
1528 */
1529 pr_warn_once("video device '%s' does not implement .link_validate(), driver bug!\n",
1530 source->name);
1531 return 0;
1532 }
1533
1534 /*
1535 * Avoid infinite loops in case a video device incorrectly uses
1536 * this helper function as its .link_validate() handler.
1537 */
1538 if (WARN_ON(source->ops->link_validate == v4l2_subdev_link_validate))
1539 return -EINVAL;
1540
1541 return source->ops->link_validate(link);
1542 }
1543
1544 /*
1545 * If the source is still not a subdev, usage of this helper is a clear
1546 * driver bug.
1547 */
1548 if (WARN_ON(!is_media_entity_v4l2_subdev(link->source->entity)))
1549 return -EINVAL;
1550
1551 sink_sd = media_entity_to_v4l2_subdev(link->sink->entity);
1552 source_sd = media_entity_to_v4l2_subdev(link->source->entity);
1553
1554 sink_state = v4l2_subdev_get_unlocked_active_state(sink_sd);
1555 source_state = v4l2_subdev_get_unlocked_active_state(source_sd);
1556
1557 states_locked = sink_state && source_state;
1558
1559 if (states_locked)
1560 v4l2_subdev_lock_states(sink_state, source_state);
1561
1562 ret = v4l2_subdev_link_validate_locked(link, states_locked);
1563
1564 if (states_locked)
1565 v4l2_subdev_unlock_states(sink_state, source_state);
1566
1567 return ret;
1568 }
1569 EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate);
1570
1571 bool v4l2_subdev_has_pad_interdep(struct media_entity *entity,
1572 unsigned int pad0, unsigned int pad1)
1573 {
1574 struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
1575 struct v4l2_subdev_krouting *routing;
1576 struct v4l2_subdev_state *state;
1577 unsigned int i;
1578
1579 state = v4l2_subdev_lock_and_get_active_state(sd);
1580
1581 routing = &state->routing;
1582
1583 for (i = 0; i < routing->num_routes; ++i) {
1584 struct v4l2_subdev_route *route = &routing->routes[i];
1585
1586 if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
1587 continue;
1588
1589 if ((route->sink_pad == pad0 && route->source_pad == pad1) ||
1590 (route->source_pad == pad0 && route->sink_pad == pad1)) {
1591 v4l2_subdev_unlock_state(state);
1592 return true;
1593 }
1594 }
1595
1596 v4l2_subdev_unlock_state(state);
1597
1598 return false;
1599 }
1600 EXPORT_SYMBOL_GPL(v4l2_subdev_has_pad_interdep);
1601
1602 struct v4l2_subdev_state *
1603 __v4l2_subdev_state_alloc(struct v4l2_subdev *sd, const char *lock_name,
1604 struct lock_class_key *lock_key)
1605 {
1606 struct v4l2_subdev_state *state;
1607 int ret;
1608
1609 state = kzalloc(sizeof(*state), GFP_KERNEL);
1610 if (!state)
1611 return ERR_PTR(-ENOMEM);
1612
1613 __mutex_init(&state->_lock, lock_name, lock_key);
1614 if (sd->state_lock)
1615 state->lock = sd->state_lock;
1616 else
1617 state->lock = &state->_lock;
1618
1619 state->sd = sd;
1620
1621 /* Drivers that support streams do not need the legacy pad config */
1622 if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS) && sd->entity.num_pads) {
1623 state->pads = kvcalloc(sd->entity.num_pads,
1624 sizeof(*state->pads), GFP_KERNEL);
1625 if (!state->pads) {
1626 ret = -ENOMEM;
1627 goto err;
1628 }
1629 }
1630
1631 if (sd->internal_ops && sd->internal_ops->init_state) {
1632 /*
1633 * There can be no race at this point, but we lock the state
1634 * anyway to satisfy lockdep checks.
1635 */
1636 v4l2_subdev_lock_state(state);
1637 ret = sd->internal_ops->init_state(sd, state);
1638 v4l2_subdev_unlock_state(state);
1639
1640 if (ret)
1641 goto err;
1642 }
1643
1644 return state;
1645
1646 err:
1647 if (state && state->pads)
1648 kvfree(state->pads);
1649
1650 kfree(state);
1651
1652 return ERR_PTR(ret);
1653 }
1654 EXPORT_SYMBOL_GPL(__v4l2_subdev_state_alloc);
1655
1656 void __v4l2_subdev_state_free(struct v4l2_subdev_state *state)
1657 {
1658 if (!state)
1659 return;
1660
1661 mutex_destroy(&state->_lock);
1662
1663 kfree(state->routing.routes);
1664 kvfree(state->stream_configs.configs);
1665 kvfree(state->pads);
1666 kfree(state);
1667 }
1668 EXPORT_SYMBOL_GPL(__v4l2_subdev_state_free);
1669
1670 int __v4l2_subdev_init_finalize(struct v4l2_subdev *sd, const char *name,
1671 struct lock_class_key *key)
1672 {
1673 struct v4l2_subdev_state *state;
1674 struct device *dev = sd->dev;
1675 bool has_disable_streams;
1676 bool has_enable_streams;
1677 bool has_s_stream;
1678
1679 /* Check that the subdevice implements the required features */
1680
1681 has_s_stream = v4l2_subdev_has_op(sd, video, s_stream);
1682 has_enable_streams = v4l2_subdev_has_op(sd, pad, enable_streams);
1683 has_disable_streams = v4l2_subdev_has_op(sd, pad, disable_streams);
1684
1685 if (has_enable_streams != has_disable_streams) {
1686 dev_err(dev,
1687 "subdev '%s' must implement both or neither of .enable_streams() and .disable_streams()\n",
1688 sd->name);
1689 return -EINVAL;
1690 }
1691
1692 if (sd->flags & V4L2_SUBDEV_FL_STREAMS) {
1693 if (has_s_stream && !has_enable_streams) {
1694 dev_err(dev,
1695 "subdev '%s' must implement .enable/disable_streams()\n",
1696 sd->name);
1697
1698 return -EINVAL;
1699 }
1700 }
1701
1702 if (sd->ctrl_handler)
1703 sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
1704
1705 state = __v4l2_subdev_state_alloc(sd, name, key);
1706 if (IS_ERR(state))
1707 return PTR_ERR(state);
1708
1709 sd->active_state = state;
1710
1711 return 0;
1712 }
1713 EXPORT_SYMBOL_GPL(__v4l2_subdev_init_finalize);
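/*
 * Note: drivers do not usually call __v4l2_subdev_init_finalize() directly;
 * the v4l2_subdev_init_finalize(sd) macro in <media/v4l2-subdev.h> wraps it,
 * providing a static struct lock_class_key and a lock name so that lockdep
 * can tell the state locks of different drivers apart. A simplified sketch
 * of such a wrapper:
 *
 *	#define v4l2_subdev_init_finalize(sd)				\
 *	({								\
 *		static struct lock_class_key __key;			\
 *		__v4l2_subdev_init_finalize(sd, "sd->active_state->lock", \
 *					    &__key);			\
 *	})
 */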
1714
1715 void v4l2_subdev_cleanup(struct v4l2_subdev *sd)
1716 {
1717 struct v4l2_async_subdev_endpoint *ase, *ase_tmp;
1718
1719 __v4l2_subdev_state_free(sd->active_state);
1720 sd->active_state = NULL;
1721
1722 /* Uninitialised sub-device, bail out here. */
1723 if (!sd->async_subdev_endpoint_list.next)
1724 return;
1725
1726 list_for_each_entry_safe(ase, ase_tmp, &sd->async_subdev_endpoint_list,
1727 async_subdev_endpoint_entry) {
1728 list_del(&ase->async_subdev_endpoint_entry);
1729
1730 kfree(ase);
1731 }
1732 }
1733 EXPORT_SYMBOL_GPL(v4l2_subdev_cleanup);
1734
1735 struct v4l2_mbus_framefmt *
1736 __v4l2_subdev_state_get_format(struct v4l2_subdev_state *state,
1737 unsigned int pad, u32 stream)
1738 {
1739 struct v4l2_subdev_stream_configs *stream_configs;
1740 unsigned int i;
1741
1742 if (WARN_ON_ONCE(!state))
1743 return NULL;
1744
1745 if (state->pads) {
1746 if (stream)
1747 return NULL;
1748
1749 if (pad >= state->sd->entity.num_pads)
1750 return NULL;
1751
1752 return &state->pads[pad].format;
1753 }
1754
1755 lockdep_assert_held(state->lock);
1756
1757 stream_configs = &state->stream_configs;
1758
1759 for (i = 0; i < stream_configs->num_configs; ++i) {
1760 if (stream_configs->configs[i].pad == pad &&
1761 stream_configs->configs[i].stream == stream)
1762 return &stream_configs->configs[i].fmt;
1763 }
1764
1765 return NULL;
1766 }
1767 EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_format);
1768
1769 struct v4l2_rect *
1770 __v4l2_subdev_state_get_crop(struct v4l2_subdev_state *state, unsigned int pad,
1771 u32 stream)
1772 {
1773 struct v4l2_subdev_stream_configs *stream_configs;
1774 unsigned int i;
1775
1776 if (WARN_ON_ONCE(!state))
1777 return NULL;
1778
1779 if (state->pads) {
1780 if (stream)
1781 return NULL;
1782
1783 if (pad >= state->sd->entity.num_pads)
1784 return NULL;
1785
1786 return &state->pads[pad].crop;
1787 }
1788
1789 lockdep_assert_held(state->lock);
1790
1791 stream_configs = &state->stream_configs;
1792
1793 for (i = 0; i < stream_configs->num_configs; ++i) {
1794 if (stream_configs->configs[i].pad == pad &&
1795 stream_configs->configs[i].stream == stream)
1796 return &stream_configs->configs[i].crop;
1797 }
1798
1799 return NULL;
1800 }
1801 EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_crop);
1802
1803 struct v4l2_rect *
1804 __v4l2_subdev_state_get_compose(struct v4l2_subdev_state *state,
1805 unsigned int pad, u32 stream)
1806 {
1807 struct v4l2_subdev_stream_configs *stream_configs;
1808 unsigned int i;
1809
1810 if (WARN_ON_ONCE(!state))
1811 return NULL;
1812
1813 if (state->pads) {
1814 if (stream)
1815 return NULL;
1816
1817 if (pad >= state->sd->entity.num_pads)
1818 return NULL;
1819
1820 return &state->pads[pad].compose;
1821 }
1822
1823 lockdep_assert_held(state->lock);
1824
1825 stream_configs = &state->stream_configs;
1826
1827 for (i = 0; i < stream_configs->num_configs; ++i) {
1828 if (stream_configs->configs[i].pad == pad &&
1829 stream_configs->configs[i].stream == stream)
1830 return &stream_configs->configs[i].compose;
1831 }
1832
1833 return NULL;
1834 }
1835 EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_compose);
1836
1837 struct v4l2_fract *
__v4l2_subdev_state_get_interval(struct v4l2_subdev_state * state,unsigned int pad,u32 stream)1838 __v4l2_subdev_state_get_interval(struct v4l2_subdev_state *state,
1839 unsigned int pad, u32 stream)
1840 {
1841 struct v4l2_subdev_stream_configs *stream_configs;
1842 unsigned int i;
1843
1844 if (WARN_ON(!state))
1845 return NULL;
1846
1847 lockdep_assert_held(state->lock);
1848
1849 if (state->pads) {
1850 if (stream)
1851 return NULL;
1852
1853 if (pad >= state->sd->entity.num_pads)
1854 return NULL;
1855
1856 return &state->pads[pad].interval;
1857 }
1858
1859 lockdep_assert_held(state->lock);
1860
1861 stream_configs = &state->stream_configs;
1862
1863 for (i = 0; i < stream_configs->num_configs; ++i) {
1864 if (stream_configs->configs[i].pad == pad &&
1865 stream_configs->configs[i].stream == stream)
1866 return &stream_configs->configs[i].interval;
1867 }
1868
1869 return NULL;
1870 }
1871 EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_interval);
1872
1873 #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
1874
1875 static int
v4l2_subdev_init_stream_configs(struct v4l2_subdev_stream_configs * stream_configs,const struct v4l2_subdev_krouting * routing)1876 v4l2_subdev_init_stream_configs(struct v4l2_subdev_stream_configs *stream_configs,
1877 const struct v4l2_subdev_krouting *routing)
1878 {
1879 struct v4l2_subdev_stream_configs new_configs = { 0 };
1880 struct v4l2_subdev_route *route;
1881 u32 idx;
1882
1883 /* Count number of formats needed */
1884 for_each_active_route(routing, route) {
1885 /*
1886 * Each route needs a format on both ends of the route.
1887 */
1888 new_configs.num_configs += 2;
1889 }
1890
1891 if (new_configs.num_configs) {
1892 new_configs.configs = kvcalloc(new_configs.num_configs,
1893 sizeof(*new_configs.configs),
1894 GFP_KERNEL);
1895
1896 if (!new_configs.configs)
1897 return -ENOMEM;
1898 }
1899
1900 /*
1901 * Fill in the 'pad' and stream' value for each item in the array from
1902 * the routing table
1903 */
1904 idx = 0;
1905
1906 for_each_active_route(routing, route) {
1907 new_configs.configs[idx].pad = route->sink_pad;
1908 new_configs.configs[idx].stream = route->sink_stream;
1909
1910 idx++;
1911
1912 new_configs.configs[idx].pad = route->source_pad;
1913 new_configs.configs[idx].stream = route->source_stream;
1914
1915 idx++;
1916 }
1917
1918 kvfree(stream_configs->configs);
1919 *stream_configs = new_configs;
1920
1921 return 0;
1922 }
1923
v4l2_subdev_get_fmt(struct v4l2_subdev * sd,struct v4l2_subdev_state * state,struct v4l2_subdev_format * format)1924 int v4l2_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
1925 struct v4l2_subdev_format *format)
1926 {
1927 struct v4l2_mbus_framefmt *fmt;
1928
1929 fmt = v4l2_subdev_state_get_format(state, format->pad, format->stream);
1930 if (!fmt)
1931 return -EINVAL;
1932
1933 format->format = *fmt;
1934
1935 return 0;
1936 }
1937 EXPORT_SYMBOL_GPL(v4l2_subdev_get_fmt);
1938
v4l2_subdev_get_frame_interval(struct v4l2_subdev * sd,struct v4l2_subdev_state * state,struct v4l2_subdev_frame_interval * fi)1939 int v4l2_subdev_get_frame_interval(struct v4l2_subdev *sd,
1940 struct v4l2_subdev_state *state,
1941 struct v4l2_subdev_frame_interval *fi)
1942 {
1943 struct v4l2_fract *interval;
1944
1945 interval = v4l2_subdev_state_get_interval(state, fi->pad, fi->stream);
1946 if (!interval)
1947 return -EINVAL;
1948
1949 fi->interval = *interval;
1950
1951 return 0;
1952 }
1953 EXPORT_SYMBOL_GPL(v4l2_subdev_get_frame_interval);
1954
v4l2_subdev_set_routing(struct v4l2_subdev * sd,struct v4l2_subdev_state * state,const struct v4l2_subdev_krouting * routing)1955 int v4l2_subdev_set_routing(struct v4l2_subdev *sd,
1956 struct v4l2_subdev_state *state,
1957 const struct v4l2_subdev_krouting *routing)
1958 {
1959 struct v4l2_subdev_krouting *dst = &state->routing;
1960 const struct v4l2_subdev_krouting *src = routing;
1961 struct v4l2_subdev_krouting new_routing = { 0 };
1962 size_t bytes;
1963 int r;
1964
1965 if (unlikely(check_mul_overflow((size_t)src->num_routes,
1966 sizeof(*src->routes), &bytes)))
1967 return -EOVERFLOW;
1968
1969 lockdep_assert_held(state->lock);
1970
1971 if (src->num_routes > 0) {
1972 new_routing.routes = kmemdup(src->routes, bytes, GFP_KERNEL);
1973 if (!new_routing.routes)
1974 return -ENOMEM;
1975 }
1976
1977 new_routing.num_routes = src->num_routes;
1978
1979 r = v4l2_subdev_init_stream_configs(&state->stream_configs,
1980 &new_routing);
1981 if (r) {
1982 kfree(new_routing.routes);
1983 return r;
1984 }
1985
1986 kfree(dst->routes);
1987 *dst = new_routing;
1988
1989 return 0;
1990 }
1991 EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing);
1992
1993 struct v4l2_subdev_route *
__v4l2_subdev_next_active_route(const struct v4l2_subdev_krouting * routing,struct v4l2_subdev_route * route)1994 __v4l2_subdev_next_active_route(const struct v4l2_subdev_krouting *routing,
1995 struct v4l2_subdev_route *route)
1996 {
1997 if (route)
1998 ++route;
1999 else
2000 route = &routing->routes[0];
2001
2002 for (; route < routing->routes + routing->num_routes; ++route) {
2003 if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
2004 continue;
2005
2006 return route;
2007 }
2008
2009 return NULL;
2010 }
2011 EXPORT_SYMBOL_GPL(__v4l2_subdev_next_active_route);
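
/*
 * Illustrative sketch, not part of the original file: for_each_active_route()
 * (which wraps __v4l2_subdev_next_active_route()) iterating over a state's
 * routing table to build a mask of active streams on a given source pad.
 * The "example_" name is hypothetical.
 */
static u64 example_active_source_streams(struct v4l2_subdev_state *state,
					 u32 source_pad)
{
	struct v4l2_subdev_route *route;
	u64 mask = 0;

	for_each_active_route(&state->routing, route)
		if (route->source_pad == source_pad)
			mask |= BIT_ULL(route->source_stream);

	return mask;
}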

int v4l2_subdev_set_routing_with_fmt(struct v4l2_subdev *sd,
				     struct v4l2_subdev_state *state,
				     const struct v4l2_subdev_krouting *routing,
				     const struct v4l2_mbus_framefmt *fmt)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;
	int ret;

	ret = v4l2_subdev_set_routing(sd, state, routing);
	if (ret)
		return ret;

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i)
		stream_configs->configs[i].fmt = *fmt;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing_with_fmt);
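
/*
 * Illustrative sketch, not part of the original file: the tail of a driver's
 * .set_routing() handler, applying a validated routing table and resetting
 * every stream format to a hypothetical driver default in a single call.
 * The "example_" names and the default format values are assumptions.
 */
static int example_apply_routing(struct v4l2_subdev *sd,
				 struct v4l2_subdev_state *state,
				 const struct v4l2_subdev_krouting *routing)
{
	static const struct v4l2_mbus_framefmt example_default_fmt = {
		.width = 1920,
		.height = 1080,
		.code = MEDIA_BUS_FMT_UYVY8_1X16,
		.field = V4L2_FIELD_NONE,
		.colorspace = V4L2_COLORSPACE_SRGB,
	};

	return v4l2_subdev_set_routing_with_fmt(sd, state, routing,
						&example_default_fmt);
}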

int v4l2_subdev_routing_find_opposite_end(const struct v4l2_subdev_krouting *routing,
					  u32 pad, u32 stream, u32 *other_pad,
					  u32 *other_stream)
{
	unsigned int i;

	for (i = 0; i < routing->num_routes; ++i) {
		struct v4l2_subdev_route *route = &routing->routes[i];

		if (route->source_pad == pad &&
		    route->source_stream == stream) {
			if (other_pad)
				*other_pad = route->sink_pad;
			if (other_stream)
				*other_stream = route->sink_stream;
			return 0;
		}

		if (route->sink_pad == pad && route->sink_stream == stream) {
			if (other_pad)
				*other_pad = route->source_pad;
			if (other_stream)
				*other_stream = route->source_stream;
			return 0;
		}
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_routing_find_opposite_end);

struct v4l2_mbus_framefmt *
v4l2_subdev_state_get_opposite_stream_format(struct v4l2_subdev_state *state,
					     u32 pad, u32 stream)
{
	u32 other_pad, other_stream;
	int ret;

	ret = v4l2_subdev_routing_find_opposite_end(&state->routing,
						    pad, stream,
						    &other_pad, &other_stream);
	if (ret)
		return NULL;

	return v4l2_subdev_state_get_format(state, other_pad, other_stream);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_opposite_stream_format);
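
/*
 * Illustrative sketch, not part of the original file: propagating a sink
 * stream's format to the source end of its route, a common step in a
 * streams-aware .set_fmt() handler. The "example_" name is hypothetical.
 */
static int example_propagate_fmt(struct v4l2_subdev_state *state,
				 u32 sink_pad, u32 sink_stream,
				 const struct v4l2_mbus_framefmt *fmt)
{
	struct v4l2_mbus_framefmt *source_fmt;

	source_fmt = v4l2_subdev_state_get_opposite_stream_format(state,
								  sink_pad,
								  sink_stream);
	if (!source_fmt)
		return -EINVAL;

	*source_fmt = *fmt;

	return 0;
}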

u64 v4l2_subdev_state_xlate_streams(const struct v4l2_subdev_state *state,
				    u32 pad0, u32 pad1, u64 *streams)
{
	const struct v4l2_subdev_krouting *routing = &state->routing;
	struct v4l2_subdev_route *route;
	u64 streams0 = 0;
	u64 streams1 = 0;

	for_each_active_route(routing, route) {
		if (route->sink_pad == pad0 && route->source_pad == pad1 &&
		    (*streams & BIT_ULL(route->sink_stream))) {
			streams0 |= BIT_ULL(route->sink_stream);
			streams1 |= BIT_ULL(route->source_stream);
		}
		if (route->source_pad == pad0 && route->sink_pad == pad1 &&
		    (*streams & BIT_ULL(route->source_stream))) {
			streams0 |= BIT_ULL(route->source_stream);
			streams1 |= BIT_ULL(route->sink_stream);
		}
	}

	*streams = streams0;
	return streams1;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_xlate_streams);
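
/*
 * Illustrative sketch, not part of the original file: inside a hypothetical
 * .enable_streams() handler, translating the streams requested on the
 * source pad into the corresponding sink streams that must be fetched
 * upstream. The "example_" name is an assumption.
 */
static u64 example_sink_streams(struct v4l2_subdev_state *state,
				u32 sink_pad, u32 source_pad,
				u64 source_streams)
{
	u64 streams = source_streams;

	/*
	 * On return, @streams holds the subset of @source_streams actually
	 * routed between the two pads; the return value is the matching
	 * stream mask on @sink_pad.
	 */
	return v4l2_subdev_state_xlate_streams(state, source_pad, sink_pad,
					       &streams);
}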

int v4l2_subdev_routing_validate(struct v4l2_subdev *sd,
				 const struct v4l2_subdev_krouting *routing,
				 enum v4l2_subdev_routing_restriction disallow)
{
	u32 *remote_pads = NULL;
	unsigned int i, j;
	int ret = -EINVAL;

	if (disallow & (V4L2_SUBDEV_ROUTING_NO_STREAM_MIX |
			V4L2_SUBDEV_ROUTING_NO_MULTIPLEXING)) {
		remote_pads = kcalloc(sd->entity.num_pads, sizeof(*remote_pads),
				      GFP_KERNEL);
		if (!remote_pads)
			return -ENOMEM;

		for (i = 0; i < sd->entity.num_pads; ++i)
			remote_pads[i] = U32_MAX;
	}

	for (i = 0; i < routing->num_routes; ++i) {
		const struct v4l2_subdev_route *route = &routing->routes[i];

		/* Validate the sink and source pad numbers. */
		if (route->sink_pad >= sd->entity.num_pads ||
		    !(sd->entity.pads[route->sink_pad].flags & MEDIA_PAD_FL_SINK)) {
			dev_dbg(sd->dev, "route %u sink (%u) is not a sink pad\n",
				i, route->sink_pad);
			goto out;
		}

		if (route->source_pad >= sd->entity.num_pads ||
		    !(sd->entity.pads[route->source_pad].flags & MEDIA_PAD_FL_SOURCE)) {
			dev_dbg(sd->dev, "route %u source (%u) is not a source pad\n",
				i, route->source_pad);
			goto out;
		}

		/*
		 * V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX: all streams from a
		 * sink pad must be routed to a single source pad.
		 */
		if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX) {
			if (remote_pads[route->sink_pad] != U32_MAX &&
			    remote_pads[route->sink_pad] != route->source_pad) {
				dev_dbg(sd->dev,
					"route %u attempts to mix %s streams\n",
					i, "sink");
				goto out;
			}
		}

		/*
		 * V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX: all streams on a
		 * source pad must originate from a single sink pad.
		 */
		if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX) {
			if (remote_pads[route->source_pad] != U32_MAX &&
			    remote_pads[route->source_pad] != route->sink_pad) {
				dev_dbg(sd->dev,
					"route %u attempts to mix %s streams\n",
					i, "source");
				goto out;
			}
		}

		/*
		 * V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING: pads on the sink
		 * side cannot do stream multiplexing, i.e. there can be only
		 * a single stream in a sink pad.
		 */
		if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING) {
			if (remote_pads[route->sink_pad] != U32_MAX) {
				dev_dbg(sd->dev,
					"route %u attempts to multiplex on %s pad %u\n",
					i, "sink", route->sink_pad);
				goto out;
			}
		}

		/*
		 * V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING: pads on the
		 * source side cannot do stream multiplexing, i.e. there can
		 * be only a single stream in a source pad.
		 */
		if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING) {
			if (remote_pads[route->source_pad] != U32_MAX) {
				dev_dbg(sd->dev,
					"route %u attempts to multiplex on %s pad %u\n",
					i, "source", route->source_pad);
				goto out;
			}
		}

		if (remote_pads) {
			remote_pads[route->sink_pad] = route->source_pad;
			remote_pads[route->source_pad] = route->sink_pad;
		}

		for (j = i + 1; j < routing->num_routes; ++j) {
			const struct v4l2_subdev_route *r = &routing->routes[j];

			/*
			 * V4L2_SUBDEV_ROUTING_NO_1_TO_N: no two routes can
			 * originate from the same (sink) stream.
			 */
			if ((disallow & V4L2_SUBDEV_ROUTING_NO_1_TO_N) &&
			    route->sink_pad == r->sink_pad &&
			    route->sink_stream == r->sink_stream) {
				dev_dbg(sd->dev,
					"routes %u and %u originate from same sink (%u/%u)\n",
					i, j, route->sink_pad,
					route->sink_stream);
				goto out;
			}

			/*
			 * V4L2_SUBDEV_ROUTING_NO_N_TO_1: no two routes can end
			 * at the same (source) stream.
			 */
			if ((disallow & V4L2_SUBDEV_ROUTING_NO_N_TO_1) &&
			    route->source_pad == r->source_pad &&
			    route->source_stream == r->source_stream) {
				dev_dbg(sd->dev,
					"routes %u and %u end at same source (%u/%u)\n",
					i, j, route->source_pad,
					route->source_stream);
				goto out;
			}
		}
	}

	ret = 0;

out:
	kfree(remote_pads);
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_routing_validate);
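
/*
 * Illustrative sketch, not part of the original file: the validation step
 * that would precede example_apply_routing() above in a .set_routing()
 * handler, here for a hypothetical device that only supports 1:1 routes.
 */
static int example_validate_routing(struct v4l2_subdev *sd,
				    const struct v4l2_subdev_krouting *routing)
{
	return v4l2_subdev_routing_validate(sd, routing,
					    V4L2_SUBDEV_ROUTING_ONLY_1_TO_1);
}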

static void v4l2_subdev_collect_streams(struct v4l2_subdev *sd,
					struct v4l2_subdev_state *state,
					u32 pad, u64 streams_mask,
					u64 *found_streams,
					u64 *enabled_streams)
{
	if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) {
		*found_streams = BIT_ULL(0);
		*enabled_streams =
			(sd->enabled_pads & BIT_ULL(pad)) ? BIT_ULL(0) : 0;
		dev_dbg(sd->dev,
			"collect_streams: sub-device \"%s\" does not support streams\n",
			sd->entity.name);
		return;
	}

	*found_streams = 0;
	*enabled_streams = 0;

	for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) {
		const struct v4l2_subdev_stream_config *cfg =
			&state->stream_configs.configs[i];

		if (cfg->pad != pad || !(streams_mask & BIT_ULL(cfg->stream)))
			continue;

		*found_streams |= BIT_ULL(cfg->stream);
		if (cfg->enabled)
			*enabled_streams |= BIT_ULL(cfg->stream);
	}

	dev_dbg(sd->dev,
		"collect_streams: \"%s\":%u: found %#llx enabled %#llx\n",
		sd->entity.name, pad, *found_streams, *enabled_streams);
}

static void v4l2_subdev_set_streams_enabled(struct v4l2_subdev *sd,
					    struct v4l2_subdev_state *state,
					    u32 pad, u64 streams_mask,
					    bool enabled)
{
	if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) {
		if (enabled)
			sd->enabled_pads |= BIT_ULL(pad);
		else
			sd->enabled_pads &= ~BIT_ULL(pad);
		return;
	}

	for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) {
		struct v4l2_subdev_stream_config *cfg =
			&state->stream_configs.configs[i];

		if (cfg->pad == pad && (streams_mask & BIT_ULL(cfg->stream)))
			cfg->enabled = enabled;
	}
}

int v4l2_subdev_enable_streams(struct v4l2_subdev *sd, u32 pad,
			       u64 streams_mask)
{
	struct device *dev = sd->entity.graph_obj.mdev->dev;
	struct v4l2_subdev_state *state;
	bool already_streaming;
	u64 enabled_streams;
	u64 found_streams;
	bool use_s_stream;
	int ret;

	dev_dbg(dev, "enable streams \"%s\":%u/%#llx\n", sd->entity.name, pad,
		streams_mask);

	/* A few basic sanity checks first. */
	if (pad >= sd->entity.num_pads)
		return -EINVAL;

	if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
		return -EOPNOTSUPP;

	/*
	 * We use a 64-bit bitmask for tracking enabled pads, so only
	 * subdevices with 64 pads or fewer can be supported.
	 */
	if (pad >= sizeof(sd->enabled_pads) * BITS_PER_BYTE)
		return -EOPNOTSUPP;

	if (!streams_mask)
		return 0;

	/* Fall back on .s_stream() if .enable_streams() isn't available. */
	use_s_stream = !v4l2_subdev_has_op(sd, pad, enable_streams);

	if (!use_s_stream)
		state = v4l2_subdev_lock_and_get_active_state(sd);
	else
		state = NULL;

	/*
	 * Verify that the requested streams exist and that they are not
	 * already enabled.
	 */

	v4l2_subdev_collect_streams(sd, state, pad, streams_mask,
				    &found_streams, &enabled_streams);

	if (found_streams != streams_mask) {
		dev_dbg(dev, "streams %#llx not found on %s:%u\n",
			streams_mask & ~found_streams, sd->entity.name, pad);
		ret = -EINVAL;
		goto done;
	}

	if (enabled_streams) {
		dev_dbg(dev, "streams %#llx already enabled on %s:%u\n",
			enabled_streams, sd->entity.name, pad);
		ret = -EALREADY;
		goto done;
	}

	already_streaming = v4l2_subdev_is_streaming(sd);

	if (!use_s_stream) {
		/* Call the .enable_streams() operation. */
		ret = v4l2_subdev_call(sd, pad, enable_streams, state, pad,
				       streams_mask);
	} else {
		/* Start streaming when the first pad is enabled. */
		if (!already_streaming)
			ret = v4l2_subdev_call(sd, video, s_stream, 1);
		else
			ret = 0;
	}

	if (ret) {
		dev_dbg(dev, "enable streams %u:%#llx failed: %d\n", pad,
			streams_mask, ret);
		goto done;
	}

	/* Mark the streams as enabled. */
	v4l2_subdev_set_streams_enabled(sd, state, pad, streams_mask, true);

	/*
	 * TODO: When all drivers have been converted to use
	 * v4l2_subdev_enable_streams() and v4l2_subdev_disable_streams()
	 * instead of calling the .s_stream() operation directly, the privacy
	 * LED handling can be removed from call_s_stream() and done here for
	 * all cases.
	 */
	if (!use_s_stream && !already_streaming)
		v4l2_subdev_enable_privacy_led(sd);

done:
	if (!use_s_stream)
		v4l2_subdev_unlock_state(state);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_enable_streams);
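
/*
 * Illustrative sketch, not part of the original file: a bridge driver
 * starting its upstream source when capture begins. The remote pad lookup
 * assumes the link has already been validated; the "example_" name is
 * hypothetical.
 */
static int example_start_source(struct media_pad *sink_pad)
{
	struct v4l2_subdev *source_sd;
	struct media_pad *remote;

	remote = media_pad_remote_pad_first(sink_pad);
	if (!remote)
		return -EPIPE;

	source_sd = media_entity_to_v4l2_subdev(remote->entity);

	/* Enable the implicit stream 0 on the remote source pad. */
	return v4l2_subdev_enable_streams(source_sd, remote->index,
					  BIT_ULL(0));
}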

int v4l2_subdev_disable_streams(struct v4l2_subdev *sd, u32 pad,
				u64 streams_mask)
{
	struct device *dev = sd->entity.graph_obj.mdev->dev;
	struct v4l2_subdev_state *state;
	u64 enabled_streams;
	u64 found_streams;
	bool use_s_stream;
	int ret;

	dev_dbg(dev, "disable streams \"%s\":%u/%#llx\n", sd->entity.name, pad,
		streams_mask);

	/* A few basic sanity checks first. */
	if (pad >= sd->entity.num_pads)
		return -EINVAL;

	if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
		return -EOPNOTSUPP;

	/*
	 * We use a 64-bit bitmask for tracking enabled pads, so only
	 * subdevices with 64 pads or fewer can be supported.
	 */
	if (pad >= sizeof(sd->enabled_pads) * BITS_PER_BYTE)
		return -EOPNOTSUPP;

	if (!streams_mask)
		return 0;

	/* Fall back on .s_stream() if .disable_streams() isn't available. */
	use_s_stream = !v4l2_subdev_has_op(sd, pad, disable_streams);

	if (!use_s_stream)
		state = v4l2_subdev_lock_and_get_active_state(sd);
	else
		state = NULL;

	/*
	 * Verify that the requested streams exist and that they are not
	 * already disabled.
	 */

	v4l2_subdev_collect_streams(sd, state, pad, streams_mask,
				    &found_streams, &enabled_streams);

	if (found_streams != streams_mask) {
		dev_dbg(dev, "streams %#llx not found on %s:%u\n",
			streams_mask & ~found_streams, sd->entity.name, pad);
		ret = -EINVAL;
		goto done;
	}

	if (enabled_streams != streams_mask) {
		dev_dbg(dev, "streams %#llx already disabled on %s:%u\n",
			streams_mask & ~enabled_streams, sd->entity.name, pad);
		ret = -EALREADY;
		goto done;
	}

	if (!use_s_stream) {
		/* Call the .disable_streams() operation. */
		ret = v4l2_subdev_call(sd, pad, disable_streams, state, pad,
				       streams_mask);
	} else {
		/* Stop streaming when the last streams are disabled. */

		if (!(sd->enabled_pads & ~BIT_ULL(pad)))
			ret = v4l2_subdev_call(sd, video, s_stream, 0);
		else
			ret = 0;
	}

	if (ret) {
		dev_dbg(dev, "disable streams %u:%#llx failed: %d\n", pad,
			streams_mask, ret);
		goto done;
	}

	/* Mark the streams as disabled. */
	v4l2_subdev_set_streams_enabled(sd, state, pad, streams_mask, false);

done:
	if (!use_s_stream) {
		if (!v4l2_subdev_is_streaming(sd))
			v4l2_subdev_disable_privacy_led(sd);

		v4l2_subdev_unlock_state(state);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_disable_streams);

int v4l2_subdev_s_stream_helper(struct v4l2_subdev *sd, int enable)
{
	struct v4l2_subdev_state *state;
	struct v4l2_subdev_route *route;
	struct media_pad *pad;
	u64 source_mask = 0;
	int pad_index = -1;

	/*
	 * Find the source pad. This helper is meant for subdevs that have a
	 * single source pad, so failures shouldn't happen, but catch them
	 * loudly nonetheless as they indicate a driver bug.
	 */
	media_entity_for_each_pad(&sd->entity, pad) {
		if (pad->flags & MEDIA_PAD_FL_SOURCE) {
			pad_index = pad->index;
			break;
		}
	}

	if (WARN_ON(pad_index == -1))
		return -EINVAL;

	if (sd->flags & V4L2_SUBDEV_FL_STREAMS) {
		/*
		 * As there's a single source pad, just collect all the source
		 * streams.
		 */
		state = v4l2_subdev_lock_and_get_active_state(sd);

		for_each_active_route(&state->routing, route)
			source_mask |= BIT_ULL(route->source_stream);

		v4l2_subdev_unlock_state(state);
	} else {
		/*
		 * For non-streams subdevices, there's a single implicit stream
		 * per pad.
		 */
		source_mask = BIT_ULL(0);
	}

	if (enable)
		return v4l2_subdev_enable_streams(sd, pad_index, source_mask);
	else
		return v4l2_subdev_disable_streams(sd, pad_index, source_mask);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_s_stream_helper);
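
/*
 * Illustrative sketch, not part of the original file: a subdev with a single
 * source pad can simply wire its .s_stream() video operation to the helper
 * above, so legacy callers are funnelled through the streams interface.
 * The "example_" name is hypothetical.
 */
static const struct v4l2_subdev_video_ops example_video_ops = {
	.s_stream = v4l2_subdev_s_stream_helper,
};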

#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */

#endif /* CONFIG_MEDIA_CONTROLLER */

void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops)
{
	INIT_LIST_HEAD(&sd->list);
	BUG_ON(!ops);
	sd->ops = ops;
	sd->v4l2_dev = NULL;
	sd->flags = 0;
	sd->name[0] = '\0';
	sd->grp_id = 0;
	sd->dev_priv = NULL;
	sd->host_priv = NULL;
	sd->privacy_led = NULL;
	INIT_LIST_HEAD(&sd->async_subdev_endpoint_list);
#if defined(CONFIG_MEDIA_CONTROLLER)
	sd->entity.name = sd->name;
	sd->entity.obj_type = MEDIA_ENTITY_TYPE_V4L2_SUBDEV;
	sd->entity.function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
#endif
}
EXPORT_SYMBOL(v4l2_subdev_init);

void v4l2_subdev_notify_event(struct v4l2_subdev *sd,
			      const struct v4l2_event *ev)
{
	v4l2_event_queue(sd->devnode, ev);
	v4l2_subdev_notify(sd, V4L2_DEVICE_NOTIFY_EVENT, (void *)ev);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_notify_event);
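
/*
 * Illustrative sketch, not part of the original file: signalling a detected
 * resolution change to both userspace and the bridge driver with a single
 * v4l2_subdev_notify_event() call. The "example_" name is hypothetical.
 */
static void example_signal_resolution_change(struct v4l2_subdev *sd)
{
	static const struct v4l2_event ev = {
		.type = V4L2_EVENT_SOURCE_CHANGE,
		.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
	};

	v4l2_subdev_notify_event(sd, &ev);
}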

bool v4l2_subdev_is_streaming(struct v4l2_subdev *sd)
{
	struct v4l2_subdev_state *state;

	if (!v4l2_subdev_has_op(sd, pad, enable_streams))
		return sd->s_stream_enabled;

	if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
		return !!sd->enabled_pads;

	state = v4l2_subdev_get_locked_active_state(sd);

	for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) {
		const struct v4l2_subdev_stream_config *cfg;

		cfg = &state->stream_configs.configs[i];

		if (cfg->enabled)
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_is_streaming);
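
/*
 * Illustrative sketch, not part of the original file: using
 * v4l2_subdev_is_streaming() to refuse active-format changes while any
 * stream is running, a common guard in .set_fmt() handlers. Assumes the
 * caller holds the active state lock in the streams case; the "example_"
 * name is hypothetical.
 */
static int example_check_not_streaming(struct v4l2_subdev *sd,
				       const struct v4l2_subdev_format *format)
{
	if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE &&
	    v4l2_subdev_is_streaming(sd))
		return -EBUSY;

	return 0;
}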

int v4l2_subdev_get_privacy_led(struct v4l2_subdev *sd)
{
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
	sd->privacy_led = led_get(sd->dev, "privacy");
	if (IS_ERR(sd->privacy_led) && PTR_ERR(sd->privacy_led) != -ENOENT)
		return dev_err_probe(sd->dev, PTR_ERR(sd->privacy_led),
				     "getting privacy LED\n");

	if (!IS_ERR_OR_NULL(sd->privacy_led)) {
		mutex_lock(&sd->privacy_led->led_access);
		led_sysfs_disable(sd->privacy_led);
		led_trigger_remove(sd->privacy_led);
		led_set_brightness(sd->privacy_led, 0);
		mutex_unlock(&sd->privacy_led->led_access);
	}
#endif
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_get_privacy_led);

void v4l2_subdev_put_privacy_led(struct v4l2_subdev *sd)
{
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
	if (!IS_ERR_OR_NULL(sd->privacy_led)) {
		mutex_lock(&sd->privacy_led->led_access);
		led_sysfs_enable(sd->privacy_led);
		mutex_unlock(&sd->privacy_led->led_access);
		led_put(sd->privacy_led);
	}
#endif
}
EXPORT_SYMBOL_GPL(v4l2_subdev_put_privacy_led);
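
/*
 * Illustrative sketch, not part of the original file: probe/remove handling
 * of the optional privacy LED. Note that v4l2_subdev_get_privacy_led()
 * succeeds when no "privacy" LED is described for the device, so the call
 * is safe on hardware without one. The "example_" names are hypothetical.
 */
static int example_probe_privacy_led(struct v4l2_subdev *sd)
{
	return v4l2_subdev_get_privacy_led(sd);
}

static void example_remove_privacy_led(struct v4l2_subdev *sd)
{
	v4l2_subdev_put_privacy_led(sd);
}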