Selected lines from the V4L2 sub-device core, drivers/media/v4l2-core/v4l2-subdev.c; each entry gives the original source line number, the line itself, and the enclosing function.
1 // SPDX-License-Identifier: GPL-2.0-only
3 * V4L2 sub-device
23 #include <media/v4l2-ctrls.h>
24 #include <media/v4l2-device.h>
25 #include <media/v4l2-event.h>
26 #include <media/v4l2-fh.h>
27 #include <media/v4l2-ioctl.h>
30 * struct v4l2_subdev_stream_config - Used for storing stream configuration.
72 #include "v4l2-subdev-priv.h"
80 state = __v4l2_subdev_state_alloc(sd, "fh->state->lock", &key); in subdev_fh_init()
84 fh->state = state; in subdev_fh_init()
91 __v4l2_subdev_state_free(fh->state); in subdev_fh_free()
92 fh->state = NULL; in subdev_fh_free()
104 return -ENOMEM; in subdev_open()
112 v4l2_fh_init(&subdev_fh->vfh, vdev); in subdev_open()
113 v4l2_fh_add(&subdev_fh->vfh, file); in subdev_open()
115 if (sd->v4l2_dev->mdev && sd->entity.graph_obj.mdev->dev) { in subdev_open()
118 owner = sd->entity.graph_obj.mdev->dev->driver->owner; in subdev_open()
120 ret = -EBUSY; in subdev_open()
123 subdev_fh->owner = owner; in subdev_open()
126 if (sd->internal_ops && sd->internal_ops->open) { in subdev_open()
127 ret = sd->internal_ops->open(sd, subdev_fh); in subdev_open()
135 module_put(subdev_fh->owner); in subdev_open()
136 v4l2_fh_del(&subdev_fh->vfh, file); in subdev_open()
137 v4l2_fh_exit(&subdev_fh->vfh); in subdev_open()
151 if (sd->internal_ops && sd->internal_ops->close) in subdev_close()
152 sd->internal_ops->close(sd, subdev_fh); in subdev_close()
153 module_put(subdev_fh->owner); in subdev_close()
164 return -ENODEV; in subdev_open()
169 return -ENODEV; in subdev_close()
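
subdev_open() pins the module that owns the sub-device's media device and then invokes the driver's internal .open() hook; subdev_close() calls .close() and drops the module reference. A driver that needs per-file-handle setup supplies these hooks through struct v4l2_subdev_internal_ops. A minimal sketch, with the mydrv_* names purely hypothetical:

static int mydrv_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	/* Per-file-handle setup, e.g. powering the device up. */
	return 0;
}

static int mydrv_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	/* Undo whatever .open() did. */
	return 0;
}

static const struct v4l2_subdev_internal_ops mydrv_internal_ops = {
	.open = mydrv_open,
	.close = mydrv_close,
};

/* In probe: sd->internal_ops = &mydrv_internal_ops; */
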
176 if (!IS_ERR_OR_NULL(sd->privacy_led)) in v4l2_subdev_enable_privacy_led()
177 led_set_brightness(sd->privacy_led, in v4l2_subdev_enable_privacy_led()
178 sd->privacy_led->max_brightness); in v4l2_subdev_enable_privacy_led()
185 if (!IS_ERR_OR_NULL(sd->privacy_led)) in v4l2_subdev_disable_privacy_led()
186 led_set_brightness(sd->privacy_led, 0); in v4l2_subdev_disable_privacy_led()
194 return -EINVAL; in check_which()
202 if (sd->entity.num_pads) { in check_pad()
203 if (pad >= sd->entity.num_pads) in check_pad()
204 return -EINVAL; in check_pad()
210 return -EINVAL; in check_pad()
217 if (sd->flags & V4L2_SUBDEV_FL_STREAMS) { in check_state()
220 return -EINVAL; in check_state()
223 return -EINVAL; in check_state()
228 return -EINVAL; in check_state()
230 if (which == V4L2_SUBDEV_FORMAT_TRY && (!state || !state->pads)) in check_state()
231 return -EINVAL; in check_state()
241 return -EINVAL; in check_format()
243 return check_which(format->which) ? : check_pad(sd, format->pad) ? : in check_format()
244 check_state(sd, state, format->which, format->pad, format->stream); in check_format()
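
The check_*() helpers return 0 on success, which lets check_format() chain them with the GNU "a ?: b" extension: the expression evaluates to a when a is non-zero (the first failing check's error code) and to b otherwise. A standalone illustration of the idiom, not kernel code:

#include <linux/errno.h>

static int check_a(int v) { return v < 0 ? -EINVAL : 0; }
static int check_b(int v) { return v > 100 ? -ERANGE : 0; }
static int do_op(int v)   { return v * 2; }

static int checked_op(int v)
{
	/* Returns the first non-zero check result, else the result of do_op(). */
	return check_a(v) ? : check_b(v) ? : do_op(v);
}
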
252 sd->ops->pad->get_fmt(sd, state, format); in call_get_fmt()
260 sd->ops->pad->set_fmt(sd, state, format); in call_set_fmt()
268 return -EINVAL; in call_enum_mbus_code()
270 return check_which(code->which) ? : check_pad(sd, code->pad) ? : in call_enum_mbus_code()
271 check_state(sd, state, code->which, code->pad, code->stream) ? : in call_enum_mbus_code()
272 sd->ops->pad->enum_mbus_code(sd, state, code); in call_enum_mbus_code()
280 return -EINVAL; in call_enum_frame_size()
282 return check_which(fse->which) ? : check_pad(sd, fse->pad) ? : in call_enum_frame_size()
283 check_state(sd, state, fse->which, fse->pad, fse->stream) ? : in call_enum_frame_size()
284 sd->ops->pad->enum_frame_size(sd, state, fse); in call_enum_frame_size()
292 return -EINVAL; in call_enum_frame_interval()
294 return check_which(fie->which) ? : check_pad(sd, fie->pad) ? : in call_enum_frame_interval()
295 check_state(sd, state, fie->which, fie->pad, fie->stream) ? : in call_enum_frame_interval()
296 sd->ops->pad->enum_frame_interval(sd, state, fie); in call_enum_frame_interval()
304 return -EINVAL; in check_selection()
306 return check_which(sel->which) ? : check_pad(sd, sel->pad) ? : in check_selection()
307 check_state(sd, state, sel->which, sel->pad, sel->stream); in check_selection()
315 sd->ops->pad->get_selection(sd, state, sel); in call_get_selection()
323 sd->ops->pad->set_selection(sd, state, sel); in call_set_selection()
331 return -EINVAL; in check_frame_interval()
333 return check_which(fi->which) ? : check_pad(sd, fi->pad) ? : in check_frame_interval()
334 check_state(sd, state, fi->which, fi->pad, fi->stream); in check_frame_interval()
342 sd->ops->pad->get_frame_interval(sd, state, fi); in call_get_frame_interval()
350 sd->ops->pad->set_frame_interval(sd, state, fi); in call_set_frame_interval()
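
These call_*() wrappers are what v4l2_subdev_call() dispatches to, so every pad operation has its which/pad/stream arguments validated before the driver sees them. A caller working on the active configuration would typically go through the v4l2_subdev_call_state_active() convenience macro, which locks the active state around the call. A hedged sketch (sd is assumed to point at the remote sub-device):

	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.pad = 0,
	};
	int ret;

	ret = v4l2_subdev_call_state_active(sd, pad, get_fmt, &fmt);
	if (ret)
		return ret;
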
360 if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE)) in call_get_frame_desc()
361 return -EOPNOTSUPP; in call_get_frame_desc()
366 ret = sd->ops->pad->get_frame_desc(sd, pad, fd); in call_get_frame_desc()
370 dev_dbg(sd->dev, "Frame descriptor on pad %u, type %s\n", pad, in call_get_frame_desc()
371 fd->type == V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL ? "parallel" : in call_get_frame_desc()
372 fd->type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2 ? "CSI-2" : in call_get_frame_desc()
375 for (i = 0; i < fd->num_entries; i++) { in call_get_frame_desc()
376 struct v4l2_mbus_frame_desc_entry *entry = &fd->entry[i]; in call_get_frame_desc()
379 if (fd->type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2) in call_get_frame_desc()
382 entry->bus.csi2.vc, in call_get_frame_desc()
383 entry->bus.csi2.dt) >= sizeof(buf)); in call_get_frame_desc()
385 dev_dbg(sd->dev, in call_get_frame_desc()
387 entry->stream, entry->pixelcode, entry->length, in call_get_frame_desc()
388 entry->flags, buf); in call_get_frame_desc()
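
call_get_frame_desc() only accepts source pads and logs every returned entry. The descriptor itself is filled by the transmitter's .get_frame_desc() pad op; a sketch for a hypothetical CSI-2 sensor, with all values illustrative only:

#include <media/mipi-csi2.h>

static int mysensor_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
				   struct v4l2_mbus_frame_desc *fd)
{
	fd->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2;
	fd->num_entries = 1;

	fd->entry[0].stream = 0;
	fd->entry[0].pixelcode = MEDIA_BUS_FMT_SRGGB10_1X10;
	fd->entry[0].length = 1920 * 1080 * 10 / 8;	/* example payload size */
	fd->entry[0].bus.csi2.vc = 0;
	fd->entry[0].bus.csi2.dt = MIPI_CSI2_DT_RAW10;

	return 0;
}
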
398 return -EINVAL; in check_edid()
400 if (edid->blocks && edid->edid == NULL) in check_edid()
401 return -EINVAL; in check_edid()
403 return check_pad(sd, edid->pad); in check_edid()
408 return check_edid(sd, edid) ? : sd->ops->pad->get_edid(sd, edid); in call_get_edid()
413 return check_edid(sd, edid) ? : sd->ops->pad->set_edid(sd, edid); in call_set_edid()
420 return -EINVAL; in call_s_dv_timings()
423 sd->ops->pad->s_dv_timings(sd, pad, timings); in call_s_dv_timings()
430 return -EINVAL; in call_g_dv_timings()
433 sd->ops->pad->g_dv_timings(sd, pad, timings); in call_g_dv_timings()
440 return -EINVAL; in call_query_dv_timings()
443 sd->ops->pad->query_dv_timings(sd, pad, timings); in call_query_dv_timings()
450 return -EINVAL; in call_dv_timings_cap()
452 return check_pad(sd, cap->pad) ? : in call_dv_timings_cap()
453 sd->ops->pad->dv_timings_cap(sd, cap); in call_dv_timings_cap()
460 return -EINVAL; in call_enum_dv_timings()
462 return check_pad(sd, dvt->pad) ? : in call_enum_dv_timings()
463 sd->ops->pad->enum_dv_timings(sd, dvt); in call_enum_dv_timings()
472 sd->ops->pad->get_mbus_config(sd, pad, config); in call_get_mbus_config()
484 if (WARN_ON(sd->s_stream_enabled == !!enable)) in call_s_stream()
487 ret = sd->ops->video->s_stream(sd, enable); in call_s_stream()
490 dev_warn(sd->dev, "disabling streaming failed (%d)\n", ret); in call_s_stream()
495 sd->s_stream_enabled = enable; in call_s_stream()
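
call_s_stream() warns on redundant start/stop requests, treats a failed disable as non-fatal, and mirrors the result into sd->s_stream_enabled. Bridge drivers normally just forward their own streaming state, e.g.:

	ret = v4l2_subdev_call(sd, video, s_stream, 1);
	if (ret && ret != -ENOIOCTLCMD)
		return ret;
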
508 * Create state-management wrapper for pad ops dealing with subdev state. The
591 which = ((struct v4l2_subdev_format *)arg)->which; in subdev_ioctl_get_state()
595 which = ((struct v4l2_subdev_crop *)arg)->which; in subdev_ioctl_get_state()
598 which = ((struct v4l2_subdev_mbus_code_enum *)arg)->which; in subdev_ioctl_get_state()
601 which = ((struct v4l2_subdev_frame_size_enum *)arg)->which; in subdev_ioctl_get_state()
604 which = ((struct v4l2_subdev_frame_interval_enum *)arg)->which; in subdev_ioctl_get_state()
608 which = ((struct v4l2_subdev_selection *)arg)->which; in subdev_ioctl_get_state()
614 if (!(subdev_fh->client_caps & in subdev_ioctl_get_state()
616 fi->which = V4L2_SUBDEV_FORMAT_ACTIVE; in subdev_ioctl_get_state()
618 which = fi->which; in subdev_ioctl_get_state()
623 which = ((struct v4l2_subdev_routing *)arg)->which; in subdev_ioctl_get_state()
628 subdev_fh->state : in subdev_ioctl_get_state()
639 bool ro_subdev = test_bit(V4L2_FL_SUBDEV_RO_DEVNODE, &vdev->flags); in subdev_do_ioctl()
640 bool streams_subdev = sd->flags & V4L2_SUBDEV_FL_STREAMS; in subdev_do_ioctl()
641 bool client_supports_streams = subdev_fh->client_caps & in subdev_do_ioctl()
656 memset(cap->reserved, 0, sizeof(cap->reserved)); in subdev_do_ioctl()
657 cap->version = LINUX_VERSION_CODE; in subdev_do_ioctl()
658 cap->capabilities = in subdev_do_ioctl()
668 * currently returns -EINVAL for NULL control handlers). in subdev_do_ioctl()
674 if (!vfh->ctrl_handler) in subdev_do_ioctl()
675 return -ENOTTY; in subdev_do_ioctl()
676 return v4l2_queryctrl(vfh->ctrl_handler, arg); in subdev_do_ioctl()
679 if (!vfh->ctrl_handler) in subdev_do_ioctl()
680 return -ENOTTY; in subdev_do_ioctl()
681 return v4l2_query_ext_ctrl(vfh->ctrl_handler, arg); in subdev_do_ioctl()
684 if (!vfh->ctrl_handler) in subdev_do_ioctl()
685 return -ENOTTY; in subdev_do_ioctl()
686 return v4l2_querymenu(vfh->ctrl_handler, arg); in subdev_do_ioctl()
689 if (!vfh->ctrl_handler) in subdev_do_ioctl()
690 return -ENOTTY; in subdev_do_ioctl()
691 return v4l2_g_ctrl(vfh->ctrl_handler, arg); in subdev_do_ioctl()
694 if (!vfh->ctrl_handler) in subdev_do_ioctl()
695 return -ENOTTY; in subdev_do_ioctl()
696 return v4l2_s_ctrl(vfh, vfh->ctrl_handler, arg); in subdev_do_ioctl()
699 if (!vfh->ctrl_handler) in subdev_do_ioctl()
700 return -ENOTTY; in subdev_do_ioctl()
701 return v4l2_g_ext_ctrls(vfh->ctrl_handler, in subdev_do_ioctl()
702 vdev, sd->v4l2_dev->mdev, arg); in subdev_do_ioctl()
705 if (!vfh->ctrl_handler) in subdev_do_ioctl()
706 return -ENOTTY; in subdev_do_ioctl()
707 return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, in subdev_do_ioctl()
708 vdev, sd->v4l2_dev->mdev, arg); in subdev_do_ioctl()
711 if (!vfh->ctrl_handler) in subdev_do_ioctl()
712 return -ENOTTY; in subdev_do_ioctl()
713 return v4l2_try_ext_ctrls(vfh->ctrl_handler, in subdev_do_ioctl()
714 vdev, sd->v4l2_dev->mdev, arg); in subdev_do_ioctl()
717 if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)) in subdev_do_ioctl()
718 return -ENOIOCTLCMD; in subdev_do_ioctl()
720 return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK); in subdev_do_ioctl()
727 if ((sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS) && in subdev_do_ioctl()
728 vfh->ctrl_handler) in subdev_do_ioctl()
731 return -ENOIOCTLCMD; in subdev_do_ioctl()
738 if (sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS) in subdev_do_ioctl()
741 return -ENOIOCTLCMD; in subdev_do_ioctl()
749 return -EPERM; in subdev_do_ioctl()
757 return -EPERM; in subdev_do_ioctl()
764 if (p->match.type != V4L2_CHIP_MATCH_SUBDEV || p->match.addr) in subdev_do_ioctl()
765 return -EINVAL; in subdev_do_ioctl()
766 if (sd->ops->core && sd->ops->core->s_register) in subdev_do_ioctl()
767 p->flags |= V4L2_CHIP_FL_WRITABLE; in subdev_do_ioctl()
768 if (sd->ops->core && sd->ops->core->g_register) in subdev_do_ioctl()
769 p->flags |= V4L2_CHIP_FL_READABLE; in subdev_do_ioctl()
770 strscpy(p->name, sd->name, sizeof(p->name)); in subdev_do_ioctl()
779 sd->name); in subdev_do_ioctl()
782 sd->name); in subdev_do_ioctl()
790 format->stream = 0; in subdev_do_ioctl()
792 memset(format->reserved, 0, sizeof(format->reserved)); in subdev_do_ioctl()
793 memset(format->format.reserved, 0, sizeof(format->format.reserved)); in subdev_do_ioctl()
800 if (format->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev) in subdev_do_ioctl()
801 return -EPERM; in subdev_do_ioctl()
804 format->stream = 0; in subdev_do_ioctl()
806 memset(format->reserved, 0, sizeof(format->reserved)); in subdev_do_ioctl()
807 memset(format->format.reserved, 0, sizeof(format->format.reserved)); in subdev_do_ioctl()
816 crop->stream = 0; in subdev_do_ioctl()
818 memset(crop->reserved, 0, sizeof(crop->reserved)); in subdev_do_ioctl()
820 sel.which = crop->which; in subdev_do_ioctl()
821 sel.pad = crop->pad; in subdev_do_ioctl()
822 sel.stream = crop->stream; in subdev_do_ioctl()
828 crop->rect = sel.r; in subdev_do_ioctl()
837 if (crop->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev) in subdev_do_ioctl()
838 return -EPERM; in subdev_do_ioctl()
841 crop->stream = 0; in subdev_do_ioctl()
843 memset(crop->reserved, 0, sizeof(crop->reserved)); in subdev_do_ioctl()
845 sel.which = crop->which; in subdev_do_ioctl()
846 sel.pad = crop->pad; in subdev_do_ioctl()
847 sel.stream = crop->stream; in subdev_do_ioctl()
849 sel.r = crop->rect; in subdev_do_ioctl()
854 crop->rect = sel.r; in subdev_do_ioctl()
863 code->stream = 0; in subdev_do_ioctl()
865 memset(code->reserved, 0, sizeof(code->reserved)); in subdev_do_ioctl()
874 fse->stream = 0; in subdev_do_ioctl()
876 memset(fse->reserved, 0, sizeof(fse->reserved)); in subdev_do_ioctl()
885 fi->stream = 0; in subdev_do_ioctl()
887 memset(fi->reserved, 0, sizeof(fi->reserved)); in subdev_do_ioctl()
895 fi->stream = 0; in subdev_do_ioctl()
897 if (fi->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev) in subdev_do_ioctl()
898 return -EPERM; in subdev_do_ioctl()
900 memset(fi->reserved, 0, sizeof(fi->reserved)); in subdev_do_ioctl()
908 fie->stream = 0; in subdev_do_ioctl()
910 memset(fie->reserved, 0, sizeof(fie->reserved)); in subdev_do_ioctl()
919 sel->stream = 0; in subdev_do_ioctl()
921 memset(sel->reserved, 0, sizeof(sel->reserved)); in subdev_do_ioctl()
929 if (sel->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev) in subdev_do_ioctl()
930 return -EPERM; in subdev_do_ioctl()
933 sel->stream = 0; in subdev_do_ioctl()
935 memset(sel->reserved, 0, sizeof(sel->reserved)); in subdev_do_ioctl()
972 return -EPERM; in subdev_do_ioctl()
983 return -EPERM; in subdev_do_ioctl()
993 return -EINVAL; in subdev_do_ioctl()
1006 return -ENOIOCTLCMD; in subdev_do_ioctl()
1008 if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) in subdev_do_ioctl()
1009 return -ENOIOCTLCMD; in subdev_do_ioctl()
1011 memset(routing->reserved, 0, sizeof(routing->reserved)); in subdev_do_ioctl()
1013 krouting = &state->routing; in subdev_do_ioctl()
1015 memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes, in subdev_do_ioctl()
1016 krouting->routes, in subdev_do_ioctl()
1017 min(krouting->num_routes, routing->len_routes) * in subdev_do_ioctl()
1018 sizeof(*krouting->routes)); in subdev_do_ioctl()
1019 routing->num_routes = krouting->num_routes; in subdev_do_ioctl()
1027 (struct v4l2_subdev_route *)(uintptr_t)routing->routes; in subdev_do_ioctl()
1033 return -ENOIOCTLCMD; in subdev_do_ioctl()
1035 if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) in subdev_do_ioctl()
1036 return -ENOIOCTLCMD; in subdev_do_ioctl()
1038 if (routing->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev) in subdev_do_ioctl()
1039 return -EPERM; in subdev_do_ioctl()
1041 if (routing->num_routes > routing->len_routes) in subdev_do_ioctl()
1042 return -EINVAL; in subdev_do_ioctl()
1044 memset(routing->reserved, 0, sizeof(routing->reserved)); in subdev_do_ioctl()
1046 for (i = 0; i < routing->num_routes; ++i) { in subdev_do_ioctl()
1048 const struct media_pad *pads = sd->entity.pads; in subdev_do_ioctl()
1050 if (route->sink_stream > V4L2_SUBDEV_MAX_STREAM_ID || in subdev_do_ioctl()
1051 route->source_stream > V4L2_SUBDEV_MAX_STREAM_ID) in subdev_do_ioctl()
1052 return -EINVAL; in subdev_do_ioctl()
1054 if (route->sink_pad >= sd->entity.num_pads) in subdev_do_ioctl()
1055 return -EINVAL; in subdev_do_ioctl()
1057 if (!(pads[route->sink_pad].flags & in subdev_do_ioctl()
1058 MEDIA_PAD_FL_SINK)) in subdev_do_ioctl()
1059 return -EINVAL; in subdev_do_ioctl()
1061 if (route->source_pad >= sd->entity.num_pads) in subdev_do_ioctl()
1062 return -EINVAL; in subdev_do_ioctl()
1064 if (!(pads[route->source_pad].flags & in subdev_do_ioctl()
1065 MEDIA_PAD_FL_SOURCE)) in subdev_do_ioctl()
1066 return -EINVAL; in subdev_do_ioctl()
1068 if (route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE) in subdev_do_ioctl()
1080 return -E2BIG; in subdev_do_ioctl()
1087 memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes, in subdev_do_ioctl()
1088 state->routing.routes, in subdev_do_ioctl()
1089 min(state->routing.num_routes, routing->len_routes) * in subdev_do_ioctl()
1090 sizeof(*state->routing.routes)); in subdev_do_ioctl()
1091 routing->num_routes = state->routing.num_routes; in subdev_do_ioctl()
1096 krouting.num_routes = routing->num_routes; in subdev_do_ioctl()
1097 krouting.len_routes = routing->len_routes; in subdev_do_ioctl()
1101 routing->which, &krouting); in subdev_do_ioctl()
1105 memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes, in subdev_do_ioctl()
1106 state->routing.routes, in subdev_do_ioctl()
1107 min(state->routing.num_routes, routing->len_routes) * in subdev_do_ioctl()
1108 sizeof(*state->routing.routes)); in subdev_do_ioctl()
1109 routing->num_routes = state->routing.num_routes; in subdev_do_ioctl()
1117 client_cap->capabilities = subdev_fh->client_caps; in subdev_do_ioctl()
1131 client_cap->capabilities &= ~V4L2_SUBDEV_CLIENT_CAP_STREAMS; in subdev_do_ioctl()
1134 client_cap->capabilities &= (V4L2_SUBDEV_CLIENT_CAP_STREAMS | in subdev_do_ioctl()
1137 subdev_fh->client_caps = client_cap->capabilities; in subdev_do_ioctl()
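
The stream fields in the sub-device ioctls are only honoured after the file handle opts in with VIDIOC_SUBDEV_S_CLIENT_CAP; without V4L2_SUBDEV_CLIENT_CAP_STREAMS the handlers above force the stream to 0. A user-space sketch, error handling omitted:

	/* Needs <sys/ioctl.h> and <linux/v4l2-subdev.h>. */
	struct v4l2_subdev_client_capability cap = {
		.capabilities = V4L2_SUBDEV_CLIENT_CAP_STREAMS,
	};

	ioctl(fd, VIDIOC_SUBDEV_S_CLIENT_CAP, &cap);
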
1152 struct mutex *lock = vdev->lock; in subdev_do_ioctl_lock()
1153 long ret = -ENODEV; in subdev_do_ioctl_lock()
1156 return -ERESTARTSYS; in subdev_do_ioctl_lock()
1201 return -ENODEV; in subdev_ioctl()
1208 return -ENODEV; in subdev_compat_ioctl32()
1219 if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)) in subdev_poll()
1222 poll_wait(file, &fh->wait, wait); in subdev_poll()
1250 return -EINVAL; in v4l2_subdev_get_fwnode_pad_1_to_1()
1254 fwnode = fwnode_graph_get_port_parent(endpoint->local_fwnode); in v4l2_subdev_get_fwnode_pad_1_to_1()
1257 if (device_match_fwnode(sd->dev, fwnode)) in v4l2_subdev_get_fwnode_pad_1_to_1()
1258 return endpoint->port; in v4l2_subdev_get_fwnode_pad_1_to_1()
1260 return -ENXIO; in v4l2_subdev_get_fwnode_pad_1_to_1()
1272 if (source_fmt->format.width != sink_fmt->format.width) { in v4l2_subdev_link_validate_default()
1273 dev_dbg(sd->entity.graph_obj.mdev->dev, in v4l2_subdev_link_validate_default()
1274 "%s: width does not match (source %u, sink %u)\n", in v4l2_subdev_link_validate_default()
1276 source_fmt->format.width, sink_fmt->format.width); in v4l2_subdev_link_validate_default()
1280 if (source_fmt->format.height != sink_fmt->format.height) { in v4l2_subdev_link_validate_default()
1281 dev_dbg(sd->entity.graph_obj.mdev->dev, in v4l2_subdev_link_validate_default()
1282 "%s: height does not match (source %u, sink %u)\n", in v4l2_subdev_link_validate_default()
1284 source_fmt->format.height, sink_fmt->format.height); in v4l2_subdev_link_validate_default()
1288 if (source_fmt->format.code != sink_fmt->format.code) { in v4l2_subdev_link_validate_default()
1289 dev_dbg(sd->entity.graph_obj.mdev->dev, in v4l2_subdev_link_validate_default()
1290 "%s: media bus code does not match (source 0x%8.8x, sink 0x%8.8x)\n", in v4l2_subdev_link_validate_default()
1292 source_fmt->format.code, sink_fmt->format.code); in v4l2_subdev_link_validate_default()
1296 /* The field order must match, or the sink field order must be NONE in v4l2_subdev_link_validate_default()
1297 * to support interlaced hardware connected to bridges that support in v4l2_subdev_link_validate_default()
1298 * progressive formats only. in v4l2_subdev_link_validate_default()
1300 if (source_fmt->format.field != sink_fmt->format.field && in v4l2_subdev_link_validate_default()
1301 sink_fmt->format.field != V4L2_FIELD_NONE) { in v4l2_subdev_link_validate_default()
1302 dev_dbg(sd->entity.graph_obj.mdev->dev, in v4l2_subdev_link_validate_default()
1303 "%s: field does not match (source %u, sink %u)\n", in v4l2_subdev_link_validate_default()
1305 source_fmt->format.field, sink_fmt->format.field); in v4l2_subdev_link_validate_default()
1312 dev_dbg(sd->entity.graph_obj.mdev->dev, in v4l2_subdev_link_validate_default()
1313 "%s: link was \"%s\":%u -> \"%s\":%u\n", __func__, in v4l2_subdev_link_validate_default()
1314 link->source->entity->name, link->source->index, in v4l2_subdev_link_validate_default()
1315 link->sink->entity->name, link->sink->index); in v4l2_subdev_link_validate_default()
1317 return -EPIPE; in v4l2_subdev_link_validate_default()
1330 sd = media_entity_to_v4l2_subdev(pad->entity); in v4l2_subdev_link_validate_get_format()
1332 fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE; in v4l2_subdev_link_validate_get_format()
1333 fmt->pad = pad->index; in v4l2_subdev_link_validate_get_format()
1334 fmt->stream = stream; in v4l2_subdev_link_validate_get_format()
1359 subdev = media_entity_to_v4l2_subdev(pad->entity); in __v4l2_link_validate_get_streams()
1371 for_each_active_route(&state->routing, route) { in __v4l2_link_validate_get_streams()
1375 if (pad->flags & MEDIA_PAD_FL_SOURCE) { in __v4l2_link_validate_get_streams()
1376 route_pad = route->source_pad; in __v4l2_link_validate_get_streams()
1377 route_stream = route->source_stream; in __v4l2_link_validate_get_streams()
1379 route_pad = route->sink_pad; in __v4l2_link_validate_get_streams()
1380 route_stream = route->sink_stream; in __v4l2_link_validate_get_streams()
1383 if (route_pad != pad->index) in __v4l2_link_validate_get_streams()
1399 struct v4l2_subdev *subdev = media_entity_to_v4l2_subdev(pad->entity); in v4l2_link_validate_get_streams()
1401 if (!(subdev->flags & V4L2_SUBDEV_FL_STREAMS)) { in v4l2_link_validate_get_streams()
1402 /* Non-streams subdevs have an implicit stream 0 */ in v4l2_link_validate_get_streams()
1418 media_entity_to_v4l2_subdev(link->sink->entity); in v4l2_subdev_link_validate_locked()
1419 struct device *dev = sink_subdev->entity.graph_obj.mdev->dev; in v4l2_subdev_link_validate_locked()
1426 dev_dbg(dev, "validating link \"%s\":%u -> \"%s\":%u\n", in v4l2_subdev_link_validate_locked()
1427 link->source->entity->name, link->source->index, in v4l2_subdev_link_validate_locked()
1428 link->sink->entity->name, link->sink->index); in v4l2_subdev_link_validate_locked()
1430 v4l2_link_validate_get_streams(link->source, &source_streams_mask, states_locked); in v4l2_subdev_link_validate_locked()
1431 v4l2_link_validate_get_streams(link->sink, &sink_streams_mask, states_locked); in v4l2_subdev_link_validate_locked()
1434 * It is ok to have more source streams than sink streams as extra in v4l2_subdev_link_validate_locked()
1436 * sink streams is an error as streams must have a source. in v4l2_subdev_link_validate_locked()
1441 dev_err(dev, "Dangling sink streams: mask %#llx\n", in v4l2_subdev_link_validate_locked()
1443 return -EINVAL; in v4l2_subdev_link_validate_locked()
1446 /* Validate source and sink stream formats */ in v4l2_subdev_link_validate_locked()
1454 dev_dbg(dev, "validating stream \"%s\":%u:%u -> \"%s\":%u:%u\n", in v4l2_subdev_link_validate_locked()
1455 link->source->entity->name, link->source->index, stream, in v4l2_subdev_link_validate_locked()
1456 link->sink->entity->name, link->sink->index, stream); in v4l2_subdev_link_validate_locked()
1458 ret = v4l2_subdev_link_validate_get_format(link->source, stream, in v4l2_subdev_link_validate_locked()
1463 link->source->entity->name, link->source->index, in v4l2_subdev_link_validate_locked()
1468 ret = v4l2_subdev_link_validate_get_format(link->sink, stream, in v4l2_subdev_link_validate_locked()
1473 link->sink->entity->name, link->sink->index, in v4l2_subdev_link_validate_locked()
1484 if (ret != -ENOIOCTLCMD) in v4l2_subdev_link_validate_locked()
1505 * Links are validated in the context of the sink entity. Usage of this in v4l2_subdev_link_validate()
1506 * helper on a sink that is not a subdev is a clear driver bug. in v4l2_subdev_link_validate()
1508 if (WARN_ON_ONCE(!is_media_entity_v4l2_subdev(link->sink->entity))) in v4l2_subdev_link_validate()
1509 return -EINVAL; in v4l2_subdev_link_validate()
1517 if (is_media_entity_v4l2_video_device(link->source->entity)) { in v4l2_subdev_link_validate()
1518 struct media_entity *source = link->source->entity; in v4l2_subdev_link_validate()
1520 if (!source->ops || !source->ops->link_validate) { in v4l2_subdev_link_validate()
1530 source->name); in v4l2_subdev_link_validate()
1538 if (WARN_ON(source->ops->link_validate == v4l2_subdev_link_validate)) in v4l2_subdev_link_validate()
1539 return -EINVAL; in v4l2_subdev_link_validate()
1541 return source->ops->link_validate(link); in v4l2_subdev_link_validate()
1548 if (WARN_ON(!is_media_entity_v4l2_subdev(link->source->entity))) in v4l2_subdev_link_validate()
1549 return -EINVAL; in v4l2_subdev_link_validate()
1551 sink_sd = media_entity_to_v4l2_subdev(link->sink->entity); in v4l2_subdev_link_validate()
1552 source_sd = media_entity_to_v4l2_subdev(link->source->entity); in v4l2_subdev_link_validate()
1581 routing = &state->routing; in v4l2_subdev_has_pad_interdep()
1583 for (i = 0; i < routing->num_routes; ++i) { in v4l2_subdev_has_pad_interdep()
1584 struct v4l2_subdev_route *route = &routing->routes[i]; in v4l2_subdev_has_pad_interdep()
1586 if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)) in v4l2_subdev_has_pad_interdep()
1589 if ((route->sink_pad == pad0 && route->source_pad == pad1) || in v4l2_subdev_has_pad_interdep()
1590 (route->source_pad == pad0 && route->sink_pad == pad1)) { in v4l2_subdev_has_pad_interdep()
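
v4l2_subdev_link_validate() and v4l2_subdev_has_pad_interdep() are meant to be plugged into a sub-device's media entity operations; the WARN_ONs above show that the link-validate helper is only valid when the sink entity is a sub-device. A typical hookup, sketched with a hypothetical ops name:

static const struct media_entity_operations mydrv_entity_ops = {
	.link_validate = v4l2_subdev_link_validate,
	.has_pad_interdep = v4l2_subdev_has_pad_interdep,
};

/* In probe: sd->entity.ops = &mydrv_entity_ops; */
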
1611 return ERR_PTR(-ENOMEM); in __v4l2_subdev_state_alloc()
1613 __mutex_init(&state->_lock, lock_name, lock_key); in __v4l2_subdev_state_alloc()
1614 if (sd->state_lock) in __v4l2_subdev_state_alloc()
1615 state->lock = sd->state_lock; in __v4l2_subdev_state_alloc()
1617 state->lock = &state->_lock; in __v4l2_subdev_state_alloc()
1619 state->sd = sd; in __v4l2_subdev_state_alloc()
1622 if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS) && sd->entity.num_pads) { in __v4l2_subdev_state_alloc()
1623 state->pads = kvcalloc(sd->entity.num_pads, in __v4l2_subdev_state_alloc()
1624 sizeof(*state->pads), GFP_KERNEL); in __v4l2_subdev_state_alloc()
1625 if (!state->pads) { in __v4l2_subdev_state_alloc()
1626 ret = -ENOMEM; in __v4l2_subdev_state_alloc()
1631 if (sd->internal_ops && sd->internal_ops->init_state) { in __v4l2_subdev_state_alloc()
1637 ret = sd->internal_ops->init_state(sd, state); in __v4l2_subdev_state_alloc()
1647 if (state && state->pads) in __v4l2_subdev_state_alloc()
1648 kvfree(state->pads); in __v4l2_subdev_state_alloc()
1661 mutex_destroy(&state->_lock); in __v4l2_subdev_state_free()
1663 kfree(state->routing.routes); in __v4l2_subdev_state_free()
1664 kvfree(state->stream_configs.configs); in __v4l2_subdev_state_free()
1665 kvfree(state->pads); in __v4l2_subdev_state_free()
1674 struct device *dev = sd->dev; in __v4l2_subdev_init_finalize()
1688 sd->name); in __v4l2_subdev_init_finalize()
1689 return -EINVAL; in __v4l2_subdev_init_finalize()
1692 if (sd->flags & V4L2_SUBDEV_FL_STREAMS) { in __v4l2_subdev_init_finalize()
1696 sd->name); in __v4l2_subdev_init_finalize()
1698 return -EINVAL; in __v4l2_subdev_init_finalize()
1702 if (sd->ctrl_handler) in __v4l2_subdev_init_finalize()
1703 sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS; in __v4l2_subdev_init_finalize()
1709 sd->active_state = state; in __v4l2_subdev_init_finalize()
1719 __v4l2_subdev_state_free(sd->active_state); in v4l2_subdev_cleanup()
1720 sd->active_state = NULL; in v4l2_subdev_cleanup()
1722 /* Uninitialised sub-device, bail out here. */ in v4l2_subdev_cleanup()
1723 if (!sd->async_subdev_endpoint_list.next) in v4l2_subdev_cleanup()
1726 list_for_each_entry_safe(ase, ase_tmp, &sd->async_subdev_endpoint_list, in v4l2_subdev_cleanup()
1728 list_del(&ase->async_subdev_endpoint_entry); in v4l2_subdev_cleanup()
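
__v4l2_subdev_init_finalize() sanity-checks the streams-related ops and allocates the sub-device's active state; drivers reach it through the v4l2_subdev_init_finalize() macro (which supplies the lock name and lockdep class key) and must undo it with v4l2_subdev_cleanup(). Sketched:

	/* In probe, after the pads and ops have been set up: */
	ret = v4l2_subdev_init_finalize(sd);
	if (ret)
		return ret;

	/* In remove and on probe error paths: */
	v4l2_subdev_cleanup(sd);
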
1745 if (state->pads) { in __v4l2_subdev_state_get_format()
1749 if (pad >= state->sd->entity.num_pads) in __v4l2_subdev_state_get_format()
1752 return &state->pads[pad].format; in __v4l2_subdev_state_get_format()
1755 lockdep_assert_held(state->lock); in __v4l2_subdev_state_get_format()
1757 stream_configs = &state->stream_configs; in __v4l2_subdev_state_get_format()
1759 for (i = 0; i < stream_configs->num_configs; ++i) { in __v4l2_subdev_state_get_format()
1760 if (stream_configs->configs[i].pad == pad && in __v4l2_subdev_state_get_format()
1761 stream_configs->configs[i].stream == stream) in __v4l2_subdev_state_get_format()
1762 return &stream_configs->configs[i].fmt; in __v4l2_subdev_state_get_format()
1779 if (state->pads) { in __v4l2_subdev_state_get_crop()
1783 if (pad >= state->sd->entity.num_pads) in __v4l2_subdev_state_get_crop()
1786 return &state->pads[pad].crop; in __v4l2_subdev_state_get_crop()
1789 lockdep_assert_held(state->lock); in __v4l2_subdev_state_get_crop()
1791 stream_configs = &state->stream_configs; in __v4l2_subdev_state_get_crop()
1793 for (i = 0; i < stream_configs->num_configs; ++i) { in __v4l2_subdev_state_get_crop()
1794 if (stream_configs->configs[i].pad == pad && in __v4l2_subdev_state_get_crop()
1795 stream_configs->configs[i].stream == stream) in __v4l2_subdev_state_get_crop()
1796 return &stream_configs->configs[i].crop; in __v4l2_subdev_state_get_crop()
1813 if (state->pads) { in __v4l2_subdev_state_get_compose()
1817 if (pad >= state->sd->entity.num_pads) in __v4l2_subdev_state_get_compose()
1820 return &state->pads[pad].compose; in __v4l2_subdev_state_get_compose()
1823 lockdep_assert_held(state->lock); in __v4l2_subdev_state_get_compose()
1825 stream_configs = &state->stream_configs; in __v4l2_subdev_state_get_compose()
1827 for (i = 0; i < stream_configs->num_configs; ++i) { in __v4l2_subdev_state_get_compose()
1828 if (stream_configs->configs[i].pad == pad && in __v4l2_subdev_state_get_compose()
1829 stream_configs->configs[i].stream == stream) in __v4l2_subdev_state_get_compose()
1830 return &stream_configs->configs[i].compose; in __v4l2_subdev_state_get_compose()
1847 lockdep_assert_held(state->lock); in __v4l2_subdev_state_get_interval()
1849 if (state->pads) { in __v4l2_subdev_state_get_interval()
1853 if (pad >= state->sd->entity.num_pads) in __v4l2_subdev_state_get_interval()
1856 return &state->pads[pad].interval; in __v4l2_subdev_state_get_interval()
1859 lockdep_assert_held(state->lock); in __v4l2_subdev_state_get_interval()
1861 stream_configs = &state->stream_configs; in __v4l2_subdev_state_get_interval()
1863 for (i = 0; i < stream_configs->num_configs; ++i) { in __v4l2_subdev_state_get_interval()
1864 if (stream_configs->configs[i].pad == pad && in __v4l2_subdev_state_get_interval()
1865 stream_configs->configs[i].stream == stream) in __v4l2_subdev_state_get_interval()
1866 return &stream_configs->configs[i].interval; in __v4l2_subdev_state_get_interval()
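
These __v4l2_subdev_state_get_*() helpers back the v4l2_subdev_state_get_format()/_crop()/_compose()/_interval() accessors: non-streams sub-devices index the per-pad array, streams sub-devices look up the (pad, stream) entry in the stream configurations. A hedged sketch of a .set_fmt handler built on top of them (mydrv_* is hypothetical, and a real driver would clamp the format to hardware limits):

static int mydrv_set_fmt(struct v4l2_subdev *sd,
			 struct v4l2_subdev_state *state,
			 struct v4l2_subdev_format *format)
{
	struct v4l2_mbus_framefmt *fmt;

	fmt = v4l2_subdev_state_get_format(state, format->pad, format->stream);
	if (!fmt)
		return -EINVAL;

	/* Adjust format->format here, then store it in the state. */
	*fmt = format->format;

	return 0;
}
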
1897 return -ENOMEM; in v4l2_subdev_init_stream_configs()
1907 new_configs.configs[idx].pad = route->sink_pad; in v4l2_subdev_init_stream_configs()
1908 new_configs.configs[idx].stream = route->sink_stream; in v4l2_subdev_init_stream_configs()
1912 new_configs.configs[idx].pad = route->source_pad; in v4l2_subdev_init_stream_configs()
1913 new_configs.configs[idx].stream = route->source_stream; in v4l2_subdev_init_stream_configs()
1918 kvfree(stream_configs->configs); in v4l2_subdev_init_stream_configs()
1929 fmt = v4l2_subdev_state_get_format(state, format->pad, format->stream); in v4l2_subdev_get_fmt()
1931 return -EINVAL; in v4l2_subdev_get_fmt()
1933 format->format = *fmt; in v4l2_subdev_get_fmt()
1945 interval = v4l2_subdev_state_get_interval(state, fi->pad, fi->stream); in v4l2_subdev_get_frame_interval()
1947 return -EINVAL; in v4l2_subdev_get_frame_interval()
1949 fi->interval = *interval; in v4l2_subdev_get_frame_interval()
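
v4l2_subdev_get_fmt() and v4l2_subdev_get_frame_interval() simply copy what is stored in the state, so a sub-device that keeps its configuration in the state can use them directly as pad operations:

static const struct v4l2_subdev_pad_ops mydrv_pad_ops = {
	.get_fmt = v4l2_subdev_get_fmt,
	.set_fmt = mydrv_set_fmt,	/* e.g. the sketch shown earlier */
	.get_frame_interval = v4l2_subdev_get_frame_interval,
};
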
1959 struct v4l2_subdev_krouting *dst = &state->routing; in v4l2_subdev_set_routing()
1965 if (unlikely(check_mul_overflow((size_t)src->num_routes, in v4l2_subdev_set_routing()
1966 sizeof(*src->routes), &bytes))) in v4l2_subdev_set_routing()
1967 return -EOVERFLOW; in v4l2_subdev_set_routing()
1969 lockdep_assert_held(state->lock); in v4l2_subdev_set_routing()
1971 if (src->num_routes > 0) { in v4l2_subdev_set_routing()
1972 new_routing.routes = kmemdup(src->routes, bytes, GFP_KERNEL); in v4l2_subdev_set_routing()
1974 return -ENOMEM; in v4l2_subdev_set_routing()
1977 new_routing.num_routes = src->num_routes; in v4l2_subdev_set_routing()
1979 r = v4l2_subdev_init_stream_configs(&state->stream_configs, in v4l2_subdev_set_routing()
1986 kfree(dst->routes); in v4l2_subdev_set_routing()
2000 route = &routing->routes[0]; in __v4l2_subdev_next_active_route()
2002 for (; route < routing->routes + routing->num_routes; ++route) { in __v4l2_subdev_next_active_route()
2003 if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)) in __v4l2_subdev_next_active_route()
2026 stream_configs = &state->stream_configs; in v4l2_subdev_set_routing_with_fmt()
2028 for (i = 0; i < stream_configs->num_configs; ++i) in v4l2_subdev_set_routing_with_fmt()
2029 stream_configs->configs[i].fmt = *fmt; in v4l2_subdev_set_routing_with_fmt()
2041 for (i = 0; i < routing->num_routes; ++i) { in v4l2_subdev_routing_find_opposite_end()
2042 struct v4l2_subdev_route *route = &routing->routes[i]; in v4l2_subdev_routing_find_opposite_end()
2044 if (route->source_pad == pad && in v4l2_subdev_routing_find_opposite_end()
2045 route->source_stream == stream) { in v4l2_subdev_routing_find_opposite_end()
2047 *other_pad = route->sink_pad; in v4l2_subdev_routing_find_opposite_end()
2049 *other_stream = route->sink_stream; in v4l2_subdev_routing_find_opposite_end()
2053 if (route->sink_pad == pad && route->sink_stream == stream) { in v4l2_subdev_routing_find_opposite_end()
2055 *other_pad = route->source_pad; in v4l2_subdev_routing_find_opposite_end()
2057 *other_stream = route->source_stream; in v4l2_subdev_routing_find_opposite_end()
2062 return -EINVAL; in v4l2_subdev_routing_find_opposite_end()
2073 ret = v4l2_subdev_routing_find_opposite_end(&state->routing, in v4l2_subdev_state_get_opposite_stream_format()
2086 const struct v4l2_subdev_krouting *routing = &state->routing; in v4l2_subdev_state_xlate_streams()
2092 if (route->sink_pad == pad0 && route->source_pad == pad1 && in v4l2_subdev_state_xlate_streams()
2093 (*streams & BIT_ULL(route->sink_stream))) { in v4l2_subdev_state_xlate_streams()
2094 streams0 |= BIT_ULL(route->sink_stream); in v4l2_subdev_state_xlate_streams()
2095 streams1 |= BIT_ULL(route->source_stream); in v4l2_subdev_state_xlate_streams()
2097 if (route->source_pad == pad0 && route->sink_pad == pad1 && in v4l2_subdev_state_xlate_streams()
2098 (*streams & BIT_ULL(route->source_stream))) { in v4l2_subdev_state_xlate_streams()
2099 streams0 |= BIT_ULL(route->source_stream); in v4l2_subdev_state_xlate_streams()
2100 streams1 |= BIT_ULL(route->sink_stream); in v4l2_subdev_state_xlate_streams()
2115 int ret = -EINVAL; in v4l2_subdev_routing_validate()
2119 remote_pads = kcalloc(sd->entity.num_pads, sizeof(*remote_pads), in v4l2_subdev_routing_validate()
2122 return -ENOMEM; in v4l2_subdev_routing_validate()
2124 for (i = 0; i < sd->entity.num_pads; ++i) in v4l2_subdev_routing_validate()
2128 for (i = 0; i < routing->num_routes; ++i) { in v4l2_subdev_routing_validate()
2129 const struct v4l2_subdev_route *route = &routing->routes[i]; in v4l2_subdev_routing_validate()
2131 /* Validate the sink and source pad numbers. */ in v4l2_subdev_routing_validate()
2132 if (route->sink_pad >= sd->entity.num_pads || in v4l2_subdev_routing_validate()
2133 !(sd->entity.pads[route->sink_pad].flags & MEDIA_PAD_FL_SINK)) { in v4l2_subdev_routing_validate()
2134 dev_dbg(sd->dev, "route %u sink (%u) is not a sink pad\n", in v4l2_subdev_routing_validate()
2135 i, route->sink_pad); in v4l2_subdev_routing_validate()
2139 if (route->source_pad >= sd->entity.num_pads || in v4l2_subdev_routing_validate()
2140 !(sd->entity.pads[route->source_pad].flags & MEDIA_PAD_FL_SOURCE)) { in v4l2_subdev_routing_validate()
2141 dev_dbg(sd->dev, "route %u source (%u) is not a source pad\n", in v4l2_subdev_routing_validate()
2142 i, route->source_pad); in v4l2_subdev_routing_validate()
2148 * sink pad must be routed to a single source pad. in v4l2_subdev_routing_validate()
2151 if (remote_pads[route->sink_pad] != U32_MAX && in v4l2_subdev_routing_validate()
2152 remote_pads[route->sink_pad] != route->source_pad) { in v4l2_subdev_routing_validate()
2153 dev_dbg(sd->dev, in v4l2_subdev_routing_validate()
2155 i, "sink"); in v4l2_subdev_routing_validate()
2162 * source pad must originate from a single sink pad. in v4l2_subdev_routing_validate()
2165 if (remote_pads[route->source_pad] != U32_MAX && in v4l2_subdev_routing_validate()
2166 remote_pads[route->source_pad] != route->sink_pad) { in v4l2_subdev_routing_validate()
2167 dev_dbg(sd->dev, in v4l2_subdev_routing_validate()
2175 * V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING: Pads on the sink in v4l2_subdev_routing_validate()
2176 * side can not do stream multiplexing, i.e. there can be only in v4l2_subdev_routing_validate()
2177 * a single stream in a sink pad. in v4l2_subdev_routing_validate()
2180 if (remote_pads[route->sink_pad] != U32_MAX) { in v4l2_subdev_routing_validate()
2181 dev_dbg(sd->dev, in v4l2_subdev_routing_validate()
2183 i, "sink", route->sink_pad); in v4l2_subdev_routing_validate()
2191 * be only a single stream in a source pad. in v4l2_subdev_routing_validate()
2194 if (remote_pads[route->source_pad] != U32_MAX) { in v4l2_subdev_routing_validate()
2195 dev_dbg(sd->dev, in v4l2_subdev_routing_validate()
2197 i, "source", route->source_pad); in v4l2_subdev_routing_validate()
2203 remote_pads[route->sink_pad] = route->source_pad; in v4l2_subdev_routing_validate()
2204 remote_pads[route->source_pad] = route->sink_pad; in v4l2_subdev_routing_validate()
2207 for (j = i + 1; j < routing->num_routes; ++j) { in v4l2_subdev_routing_validate()
2208 const struct v4l2_subdev_route *r = &routing->routes[j]; in v4l2_subdev_routing_validate()
2212 * originate from the same (sink) stream. in v4l2_subdev_routing_validate()
2215 route->sink_pad == r->sink_pad && in v4l2_subdev_routing_validate()
2216 route->sink_stream == r->sink_stream) { in v4l2_subdev_routing_validate()
2217 dev_dbg(sd->dev, in v4l2_subdev_routing_validate()
2218 "routes %u and %u originate from same sink (%u/%u)\n", in v4l2_subdev_routing_validate()
2219 i, j, route->sink_pad, in v4l2_subdev_routing_validate()
2220 route->sink_stream); in v4l2_subdev_routing_validate()
2229 route->source_pad == r->source_pad && in v4l2_subdev_routing_validate()
2230 route->source_stream == r->source_stream) { in v4l2_subdev_routing_validate()
2231 dev_dbg(sd->dev, in v4l2_subdev_routing_validate()
2233 i, j, route->source_pad, in v4l2_subdev_routing_validate()
2234 route->source_stream); in v4l2_subdev_routing_validate()
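
v4l2_subdev_routing_validate() checks pad numbers and directions and enforces whatever V4L2_SUBDEV_ROUTING_* restrictions the caller passes. A streams-capable sub-device typically calls it from its .set_routing() pad op before installing the table; a hedged sketch with hypothetical names and example defaults:

static int mydrv_set_routing(struct v4l2_subdev *sd,
			     struct v4l2_subdev_state *state,
			     enum v4l2_subdev_format_whence which,
			     struct v4l2_subdev_krouting *routing)
{
	static const struct v4l2_mbus_framefmt def_fmt = {
		.width = 1920,
		.height = 1080,
		.code = MEDIA_BUS_FMT_SRGGB10_1X10,
		.field = V4L2_FIELD_NONE,
	};
	int ret;

	ret = v4l2_subdev_routing_validate(sd, routing,
					   V4L2_SUBDEV_ROUTING_ONLY_1_TO_1);
	if (ret)
		return ret;

	/* Apply the routing and reset every stream to the default format. */
	return v4l2_subdev_set_routing_with_fmt(sd, state, routing, &def_fmt);
}
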
2254 if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) { in v4l2_subdev_collect_streams()
2257 (sd->enabled_pads & BIT_ULL(pad)) ? BIT_ULL(0) : 0; in v4l2_subdev_collect_streams()
2258 dev_dbg(sd->dev, in v4l2_subdev_collect_streams()
2259 "collect_streams: sub-device \"%s\" does not support streams\n", in v4l2_subdev_collect_streams()
2260 sd->entity.name); in v4l2_subdev_collect_streams()
2267 for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) { in v4l2_subdev_collect_streams()
2269 &state->stream_configs.configs[i]; in v4l2_subdev_collect_streams()
2271 if (cfg->pad != pad || !(streams_mask & BIT_ULL(cfg->stream))) in v4l2_subdev_collect_streams()
2274 *found_streams |= BIT_ULL(cfg->stream); in v4l2_subdev_collect_streams()
2275 if (cfg->enabled) in v4l2_subdev_collect_streams()
2276 *enabled_streams |= BIT_ULL(cfg->stream); in v4l2_subdev_collect_streams()
2279 dev_dbg(sd->dev, in v4l2_subdev_collect_streams()
2281 sd->entity.name, pad, *found_streams, *enabled_streams); in v4l2_subdev_collect_streams()
2289 if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) { in v4l2_subdev_set_streams_enabled()
2291 sd->enabled_pads |= BIT_ULL(pad); in v4l2_subdev_set_streams_enabled()
2293 sd->enabled_pads &= ~BIT_ULL(pad); in v4l2_subdev_set_streams_enabled()
2297 for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) { in v4l2_subdev_set_streams_enabled()
2299 &state->stream_configs.configs[i]; in v4l2_subdev_set_streams_enabled()
2301 if (cfg->pad == pad && (streams_mask & BIT_ULL(cfg->stream))) in v4l2_subdev_set_streams_enabled()
2302 cfg->enabled = enabled; in v4l2_subdev_set_streams_enabled()
2309 struct device *dev = sd->entity.graph_obj.mdev->dev; in v4l2_subdev_enable_streams()
2317 dev_dbg(dev, "enable streams \"%s\":%u/%#llx\n", sd->entity.name, pad, in v4l2_subdev_enable_streams()
2321 if (pad >= sd->entity.num_pads) in v4l2_subdev_enable_streams()
2322 return -EINVAL; in v4l2_subdev_enable_streams()
2324 if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE)) in v4l2_subdev_enable_streams()
2325 return -EOPNOTSUPP; in v4l2_subdev_enable_streams()
2328 * We use a 64-bit bitmask for tracking enabled pads, so only subdevices in v4l2_subdev_enable_streams()
2331 if (pad >= sizeof(sd->enabled_pads) * BITS_PER_BYTE) in v4l2_subdev_enable_streams()
2332 return -EOPNOTSUPP; in v4l2_subdev_enable_streams()
2355 streams_mask & ~found_streams, sd->entity.name, pad); in v4l2_subdev_enable_streams()
2356 ret = -EINVAL; in v4l2_subdev_enable_streams()
2362 enabled_streams, sd->entity.name, pad); in v4l2_subdev_enable_streams()
2363 ret = -EALREADY; in v4l2_subdev_enable_streams()
2411 struct device *dev = sd->entity.graph_obj.mdev->dev; in v4l2_subdev_disable_streams()
2418 dev_dbg(dev, "disable streams \"%s\":%u/%#llx\n", sd->entity.name, pad, in v4l2_subdev_disable_streams()
2422 if (pad >= sd->entity.num_pads) in v4l2_subdev_disable_streams()
2423 return -EINVAL; in v4l2_subdev_disable_streams()
2425 if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE)) in v4l2_subdev_disable_streams()
2426 return -EOPNOTSUPP; in v4l2_subdev_disable_streams()
2429 * We use a 64-bit bitmask for tracking enabled pads, so only subdevices in v4l2_subdev_disable_streams()
2432 if (pad >= sizeof(sd->enabled_pads) * BITS_PER_BYTE) in v4l2_subdev_disable_streams()
2433 return -EOPNOTSUPP; in v4l2_subdev_disable_streams()
2456 streams_mask & ~found_streams, sd->entity.name, pad); in v4l2_subdev_disable_streams()
2457 ret = -EINVAL; in v4l2_subdev_disable_streams()
2463 streams_mask & ~enabled_streams, sd->entity.name, pad); in v4l2_subdev_disable_streams()
2464 ret = -EALREADY; in v4l2_subdev_disable_streams()
2475 if (!(sd->enabled_pads & ~BIT_ULL(pad))) in v4l2_subdev_disable_streams()
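
v4l2_subdev_enable_streams() and v4l2_subdev_disable_streams() are the caller-facing API: they take a source pad and a 64-bit stream mask, check it against the found/enabled stream sets collected above, and, for sub-devices that only implement .s_stream(), fall back to that op. Sketched call sites, with remote_sd and remote_pad assumed to describe the upstream source:

	/* Start stream 0 on the remote source pad. */
	ret = v4l2_subdev_enable_streams(remote_sd, remote_pad, BIT_ULL(0));
	if (ret)
		return ret;

	/* ... and later, to stop: */
	v4l2_subdev_disable_streams(remote_sd, remote_pad, BIT_ULL(0));
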
2507 int pad_index = -1; in v4l2_subdev_s_stream_helper()
2514 media_entity_for_each_pad(&sd->entity, pad) { in v4l2_subdev_s_stream_helper()
2515 if (pad->flags & MEDIA_PAD_FL_SOURCE) { in v4l2_subdev_s_stream_helper()
2516 pad_index = pad->index; in v4l2_subdev_s_stream_helper()
2521 if (WARN_ON(pad_index == -1)) in v4l2_subdev_s_stream_helper()
2522 return -EINVAL; in v4l2_subdev_s_stream_helper()
2524 if (sd->flags & V4L2_SUBDEV_FL_STREAMS) { in v4l2_subdev_s_stream_helper()
2531 for_each_active_route(&state->routing, route) in v4l2_subdev_s_stream_helper()
2532 source_mask |= BIT_ULL(route->source_stream); in v4l2_subdev_s_stream_helper()
2537 * For non-streams subdevices, there's a single implicit stream in v4l2_subdev_s_stream_helper()
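
v4l2_subdev_s_stream_helper() lets a sub-device that implements .enable_streams()/.disable_streams() also service the legacy .s_stream() video op: it locates the single source pad, builds the stream mask from the active routes (or uses the implicit stream 0), and forwards to the streams API. Wiring it up is one line:

static const struct v4l2_subdev_video_ops mydrv_video_ops = {
	.s_stream = v4l2_subdev_s_stream_helper,
};
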
2556 INIT_LIST_HEAD(&sd->list); in v4l2_subdev_init()
2558 sd->ops = ops; in v4l2_subdev_init()
2559 sd->v4l2_dev = NULL; in v4l2_subdev_init()
2560 sd->flags = 0; in v4l2_subdev_init()
2561 sd->name[0] = '\0'; in v4l2_subdev_init()
2562 sd->grp_id = 0; in v4l2_subdev_init()
2563 sd->dev_priv = NULL; in v4l2_subdev_init()
2564 sd->host_priv = NULL; in v4l2_subdev_init()
2565 sd->privacy_led = NULL; in v4l2_subdev_init()
2566 INIT_LIST_HEAD(&sd->async_subdev_endpoint_list); in v4l2_subdev_init()
2568 sd->entity.name = sd->name; in v4l2_subdev_init()
2569 sd->entity.obj_type = MEDIA_ENTITY_TYPE_V4L2_SUBDEV; in v4l2_subdev_init()
2570 sd->entity.function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN; in v4l2_subdev_init()
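
v4l2_subdev_init() only initialises the structure, hooks up the ops and points the entity name at sd->name; device-node flags, pads and registration are separate steps. A minimal probe-time sequence, sketched with hypothetical mydrv_*/MYDRV_* names:

	v4l2_subdev_init(sd, &mydrv_subdev_ops);
	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	snprintf(sd->name, sizeof(sd->name), "mydrv %s", dev_name(dev));

	sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_ISP;
	ret = media_entity_pads_init(&sd->entity, MYDRV_NUM_PADS, mydrv_pads);
	if (ret)
		return ret;
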
2578 v4l2_event_queue(sd->devnode, ev); in v4l2_subdev_notify_event()
2588 return sd->s_stream_enabled; in v4l2_subdev_is_streaming()
2590 if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) in v4l2_subdev_is_streaming()
2591 return !!sd->enabled_pads; in v4l2_subdev_is_streaming()
2595 for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) { in v4l2_subdev_is_streaming()
2598 cfg = &state->stream_configs.configs[i]; in v4l2_subdev_is_streaming()
2600 if (cfg->enabled) in v4l2_subdev_is_streaming()
2611 sd->privacy_led = led_get(sd->dev, "privacy"); in v4l2_subdev_get_privacy_led()
2612 if (IS_ERR(sd->privacy_led) && PTR_ERR(sd->privacy_led) != -ENOENT) in v4l2_subdev_get_privacy_led()
2613 return dev_err_probe(sd->dev, PTR_ERR(sd->privacy_led), in v4l2_subdev_get_privacy_led()
2616 if (!IS_ERR_OR_NULL(sd->privacy_led)) { in v4l2_subdev_get_privacy_led()
2617 mutex_lock(&sd->privacy_led->led_access); in v4l2_subdev_get_privacy_led()
2618 led_sysfs_disable(sd->privacy_led); in v4l2_subdev_get_privacy_led()
2619 led_trigger_remove(sd->privacy_led); in v4l2_subdev_get_privacy_led()
2620 led_set_brightness(sd->privacy_led, 0); in v4l2_subdev_get_privacy_led()
2621 mutex_unlock(&sd->privacy_led->led_access); in v4l2_subdev_get_privacy_led()
2631 if (!IS_ERR_OR_NULL(sd->privacy_led)) { in v4l2_subdev_put_privacy_led()
2632 mutex_lock(&sd->privacy_led->led_access); in v4l2_subdev_put_privacy_led()
2633 led_sysfs_enable(sd->privacy_led); in v4l2_subdev_put_privacy_led()
2634 mutex_unlock(&sd->privacy_led->led_access); in v4l2_subdev_put_privacy_led()
2635 led_put(sd->privacy_led); in v4l2_subdev_put_privacy_led()