// SPDX-License-Identifier: GPL-2.0-only
/*
 * V4L2 sub-device
 *
 * Copyright (C) 2010 Nokia Corporation
 *
 * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *	    Sakari Ailus <sakari.ailus@iki.fi>
 */

#include <linux/export.h>
#include <linux/ioctl.h>
#include <linux/leds.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/version.h>
#include <linux/videodev2.h>

#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>

/**
 * struct v4l2_subdev_stream_config - Used for storing stream configuration.
 *
 * @pad: pad number
 * @stream: stream number
 * @enabled: has the stream been enabled with v4l2_subdev_enable_streams()
 * @fmt: &struct v4l2_mbus_framefmt
 * @crop: &struct v4l2_rect to be used for crop
 * @compose: &struct v4l2_rect to be used for compose
 * @interval: frame interval
 *
 * This structure stores configuration for a stream.
 */
struct v4l2_subdev_stream_config {
	u32 pad;
	u32 stream;
	bool enabled;

	struct v4l2_mbus_framefmt fmt;
	struct v4l2_rect crop;
	struct v4l2_rect compose;
	struct v4l2_fract interval;
};

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
/*
 * The Streams API is an experimental feature. To use the Streams API, set
 * 'v4l2_subdev_enable_streams_api' to 1 below.
 */

/* Static storage: defaults to false, i.e. the Streams API is opt-in. */
static bool v4l2_subdev_enable_streams_api;
#endif

/*
 * Maximum stream ID is 63 for now, as we use u64 bitmask to represent a set
 * of streams.
 *
 * Note that V4L2_FRAME_DESC_ENTRY_MAX is related: V4L2_FRAME_DESC_ENTRY_MAX
 * restricts the total number of streams in a pad, although the stream ID is
 * not restricted.
 */
#define V4L2_SUBDEV_MAX_STREAM_ID 63

#include "v4l2-subdev-priv.h"

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
/*
 * Allocate the per-file-handle "try" state for @fh on subdev @sd. A single
 * static lock class key is used so that every file-handle state lock shares
 * one lockdep class. Returns 0 on success or a negative error code.
 */
static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd)
{
	struct v4l2_subdev_state *state;
	static struct lock_class_key key;

	state = __v4l2_subdev_state_alloc(sd, "fh->state->lock", &key);
	if (IS_ERR(state))
		return PTR_ERR(state);

	fh->state = state;

	return 0;
}

/* Free the per-file-handle state allocated by subdev_fh_init(). */
static void subdev_fh_free(struct v4l2_subdev_fh *fh)
{
	__v4l2_subdev_state_free(fh->state);
	fh->state = NULL;
}

/*
 * open() handler for subdev device nodes: allocates and initializes a
 * v4l2_subdev_fh, pins the owning driver module when a media device is
 * present, and invokes the subdev's .open() internal op, unwinding fully
 * on any failure.
 */
static int subdev_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_subdev_fh *subdev_fh;
	int ret;

	subdev_fh = kzalloc_obj(*subdev_fh);
	if (subdev_fh == NULL)
		return -ENOMEM;

	ret = subdev_fh_init(subdev_fh, sd);
	if (ret) {
		kfree(subdev_fh);
		return ret;
	}

	v4l2_fh_init(&subdev_fh->vfh, vdev);
	v4l2_fh_add(&subdev_fh->vfh, file);

	if (sd->v4l2_dev->mdev && sd->entity.graph_obj.mdev->dev) {
		struct module *owner;

		owner = sd->entity.graph_obj.mdev->dev->driver->owner;
		if (!try_module_get(owner)) {
			ret = -EBUSY;
			goto err;
		}
		/* Remember the owner so subdev_close() can drop the ref. */
		subdev_fh->owner = owner;
	}

	if (sd->internal_ops && sd->internal_ops->open) {
		ret = sd->internal_ops->open(sd, subdev_fh);
		if (ret < 0)
			goto err;
	}

	return 0;

err:
	/* subdev_fh->owner may still be NULL here; module_put(NULL) is a no-op. */
	module_put(subdev_fh->owner);
	v4l2_fh_del(&subdev_fh->vfh, file);
	v4l2_fh_exit(&subdev_fh->vfh);
	subdev_fh_free(subdev_fh);
	kfree(subdev_fh);

	return ret;
}

/* release() handler: tears down everything subdev_open() set up, in reverse. */
static int subdev_close(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *vfh = file_to_v4l2_fh(file);
	struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);

	if (sd->internal_ops && sd->internal_ops->close)
		sd->internal_ops->close(sd, subdev_fh);
	module_put(subdev_fh->owner);
	v4l2_fh_del(vfh, file);
	v4l2_fh_exit(vfh);
	subdev_fh_free(subdev_fh);
	kfree(subdev_fh);

	return 0;
}
#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */
/* Without the subdev userspace API, subdev device nodes cannot be opened. */
static int subdev_open(struct file *file)
{
	return -ENODEV;
}

static int subdev_close(struct file *file)
{
	return -ENODEV;
}
#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */

/* Turn the optional privacy LED on at full brightness, if one is present. */
static void v4l2_subdev_enable_privacy_led(struct v4l2_subdev *sd)
{
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
	if (!IS_ERR_OR_NULL(sd->privacy_led))
		led_set_brightness(sd->privacy_led,
				   sd->privacy_led->max_brightness);
#endif
}

/* Turn the optional privacy LED off, if one is present. */
static void v4l2_subdev_disable_privacy_led(struct v4l2_subdev *sd)
{
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
	if (!IS_ERR_OR_NULL(sd->privacy_led))
		led_set_brightness(sd->privacy_led, 0);
#endif
}

/* Validate the 'which' field of a userspace request: TRY or ACTIVE only. */
static inline int check_which(u32 which)
{
	if (which != V4L2_SUBDEV_FORMAT_TRY &&
	    which != V4L2_SUBDEV_FORMAT_ACTIVE)
		return -EINVAL;

	return 0;
}

/* Validate a pad index against the entity's pad count. */
static inline int check_pad(struct v4l2_subdev *sd, u32 pad)
{
#if defined(CONFIG_MEDIA_CONTROLLER)
	if (sd->entity.num_pads) {
		if (pad >= sd->entity.num_pads)
			return -EINVAL;
		return 0;
	}
#endif
	/* allow pad 0 on subdevices not registered as media entities */
	if (pad > 0)
		return -EINVAL;
	return 0;
}

/*
 * Validate the (which, pad, stream) triplet against @state. For streams-aware
 * subdevs the (pad, stream) pair must have a format entry in @state; for
 * non-streams subdevs only stream 0 is accepted, and a TRY request requires a
 * state with per-pad configuration.
 */
static int check_state(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
		       u32 which, u32 pad, u32 stream)
{
	if (sd->flags & V4L2_SUBDEV_FL_STREAMS) {
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
		if (!v4l2_subdev_state_get_format(state, pad, stream))
			return -EINVAL;
		return 0;
#else
		return -EINVAL;
#endif
	}

	if (stream != 0)
		return -EINVAL;

	if (which == V4L2_SUBDEV_FORMAT_TRY && (!state || !state->pads))
		return -EINVAL;

	return 0;
}

/* Validate a v4l2_subdev_format before it is forwarded to the driver. */
static inline int check_format(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *state,
			       struct v4l2_subdev_format *format)
{
	if (!format)
		return -EINVAL;

	return check_which(format->which) ? : check_pad(sd, format->pad) ? :
	       check_state(sd, state, format->which, format->pad, format->stream);
}

/* Validating wrapper around the driver's .get_fmt() pad op. */
static int call_get_fmt(struct v4l2_subdev *sd,
			struct v4l2_subdev_state *state,
			struct v4l2_subdev_format *format)
{
	return check_format(sd, state, format) ? :
	       sd->ops->pad->get_fmt(sd, state, format);
}

/* Validating wrapper around the driver's .set_fmt() pad op. */
static int call_set_fmt(struct v4l2_subdev *sd,
			struct v4l2_subdev_state *state,
			struct v4l2_subdev_format *format)
{
	return check_format(sd, state, format) ? :
	       sd->ops->pad->set_fmt(sd, state, format);
}

/* Validating wrapper around the driver's .enum_mbus_code() pad op. */
static int call_enum_mbus_code(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *state,
			       struct v4l2_subdev_mbus_code_enum *code)
{
	if (!code)
		return -EINVAL;

	return check_which(code->which) ? : check_pad(sd, code->pad) ? :
	       check_state(sd, state, code->which, code->pad, code->stream) ? :
	       sd->ops->pad->enum_mbus_code(sd, state, code);
}

/* Validating wrapper around the driver's .enum_frame_size() pad op. */
static int call_enum_frame_size(struct v4l2_subdev *sd,
				struct v4l2_subdev_state *state,
				struct v4l2_subdev_frame_size_enum *fse)
{
	if (!fse)
		return -EINVAL;

	return check_which(fse->which) ? : check_pad(sd, fse->pad) ? :
	       check_state(sd, state, fse->which, fse->pad, fse->stream) ? :
	       sd->ops->pad->enum_frame_size(sd, state, fse);
}

/* Validating wrapper around the driver's .enum_frame_interval() pad op. */
static int call_enum_frame_interval(struct v4l2_subdev *sd,
				    struct v4l2_subdev_state *state,
				    struct v4l2_subdev_frame_interval_enum *fie)
{
	if (!fie)
		return -EINVAL;

	return check_which(fie->which) ? : check_pad(sd, fie->pad) ? :
	       check_state(sd, state, fie->which, fie->pad, fie->stream) ? :
	       sd->ops->pad->enum_frame_interval(sd, state, fie);
}

/* Validate a v4l2_subdev_selection before it is forwarded to the driver. */
static inline int check_selection(struct v4l2_subdev *sd,
				  struct v4l2_subdev_state *state,
				  struct v4l2_subdev_selection *sel)
{
	if (!sel)
		return -EINVAL;

	return check_which(sel->which) ? : check_pad(sd, sel->pad) ? :
	       check_state(sd, state, sel->which, sel->pad, sel->stream);
}

/* Validating wrapper around the driver's .get_selection() pad op. */
static int call_get_selection(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *state,
			      struct v4l2_subdev_selection *sel)
{
	return check_selection(sd, state, sel) ? :
	       sd->ops->pad->get_selection(sd, state, sel);
}

/* Validating wrapper around the driver's .set_selection() pad op. */
static int call_set_selection(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *state,
			      struct v4l2_subdev_selection *sel)
{
	return check_selection(sd, state, sel) ? :
	       sd->ops->pad->set_selection(sd, state, sel);
}

/* Validate a v4l2_subdev_frame_interval before forwarding it to the driver. */
static inline int check_frame_interval(struct v4l2_subdev *sd,
				       struct v4l2_subdev_state *state,
				       struct v4l2_subdev_frame_interval *fi)
{
	if (!fi)
		return -EINVAL;

	return check_which(fi->which) ? : check_pad(sd, fi->pad) ? :
	       check_state(sd, state, fi->which, fi->pad, fi->stream);
}

/* Validating wrapper around the driver's .get_frame_interval() pad op. */
static int call_get_frame_interval(struct v4l2_subdev *sd,
				   struct v4l2_subdev_state *state,
				   struct v4l2_subdev_frame_interval *fi)
{
	return check_frame_interval(sd, state, fi) ? :
	       sd->ops->pad->get_frame_interval(sd, state, fi);
}

/* Validating wrapper around the driver's .set_frame_interval() pad op. */
static int call_set_frame_interval(struct v4l2_subdev *sd,
				   struct v4l2_subdev_state *state,
				   struct v4l2_subdev_frame_interval *fi)
{
	return check_frame_interval(sd, state, fi) ? :
	       sd->ops->pad->set_frame_interval(sd, state, fi);
}

/*
 * Wrapper around the driver's .get_frame_desc() pad op: only valid on source
 * pads (with the media controller), zeroes the descriptor before the call and
 * logs the returned entries for debugging.
 */
static int call_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
			       struct v4l2_mbus_frame_desc *fd)
{
	unsigned int i;
	int ret;

#if defined(CONFIG_MEDIA_CONTROLLER)
	if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
		return -EOPNOTSUPP;
#endif

	memset(fd, 0, sizeof(*fd));

	ret = sd->ops->pad->get_frame_desc(sd, pad, fd);
	if (ret)
		return ret;

	dev_dbg(sd->dev, "Frame descriptor on pad %u, type %s\n", pad,
		fd->type == V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL ? "parallel" :
		fd->type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2 ? "CSI-2" :
		"unknown");

	for (i = 0; i < fd->num_entries; i++) {
		struct v4l2_mbus_frame_desc_entry *entry = &fd->entry[i];
		char buf[20] = "";

		/* Warn (rather than fail) if the VC/DT annotation truncated. */
		if (fd->type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2)
			WARN_ON(snprintf(buf, sizeof(buf),
					 ", vc %u, dt 0x%02x",
					 entry->bus.csi2.vc,
					 entry->bus.csi2.dt) >= sizeof(buf));

		dev_dbg(sd->dev,
			"\tstream %u, code 0x%04x, length %u, flags 0x%04x%s\n",
			entry->stream, entry->pixelcode, entry->length,
			entry->flags, buf);
	}

	return 0;
}

/* Validate a v4l2_subdev_edid: a non-zero block count requires a buffer. */
static inline int check_edid(struct v4l2_subdev *sd,
			     struct v4l2_subdev_edid *edid)
{
	if (!edid)
		return -EINVAL;

	if (edid->blocks && edid->edid == NULL)
		return -EINVAL;

	return check_pad(sd, edid->pad);
}

/* Validating wrapper around the driver's .get_edid() pad op. */
static int call_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
{
	return check_edid(sd, edid) ? : sd->ops->pad->get_edid(sd, edid);
}

/* Validating wrapper around the driver's .set_edid() pad op. */
static int call_set_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
{
	return check_edid(sd, edid) ? : sd->ops->pad->set_edid(sd, edid);
}

/* Validating wrapper around the driver's .s_dv_timings() pad op. */
static int call_s_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
			     struct v4l2_dv_timings *timings)
{
	if (!timings)
		return -EINVAL;

	return check_pad(sd, pad) ? :
	       sd->ops->pad->s_dv_timings(sd, pad, timings);
}

/* Validating wrapper around the driver's .g_dv_timings() pad op. */
static int call_g_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
			     struct v4l2_dv_timings *timings)
{
	if (!timings)
		return -EINVAL;

	return check_pad(sd, pad) ? :
	       sd->ops->pad->g_dv_timings(sd, pad, timings);
}

/* Validating wrapper around the driver's .query_dv_timings() pad op. */
static int call_query_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
				 struct v4l2_dv_timings *timings)
{
	if (!timings)
		return -EINVAL;

	return check_pad(sd, pad) ? :
	       sd->ops->pad->query_dv_timings(sd, pad, timings);
}

/* Validating wrapper around the driver's .dv_timings_cap() pad op. */
static int call_dv_timings_cap(struct v4l2_subdev *sd,
			       struct v4l2_dv_timings_cap *cap)
{
	if (!cap)
		return -EINVAL;

	return check_pad(sd, cap->pad) ? :
	       sd->ops->pad->dv_timings_cap(sd, cap);
}

/* Validating wrapper around the driver's .enum_dv_timings() pad op. */
static int call_enum_dv_timings(struct v4l2_subdev *sd,
				struct v4l2_enum_dv_timings *dvt)
{
	if (!dvt)
		return -EINVAL;

	return check_pad(sd, dvt->pad) ? :
	       sd->ops->pad->enum_dv_timings(sd, dvt);
}

/* Validating wrapper around the driver's .get_mbus_config() pad op. */
static int call_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad,
				struct v4l2_mbus_config *config)
{
	memset(config, 0, sizeof(*config));

	return check_pad(sd, pad) ? :
	       sd->ops->pad->get_mbus_config(sd, pad, config);
}

/*
 * Wrapper around the driver's .s_stream() video op: rejects redundant
 * start/stop calls, does not propagate stop failures, and keeps the privacy
 * LED in sync with the streaming state.
 */
static int call_s_stream(struct v4l2_subdev *sd, int enable)
{
	int ret;

	/*
	 * The .s_stream() operation must never be called to start or stop an
	 * already started or stopped subdev. Catch offenders but don't return
	 * an error yet to avoid regressions.
	 */
	if (WARN_ON(sd->s_stream_enabled == !!enable))
		return 0;

	ret = sd->ops->video->s_stream(sd, enable);

	/* A failure to stop streaming is warned about but not reported. */
	if (!enable && ret < 0) {
		dev_warn(sd->dev, "disabling streaming failed (%d)\n", ret);
		ret = 0;
	}

	if (!ret) {
		sd->s_stream_enabled = enable;

		if (enable)
			v4l2_subdev_enable_privacy_led(sd);
		else
			v4l2_subdev_disable_privacy_led(sd);
	}

	return ret;
}

#ifdef CONFIG_MEDIA_CONTROLLER
/*
 * Create state-management wrapper for pad ops dealing with subdev state. The
 * wrapper handles the case where the caller does not provide the called
 * subdev's state. This should be removed when all the callers are fixed.
 */
#define DEFINE_STATE_WRAPPER(f, arg_type)                                  \
	static int call_##f##_state(struct v4l2_subdev *sd,                \
				    struct v4l2_subdev_state *_state,      \
				    arg_type *arg)                         \
	{                                                                  \
		struct v4l2_subdev_state *state = _state;                  \
		int ret;                                                   \
		if (!_state)                                               \
			state = v4l2_subdev_lock_and_get_active_state(sd); \
		ret = call_##f(sd, state, arg);                            \
		if (!_state && state)                                      \
			v4l2_subdev_unlock_state(state);                   \
		return ret;                                                \
	}

#else /* CONFIG_MEDIA_CONTROLLER */

/* Without the media controller there is no active state to fall back to. */
#define DEFINE_STATE_WRAPPER(f, arg_type)                            \
	static int call_##f##_state(struct v4l2_subdev *sd,          \
				    struct v4l2_subdev_state *state, \
				    arg_type *arg)                   \
	{                                                            \
		return call_##f(sd, state, arg);                     \
	}

#endif /* CONFIG_MEDIA_CONTROLLER */

DEFINE_STATE_WRAPPER(get_fmt, struct v4l2_subdev_format);
DEFINE_STATE_WRAPPER(set_fmt, struct v4l2_subdev_format);
DEFINE_STATE_WRAPPER(enum_mbus_code, struct v4l2_subdev_mbus_code_enum);
DEFINE_STATE_WRAPPER(enum_frame_size, struct v4l2_subdev_frame_size_enum);
DEFINE_STATE_WRAPPER(enum_frame_interval, struct v4l2_subdev_frame_interval_enum);
DEFINE_STATE_WRAPPER(get_selection, struct v4l2_subdev_selection);
DEFINE_STATE_WRAPPER(set_selection, struct v4l2_subdev_selection);

/*
 * Default pad op wrappers installed behind v4l2_subdev_call(): each entry
 * validates arguments (and, for the *_state variants, supplies a missing
 * state) before calling into the driver's own pad ops.
 */
static const struct v4l2_subdev_pad_ops v4l2_subdev_call_pad_wrappers = {
	.get_fmt = call_get_fmt_state,
	.set_fmt = call_set_fmt_state,
	.enum_mbus_code = call_enum_mbus_code_state,
	.enum_frame_size = call_enum_frame_size_state,
	.enum_frame_interval = call_enum_frame_interval_state,
	.get_selection = call_get_selection_state,
	.set_selection = call_set_selection_state,
	.get_frame_interval = call_get_frame_interval,
	.set_frame_interval = call_set_frame_interval,
	.get_edid = call_get_edid,
	.set_edid = call_set_edid,
	.s_dv_timings = call_s_dv_timings,
	.g_dv_timings = call_g_dv_timings,
	.query_dv_timings = call_query_dv_timings,
	.dv_timings_cap = call_dv_timings_cap,
	.enum_dv_timings = call_enum_dv_timings,
	.get_frame_desc = call_get_frame_desc,
	.get_mbus_config = call_get_mbus_config,
};

static const struct v4l2_subdev_video_ops v4l2_subdev_call_video_wrappers = {
	.s_stream = call_s_stream,
};

const struct v4l2_subdev_ops v4l2_subdev_call_wrappers = {
	.pad = &v4l2_subdev_call_pad_wrappers,
	.video = &v4l2_subdev_call_video_wrappers,
};
EXPORT_SYMBOL(v4l2_subdev_call_wrappers);

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)

/*
 * Return the state an ioctl operates on: the file handle's TRY state, the
 * subdev's (still unlocked) active state, or NULL for ioctls that take no
 * state. May modify @arg: fi->which is forced to ACTIVE for clients that did
 * not set V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH.
 */
static struct v4l2_subdev_state *
subdev_ioctl_get_state(struct v4l2_subdev *sd, struct v4l2_subdev_fh *subdev_fh,
		       unsigned int cmd, void *arg)
{
	u32 which;

	switch (cmd) {
	default:
		return NULL;
	case VIDIOC_SUBDEV_G_FMT:
	case VIDIOC_SUBDEV_S_FMT:
		which = ((struct v4l2_subdev_format *)arg)->which;
		break;
	case VIDIOC_SUBDEV_G_CROP:
	case VIDIOC_SUBDEV_S_CROP:
		which = ((struct v4l2_subdev_crop *)arg)->which;
		break;
	case VIDIOC_SUBDEV_ENUM_MBUS_CODE:
		which = ((struct v4l2_subdev_mbus_code_enum *)arg)->which;
		break;
	case VIDIOC_SUBDEV_ENUM_FRAME_SIZE:
		which = ((struct v4l2_subdev_frame_size_enum *)arg)->which;
		break;
	case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL:
		which = ((struct v4l2_subdev_frame_interval_enum *)arg)->which;
		break;
	case VIDIOC_SUBDEV_G_SELECTION:
	case VIDIOC_SUBDEV_S_SELECTION:
		which = ((struct v4l2_subdev_selection *)arg)->which;
		break;
	case VIDIOC_SUBDEV_G_FRAME_INTERVAL:
	case VIDIOC_SUBDEV_S_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval *fi = arg;

		/* Clients predating 'which' in frame intervals get ACTIVE. */
		if (!(subdev_fh->client_caps &
		      V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH))
			fi->which = V4L2_SUBDEV_FORMAT_ACTIVE;

		which = fi->which;
		break;
	}
	case VIDIOC_SUBDEV_G_ROUTING:
	case VIDIOC_SUBDEV_S_ROUTING:
		which = ((struct v4l2_subdev_routing *)arg)->which;
		break;
	}

	return which == V4L2_SUBDEV_FORMAT_TRY ?
	       subdev_fh->state :
	       v4l2_subdev_get_unlocked_active_state(sd);
}

/*
 * Main ioctl dispatcher for subdev device nodes. Called by
 * subdev_do_ioctl_lock() with @state (when non-NULL) already locked.
 */
static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
			    struct v4l2_subdev_state *state)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *vfh = file_to_v4l2_fh(file);
	struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
	bool ro_subdev = test_bit(V4L2_FL_SUBDEV_RO_DEVNODE, &vdev->flags);
	bool streams_subdev = sd->flags & V4L2_SUBDEV_FL_STREAMS;
	bool client_supports_streams = subdev_fh->client_caps &
				       V4L2_SUBDEV_CLIENT_CAP_STREAMS;
	int rval;

	/*
	 * If the streams API is not enabled, remove V4L2_SUBDEV_CAP_STREAMS.
	 * Remove this when the API is no longer experimental.
	 */
	if (!v4l2_subdev_enable_streams_api)
		streams_subdev = false;

	switch (cmd) {
	case VIDIOC_SUBDEV_QUERYCAP: {
		struct v4l2_subdev_capability *cap = arg;

		memset(cap->reserved, 0, sizeof(cap->reserved));
		cap->version = LINUX_VERSION_CODE;
		cap->capabilities =
			(ro_subdev ? V4L2_SUBDEV_CAP_RO_SUBDEV : 0) |
			(streams_subdev ? V4L2_SUBDEV_CAP_STREAMS : 0);

		return 0;
	}

	case VIDIOC_QUERYCTRL:
		/*
		 * TODO: this really should be folded into v4l2_queryctrl (this
		 * currently returns -EINVAL for NULL control handlers).
		 * However, v4l2_queryctrl() is still called directly by
		 * drivers as well and until that has been addressed I believe
		 * it is safer to do the check here. The same is true for the
		 * other control ioctls below.
		 */
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_queryctrl(vfh->ctrl_handler, arg);

	case VIDIOC_QUERY_EXT_CTRL:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_query_ext_ctrl(vfh->ctrl_handler, arg);

	case VIDIOC_QUERYMENU:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_querymenu(vfh->ctrl_handler, arg);

	case VIDIOC_G_CTRL:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_g_ctrl(vfh->ctrl_handler, arg);

	case VIDIOC_S_CTRL:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_s_ctrl(vfh, vfh->ctrl_handler, arg);

	case VIDIOC_G_EXT_CTRLS:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_g_ext_ctrls(vfh->ctrl_handler,
					vdev, sd->v4l2_dev->mdev, arg);

	case VIDIOC_S_EXT_CTRLS:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler,
					vdev, sd->v4l2_dev->mdev, arg);

	case VIDIOC_TRY_EXT_CTRLS:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_try_ext_ctrls(vfh->ctrl_handler,
					  vdev, sd->v4l2_dev->mdev, arg);

	case VIDIOC_DQEVENT:
		if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
			return -ENOIOCTLCMD;

		return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);

	case VIDIOC_SUBSCRIBE_EVENT:
		/* Prefer the driver's own handler, fall back to controls. */
		if (v4l2_subdev_has_op(sd, core, subscribe_event))
			return v4l2_subdev_call(sd, core, subscribe_event,
						vfh, arg);

		if ((sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS) &&
		    vfh->ctrl_handler)
			return v4l2_ctrl_subdev_subscribe_event(sd, vfh, arg);

		return -ENOIOCTLCMD;

	case VIDIOC_UNSUBSCRIBE_EVENT:
		if (v4l2_subdev_has_op(sd, core, unsubscribe_event))
			return v4l2_subdev_call(sd, core, unsubscribe_event,
						vfh, arg);

		if (sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)
			return v4l2_event_subdev_unsubscribe(sd, vfh, arg);

		return -ENOIOCTLCMD;

#ifdef CONFIG_VIDEO_ADV_DEBUG
	case VIDIOC_DBG_G_REGISTER:
	{
		struct v4l2_dbg_register *p = arg;

		/* Raw register access is restricted to privileged users. */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		return v4l2_subdev_call(sd, core, g_register, p);
	}
	case VIDIOC_DBG_S_REGISTER:
	{
		struct v4l2_dbg_register *p = arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		return v4l2_subdev_call(sd, core, s_register, p);
	}
	case VIDIOC_DBG_G_CHIP_INFO:
	{
		struct v4l2_dbg_chip_info *p = arg;

		if (p->match.type != V4L2_CHIP_MATCH_SUBDEV || p->match.addr)
			return -EINVAL;
		if (sd->ops->core && sd->ops->core->s_register)
			p->flags |= V4L2_CHIP_FL_WRITABLE;
		if (sd->ops->core && sd->ops->core->g_register)
			p->flags |= V4L2_CHIP_FL_READABLE;
		strscpy(p->name, sd->name, sizeof(p->name));
		return 0;
	}
#endif

	case VIDIOC_LOG_STATUS: {
		int ret;

		pr_info("%s: ================= START STATUS =================\n",
			sd->name);
		ret = v4l2_subdev_call(sd, core, log_status);
		pr_info("%s: ================== END STATUS ==================\n",
			sd->name);
		return ret;
	}

	case VIDIOC_SUBDEV_G_FMT: {
		struct v4l2_subdev_format *format = arg;

		if (!client_supports_streams)
			format->stream = 0;

		memset(format->reserved, 0, sizeof(format->reserved));
		memset(format->format.reserved, 0, sizeof(format->format.reserved));
		return v4l2_subdev_call(sd, pad, get_fmt, state, format);
	}

	case VIDIOC_SUBDEV_S_FMT: {
		struct v4l2_subdev_format *format = arg;

		/* Read-only device nodes may only modify the TRY state. */
		if (format->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		if (!client_supports_streams)
			format->stream = 0;

		memset(format->reserved, 0, sizeof(format->reserved));
		memset(format->format.reserved, 0, sizeof(format->format.reserved));
		return v4l2_subdev_call(sd, pad, set_fmt, state, format);
	}

	case VIDIOC_SUBDEV_G_CROP: {
		struct v4l2_subdev_crop *crop = arg;
		struct v4l2_subdev_selection sel;

		if (!client_supports_streams)
			crop->stream = 0;

		/* The legacy crop API is implemented on top of selections. */
		memset(crop->reserved, 0, sizeof(crop->reserved));
		memset(&sel, 0, sizeof(sel));
		sel.which = crop->which;
		sel.pad = crop->pad;
		sel.stream = crop->stream;
		sel.target = V4L2_SEL_TGT_CROP;

		rval = v4l2_subdev_call(
			sd, pad, get_selection, state, &sel);

		crop->rect = sel.r;

		return rval;
	}

	case VIDIOC_SUBDEV_S_CROP: {
		struct v4l2_subdev_crop *crop = arg;
		struct v4l2_subdev_selection sel;

		if (crop->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		if (!client_supports_streams)
			crop->stream = 0;

		memset(crop->reserved, 0, sizeof(crop->reserved));
		memset(&sel, 0, sizeof(sel));
		sel.which = crop->which;
		sel.pad = crop->pad;
		sel.stream = crop->stream;
		sel.target = V4L2_SEL_TGT_CROP;
		sel.r = crop->rect;

		rval = v4l2_subdev_call(
			sd, pad, set_selection, state, &sel);

		crop->rect = sel.r;

		return rval;
	}

	case VIDIOC_SUBDEV_ENUM_MBUS_CODE: {
		struct v4l2_subdev_mbus_code_enum *code = arg;

		if (!client_supports_streams)
			code->stream = 0;

		memset(code->reserved, 0, sizeof(code->reserved));
		return v4l2_subdev_call(sd, pad, enum_mbus_code, state,
					code);
	}

	case VIDIOC_SUBDEV_ENUM_FRAME_SIZE: {
		struct v4l2_subdev_frame_size_enum *fse = arg;

		if (!client_supports_streams)
			fse->stream = 0;

		memset(fse->reserved, 0, sizeof(fse->reserved));
		return v4l2_subdev_call(sd, pad, enum_frame_size, state,
					fse);
	}

	case VIDIOC_SUBDEV_G_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval *fi = arg;

		if (!client_supports_streams)
			fi->stream = 0;

		memset(fi->reserved, 0, sizeof(fi->reserved));
		return v4l2_subdev_call(sd, pad, get_frame_interval, state, fi);
	}

	case VIDIOC_SUBDEV_S_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval *fi = arg;

		if (!client_supports_streams)
			fi->stream = 0;

		if (fi->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		memset(fi->reserved, 0, sizeof(fi->reserved));
		return v4l2_subdev_call(sd, pad, set_frame_interval, state, fi);
	}

	case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval_enum *fie = arg;

		if (!client_supports_streams)
			fie->stream = 0;

		memset(fie->reserved, 0, sizeof(fie->reserved));
		return v4l2_subdev_call(sd, pad, enum_frame_interval, state,
					fie);
	}

	case VIDIOC_SUBDEV_G_SELECTION: {
		struct v4l2_subdev_selection *sel = arg;

		if (!client_supports_streams)
			sel->stream = 0;

		memset(sel->reserved, 0, sizeof(sel->reserved));
		return v4l2_subdev_call(
			sd, pad, get_selection, state, sel);
	}

	case VIDIOC_SUBDEV_S_SELECTION: {
		struct v4l2_subdev_selection *sel = arg;

		if (sel->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		if (!client_supports_streams)
			sel->stream = 0;

		memset(sel->reserved, 0, sizeof(sel->reserved));
		return v4l2_subdev_call(
			sd, pad, set_selection, state, sel);
	}

	case VIDIOC_G_EDID: {
		struct v4l2_subdev_edid *edid = arg;

		return v4l2_subdev_call(sd, pad, get_edid, edid);
	}

	case VIDIOC_S_EDID: {
		struct v4l2_subdev_edid *edid = arg;

		return v4l2_subdev_call(sd, pad, set_edid, edid);
	}

	case VIDIOC_SUBDEV_DV_TIMINGS_CAP: {
		struct v4l2_dv_timings_cap *cap = arg;

		return v4l2_subdev_call(sd, pad, dv_timings_cap, cap);
	}

	case VIDIOC_SUBDEV_ENUM_DV_TIMINGS: {
		struct v4l2_enum_dv_timings *dvt = arg;

		return v4l2_subdev_call(sd, pad, enum_dv_timings, dvt);
	}

	case VIDIOC_SUBDEV_QUERY_DV_TIMINGS:
		return v4l2_subdev_call(sd, pad, query_dv_timings, 0, arg);

	case VIDIOC_SUBDEV_G_DV_TIMINGS:
		return v4l2_subdev_call(sd, pad, g_dv_timings, 0, arg);

	case VIDIOC_SUBDEV_S_DV_TIMINGS:
		if (ro_subdev)
			return -EPERM;

		return v4l2_subdev_call(sd, pad, s_dv_timings, 0, arg);

	case VIDIOC_SUBDEV_G_STD:
		return v4l2_subdev_call(sd, video, g_std, arg);

	case VIDIOC_SUBDEV_S_STD: {
		v4l2_std_id *std = arg;

		if (ro_subdev)
			return -EPERM;

		return v4l2_subdev_call(sd, video, s_std, *std);
	}

	case VIDIOC_SUBDEV_ENUMSTD: {
		struct v4l2_standard *p = arg;
		v4l2_std_id id;

		if (v4l2_subdev_call(sd, video, g_tvnorms, &id))
			return -EINVAL;

		return v4l_video_std_enumstd(p, id);
	}

	case VIDIOC_SUBDEV_QUERYSTD:
		return v4l2_subdev_call(sd, video, querystd, arg);

	case VIDIOC_SUBDEV_G_ROUTING: {
		struct v4l2_subdev_routing *routing = arg;
		struct v4l2_subdev_krouting *krouting;

		if (!v4l2_subdev_enable_streams_api)
			return -ENOIOCTLCMD;

		if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
			return -ENOIOCTLCMD;

		memset(routing->reserved, 0, sizeof(routing->reserved));

		krouting = &state->routing;

		/* Copy at most len_routes entries into the user buffer. */
		memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes,
		       krouting->routes,
		       min(krouting->num_routes, routing->len_routes) *
		       sizeof(*krouting->routes));
		routing->num_routes = krouting->num_routes;

		return 0;
	}

	case VIDIOC_SUBDEV_S_ROUTING: {
		struct v4l2_subdev_routing *routing = arg;
		struct v4l2_subdev_route *routes =
			(struct v4l2_subdev_route *)(uintptr_t)routing->routes;
		struct v4l2_subdev_krouting krouting = {};
		unsigned int num_active_routes = 0;
		unsigned int i;

		if (!v4l2_subdev_enable_streams_api)
			return -ENOIOCTLCMD;

		if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
			return -ENOIOCTLCMD;

		if (routing->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		if (routing->num_routes > routing->len_routes)
			return -EINVAL;

		memset(routing->reserved, 0, sizeof(routing->reserved));

		/* Validate every route before handing them to the driver. */
		for (i = 0; i < routing->num_routes; ++i) {
			const struct v4l2_subdev_route *route = &routes[i];
			const struct media_pad *pads = sd->entity.pads;

			if (route->sink_stream > V4L2_SUBDEV_MAX_STREAM_ID ||
			    route->source_stream > V4L2_SUBDEV_MAX_STREAM_ID)
				return -EINVAL;

			if (route->sink_pad >= sd->entity.num_pads)
				return -EINVAL;

			if (!(pads[route->sink_pad].flags &
			      MEDIA_PAD_FL_SINK))
				return -EINVAL;

			if (route->source_pad >= sd->entity.num_pads)
				return -EINVAL;

			if (!(pads[route->source_pad].flags &
			      MEDIA_PAD_FL_SOURCE))
				return -EINVAL;

			if (route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)
				num_active_routes++;
		}

		/*
		 * Drivers that implement routing need to report a frame
		 * descriptor accordingly, with up to one entry per route. Until
		 * the frame descriptors entries get allocated dynamically,
		 * limit the number of active routes to
		 * V4L2_FRAME_DESC_ENTRY_MAX.
		 */
		if (num_active_routes > V4L2_FRAME_DESC_ENTRY_MAX)
			return -E2BIG;

		/*
		 * If the driver doesn't support setting routing, just return
		 * the routing table.
		 */
		if (!v4l2_subdev_has_op(sd, pad, set_routing)) {
			memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes,
			       state->routing.routes,
			       min(state->routing.num_routes, routing->len_routes) *
			       sizeof(*state->routing.routes));
			routing->num_routes = state->routing.num_routes;

			return 0;
		}

		krouting.num_routes = routing->num_routes;
		krouting.len_routes = routing->len_routes;
		krouting.routes = routes;

		rval = v4l2_subdev_call(sd, pad, set_routing, state,
					routing->which, &krouting);
		if (rval < 0)
			return rval;

		/* Return the routing table as adjusted by the driver. */
		memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes,
		       state->routing.routes,
		       min(state->routing.num_routes, routing->len_routes) *
		       sizeof(*state->routing.routes));
		routing->num_routes = state->routing.num_routes;

		return 0;
	}

	case VIDIOC_SUBDEV_G_CLIENT_CAP: {
		struct v4l2_subdev_client_capability *client_cap = arg;

		client_cap->capabilities = subdev_fh->client_caps;

		return 0;
	}

	case VIDIOC_SUBDEV_S_CLIENT_CAP: {
		struct v4l2_subdev_client_capability *client_cap = arg;

		/*
		 * Clear V4L2_SUBDEV_CLIENT_CAP_STREAMS if streams API is not
		 * enabled. Remove this when streams API is no longer
		 * experimental.
		 */
		if (!v4l2_subdev_enable_streams_api)
			client_cap->capabilities &= ~V4L2_SUBDEV_CLIENT_CAP_STREAMS;

		/* Filter out unsupported capabilities */
		client_cap->capabilities &= (V4L2_SUBDEV_CLIENT_CAP_STREAMS |
					     V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH);

		subdev_fh->client_caps = client_cap->capabilities;

		return 0;
	}

	default:
		/* Unknown commands are forwarded to the driver's core op. */
		return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
	}

	return 0;
}

/*
 * Take the video device lock (if any) and the relevant subdev state lock,
 * then dispatch to subdev_do_ioctl(). Fails with -ENODEV once the device
 * node has been unregistered.
 */
static long subdev_do_ioctl_lock(struct file *file, unsigned int cmd, void *arg)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->lock;
	long ret = -ENODEV;

	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;

	if (video_is_registered(vdev)) {
		struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
		struct v4l2_fh *vfh = file_to_v4l2_fh(file);
		struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
		struct v4l2_subdev_state *state;

		state = subdev_ioctl_get_state(sd, subdev_fh, cmd, arg);

		if (state)
			v4l2_subdev_lock_state(state);

		ret = subdev_do_ioctl(file, cmd, arg, state);

		if (state)
			v4l2_subdev_unlock_state(state);
	}

	if (lock)
		mutex_unlock(lock);
	return ret;
}

/* unlocked_ioctl handler: copies the argument and calls the locked path. */
static long subdev_ioctl(struct file *file, unsigned int cmd,
			 unsigned long arg)
{
	return video_usercopy(file, cmd, arg, subdev_do_ioctl_lock);
}

#ifdef CONFIG_COMPAT
/* 32-bit compat ioctls are delegated entirely to the driver's core op. */
static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);

	return v4l2_subdev_call(sd, core, compat_ioctl32, cmd, arg);
}
#endif

#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */
static long subdev_ioctl(struct file *file, unsigned int cmd,
			 unsigned long arg)
{
	return -ENODEV;
}

#ifdef CONFIG_COMPAT
static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	return -ENODEV;
}
#endif
#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */

/* poll() handler: only event notification is supported on subdev nodes. */
static __poll_t subdev_poll(struct file *file, poll_table *wait)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *fh = file_to_v4l2_fh(file);

	if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
		return EPOLLERR;

	poll_wait(file, &fh->wait, wait);

	if (v4l2_event_pending(fh))
		return EPOLLPRI;

	return 0;
}

/* File operations installed on every subdev device node. */
const struct v4l2_file_operations v4l2_subdev_fops = {
	.owner = THIS_MODULE,
	.open = subdev_open,
	.unlocked_ioctl = subdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl32 = subdev_compat_ioctl32,
#endif
	.release = subdev_close,
	.poll = subdev_poll,
};

#ifdef CONFIG_MEDIA_CONTROLLER

/*
 * .get_fwnode_pad helper for subdevs whose pad numbers map 1:1 to firmware
 * port numbers: returns the endpoint's port if it belongs to this subdev.
 */
int v4l2_subdev_get_fwnode_pad_1_to_1(struct media_entity *entity,
				      struct fwnode_endpoint *endpoint)
{
	struct fwnode_handle *fwnode;
	struct v4l2_subdev *sd;

	if (!is_media_entity_v4l2_subdev(entity))
		return -EINVAL;

	sd = media_entity_to_v4l2_subdev(entity);

	/*
	 * NOTE(review): the reference is dropped before the comparison below;
	 * presumably the endpoint's parent keeps the fwnode alive for the
	 * duration of the call — confirm against the fwnode graph lifetime
	 * rules.
	 */
	fwnode = fwnode_graph_get_port_parent(endpoint->local_fwnode);
	fwnode_handle_put(fwnode);

	if (device_match_fwnode(sd->dev, fwnode))
		return endpoint->port;

	return -ENXIO;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_get_fwnode_pad_1_to_1);

int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
				      struct media_link *link,
				      struct v4l2_subdev_format *source_fmt,
				      struct v4l2_subdev_format *sink_fmt)
{
	bool pass = true;

	/* The width, height and code must match.
*/ 1272 if (source_fmt->format.width != sink_fmt->format.width) { 1273 dev_dbg(sd->entity.graph_obj.mdev->dev, 1274 "%s: width does not match (source %u, sink %u)\n", 1275 __func__, 1276 source_fmt->format.width, sink_fmt->format.width); 1277 pass = false; 1278 } 1279 1280 if (source_fmt->format.height != sink_fmt->format.height) { 1281 dev_dbg(sd->entity.graph_obj.mdev->dev, 1282 "%s: height does not match (source %u, sink %u)\n", 1283 __func__, 1284 source_fmt->format.height, sink_fmt->format.height); 1285 pass = false; 1286 } 1287 1288 if (source_fmt->format.code != sink_fmt->format.code) { 1289 dev_dbg(sd->entity.graph_obj.mdev->dev, 1290 "%s: media bus code does not match (source 0x%8.8x, sink 0x%8.8x)\n", 1291 __func__, 1292 source_fmt->format.code, sink_fmt->format.code); 1293 pass = false; 1294 } 1295 1296 /* The field order must match, or the sink field order must be NONE 1297 * to support interlaced hardware connected to bridges that support 1298 * progressive formats only. 1299 */ 1300 if (source_fmt->format.field != sink_fmt->format.field && 1301 sink_fmt->format.field != V4L2_FIELD_NONE) { 1302 dev_dbg(sd->entity.graph_obj.mdev->dev, 1303 "%s: field does not match (source %u, sink %u)\n", 1304 __func__, 1305 source_fmt->format.field, sink_fmt->format.field); 1306 pass = false; 1307 } 1308 1309 if (pass) 1310 return 0; 1311 1312 dev_dbg(sd->entity.graph_obj.mdev->dev, 1313 "%s: link was \"%s\":%u -> \"%s\":%u\n", __func__, 1314 link->source->entity->name, link->source->index, 1315 link->sink->entity->name, link->sink->index); 1316 1317 return -EPIPE; 1318 } 1319 EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate_default); 1320 1321 static int 1322 v4l2_subdev_link_validate_get_format(struct media_pad *pad, u32 stream, 1323 struct v4l2_subdev_format *fmt, 1324 bool states_locked) 1325 { 1326 struct v4l2_subdev_state *state; 1327 struct v4l2_subdev *sd; 1328 int ret; 1329 1330 sd = media_entity_to_v4l2_subdev(pad->entity); 1331 1332 fmt->which = 
V4L2_SUBDEV_FORMAT_ACTIVE; 1333 fmt->pad = pad->index; 1334 fmt->stream = stream; 1335 1336 if (states_locked) 1337 state = v4l2_subdev_get_locked_active_state(sd); 1338 else 1339 state = v4l2_subdev_lock_and_get_active_state(sd); 1340 1341 ret = v4l2_subdev_call(sd, pad, get_fmt, state, fmt); 1342 1343 if (!states_locked && state) 1344 v4l2_subdev_unlock_state(state); 1345 1346 return ret; 1347 } 1348 1349 #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) 1350 1351 static void __v4l2_link_validate_get_streams(struct media_pad *pad, 1352 u64 *streams_mask, 1353 bool states_locked) 1354 { 1355 struct v4l2_subdev_route *route; 1356 struct v4l2_subdev_state *state; 1357 struct v4l2_subdev *subdev; 1358 1359 subdev = media_entity_to_v4l2_subdev(pad->entity); 1360 1361 *streams_mask = 0; 1362 1363 if (states_locked) 1364 state = v4l2_subdev_get_locked_active_state(subdev); 1365 else 1366 state = v4l2_subdev_lock_and_get_active_state(subdev); 1367 1368 if (WARN_ON(!state)) 1369 return; 1370 1371 for_each_active_route(&state->routing, route) { 1372 u32 route_pad; 1373 u32 route_stream; 1374 1375 if (pad->flags & MEDIA_PAD_FL_SOURCE) { 1376 route_pad = route->source_pad; 1377 route_stream = route->source_stream; 1378 } else { 1379 route_pad = route->sink_pad; 1380 route_stream = route->sink_stream; 1381 } 1382 1383 if (route_pad != pad->index) 1384 continue; 1385 1386 *streams_mask |= BIT_ULL(route_stream); 1387 } 1388 1389 if (!states_locked) 1390 v4l2_subdev_unlock_state(state); 1391 } 1392 1393 #endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */ 1394 1395 static void v4l2_link_validate_get_streams(struct media_pad *pad, 1396 u64 *streams_mask, 1397 bool states_locked) 1398 { 1399 struct v4l2_subdev *subdev = media_entity_to_v4l2_subdev(pad->entity); 1400 1401 if (!(subdev->flags & V4L2_SUBDEV_FL_STREAMS)) { 1402 /* Non-streams subdevs have an implicit stream 0 */ 1403 *streams_mask = BIT_ULL(0); 1404 return; 1405 } 1406 1407 #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) 1408 
__v4l2_link_validate_get_streams(pad, streams_mask, states_locked); 1409 #else 1410 /* This shouldn't happen */ 1411 *streams_mask = 0; 1412 #endif 1413 } 1414 1415 static int v4l2_subdev_link_validate_locked(struct media_link *link, bool states_locked) 1416 { 1417 struct v4l2_subdev *sink_subdev = 1418 media_entity_to_v4l2_subdev(link->sink->entity); 1419 struct device *dev = sink_subdev->entity.graph_obj.mdev->dev; 1420 u64 source_streams_mask; 1421 u64 sink_streams_mask; 1422 u64 dangling_sink_streams; 1423 u32 stream; 1424 int ret; 1425 1426 dev_dbg(dev, "validating link \"%s\":%u -> \"%s\":%u\n", 1427 link->source->entity->name, link->source->index, 1428 link->sink->entity->name, link->sink->index); 1429 1430 v4l2_link_validate_get_streams(link->source, &source_streams_mask, states_locked); 1431 v4l2_link_validate_get_streams(link->sink, &sink_streams_mask, states_locked); 1432 1433 /* 1434 * It is ok to have more source streams than sink streams as extra 1435 * source streams can just be ignored by the receiver, but having extra 1436 * sink streams is an error as streams must have a source. 
1437 */ 1438 dangling_sink_streams = (source_streams_mask ^ sink_streams_mask) & 1439 sink_streams_mask; 1440 if (dangling_sink_streams) { 1441 dev_err(dev, "Dangling sink streams: mask %#llx\n", 1442 dangling_sink_streams); 1443 return -EINVAL; 1444 } 1445 1446 /* Validate source and sink stream formats */ 1447 1448 for (stream = 0; stream < sizeof(sink_streams_mask) * 8; ++stream) { 1449 struct v4l2_subdev_format sink_fmt, source_fmt; 1450 1451 if (!(sink_streams_mask & BIT_ULL(stream))) 1452 continue; 1453 1454 dev_dbg(dev, "validating stream \"%s\":%u:%u -> \"%s\":%u:%u\n", 1455 link->source->entity->name, link->source->index, stream, 1456 link->sink->entity->name, link->sink->index, stream); 1457 1458 ret = v4l2_subdev_link_validate_get_format(link->source, stream, 1459 &source_fmt, states_locked); 1460 if (ret < 0) { 1461 dev_dbg(dev, 1462 "Failed to get format for \"%s\":%u:%u (but that's ok)\n", 1463 link->source->entity->name, link->source->index, 1464 stream); 1465 continue; 1466 } 1467 1468 ret = v4l2_subdev_link_validate_get_format(link->sink, stream, 1469 &sink_fmt, states_locked); 1470 if (ret < 0) { 1471 dev_dbg(dev, 1472 "Failed to get format for \"%s\":%u:%u (but that's ok)\n", 1473 link->sink->entity->name, link->sink->index, 1474 stream); 1475 continue; 1476 } 1477 1478 /* TODO: add stream number to link_validate() */ 1479 ret = v4l2_subdev_call(sink_subdev, pad, link_validate, link, 1480 &source_fmt, &sink_fmt); 1481 if (!ret) 1482 continue; 1483 1484 if (ret != -ENOIOCTLCMD) 1485 return ret; 1486 1487 ret = v4l2_subdev_link_validate_default(sink_subdev, link, 1488 &source_fmt, &sink_fmt); 1489 1490 if (ret) 1491 return ret; 1492 } 1493 1494 return 0; 1495 } 1496 1497 int v4l2_subdev_link_validate(struct media_link *link) 1498 { 1499 struct v4l2_subdev *source_sd, *sink_sd; 1500 struct v4l2_subdev_state *source_state, *sink_state; 1501 bool states_locked; 1502 int ret; 1503 1504 /* 1505 * Links are validated in the context of the sink entity. 
Usage of this 1506 * helper on a sink that is not a subdev is a clear driver bug. 1507 */ 1508 if (WARN_ON_ONCE(!is_media_entity_v4l2_subdev(link->sink->entity))) 1509 return -EINVAL; 1510 1511 /* 1512 * If the source is a video device, delegate link validation to it. This 1513 * allows usage of this helper for subdev connected to a video output 1514 * device, provided that the driver implement the video output device's 1515 * .link_validate() operation. 1516 */ 1517 if (is_media_entity_v4l2_video_device(link->source->entity)) { 1518 struct media_entity *source = link->source->entity; 1519 1520 if (!source->ops || !source->ops->link_validate) { 1521 /* 1522 * Many existing drivers do not implement the required 1523 * .link_validate() operation for their video devices. 1524 * Print a warning to get the drivers fixed, and return 1525 * 0 to avoid breaking userspace. This should 1526 * eventually be turned into a WARN_ON() when all 1527 * drivers will have been fixed. 1528 */ 1529 pr_warn_once("video device '%s' does not implement .link_validate(), driver bug!\n", 1530 source->name); 1531 return 0; 1532 } 1533 1534 /* 1535 * Avoid infinite loops in case a video device incorrectly uses 1536 * this helper function as its .link_validate() handler. 1537 */ 1538 if (WARN_ON(source->ops->link_validate == v4l2_subdev_link_validate)) 1539 return -EINVAL; 1540 1541 return source->ops->link_validate(link); 1542 } 1543 1544 /* 1545 * If the source is still not a subdev, usage of this helper is a clear 1546 * driver bug. 
1547 */ 1548 if (WARN_ON(!is_media_entity_v4l2_subdev(link->source->entity))) 1549 return -EINVAL; 1550 1551 sink_sd = media_entity_to_v4l2_subdev(link->sink->entity); 1552 source_sd = media_entity_to_v4l2_subdev(link->source->entity); 1553 1554 sink_state = v4l2_subdev_get_unlocked_active_state(sink_sd); 1555 source_state = v4l2_subdev_get_unlocked_active_state(source_sd); 1556 1557 states_locked = sink_state && source_state; 1558 1559 if (states_locked) 1560 v4l2_subdev_lock_states(sink_state, source_state); 1561 1562 ret = v4l2_subdev_link_validate_locked(link, states_locked); 1563 1564 if (states_locked) 1565 v4l2_subdev_unlock_states(sink_state, source_state); 1566 1567 return ret; 1568 } 1569 EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate); 1570 1571 bool v4l2_subdev_has_pad_interdep(struct media_entity *entity, 1572 unsigned int pad0, unsigned int pad1) 1573 { 1574 struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity); 1575 struct v4l2_subdev_krouting *routing; 1576 struct v4l2_subdev_state *state; 1577 unsigned int i; 1578 1579 state = v4l2_subdev_lock_and_get_active_state(sd); 1580 1581 routing = &state->routing; 1582 1583 for (i = 0; i < routing->num_routes; ++i) { 1584 struct v4l2_subdev_route *route = &routing->routes[i]; 1585 1586 if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)) 1587 continue; 1588 1589 if ((route->sink_pad == pad0 && route->source_pad == pad1) || 1590 (route->source_pad == pad0 && route->sink_pad == pad1)) { 1591 v4l2_subdev_unlock_state(state); 1592 return true; 1593 } 1594 } 1595 1596 v4l2_subdev_unlock_state(state); 1597 1598 return false; 1599 } 1600 EXPORT_SYMBOL_GPL(v4l2_subdev_has_pad_interdep); 1601 1602 struct v4l2_subdev_state * 1603 __v4l2_subdev_state_alloc(struct v4l2_subdev *sd, const char *lock_name, 1604 struct lock_class_key *lock_key) 1605 { 1606 struct v4l2_subdev_state *state; 1607 int ret; 1608 1609 state = kzalloc_obj(*state); 1610 if (!state) 1611 return ERR_PTR(-ENOMEM); 1612 1613 
__mutex_init(&state->_lock, lock_name, lock_key); 1614 if (sd->state_lock) 1615 state->lock = sd->state_lock; 1616 else 1617 state->lock = &state->_lock; 1618 1619 state->sd = sd; 1620 1621 /* Drivers that support streams do not need the legacy pad config */ 1622 if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS) && sd->entity.num_pads) { 1623 state->pads = kvzalloc_objs(*state->pads, sd->entity.num_pads); 1624 if (!state->pads) { 1625 ret = -ENOMEM; 1626 goto err; 1627 } 1628 } 1629 1630 if (sd->internal_ops && sd->internal_ops->init_state) { 1631 /* 1632 * There can be no race at this point, but we lock the state 1633 * anyway to satisfy lockdep checks. 1634 */ 1635 v4l2_subdev_lock_state(state); 1636 ret = sd->internal_ops->init_state(sd, state); 1637 v4l2_subdev_unlock_state(state); 1638 1639 if (ret) 1640 goto err; 1641 } 1642 1643 return state; 1644 1645 err: 1646 if (state && state->pads) 1647 kvfree(state->pads); 1648 1649 kfree(state); 1650 1651 return ERR_PTR(ret); 1652 } 1653 EXPORT_SYMBOL_GPL(__v4l2_subdev_state_alloc); 1654 1655 void __v4l2_subdev_state_free(struct v4l2_subdev_state *state) 1656 { 1657 if (!state) 1658 return; 1659 1660 mutex_destroy(&state->_lock); 1661 1662 kfree(state->routing.routes); 1663 kvfree(state->stream_configs.configs); 1664 kvfree(state->pads); 1665 kfree(state); 1666 } 1667 EXPORT_SYMBOL_GPL(__v4l2_subdev_state_free); 1668 1669 int __v4l2_subdev_init_finalize(struct v4l2_subdev *sd, const char *name, 1670 struct lock_class_key *key) 1671 { 1672 struct v4l2_subdev_state *state; 1673 struct device *dev = sd->dev; 1674 bool has_disable_streams; 1675 bool has_enable_streams; 1676 bool has_s_stream; 1677 1678 /* Check that the subdevice implements the required features */ 1679 1680 has_s_stream = v4l2_subdev_has_op(sd, video, s_stream); 1681 has_enable_streams = v4l2_subdev_has_op(sd, pad, enable_streams); 1682 has_disable_streams = v4l2_subdev_has_op(sd, pad, disable_streams); 1683 1684 if (has_enable_streams != has_disable_streams) { 
1685 dev_err(dev, 1686 "subdev '%s' must implement both or neither of .enable_streams() and .disable_streams()\n", 1687 sd->name); 1688 return -EINVAL; 1689 } 1690 1691 if (sd->flags & V4L2_SUBDEV_FL_STREAMS) { 1692 if (has_s_stream && !has_enable_streams) { 1693 dev_err(dev, 1694 "subdev '%s' must implement .enable/disable_streams()\n", 1695 sd->name); 1696 1697 return -EINVAL; 1698 } 1699 } 1700 1701 if (sd->ctrl_handler) 1702 sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS; 1703 1704 state = __v4l2_subdev_state_alloc(sd, name, key); 1705 if (IS_ERR(state)) 1706 return PTR_ERR(state); 1707 1708 sd->active_state = state; 1709 1710 return 0; 1711 } 1712 EXPORT_SYMBOL_GPL(__v4l2_subdev_init_finalize); 1713 1714 void v4l2_subdev_cleanup(struct v4l2_subdev *sd) 1715 { 1716 struct v4l2_async_subdev_endpoint *ase, *ase_tmp; 1717 1718 __v4l2_subdev_state_free(sd->active_state); 1719 sd->active_state = NULL; 1720 1721 /* Uninitialised sub-device, bail out here. */ 1722 if (!sd->async_subdev_endpoint_list.next) 1723 return; 1724 1725 list_for_each_entry_safe(ase, ase_tmp, &sd->async_subdev_endpoint_list, 1726 async_subdev_endpoint_entry) { 1727 list_del(&ase->async_subdev_endpoint_entry); 1728 1729 kfree(ase); 1730 } 1731 } 1732 EXPORT_SYMBOL_GPL(v4l2_subdev_cleanup); 1733 1734 struct v4l2_mbus_framefmt * 1735 __v4l2_subdev_state_get_format(struct v4l2_subdev_state *state, 1736 unsigned int pad, u32 stream) 1737 { 1738 struct v4l2_subdev_stream_configs *stream_configs; 1739 unsigned int i; 1740 1741 if (WARN_ON_ONCE(!state)) 1742 return NULL; 1743 1744 if (state->pads) { 1745 if (stream) 1746 return NULL; 1747 1748 if (pad >= state->sd->entity.num_pads) 1749 return NULL; 1750 1751 return &state->pads[pad].format; 1752 } 1753 1754 lockdep_assert_held(state->lock); 1755 1756 stream_configs = &state->stream_configs; 1757 1758 for (i = 0; i < stream_configs->num_configs; ++i) { 1759 if (stream_configs->configs[i].pad == pad && 1760 stream_configs->configs[i].stream == stream) 1761 
return &stream_configs->configs[i].fmt; 1762 } 1763 1764 return NULL; 1765 } 1766 EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_format); 1767 1768 struct v4l2_rect * 1769 __v4l2_subdev_state_get_crop(struct v4l2_subdev_state *state, unsigned int pad, 1770 u32 stream) 1771 { 1772 struct v4l2_subdev_stream_configs *stream_configs; 1773 unsigned int i; 1774 1775 if (WARN_ON_ONCE(!state)) 1776 return NULL; 1777 1778 if (state->pads) { 1779 if (stream) 1780 return NULL; 1781 1782 if (pad >= state->sd->entity.num_pads) 1783 return NULL; 1784 1785 return &state->pads[pad].crop; 1786 } 1787 1788 lockdep_assert_held(state->lock); 1789 1790 stream_configs = &state->stream_configs; 1791 1792 for (i = 0; i < stream_configs->num_configs; ++i) { 1793 if (stream_configs->configs[i].pad == pad && 1794 stream_configs->configs[i].stream == stream) 1795 return &stream_configs->configs[i].crop; 1796 } 1797 1798 return NULL; 1799 } 1800 EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_crop); 1801 1802 struct v4l2_rect * 1803 __v4l2_subdev_state_get_compose(struct v4l2_subdev_state *state, 1804 unsigned int pad, u32 stream) 1805 { 1806 struct v4l2_subdev_stream_configs *stream_configs; 1807 unsigned int i; 1808 1809 if (WARN_ON_ONCE(!state)) 1810 return NULL; 1811 1812 if (state->pads) { 1813 if (stream) 1814 return NULL; 1815 1816 if (pad >= state->sd->entity.num_pads) 1817 return NULL; 1818 1819 return &state->pads[pad].compose; 1820 } 1821 1822 lockdep_assert_held(state->lock); 1823 1824 stream_configs = &state->stream_configs; 1825 1826 for (i = 0; i < stream_configs->num_configs; ++i) { 1827 if (stream_configs->configs[i].pad == pad && 1828 stream_configs->configs[i].stream == stream) 1829 return &stream_configs->configs[i].compose; 1830 } 1831 1832 return NULL; 1833 } 1834 EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_compose); 1835 1836 struct v4l2_fract * 1837 __v4l2_subdev_state_get_interval(struct v4l2_subdev_state *state, 1838 unsigned int pad, u32 stream) 1839 { 1840 struct 
v4l2_subdev_stream_configs *stream_configs; 1841 unsigned int i; 1842 1843 if (WARN_ON(!state)) 1844 return NULL; 1845 1846 lockdep_assert_held(state->lock); 1847 1848 if (state->pads) { 1849 if (stream) 1850 return NULL; 1851 1852 if (pad >= state->sd->entity.num_pads) 1853 return NULL; 1854 1855 return &state->pads[pad].interval; 1856 } 1857 1858 lockdep_assert_held(state->lock); 1859 1860 stream_configs = &state->stream_configs; 1861 1862 for (i = 0; i < stream_configs->num_configs; ++i) { 1863 if (stream_configs->configs[i].pad == pad && 1864 stream_configs->configs[i].stream == stream) 1865 return &stream_configs->configs[i].interval; 1866 } 1867 1868 return NULL; 1869 } 1870 EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_interval); 1871 1872 #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) 1873 1874 static int 1875 v4l2_subdev_init_stream_configs(struct v4l2_subdev_stream_configs *stream_configs, 1876 const struct v4l2_subdev_krouting *routing) 1877 { 1878 struct v4l2_subdev_stream_configs new_configs = { 0 }; 1879 struct v4l2_subdev_route *route; 1880 u32 idx; 1881 1882 /* Count number of formats needed */ 1883 for_each_active_route(routing, route) { 1884 /* 1885 * Each route needs a format on both ends of the route. 
1886 */ 1887 new_configs.num_configs += 2; 1888 } 1889 1890 if (new_configs.num_configs) { 1891 new_configs.configs = kvzalloc_objs(*new_configs.configs, 1892 new_configs.num_configs); 1893 1894 if (!new_configs.configs) 1895 return -ENOMEM; 1896 } 1897 1898 /* 1899 * Fill in the 'pad' and stream' value for each item in the array from 1900 * the routing table 1901 */ 1902 idx = 0; 1903 1904 for_each_active_route(routing, route) { 1905 new_configs.configs[idx].pad = route->sink_pad; 1906 new_configs.configs[idx].stream = route->sink_stream; 1907 1908 idx++; 1909 1910 new_configs.configs[idx].pad = route->source_pad; 1911 new_configs.configs[idx].stream = route->source_stream; 1912 1913 idx++; 1914 } 1915 1916 kvfree(stream_configs->configs); 1917 *stream_configs = new_configs; 1918 1919 return 0; 1920 } 1921 1922 int v4l2_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, 1923 struct v4l2_subdev_format *format) 1924 { 1925 struct v4l2_mbus_framefmt *fmt; 1926 1927 fmt = v4l2_subdev_state_get_format(state, format->pad, format->stream); 1928 if (!fmt) 1929 return -EINVAL; 1930 1931 format->format = *fmt; 1932 1933 return 0; 1934 } 1935 EXPORT_SYMBOL_GPL(v4l2_subdev_get_fmt); 1936 1937 int v4l2_subdev_get_frame_interval(struct v4l2_subdev *sd, 1938 struct v4l2_subdev_state *state, 1939 struct v4l2_subdev_frame_interval *fi) 1940 { 1941 struct v4l2_fract *interval; 1942 1943 interval = v4l2_subdev_state_get_interval(state, fi->pad, fi->stream); 1944 if (!interval) 1945 return -EINVAL; 1946 1947 fi->interval = *interval; 1948 1949 return 0; 1950 } 1951 EXPORT_SYMBOL_GPL(v4l2_subdev_get_frame_interval); 1952 1953 int v4l2_subdev_set_routing(struct v4l2_subdev *sd, 1954 struct v4l2_subdev_state *state, 1955 const struct v4l2_subdev_krouting *routing) 1956 { 1957 struct v4l2_subdev_krouting *dst = &state->routing; 1958 const struct v4l2_subdev_krouting *src = routing; 1959 struct v4l2_subdev_krouting new_routing = { 0 }; 1960 size_t bytes; 1961 int r; 
1962 1963 if (unlikely(check_mul_overflow((size_t)src->num_routes, 1964 sizeof(*src->routes), &bytes))) 1965 return -EOVERFLOW; 1966 1967 lockdep_assert_held(state->lock); 1968 1969 if (src->num_routes > 0) { 1970 new_routing.routes = kmemdup(src->routes, bytes, GFP_KERNEL); 1971 if (!new_routing.routes) 1972 return -ENOMEM; 1973 } 1974 1975 new_routing.num_routes = src->num_routes; 1976 1977 r = v4l2_subdev_init_stream_configs(&state->stream_configs, 1978 &new_routing); 1979 if (r) { 1980 kfree(new_routing.routes); 1981 return r; 1982 } 1983 1984 kfree(dst->routes); 1985 *dst = new_routing; 1986 1987 return 0; 1988 } 1989 EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing); 1990 1991 struct v4l2_subdev_route * 1992 __v4l2_subdev_next_active_route(const struct v4l2_subdev_krouting *routing, 1993 struct v4l2_subdev_route *route) 1994 { 1995 if (route) 1996 ++route; 1997 else 1998 route = &routing->routes[0]; 1999 2000 for (; route < routing->routes + routing->num_routes; ++route) { 2001 if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)) 2002 continue; 2003 2004 return route; 2005 } 2006 2007 return NULL; 2008 } 2009 EXPORT_SYMBOL_GPL(__v4l2_subdev_next_active_route); 2010 2011 int v4l2_subdev_set_routing_with_fmt(struct v4l2_subdev *sd, 2012 struct v4l2_subdev_state *state, 2013 const struct v4l2_subdev_krouting *routing, 2014 const struct v4l2_mbus_framefmt *fmt) 2015 { 2016 struct v4l2_subdev_stream_configs *stream_configs; 2017 unsigned int i; 2018 int ret; 2019 2020 ret = v4l2_subdev_set_routing(sd, state, routing); 2021 if (ret) 2022 return ret; 2023 2024 stream_configs = &state->stream_configs; 2025 2026 for (i = 0; i < stream_configs->num_configs; ++i) 2027 stream_configs->configs[i].fmt = *fmt; 2028 2029 return 0; 2030 } 2031 EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing_with_fmt); 2032 2033 int v4l2_subdev_routing_find_opposite_end(const struct v4l2_subdev_krouting *routing, 2034 u32 pad, u32 stream, u32 *other_pad, 2035 u32 *other_stream) 2036 { 2037 unsigned int i; 
2038 2039 for (i = 0; i < routing->num_routes; ++i) { 2040 struct v4l2_subdev_route *route = &routing->routes[i]; 2041 2042 if (route->source_pad == pad && 2043 route->source_stream == stream) { 2044 if (other_pad) 2045 *other_pad = route->sink_pad; 2046 if (other_stream) 2047 *other_stream = route->sink_stream; 2048 return 0; 2049 } 2050 2051 if (route->sink_pad == pad && route->sink_stream == stream) { 2052 if (other_pad) 2053 *other_pad = route->source_pad; 2054 if (other_stream) 2055 *other_stream = route->source_stream; 2056 return 0; 2057 } 2058 } 2059 2060 return -EINVAL; 2061 } 2062 EXPORT_SYMBOL_GPL(v4l2_subdev_routing_find_opposite_end); 2063 2064 struct v4l2_mbus_framefmt * 2065 v4l2_subdev_state_get_opposite_stream_format(struct v4l2_subdev_state *state, 2066 u32 pad, u32 stream) 2067 { 2068 u32 other_pad, other_stream; 2069 int ret; 2070 2071 ret = v4l2_subdev_routing_find_opposite_end(&state->routing, 2072 pad, stream, 2073 &other_pad, &other_stream); 2074 if (ret) 2075 return NULL; 2076 2077 return v4l2_subdev_state_get_format(state, other_pad, other_stream); 2078 } 2079 EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_opposite_stream_format); 2080 2081 u64 v4l2_subdev_state_xlate_streams(const struct v4l2_subdev_state *state, 2082 u32 pad0, u32 pad1, u64 *streams) 2083 { 2084 const struct v4l2_subdev_krouting *routing = &state->routing; 2085 struct v4l2_subdev_route *route; 2086 u64 streams0 = 0; 2087 u64 streams1 = 0; 2088 2089 for_each_active_route(routing, route) { 2090 if (route->sink_pad == pad0 && route->source_pad == pad1 && 2091 (*streams & BIT_ULL(route->sink_stream))) { 2092 streams0 |= BIT_ULL(route->sink_stream); 2093 streams1 |= BIT_ULL(route->source_stream); 2094 } 2095 if (route->source_pad == pad0 && route->sink_pad == pad1 && 2096 (*streams & BIT_ULL(route->source_stream))) { 2097 streams0 |= BIT_ULL(route->source_stream); 2098 streams1 |= BIT_ULL(route->sink_stream); 2099 } 2100 } 2101 2102 *streams = streams0; 2103 return streams1; 2104 } 
2105 EXPORT_SYMBOL_GPL(v4l2_subdev_state_xlate_streams); 2106 2107 int v4l2_subdev_routing_validate(struct v4l2_subdev *sd, 2108 const struct v4l2_subdev_krouting *routing, 2109 enum v4l2_subdev_routing_restriction disallow) 2110 { 2111 u32 *remote_pads = NULL; 2112 unsigned int i, j; 2113 int ret = -ENXIO; 2114 2115 if (disallow & (V4L2_SUBDEV_ROUTING_NO_STREAM_MIX | 2116 V4L2_SUBDEV_ROUTING_NO_MULTIPLEXING)) { 2117 remote_pads = kcalloc(sd->entity.num_pads, sizeof(*remote_pads), 2118 GFP_KERNEL); 2119 if (!remote_pads) 2120 return -ENOMEM; 2121 2122 for (i = 0; i < sd->entity.num_pads; ++i) 2123 remote_pads[i] = U32_MAX; 2124 } 2125 2126 for (i = 0; i < routing->num_routes; ++i) { 2127 const struct v4l2_subdev_route *route = &routing->routes[i]; 2128 2129 /* Validate the sink and source pad numbers. */ 2130 if (route->sink_pad >= sd->entity.num_pads || 2131 !(sd->entity.pads[route->sink_pad].flags & MEDIA_PAD_FL_SINK)) { 2132 dev_dbg(sd->dev, "route %u sink (%u) is not a sink pad\n", 2133 i, route->sink_pad); 2134 goto out; 2135 } 2136 2137 if (route->source_pad >= sd->entity.num_pads || 2138 !(sd->entity.pads[route->source_pad].flags & MEDIA_PAD_FL_SOURCE)) { 2139 dev_dbg(sd->dev, "route %u source (%u) is not a source pad\n", 2140 i, route->source_pad); 2141 goto out; 2142 } 2143 2144 /* 2145 * V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX: all streams from a 2146 * sink pad must be routed to a single source pad. 2147 */ 2148 if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX) { 2149 if (remote_pads[route->sink_pad] != U32_MAX && 2150 remote_pads[route->sink_pad] != route->source_pad) { 2151 dev_dbg(sd->dev, 2152 "route %u attempts to mix %s streams\n", 2153 i, "sink"); 2154 goto out; 2155 } 2156 } 2157 2158 /* 2159 * V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX: all streams on a 2160 * source pad must originate from a single sink pad. 
		 */
		if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX) {
			if (remote_pads[route->source_pad] != U32_MAX &&
			    remote_pads[route->source_pad] != route->sink_pad) {
				dev_dbg(sd->dev,
					"route %u attempts to mix %s streams\n",
					i, "source");
				goto out;
			}
		}

		/*
		 * V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING: Pads on the sink
		 * side can not do stream multiplexing, i.e. there can be only
		 * a single stream in a sink pad.
		 */
		if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING) {
			if (remote_pads[route->sink_pad] != U32_MAX) {
				dev_dbg(sd->dev,
					"route %u attempts to multiplex on %s pad %u\n",
					i, "sink", route->sink_pad);
				goto out;
			}
		}

		/*
		 * V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING: Pads on the
		 * source side can not do stream multiplexing, i.e. there can
		 * be only a single stream in a source pad.
		 */
		if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING) {
			if (remote_pads[route->source_pad] != U32_MAX) {
				dev_dbg(sd->dev,
					"route %u attempts to multiplex on %s pad %u\n",
					i, "source", route->source_pad);
				goto out;
			}
		}

		/*
		 * Record the pad at the other end of this route so the
		 * mixing/multiplexing checks above can compare later routes
		 * against it. remote_pads may be NULL when none of the
		 * restrictions that need it are requested (it is allocated
		 * earlier, outside this excerpt).
		 */
		if (remote_pads) {
			remote_pads[route->sink_pad] = route->source_pad;
			remote_pads[route->source_pad] = route->sink_pad;
		}

		/* Pairwise checks of this route against all following routes. */
		for (j = i + 1; j < routing->num_routes; ++j) {
			const struct v4l2_subdev_route *r = &routing->routes[j];

			/*
			 * V4L2_SUBDEV_ROUTING_NO_1_TO_N: No two routes can
			 * originate from the same (sink) stream.
			 */
			if ((disallow & V4L2_SUBDEV_ROUTING_NO_1_TO_N) &&
			    route->sink_pad == r->sink_pad &&
			    route->sink_stream == r->sink_stream) {
				dev_dbg(sd->dev,
					"routes %u and %u originate from same sink (%u/%u)\n",
					i, j, route->sink_pad,
					route->sink_stream);
				goto out;
			}

			/*
			 * V4L2_SUBDEV_ROUTING_NO_N_TO_1: No two routes can end
			 * at the same (source) stream.
			 */
			if ((disallow & V4L2_SUBDEV_ROUTING_NO_N_TO_1) &&
			    route->source_pad == r->source_pad &&
			    route->source_stream == r->source_stream) {
				dev_dbg(sd->dev,
					"routes %u and %u end at same source (%u/%u)\n",
					i, j, route->source_pad,
					route->source_stream);
				goto out;
			}
		}
	}

	ret = 0;

out:
	kfree(remote_pads);
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_routing_validate);

/*
 * Report which of the streams in @streams_mask exist on @pad
 * (*found_streams) and which of those are currently enabled
 * (*enabled_streams).
 *
 * Subdevs without V4L2_SUBDEV_FL_STREAMS carry a single implicit stream 0
 * per pad, tracked in the sd->enabled_pads bitmask; @state is not
 * dereferenced in that case (and may be NULL).
 */
static void v4l2_subdev_collect_streams(struct v4l2_subdev *sd,
					struct v4l2_subdev_state *state,
					u32 pad, u64 streams_mask,
					u64 *found_streams,
					u64 *enabled_streams)
{
	if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) {
		/* Implicit stream 0 always exists on a non-streams subdev. */
		*found_streams = BIT_ULL(0);
		*enabled_streams =
			(sd->enabled_pads & BIT_ULL(pad)) ? BIT_ULL(0) : 0;
		dev_dbg(sd->dev,
			"collect_streams: sub-device \"%s\" does not support streams\n",
			sd->entity.name);
		return;
	}

	*found_streams = 0;
	*enabled_streams = 0;

	/* Scan the stream configurations stored in the (locked) state. */
	for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) {
		const struct v4l2_subdev_stream_config *cfg =
			&state->stream_configs.configs[i];

		if (cfg->pad != pad || !(streams_mask & BIT_ULL(cfg->stream)))
			continue;

		*found_streams |= BIT_ULL(cfg->stream);
		if (cfg->enabled)
			*enabled_streams |= BIT_ULL(cfg->stream);
	}

	dev_dbg(sd->dev,
		"collect_streams: \"%s\":%u: found %#llx enabled %#llx\n",
		sd->entity.name, pad, *found_streams, *enabled_streams);
}

/*
 * Mark the streams in @streams_mask on @pad as enabled or disabled in the
 * software state: the sd->enabled_pads bitmask for non-streams subdevs
 * (@state unused, may be NULL), or the per-stream 'enabled' flags in @state
 * otherwise.
 */
static void v4l2_subdev_set_streams_enabled(struct v4l2_subdev *sd,
					    struct v4l2_subdev_state *state,
					    u32 pad, u64 streams_mask,
					    bool enabled)
{
	if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) {
		if (enabled)
			sd->enabled_pads |= BIT_ULL(pad);
		else
			sd->enabled_pads &= ~BIT_ULL(pad);
		return;
	}

	for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) {
		struct v4l2_subdev_stream_config *cfg =
			&state->stream_configs.configs[i];

		if (cfg->pad == pad && (streams_mask & BIT_ULL(cfg->stream)))
			cfg->enabled = enabled;
	}
}

int v4l2_subdev_enable_streams(struct v4l2_subdev *sd, u32 pad,
			       u64 streams_mask)
{
	struct device *dev = sd->entity.graph_obj.mdev->dev;
	struct v4l2_subdev_state *state;
	bool already_streaming;
	u64 enabled_streams;
	u64 found_streams;
	bool use_s_stream;
	int ret;

	dev_dbg(dev, "enable streams \"%s\":%u/%#llx\n", sd->entity.name, pad,
		streams_mask);

	/* A few basic sanity checks first. */
	if (pad >= sd->entity.num_pads)
		return -EINVAL;

	/* Streams are only enabled on source pads. */
	if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
		return -EOPNOTSUPP;

	/*
	 * We use a 64-bit bitmask for tracking enabled pads, so only subdevices
	 * with 64 pads or less can be supported.
	 */
	if (pad >= sizeof(sd->enabled_pads) * BITS_PER_BYTE)
		return -EOPNOTSUPP;

	if (!streams_mask)
		return 0;

	/* Fallback on .s_stream() if .enable_streams() isn't available. */
	use_s_stream = !v4l2_subdev_has_op(sd, pad, enable_streams);

	/*
	 * NOTE(review): @state stays NULL on the .s_stream() fallback path.
	 * v4l2_subdev_collect_streams() and v4l2_subdev_set_streams_enabled()
	 * only dereference it for V4L2_SUBDEV_FL_STREAMS subdevs, which are
	 * presumably required to implement .enable_streams() -- confirm.
	 */
	if (!use_s_stream)
		state = v4l2_subdev_lock_and_get_active_state(sd);
	else
		state = NULL;

	/*
	 * Verify that the requested streams exist and that they are not
	 * already enabled.
	 */

	v4l2_subdev_collect_streams(sd, state, pad, streams_mask,
				    &found_streams, &enabled_streams);

	if (found_streams != streams_mask) {
		dev_dbg(dev, "streams 0x%llx not found on %s:%u\n",
			streams_mask & ~found_streams, sd->entity.name, pad);
		ret = -EINVAL;
		goto done;
	}

	/* Enabling an already-enabled stream is an error. */
	if (enabled_streams) {
		dev_dbg(dev, "streams 0x%llx already enabled on %s:%u\n",
			enabled_streams, sd->entity.name, pad);
		ret = -EALREADY;
		goto done;
	}

	/* Sampled before the enable takes effect, for the LED logic below. */
	already_streaming = v4l2_subdev_is_streaming(sd);

	if (!use_s_stream) {
		/* Call the .enable_streams() operation. */
		ret = v4l2_subdev_call(sd, pad, enable_streams, state, pad,
				       streams_mask);
	} else {
		/* Start streaming when the first pad is enabled. */
		if (!already_streaming)
			ret = v4l2_subdev_call(sd, video, s_stream, 1);
		else
			ret = 0;
	}

	if (ret) {
		dev_dbg(dev, "enable streams %u:%#llx failed: %d\n", pad,
			streams_mask, ret);
		goto done;
	}

	/* Mark the streams as enabled. */
	v4l2_subdev_set_streams_enabled(sd, state, pad, streams_mask, true);

	/*
	 * TODO: When all the drivers have been changed to use
	 * v4l2_subdev_enable_streams() and v4l2_subdev_disable_streams(),
	 * instead of calling .s_stream() operation directly, we can remove
	 * the privacy LED handling from call_s_stream() and do it here
	 * for all cases.
	 */
	if (!use_s_stream && !already_streaming)
		v4l2_subdev_enable_privacy_led(sd);

done:
	if (!use_s_stream)
		v4l2_subdev_unlock_state(state);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_enable_streams);

int v4l2_subdev_disable_streams(struct v4l2_subdev *sd, u32 pad,
				u64 streams_mask)
{
	struct device *dev = sd->entity.graph_obj.mdev->dev;
	struct v4l2_subdev_state *state;
	u64 enabled_streams;
	u64 found_streams;
	bool use_s_stream;
	int ret;

	dev_dbg(dev, "disable streams \"%s\":%u/%#llx\n", sd->entity.name, pad,
		streams_mask);

	/* A few basic sanity checks first. */
	if (pad >= sd->entity.num_pads)
		return -EINVAL;

	/* Streams are only disabled on source pads. */
	if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
		return -EOPNOTSUPP;

	/*
	 * We use a 64-bit bitmask for tracking enabled pads, so only subdevices
	 * with 64 pads or less can be supported.
	 */
	if (pad >= sizeof(sd->enabled_pads) * BITS_PER_BYTE)
		return -EOPNOTSUPP;

	if (!streams_mask)
		return 0;

	/* Fallback on .s_stream() if .disable_streams() isn't available. */
	use_s_stream = !v4l2_subdev_has_op(sd, pad, disable_streams);

	/* @state stays NULL on the .s_stream() fallback path (see enable). */
	if (!use_s_stream)
		state = v4l2_subdev_lock_and_get_active_state(sd);
	else
		state = NULL;

	/*
	 * Verify that the requested streams exist and that they are not
	 * already disabled.
	 */

	v4l2_subdev_collect_streams(sd, state, pad, streams_mask,
				    &found_streams, &enabled_streams);

	if (found_streams != streams_mask) {
		dev_dbg(dev, "streams 0x%llx not found on %s:%u\n",
			streams_mask & ~found_streams, sd->entity.name, pad);
		ret = -EINVAL;
		goto done;
	}

	/* Disabling a stream that is not enabled is an error. */
	if (enabled_streams != streams_mask) {
		dev_dbg(dev, "streams 0x%llx already disabled on %s:%u\n",
			streams_mask & ~enabled_streams, sd->entity.name, pad);
		ret = -EALREADY;
		goto done;
	}

	if (!use_s_stream) {
		/* Call the .disable_streams() operation. */
		ret = v4l2_subdev_call(sd, pad, disable_streams, state, pad,
				       streams_mask);
	} else {
		/* Stop streaming when the last streams are disabled. */

		if (!(sd->enabled_pads & ~BIT_ULL(pad)))
			ret = v4l2_subdev_call(sd, video, s_stream, 0);
		else
			ret = 0;
	}

	if (ret) {
		dev_dbg(dev, "disable streams %u:%#llx failed: %d\n", pad,
			streams_mask, ret);
		goto done;
	}

	v4l2_subdev_set_streams_enabled(sd, state, pad, streams_mask, false);

done:
	if (!use_s_stream) {
		/* Turn the privacy LED off once nothing is streaming. */
		if (!v4l2_subdev_is_streaming(sd))
			v4l2_subdev_disable_privacy_led(sd);

		v4l2_subdev_unlock_state(state);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_disable_streams);

/*
 * Implement the video .s_stream() operation on top of
 * v4l2_subdev_enable_streams()/v4l2_subdev_disable_streams() for subdevs
 * with a single source pad.
 */
int v4l2_subdev_s_stream_helper(struct v4l2_subdev *sd, int enable)
{
	struct v4l2_subdev_state *state;
	struct v4l2_subdev_route *route;
	struct media_pad *pad;
	u64 source_mask = 0;
	int pad_index = -1;

	/*
	 * Find the source pad. This helper is meant for subdevs that have a
	 * single source pad, so failures shouldn't happen, but catch them
	 * loudly nonetheless as they indicate a driver bug.
	 */
	media_entity_for_each_pad(&sd->entity, pad) {
		if (pad->flags & MEDIA_PAD_FL_SOURCE) {
			pad_index = pad->index;
			break;
		}
	}

	if (WARN_ON(pad_index == -1))
		return -EINVAL;

	if (sd->flags & V4L2_SUBDEV_FL_STREAMS) {
		/*
		 * As there's a single source pad, just collect all the source
		 * streams.
		 */
		state = v4l2_subdev_lock_and_get_active_state(sd);

		for_each_active_route(&state->routing, route)
			source_mask |= BIT_ULL(route->source_stream);

		v4l2_subdev_unlock_state(state);
	} else {
		/*
		 * For non-streams subdevices, there's a single implicit stream
		 * per pad.
		 */
		source_mask = BIT_ULL(0);
	}

	if (enable)
		return v4l2_subdev_enable_streams(sd, pad_index, source_mask);
	else
		return v4l2_subdev_disable_streams(sd, pad_index, source_mask);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_s_stream_helper);

#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */

#endif /* CONFIG_MEDIA_CONTROLLER */

/*
 * Initialize a subdev to a known clean state and attach its operations.
 * Must be called before the subdev is registered with a v4l2_device.
 */
void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops)
{
	INIT_LIST_HEAD(&sd->list);
	BUG_ON(!ops);
	sd->ops = ops;
	sd->v4l2_dev = NULL;
	sd->flags = 0;
	sd->name[0] = '\0';
	sd->grp_id = 0;
	sd->dev_priv = NULL;
	sd->host_priv = NULL;
	sd->privacy_led = NULL;
	INIT_LIST_HEAD(&sd->async_subdev_endpoint_list);
#if defined(CONFIG_MEDIA_CONTROLLER)
	sd->entity.name = sd->name;
	sd->entity.obj_type = MEDIA_ENTITY_TYPE_V4L2_SUBDEV;
	sd->entity.function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
#endif
}
EXPORT_SYMBOL(v4l2_subdev_init);

/*
 * Queue the event on the subdev's device node and forward it to the
 * v4l2_device notify callback.
 */
void v4l2_subdev_notify_event(struct v4l2_subdev *sd,
			      const struct v4l2_event *ev)
{
	v4l2_event_queue(sd->devnode, ev);
	v4l2_subdev_notify(sd, V4L2_DEVICE_NOTIFY_EVENT, (void *)ev);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_notify_event);

bool v4l2_subdev_is_streaming(struct v4l2_subdev *sd)
{
	struct v4l2_subdev_state *state;

	/* Drivers without .enable_streams() track a single global flag. */
	if (!v4l2_subdev_has_op(sd, pad, enable_streams))
		return sd->s_stream_enabled;

	/* Non-streams subdevs: streaming iff any pad is enabled. */
	if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
		return !!sd->enabled_pads;

	/* Streams subdevs: streaming iff any stream config is enabled. */
	state = v4l2_subdev_get_locked_active_state(sd);

	for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) {
		const struct v4l2_subdev_stream_config *cfg;

		cfg = &state->stream_configs.configs[i];

		if (cfg->enabled)
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_is_streaming);

int v4l2_subdev_get_privacy_led(struct v4l2_subdev *sd)
{
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
	/* -ENOENT means no privacy LED is described; that is not an error. */
	sd->privacy_led = led_get(sd->dev, "privacy");
	if (IS_ERR(sd->privacy_led) && PTR_ERR(sd->privacy_led) != -ENOENT)
		return dev_err_probe(sd->dev, PTR_ERR(sd->privacy_led),
				     "getting privacy LED\n");

	if (!IS_ERR_OR_NULL(sd->privacy_led)) {
		/*
		 * Take the LED away from sysfs/trigger control and make sure
		 * it starts out off.
		 */
		mutex_lock(&sd->privacy_led->led_access);
		led_sysfs_disable(sd->privacy_led);
		led_trigger_remove(sd->privacy_led);
		led_set_brightness(sd->privacy_led, 0);
		mutex_unlock(&sd->privacy_led->led_access);
	}
#endif
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_get_privacy_led);

void v4l2_subdev_put_privacy_led(struct v4l2_subdev *sd)
{
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
	if (!IS_ERR_OR_NULL(sd->privacy_led)) {
		/* Give the LED back to sysfs control and drop our reference. */
		mutex_lock(&sd->privacy_led->led_access);
		led_sysfs_enable(sd->privacy_led);
		mutex_unlock(&sd->privacy_led->led_access);
		led_put(sd->privacy_led);
	}
#endif
}
EXPORT_SYMBOL_GPL(v4l2_subdev_put_privacy_led);