// SPDX-License-Identifier: GPL-2.0-only
/*
 * V4L2 sub-device
 *
 * Copyright (C) 2010 Nokia Corporation
 *
 * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *	    Sakari Ailus <sakari.ailus@iki.fi>
 */

#include <linux/export.h>
#include <linux/ioctl.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/version.h>
#include <linux/videodev2.h>

#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
/*
 * The Streams API is an experimental feature. To use the Streams API, set
 * 'v4l2_subdev_enable_streams_api' to 1 below.
 */
static bool v4l2_subdev_enable_streams_api;
#endif

/*
 * Maximum stream ID is 63 for now, as we use u64 bitmask to represent a set
 * of streams.
 *
 * Note that V4L2_FRAME_DESC_ENTRY_MAX is related: V4L2_FRAME_DESC_ENTRY_MAX
 * restricts the total number of streams in a pad, although the stream ID is
 * not restricted.
 */
#define V4L2_SUBDEV_MAX_STREAM_ID 63

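/*
 * Streams are tracked throughout this file as bits in a u64 mask, so stream
 * ID N corresponds to BIT_ULL(N); that is why the maximum stream ID above is
 * 63. A minimal illustration (hypothetical values, not from a real driver):
 *
 *	u64 mask = BIT_ULL(0) | BIT_ULL(2);	// streams 0 and 2
 *	bool has_stream2 = mask & BIT_ULL(2);	// true
 */
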
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd)
{
	struct v4l2_subdev_state *state;
	static struct lock_class_key key;

	state = __v4l2_subdev_state_alloc(sd, "fh->state->lock", &key);
	if (IS_ERR(state))
		return PTR_ERR(state);

	fh->state = state;

	return 0;
}

static void subdev_fh_free(struct v4l2_subdev_fh *fh)
{
	__v4l2_subdev_state_free(fh->state);
	fh->state = NULL;
}

static int subdev_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_subdev_fh *subdev_fh;
	int ret;

	subdev_fh = kzalloc(sizeof(*subdev_fh), GFP_KERNEL);
	if (subdev_fh == NULL)
		return -ENOMEM;

	ret = subdev_fh_init(subdev_fh, sd);
	if (ret) {
		kfree(subdev_fh);
		return ret;
	}

	v4l2_fh_init(&subdev_fh->vfh, vdev);
	v4l2_fh_add(&subdev_fh->vfh);
	file->private_data = &subdev_fh->vfh;

	if (sd->v4l2_dev->mdev && sd->entity.graph_obj.mdev->dev) {
		struct module *owner;

		owner = sd->entity.graph_obj.mdev->dev->driver->owner;
		if (!try_module_get(owner)) {
			ret = -EBUSY;
			goto err;
		}
		subdev_fh->owner = owner;
	}

	if (sd->internal_ops && sd->internal_ops->open) {
		ret = sd->internal_ops->open(sd, subdev_fh);
		if (ret < 0)
			goto err;
	}

	return 0;

err:
	module_put(subdev_fh->owner);
	v4l2_fh_del(&subdev_fh->vfh);
	v4l2_fh_exit(&subdev_fh->vfh);
	subdev_fh_free(subdev_fh);
	kfree(subdev_fh);

	return ret;
}

static int subdev_close(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *vfh = file->private_data;
	struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);

	if (sd->internal_ops && sd->internal_ops->close)
		sd->internal_ops->close(sd, subdev_fh);
	module_put(subdev_fh->owner);
	v4l2_fh_del(vfh);
	v4l2_fh_exit(vfh);
	subdev_fh_free(subdev_fh);
	kfree(subdev_fh);
	file->private_data = NULL;

	return 0;
}
#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */
static int subdev_open(struct file *file)
{
	return -ENODEV;
}

static int subdev_close(struct file *file)
{
	return -ENODEV;
}
#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */

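/*
 * The check_*() helpers below validate ioctl arguments before the subdev
 * operation is invoked. They are chained with the GNU "elvis" operator
 * 'a ? : b', which evaluates to 'a' when 'a' is non-zero and to 'b'
 * otherwise, so the first helper to return a negative error code short
 * circuits the chain. A minimal sketch of the idiom:
 *
 *	return check_a() ? : check_b() ? : do_op();
 *
 * reads: return the first non-zero check result, otherwise call do_op().
 */
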
static inline int check_which(u32 which)
{
	if (which != V4L2_SUBDEV_FORMAT_TRY &&
	    which != V4L2_SUBDEV_FORMAT_ACTIVE)
		return -EINVAL;

	return 0;
}

static inline int check_pad(struct v4l2_subdev *sd, u32 pad)
{
#if defined(CONFIG_MEDIA_CONTROLLER)
	if (sd->entity.num_pads) {
		if (pad >= sd->entity.num_pads)
			return -EINVAL;
		return 0;
	}
#endif
	/* allow pad 0 on subdevices not registered as media entities */
	if (pad > 0)
		return -EINVAL;
	return 0;
}

static int check_state(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
		       u32 which, u32 pad, u32 stream)
{
	if (sd->flags & V4L2_SUBDEV_FL_STREAMS) {
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
		if (!v4l2_subdev_state_get_stream_format(state, pad, stream))
			return -EINVAL;
		return 0;
#else
		return -EINVAL;
#endif
	}

	if (stream != 0)
		return -EINVAL;

	if (which == V4L2_SUBDEV_FORMAT_TRY && (!state || !state->pads))
		return -EINVAL;

	return 0;
}

static inline int check_format(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *state,
			       struct v4l2_subdev_format *format)
{
	if (!format)
		return -EINVAL;

	return check_which(format->which) ? : check_pad(sd, format->pad) ? :
	       check_state(sd, state, format->which, format->pad, format->stream);
}

static int call_get_fmt(struct v4l2_subdev *sd,
			struct v4l2_subdev_state *state,
			struct v4l2_subdev_format *format)
{
	return check_format(sd, state, format) ? :
	       sd->ops->pad->get_fmt(sd, state, format);
}

static int call_set_fmt(struct v4l2_subdev *sd,
			struct v4l2_subdev_state *state,
			struct v4l2_subdev_format *format)
{
	return check_format(sd, state, format) ? :
	       sd->ops->pad->set_fmt(sd, state, format);
}

static int call_enum_mbus_code(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *state,
			       struct v4l2_subdev_mbus_code_enum *code)
{
	if (!code)
		return -EINVAL;

	return check_which(code->which) ? : check_pad(sd, code->pad) ? :
	       check_state(sd, state, code->which, code->pad, code->stream) ? :
	       sd->ops->pad->enum_mbus_code(sd, state, code);
}

static int call_enum_frame_size(struct v4l2_subdev *sd,
				struct v4l2_subdev_state *state,
				struct v4l2_subdev_frame_size_enum *fse)
{
	if (!fse)
		return -EINVAL;

	return check_which(fse->which) ? : check_pad(sd, fse->pad) ? :
	       check_state(sd, state, fse->which, fse->pad, fse->stream) ? :
	       sd->ops->pad->enum_frame_size(sd, state, fse);
}

static inline int check_frame_interval(struct v4l2_subdev *sd,
				       struct v4l2_subdev_frame_interval *fi)
{
	if (!fi)
		return -EINVAL;

	return check_pad(sd, fi->pad);
}

static int call_g_frame_interval(struct v4l2_subdev *sd,
				 struct v4l2_subdev_frame_interval *fi)
{
	return check_frame_interval(sd, fi) ? :
	       sd->ops->video->g_frame_interval(sd, fi);
}

static int call_s_frame_interval(struct v4l2_subdev *sd,
				 struct v4l2_subdev_frame_interval *fi)
{
	return check_frame_interval(sd, fi) ? :
	       sd->ops->video->s_frame_interval(sd, fi);
}

static int call_enum_frame_interval(struct v4l2_subdev *sd,
				    struct v4l2_subdev_state *state,
				    struct v4l2_subdev_frame_interval_enum *fie)
{
	if (!fie)
		return -EINVAL;

	return check_which(fie->which) ? : check_pad(sd, fie->pad) ? :
	       check_state(sd, state, fie->which, fie->pad, fie->stream) ? :
	       sd->ops->pad->enum_frame_interval(sd, state, fie);
}

static inline int check_selection(struct v4l2_subdev *sd,
				  struct v4l2_subdev_state *state,
				  struct v4l2_subdev_selection *sel)
{
	if (!sel)
		return -EINVAL;

	return check_which(sel->which) ? : check_pad(sd, sel->pad) ? :
	       check_state(sd, state, sel->which, sel->pad, sel->stream);
}

static int call_get_selection(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *state,
			      struct v4l2_subdev_selection *sel)
{
	return check_selection(sd, state, sel) ? :
	       sd->ops->pad->get_selection(sd, state, sel);
}

static int call_set_selection(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *state,
			      struct v4l2_subdev_selection *sel)
{
	return check_selection(sd, state, sel) ? :
	       sd->ops->pad->set_selection(sd, state, sel);
}

static inline int check_edid(struct v4l2_subdev *sd,
			     struct v4l2_subdev_edid *edid)
{
	if (!edid)
		return -EINVAL;

	if (edid->blocks && edid->edid == NULL)
		return -EINVAL;

	return check_pad(sd, edid->pad);
}

static int call_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
{
	return check_edid(sd, edid) ? : sd->ops->pad->get_edid(sd, edid);
}

static int call_set_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
{
	return check_edid(sd, edid) ? : sd->ops->pad->set_edid(sd, edid);
}

static int call_dv_timings_cap(struct v4l2_subdev *sd,
			       struct v4l2_dv_timings_cap *cap)
{
	if (!cap)
		return -EINVAL;

	return check_pad(sd, cap->pad) ? :
	       sd->ops->pad->dv_timings_cap(sd, cap);
}

static int call_enum_dv_timings(struct v4l2_subdev *sd,
				struct v4l2_enum_dv_timings *dvt)
{
	if (!dvt)
		return -EINVAL;

	return check_pad(sd, dvt->pad) ? :
	       sd->ops->pad->enum_dv_timings(sd, dvt);
}

static int call_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad,
				struct v4l2_mbus_config *config)
{
	return check_pad(sd, pad) ? :
	       sd->ops->pad->get_mbus_config(sd, pad, config);
}

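/*
 * Disabling streaming is treated as best-effort in the wrapper below: if the
 * subdev reports an error on disable, a warning is logged and success is
 * returned so that callers can continue their teardown.
 */
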
static int call_s_stream(struct v4l2_subdev *sd, int enable)
{
	int ret;

	ret = sd->ops->video->s_stream(sd, enable);

	if (!enable && ret < 0) {
		dev_warn(sd->dev, "disabling streaming failed (%d)\n", ret);
		return 0;
	}

	return ret;
}

#ifdef CONFIG_MEDIA_CONTROLLER
/*
 * Create state-management wrapper for pad ops dealing with subdev state. The
 * wrapper handles the case where the caller does not provide the called
 * subdev's state. This should be removed when all the callers are fixed.
 */
#define DEFINE_STATE_WRAPPER(f, arg_type)                                  \
	static int call_##f##_state(struct v4l2_subdev *sd,                \
				    struct v4l2_subdev_state *_state,      \
				    arg_type *arg)                         \
	{                                                                  \
		struct v4l2_subdev_state *state = _state;                  \
		int ret;                                                   \
		if (!_state)                                               \
			state = v4l2_subdev_lock_and_get_active_state(sd); \
		ret = call_##f(sd, state, arg);                            \
		if (!_state && state)                                      \
			v4l2_subdev_unlock_state(state);                   \
		return ret;                                                \
	}

#else /* CONFIG_MEDIA_CONTROLLER */

#define DEFINE_STATE_WRAPPER(f, arg_type)                            \
	static int call_##f##_state(struct v4l2_subdev *sd,          \
				    struct v4l2_subdev_state *state, \
				    arg_type *arg)                   \
	{                                                            \
		return call_##f(sd, state, arg);                     \
	}

#endif /* CONFIG_MEDIA_CONTROLLER */

DEFINE_STATE_WRAPPER(get_fmt, struct v4l2_subdev_format);
DEFINE_STATE_WRAPPER(set_fmt, struct v4l2_subdev_format);
DEFINE_STATE_WRAPPER(enum_mbus_code, struct v4l2_subdev_mbus_code_enum);
DEFINE_STATE_WRAPPER(enum_frame_size, struct v4l2_subdev_frame_size_enum);
DEFINE_STATE_WRAPPER(enum_frame_interval, struct v4l2_subdev_frame_interval_enum);
DEFINE_STATE_WRAPPER(get_selection, struct v4l2_subdev_selection);
DEFINE_STATE_WRAPPER(set_selection, struct v4l2_subdev_selection);

static const struct v4l2_subdev_pad_ops v4l2_subdev_call_pad_wrappers = {
	.get_fmt		= call_get_fmt_state,
	.set_fmt		= call_set_fmt_state,
	.enum_mbus_code		= call_enum_mbus_code_state,
	.enum_frame_size	= call_enum_frame_size_state,
	.enum_frame_interval	= call_enum_frame_interval_state,
	.get_selection		= call_get_selection_state,
	.set_selection		= call_set_selection_state,
	.get_edid		= call_get_edid,
	.set_edid		= call_set_edid,
	.dv_timings_cap		= call_dv_timings_cap,
	.enum_dv_timings	= call_enum_dv_timings,
	.get_mbus_config	= call_get_mbus_config,
};

static const struct v4l2_subdev_video_ops v4l2_subdev_call_video_wrappers = {
	.g_frame_interval	= call_g_frame_interval,
	.s_frame_interval	= call_s_frame_interval,
	.s_stream		= call_s_stream,
};

const struct v4l2_subdev_ops v4l2_subdev_call_wrappers = {
	.pad	= &v4l2_subdev_call_pad_wrappers,
	.video	= &v4l2_subdev_call_video_wrappers,
};
EXPORT_SYMBOL(v4l2_subdev_call_wrappers);

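/*
 * For reference, DEFINE_STATE_WRAPPER(get_fmt, struct v4l2_subdev_format)
 * expands, in the CONFIG_MEDIA_CONTROLLER case, to roughly:
 *
 *	static int call_get_fmt_state(struct v4l2_subdev *sd,
 *				      struct v4l2_subdev_state *_state,
 *				      struct v4l2_subdev_format *arg)
 *	{
 *		struct v4l2_subdev_state *state = _state;
 *		int ret;
 *
 *		if (!_state)
 *			state = v4l2_subdev_lock_and_get_active_state(sd);
 *		ret = call_get_fmt(sd, state, arg);
 *		if (!_state && state)
 *			v4l2_subdev_unlock_state(state);
 *		return ret;
 *	}
 *
 * The v4l2_subdev_call() macro consults v4l2_subdev_call_wrappers before
 * falling back to the raw op, so these wrappers front every pad and video
 * operation listed above.
 */
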
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)

static struct v4l2_subdev_state *
subdev_ioctl_get_state(struct v4l2_subdev *sd, struct v4l2_subdev_fh *subdev_fh,
		       unsigned int cmd, void *arg)
{
	u32 which;

	switch (cmd) {
	default:
		return NULL;
	case VIDIOC_SUBDEV_G_FMT:
	case VIDIOC_SUBDEV_S_FMT:
		which = ((struct v4l2_subdev_format *)arg)->which;
		break;
	case VIDIOC_SUBDEV_G_CROP:
	case VIDIOC_SUBDEV_S_CROP:
		which = ((struct v4l2_subdev_crop *)arg)->which;
		break;
	case VIDIOC_SUBDEV_ENUM_MBUS_CODE:
		which = ((struct v4l2_subdev_mbus_code_enum *)arg)->which;
		break;
	case VIDIOC_SUBDEV_ENUM_FRAME_SIZE:
		which = ((struct v4l2_subdev_frame_size_enum *)arg)->which;
		break;
	case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL:
		which = ((struct v4l2_subdev_frame_interval_enum *)arg)->which;
		break;
	case VIDIOC_SUBDEV_G_SELECTION:
	case VIDIOC_SUBDEV_S_SELECTION:
		which = ((struct v4l2_subdev_selection *)arg)->which;
		break;
	case VIDIOC_SUBDEV_G_ROUTING:
	case VIDIOC_SUBDEV_S_ROUTING:
		which = ((struct v4l2_subdev_routing *)arg)->which;
		break;
	}

	return which == V4L2_SUBDEV_FORMAT_TRY ?
	       subdev_fh->state :
	       v4l2_subdev_get_unlocked_active_state(sd);
}

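/*
 * The 'state' argument below may be NULL for ioctls that do not operate on a
 * subdev state (see subdev_ioctl_get_state() above). When non-NULL, it has
 * already been locked by the caller, subdev_do_ioctl_lock().
 */
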
static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
			    struct v4l2_subdev_state *state)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *vfh = file->private_data;
	bool ro_subdev = test_bit(V4L2_FL_SUBDEV_RO_DEVNODE, &vdev->flags);
	bool streams_subdev = sd->flags & V4L2_SUBDEV_FL_STREAMS;
	int rval;

	switch (cmd) {
	case VIDIOC_SUBDEV_QUERYCAP: {
		struct v4l2_subdev_capability *cap = arg;

		memset(cap->reserved, 0, sizeof(cap->reserved));
		cap->version = LINUX_VERSION_CODE;
		cap->capabilities =
			(ro_subdev ? V4L2_SUBDEV_CAP_RO_SUBDEV : 0) |
			(streams_subdev ? V4L2_SUBDEV_CAP_STREAMS : 0);

		return 0;
	}

	case VIDIOC_QUERYCTRL:
		/*
		 * TODO: this really should be folded into v4l2_queryctrl (this
		 * currently returns -EINVAL for NULL control handlers).
		 * However, v4l2_queryctrl() is still called directly by
		 * drivers as well and until that has been addressed I believe
		 * it is safer to do the check here. The same is true for the
		 * other control ioctls below.
		 */
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_queryctrl(vfh->ctrl_handler, arg);

	case VIDIOC_QUERY_EXT_CTRL:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_query_ext_ctrl(vfh->ctrl_handler, arg);

	case VIDIOC_QUERYMENU:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_querymenu(vfh->ctrl_handler, arg);

	case VIDIOC_G_CTRL:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_g_ctrl(vfh->ctrl_handler, arg);

	case VIDIOC_S_CTRL:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_s_ctrl(vfh, vfh->ctrl_handler, arg);

	case VIDIOC_G_EXT_CTRLS:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_g_ext_ctrls(vfh->ctrl_handler,
					vdev, sd->v4l2_dev->mdev, arg);

	case VIDIOC_S_EXT_CTRLS:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler,
					vdev, sd->v4l2_dev->mdev, arg);

	case VIDIOC_TRY_EXT_CTRLS:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_try_ext_ctrls(vfh->ctrl_handler,
					  vdev, sd->v4l2_dev->mdev, arg);

	case VIDIOC_DQEVENT:
		if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
			return -ENOIOCTLCMD;

		return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);

	case VIDIOC_SUBSCRIBE_EVENT:
		return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);

	case VIDIOC_UNSUBSCRIBE_EVENT:
		return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);

#ifdef CONFIG_VIDEO_ADV_DEBUG
	case VIDIOC_DBG_G_REGISTER:
	{
		struct v4l2_dbg_register *p = arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		return v4l2_subdev_call(sd, core, g_register, p);
	}
	case VIDIOC_DBG_S_REGISTER:
	{
		struct v4l2_dbg_register *p = arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		return v4l2_subdev_call(sd, core, s_register, p);
	}
	case VIDIOC_DBG_G_CHIP_INFO:
	{
		struct v4l2_dbg_chip_info *p = arg;

		if (p->match.type != V4L2_CHIP_MATCH_SUBDEV || p->match.addr)
			return -EINVAL;
		if (sd->ops->core && sd->ops->core->s_register)
			p->flags |= V4L2_CHIP_FL_WRITABLE;
		if (sd->ops->core && sd->ops->core->g_register)
			p->flags |= V4L2_CHIP_FL_READABLE;
		strscpy(p->name, sd->name, sizeof(p->name));
		return 0;
	}
#endif

	case VIDIOC_LOG_STATUS: {
		int ret;

		pr_info("%s: ================= START STATUS =================\n",
			sd->name);
		ret = v4l2_subdev_call(sd, core, log_status);
		pr_info("%s: ================== END STATUS ==================\n",
			sd->name);
		return ret;
	}

	case VIDIOC_SUBDEV_G_FMT: {
		struct v4l2_subdev_format *format = arg;

		memset(format->reserved, 0, sizeof(format->reserved));
		memset(format->format.reserved, 0, sizeof(format->format.reserved));
		return v4l2_subdev_call(sd, pad, get_fmt, state, format);
	}

	case VIDIOC_SUBDEV_S_FMT: {
		struct v4l2_subdev_format *format = arg;

		if (format->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		memset(format->reserved, 0, sizeof(format->reserved));
		memset(format->format.reserved, 0, sizeof(format->format.reserved));
		return v4l2_subdev_call(sd, pad, set_fmt, state, format);
	}

	case VIDIOC_SUBDEV_G_CROP: {
		struct v4l2_subdev_crop *crop = arg;
		struct v4l2_subdev_selection sel;

		memset(crop->reserved, 0, sizeof(crop->reserved));
		memset(&sel, 0, sizeof(sel));
		sel.which = crop->which;
		sel.pad = crop->pad;
		sel.target = V4L2_SEL_TGT_CROP;

		rval = v4l2_subdev_call(
			sd, pad, get_selection, state, &sel);

		crop->rect = sel.r;

		return rval;
	}

	case VIDIOC_SUBDEV_S_CROP: {
		struct v4l2_subdev_crop *crop = arg;
		struct v4l2_subdev_selection sel;

		if (crop->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		memset(crop->reserved, 0, sizeof(crop->reserved));
		memset(&sel, 0, sizeof(sel));
		sel.which = crop->which;
		sel.pad = crop->pad;
		sel.target = V4L2_SEL_TGT_CROP;
		sel.r = crop->rect;

		rval = v4l2_subdev_call(
			sd, pad, set_selection, state, &sel);

		crop->rect = sel.r;

		return rval;
	}

	case VIDIOC_SUBDEV_ENUM_MBUS_CODE: {
		struct v4l2_subdev_mbus_code_enum *code = arg;

		memset(code->reserved, 0, sizeof(code->reserved));
		return v4l2_subdev_call(sd, pad, enum_mbus_code, state,
					code);
	}

	case VIDIOC_SUBDEV_ENUM_FRAME_SIZE: {
		struct v4l2_subdev_frame_size_enum *fse = arg;

		memset(fse->reserved, 0, sizeof(fse->reserved));
		return v4l2_subdev_call(sd, pad, enum_frame_size, state,
					fse);
	}

	case VIDIOC_SUBDEV_G_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval *fi = arg;

		memset(fi->reserved, 0, sizeof(fi->reserved));
		return v4l2_subdev_call(sd, video, g_frame_interval, arg);
	}

	case VIDIOC_SUBDEV_S_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval *fi = arg;

		if (ro_subdev)
			return -EPERM;

		memset(fi->reserved, 0, sizeof(fi->reserved));
		return v4l2_subdev_call(sd, video, s_frame_interval, arg);
	}

	case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval_enum *fie = arg;

		memset(fie->reserved, 0, sizeof(fie->reserved));
		return v4l2_subdev_call(sd, pad, enum_frame_interval, state,
					fie);
	}

	case VIDIOC_SUBDEV_G_SELECTION: {
		struct v4l2_subdev_selection *sel = arg;

		memset(sel->reserved, 0, sizeof(sel->reserved));
		return v4l2_subdev_call(
			sd, pad, get_selection, state, sel);
	}

	case VIDIOC_SUBDEV_S_SELECTION: {
		struct v4l2_subdev_selection *sel = arg;

		if (sel->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		memset(sel->reserved, 0, sizeof(sel->reserved));
		return v4l2_subdev_call(
			sd, pad, set_selection, state, sel);
	}

	case VIDIOC_G_EDID: {
		struct v4l2_subdev_edid *edid = arg;

		return v4l2_subdev_call(sd, pad, get_edid, edid);
	}

	case VIDIOC_S_EDID: {
		struct v4l2_subdev_edid *edid = arg;

		return v4l2_subdev_call(sd, pad, set_edid, edid);
	}

	case VIDIOC_SUBDEV_DV_TIMINGS_CAP: {
		struct v4l2_dv_timings_cap *cap = arg;

		return v4l2_subdev_call(sd, pad, dv_timings_cap, cap);
	}

	case VIDIOC_SUBDEV_ENUM_DV_TIMINGS: {
		struct v4l2_enum_dv_timings *dvt = arg;

		return v4l2_subdev_call(sd, pad, enum_dv_timings, dvt);
	}

	case VIDIOC_SUBDEV_QUERY_DV_TIMINGS:
		return v4l2_subdev_call(sd, video, query_dv_timings, arg);

	case VIDIOC_SUBDEV_G_DV_TIMINGS:
		return v4l2_subdev_call(sd, video, g_dv_timings, arg);

	case VIDIOC_SUBDEV_S_DV_TIMINGS:
		if (ro_subdev)
			return -EPERM;

		return v4l2_subdev_call(sd, video, s_dv_timings, arg);

	case VIDIOC_SUBDEV_G_STD:
		return v4l2_subdev_call(sd, video, g_std, arg);

	case VIDIOC_SUBDEV_S_STD: {
		v4l2_std_id *std = arg;

		if (ro_subdev)
			return -EPERM;

		return v4l2_subdev_call(sd, video, s_std, *std);
	}

	case VIDIOC_SUBDEV_ENUMSTD: {
		struct v4l2_standard *p = arg;
		v4l2_std_id id;

		if (v4l2_subdev_call(sd, video, g_tvnorms, &id))
			return -EINVAL;

		return v4l_video_std_enumstd(p, id);
	}

	case VIDIOC_SUBDEV_QUERYSTD:
		return v4l2_subdev_call(sd, video, querystd, arg);

	case VIDIOC_SUBDEV_G_ROUTING: {
		struct v4l2_subdev_routing *routing = arg;
		struct v4l2_subdev_krouting *krouting;

		if (!v4l2_subdev_enable_streams_api)
			return -ENOIOCTLCMD;

		if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
			return -ENOIOCTLCMD;

		memset(routing->reserved, 0, sizeof(routing->reserved));

		krouting = &state->routing;

		if (routing->num_routes < krouting->num_routes) {
			routing->num_routes = krouting->num_routes;
			return -ENOSPC;
		}

		memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes,
		       krouting->routes,
		       krouting->num_routes * sizeof(*krouting->routes));
		routing->num_routes = krouting->num_routes;

		return 0;
	}

	case VIDIOC_SUBDEV_S_ROUTING: {
		struct v4l2_subdev_routing *routing = arg;
		struct v4l2_subdev_route *routes =
			(struct v4l2_subdev_route *)(uintptr_t)routing->routes;
		struct v4l2_subdev_krouting krouting = {};
		unsigned int i;

		if (!v4l2_subdev_enable_streams_api)
			return -ENOIOCTLCMD;

		if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
			return -ENOIOCTLCMD;

		if (routing->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		memset(routing->reserved, 0, sizeof(routing->reserved));

		for (i = 0; i < routing->num_routes; ++i) {
			const struct v4l2_subdev_route *route = &routes[i];
			const struct media_pad *pads = sd->entity.pads;

			if (route->sink_stream > V4L2_SUBDEV_MAX_STREAM_ID ||
			    route->source_stream > V4L2_SUBDEV_MAX_STREAM_ID)
				return -EINVAL;

			if (route->sink_pad >= sd->entity.num_pads)
				return -EINVAL;

			if (!(pads[route->sink_pad].flags &
			      MEDIA_PAD_FL_SINK))
				return -EINVAL;

			if (route->source_pad >= sd->entity.num_pads)
				return -EINVAL;

			if (!(pads[route->source_pad].flags &
			      MEDIA_PAD_FL_SOURCE))
				return -EINVAL;
		}

		krouting.num_routes = routing->num_routes;
		krouting.routes = routes;

		return v4l2_subdev_call(sd, pad, set_routing, state,
					routing->which, &krouting);
	}

	default:
		return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
	}

	return 0;
}

static long subdev_do_ioctl_lock(struct file *file, unsigned int cmd, void *arg)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->lock;
	long ret = -ENODEV;

	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;

	if (video_is_registered(vdev)) {
		struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
		struct v4l2_fh *vfh = file->private_data;
		struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
		struct v4l2_subdev_state *state;

		state = subdev_ioctl_get_state(sd, subdev_fh, cmd, arg);

		if (state)
			v4l2_subdev_lock_state(state);

		ret = subdev_do_ioctl(file, cmd, arg, state);

		if (state)
			v4l2_subdev_unlock_state(state);
	}

	if (lock)
		mutex_unlock(lock);
	return ret;
}

static long subdev_ioctl(struct file *file, unsigned int cmd,
			 unsigned long arg)
{
	return video_usercopy(file, cmd, arg, subdev_do_ioctl_lock);
}

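/*
 * video_usercopy(), used by subdev_ioctl() above, copies the ioctl payload
 * from user space into a kernel buffer, invokes subdev_do_ioctl_lock() on
 * it, and copies the (possibly updated) payload back, so the handlers above
 * only ever see kernel pointers.
 */
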
#ifdef CONFIG_COMPAT
static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);

	return v4l2_subdev_call(sd, core, compat_ioctl32, cmd, arg);
}
#endif

#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */
static long subdev_ioctl(struct file *file, unsigned int cmd,
			 unsigned long arg)
{
	return -ENODEV;
}

#ifdef CONFIG_COMPAT
static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	return -ENODEV;
}
#endif
#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */

static __poll_t subdev_poll(struct file *file, poll_table *wait)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *fh = file->private_data;

	if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
		return EPOLLERR;

	poll_wait(file, &fh->wait, wait);

	if (v4l2_event_pending(fh))
		return EPOLLPRI;

	return 0;
}

const struct v4l2_file_operations v4l2_subdev_fops = {
	.owner = THIS_MODULE,
	.open = subdev_open,
	.unlocked_ioctl = subdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl32 = subdev_compat_ioctl32,
#endif
	.release = subdev_close,
	.poll = subdev_poll,
};

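/*
 * These file operations back the /dev/v4l-subdevX device nodes. Drivers
 * rarely reference v4l2_subdev_fops directly; the nodes are created for all
 * registered subdevs that have the V4L2_SUBDEV_FL_HAS_DEVNODE flag set, for
 * example (a sketch, error handling elided):
 *
 *	ret = v4l2_device_register_subdev_nodes(&v4l2_dev);
 */
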
#ifdef CONFIG_MEDIA_CONTROLLER

int v4l2_subdev_get_fwnode_pad_1_to_1(struct media_entity *entity,
				      struct fwnode_endpoint *endpoint)
{
	struct fwnode_handle *fwnode;
	struct v4l2_subdev *sd;

	if (!is_media_entity_v4l2_subdev(entity))
		return -EINVAL;

	sd = media_entity_to_v4l2_subdev(entity);

	fwnode = fwnode_graph_get_port_parent(endpoint->local_fwnode);
	fwnode_handle_put(fwnode);

	if (device_match_fwnode(sd->dev, fwnode))
		return endpoint->port;

	return -ENXIO;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_get_fwnode_pad_1_to_1);

int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
				      struct media_link *link,
				      struct v4l2_subdev_format *source_fmt,
				      struct v4l2_subdev_format *sink_fmt)
{
	bool pass = true;

	/* The width, height and code must match. */
	if (source_fmt->format.width != sink_fmt->format.width) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: width does not match (source %u, sink %u)\n",
			__func__,
			source_fmt->format.width, sink_fmt->format.width);
		pass = false;
	}

	if (source_fmt->format.height != sink_fmt->format.height) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: height does not match (source %u, sink %u)\n",
			__func__,
			source_fmt->format.height, sink_fmt->format.height);
		pass = false;
	}

	if (source_fmt->format.code != sink_fmt->format.code) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: media bus code does not match (source 0x%8.8x, sink 0x%8.8x)\n",
			__func__,
			source_fmt->format.code, sink_fmt->format.code);
		pass = false;
	}

	/* The field order must match, or the sink field order must be NONE
	 * to support interlaced hardware connected to bridges that support
	 * progressive formats only.
	 */
	if (source_fmt->format.field != sink_fmt->format.field &&
	    sink_fmt->format.field != V4L2_FIELD_NONE) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: field does not match (source %u, sink %u)\n",
			__func__,
			source_fmt->format.field, sink_fmt->format.field);
		pass = false;
	}

	if (pass)
		return 0;

	dev_dbg(sd->entity.graph_obj.mdev->dev,
		"%s: link was \"%s\":%u -> \"%s\":%u\n", __func__,
		link->source->entity->name, link->source->index,
		link->sink->entity->name, link->sink->index);

	return -EPIPE;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate_default);

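/*
 * v4l2_subdev_link_validate_default() also serves as the fallback in
 * v4l2_subdev_link_validate_locked() below whenever the sink subdev does not
 * implement the link_validate pad operation.
 */
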
static int
v4l2_subdev_link_validate_get_format(struct media_pad *pad, u32 stream,
				     struct v4l2_subdev_format *fmt)
{
	if (is_media_entity_v4l2_subdev(pad->entity)) {
		struct v4l2_subdev *sd =
			media_entity_to_v4l2_subdev(pad->entity);

		fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
		fmt->pad = pad->index;
		fmt->stream = stream;

		return v4l2_subdev_call(sd, pad, get_fmt,
					v4l2_subdev_get_locked_active_state(sd),
					fmt);
	}

	WARN(pad->entity->function != MEDIA_ENT_F_IO_V4L,
	     "Driver bug! Wrong media entity type 0x%08x, entity %s\n",
	     pad->entity->function, pad->entity->name);

	return -EINVAL;
}

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)

static void __v4l2_link_validate_get_streams(struct media_pad *pad,
					     u64 *streams_mask)
{
	struct v4l2_subdev_route *route;
	struct v4l2_subdev_state *state;
	struct v4l2_subdev *subdev;

	subdev = media_entity_to_v4l2_subdev(pad->entity);

	*streams_mask = 0;

	state = v4l2_subdev_get_locked_active_state(subdev);
	if (WARN_ON(!state))
		return;

	for_each_active_route(&state->routing, route) {
		u32 route_pad;
		u32 route_stream;

		if (pad->flags & MEDIA_PAD_FL_SOURCE) {
			route_pad = route->source_pad;
			route_stream = route->source_stream;
		} else {
			route_pad = route->sink_pad;
			route_stream = route->sink_stream;
		}

		if (route_pad != pad->index)
			continue;

		*streams_mask |= BIT_ULL(route_stream);
	}
}

#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */

static void v4l2_link_validate_get_streams(struct media_pad *pad,
					   u64 *streams_mask)
{
	struct v4l2_subdev *subdev = media_entity_to_v4l2_subdev(pad->entity);

	if (!(subdev->flags & V4L2_SUBDEV_FL_STREAMS)) {
		/* Non-streams subdevs have an implicit stream 0 */
		*streams_mask = BIT_ULL(0);
		return;
	}

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
	__v4l2_link_validate_get_streams(pad, streams_mask);
#else
	/* This shouldn't happen */
	*streams_mask = 0;
#endif
}

static int v4l2_subdev_link_validate_locked(struct media_link *link)
{
	struct v4l2_subdev *sink_subdev =
		media_entity_to_v4l2_subdev(link->sink->entity);
	struct device *dev = sink_subdev->entity.graph_obj.mdev->dev;
	u64 source_streams_mask;
	u64 sink_streams_mask;
	u64 dangling_sink_streams;
	u32 stream;
	int ret;

	dev_dbg(dev, "validating link \"%s\":%u -> \"%s\":%u\n",
		link->source->entity->name, link->source->index,
		link->sink->entity->name, link->sink->index);

	v4l2_link_validate_get_streams(link->source, &source_streams_mask);
	v4l2_link_validate_get_streams(link->sink, &sink_streams_mask);

	/*
	 * It is ok to have more source streams than sink streams as extra
	 * source streams can just be ignored by the receiver, but having
	 * extra sink streams is an error as streams must have a source.
	 */
	dangling_sink_streams = (source_streams_mask ^ sink_streams_mask) &
				sink_streams_mask;
	if (dangling_sink_streams) {
		dev_err(dev, "Dangling sink streams: mask %#llx\n",
			dangling_sink_streams);
		return -EINVAL;
	}

	/* Validate source and sink stream formats */

	for (stream = 0; stream < sizeof(sink_streams_mask) * 8; ++stream) {
		struct v4l2_subdev_format sink_fmt, source_fmt;

		if (!(sink_streams_mask & BIT_ULL(stream)))
			continue;

		dev_dbg(dev, "validating stream \"%s\":%u:%u -> \"%s\":%u:%u\n",
			link->source->entity->name, link->source->index, stream,
			link->sink->entity->name, link->sink->index, stream);

		ret = v4l2_subdev_link_validate_get_format(link->source, stream,
							   &source_fmt);
		if (ret < 0) {
			dev_dbg(dev,
				"Failed to get format for \"%s\":%u:%u (but that's ok)\n",
				link->source->entity->name, link->source->index,
				stream);
			continue;
		}

		ret = v4l2_subdev_link_validate_get_format(link->sink, stream,
							   &sink_fmt);
		if (ret < 0) {
			dev_dbg(dev,
				"Failed to get format for \"%s\":%u:%u (but that's ok)\n",
				link->sink->entity->name, link->sink->index,
				stream);
			continue;
		}

		/* TODO: add stream number to link_validate() */
		ret = v4l2_subdev_call(sink_subdev, pad, link_validate, link,
				       &source_fmt, &sink_fmt);
		if (!ret)
			continue;

		if (ret != -ENOIOCTLCMD)
			return ret;

		ret = v4l2_subdev_link_validate_default(sink_subdev, link,
							&source_fmt, &sink_fmt);

		if (ret)
			return ret;
	}

	return 0;
}

int v4l2_subdev_link_validate(struct media_link *link)
{
	struct v4l2_subdev *source_sd, *sink_sd;
	struct v4l2_subdev_state *source_state, *sink_state;
	int ret;

	sink_sd = media_entity_to_v4l2_subdev(link->sink->entity);
	source_sd = media_entity_to_v4l2_subdev(link->source->entity);

	sink_state = v4l2_subdev_get_unlocked_active_state(sink_sd);
	source_state = v4l2_subdev_get_unlocked_active_state(source_sd);

	if (sink_state)
		v4l2_subdev_lock_state(sink_state);

	if (source_state)
		v4l2_subdev_lock_state(source_state);

	ret = v4l2_subdev_link_validate_locked(link);

	if (sink_state)
		v4l2_subdev_unlock_state(sink_state);

	if (source_state)
		v4l2_subdev_unlock_state(source_state);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate);

bool v4l2_subdev_has_pad_interdep(struct media_entity *entity,
				  unsigned int pad0, unsigned int pad1)
{
	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
	struct v4l2_subdev_krouting *routing;
	struct v4l2_subdev_state *state;
	unsigned int i;

	state = v4l2_subdev_lock_and_get_active_state(sd);

	routing = &state->routing;

	for (i = 0; i < routing->num_routes; ++i) {
		struct v4l2_subdev_route *route = &routing->routes[i];

		if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
			continue;

		if ((route->sink_pad == pad0 && route->source_pad == pad1) ||
		    (route->source_pad == pad0 && route->sink_pad == pad1)) {
			v4l2_subdev_unlock_state(state);
			return true;
		}
	}

	v4l2_subdev_unlock_state(state);

	return false;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_has_pad_interdep);

struct v4l2_subdev_state *
__v4l2_subdev_state_alloc(struct v4l2_subdev *sd, const char *lock_name,
			  struct lock_class_key *lock_key)
{
	struct v4l2_subdev_state *state;
	int ret;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return ERR_PTR(-ENOMEM);

	__mutex_init(&state->_lock, lock_name, lock_key);
	if (sd->state_lock)
		state->lock = sd->state_lock;
	else
		state->lock = &state->_lock;

	/* Drivers that support streams do not need the legacy pad config */
	if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS) && sd->entity.num_pads) {
		state->pads = kvcalloc(sd->entity.num_pads,
				       sizeof(*state->pads), GFP_KERNEL);
		if (!state->pads) {
			ret = -ENOMEM;
			goto err;
		}
	}

	/*
	 * There can be no race at this point, but we lock the state anyway to
	 * satisfy lockdep checks.
	 */
	v4l2_subdev_lock_state(state);
	ret = v4l2_subdev_call(sd, pad, init_cfg, state);
	v4l2_subdev_unlock_state(state);

	if (ret < 0 && ret != -ENOIOCTLCMD)
		goto err;

	return state;

err:
	if (state && state->pads)
		kvfree(state->pads);

	kfree(state);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_state_alloc);

void __v4l2_subdev_state_free(struct v4l2_subdev_state *state)
{
	if (!state)
		return;

	mutex_destroy(&state->_lock);

	kfree(state->routing.routes);
	kvfree(state->stream_configs.configs);
	kvfree(state->pads);
	kfree(state);
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_state_free);

int __v4l2_subdev_init_finalize(struct v4l2_subdev *sd, const char *name,
				struct lock_class_key *key)
{
	struct v4l2_subdev_state *state;

	state = __v4l2_subdev_state_alloc(sd, name, key);
	if (IS_ERR(state))
		return PTR_ERR(state);

	sd->active_state = state;

	return 0;
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_init_finalize);

void v4l2_subdev_cleanup(struct v4l2_subdev *sd)
{
	__v4l2_subdev_state_free(sd->active_state);
	sd->active_state = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_cleanup);

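/*
 * Drivers normally reach __v4l2_subdev_init_finalize() through the
 * v4l2_subdev_init_finalize() macro, which supplies the lock name and a
 * static lock_class_key for lockdep. A minimal probe-time sketch
 * (hypothetical 'priv' driver structure, error handling elided):
 *
 *	v4l2_subdev_init(&priv->sd, &my_subdev_ops);
 *	...
 *	ret = v4l2_subdev_init_finalize(&priv->sd);
 *
 * with a matching v4l2_subdev_cleanup(&priv->sd) on the remove path.
 */
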
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)

static int
v4l2_subdev_init_stream_configs(struct v4l2_subdev_stream_configs *stream_configs,
				const struct v4l2_subdev_krouting *routing)
{
	struct v4l2_subdev_stream_configs new_configs = { 0 };
	struct v4l2_subdev_route *route;
	u32 idx;

	/* Count number of formats needed */
	for_each_active_route(routing, route) {
		/* Each route needs a format on both ends of the route. */
		new_configs.num_configs += 2;
	}

	if (new_configs.num_configs) {
		new_configs.configs = kvcalloc(new_configs.num_configs,
					       sizeof(*new_configs.configs),
					       GFP_KERNEL);

		if (!new_configs.configs)
			return -ENOMEM;
	}

	/*
	 * Fill in the 'pad' and 'stream' values for each item in the array
	 * from the routing table.
	 */
	idx = 0;

	for_each_active_route(routing, route) {
		new_configs.configs[idx].pad = route->sink_pad;
		new_configs.configs[idx].stream = route->sink_stream;

		idx++;

		new_configs.configs[idx].pad = route->source_pad;
		new_configs.configs[idx].stream = route->source_stream;

		idx++;
	}

	kvfree(stream_configs->configs);
	*stream_configs = new_configs;

	return 0;
}

int v4l2_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
			struct v4l2_subdev_format *format)
{
	struct v4l2_mbus_framefmt *fmt;

	if (sd->flags & V4L2_SUBDEV_FL_STREAMS)
		fmt = v4l2_subdev_state_get_stream_format(state, format->pad,
							  format->stream);
	else if (format->pad < sd->entity.num_pads && format->stream == 0)
		fmt = v4l2_subdev_get_pad_format(sd, state, format->pad);
	else
		fmt = NULL;

	if (!fmt)
		return -EINVAL;

	format->format = *fmt;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_get_fmt);

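/*
 * v4l2_subdev_get_fmt() is designed so that drivers using the active-state
 * infrastructure can plug it directly into their pad ops, e.g.:
 *
 *	static const struct v4l2_subdev_pad_ops my_pad_ops = {
 *		.get_fmt = v4l2_subdev_get_fmt,
 *		.set_fmt = my_set_fmt,
 *	};
 *
 * (a sketch; 'my_set_fmt' is a hypothetical driver callback).
 */
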
int v4l2_subdev_set_routing(struct v4l2_subdev *sd,
			    struct v4l2_subdev_state *state,
			    const struct v4l2_subdev_krouting *routing)
{
	struct v4l2_subdev_krouting *dst = &state->routing;
	const struct v4l2_subdev_krouting *src = routing;
	struct v4l2_subdev_krouting new_routing = { 0 };
	size_t bytes;
	int r;

	if (unlikely(check_mul_overflow((size_t)src->num_routes,
					sizeof(*src->routes), &bytes)))
		return -EOVERFLOW;

	lockdep_assert_held(state->lock);

	if (src->num_routes > 0) {
		new_routing.routes = kmemdup(src->routes, bytes, GFP_KERNEL);
		if (!new_routing.routes)
			return -ENOMEM;
	}

	new_routing.num_routes = src->num_routes;

	r = v4l2_subdev_init_stream_configs(&state->stream_configs,
					    &new_routing);
	if (r) {
		kfree(new_routing.routes);
		return r;
	}

	kfree(dst->routes);
	*dst = new_routing;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing);

struct v4l2_subdev_route *
__v4l2_subdev_next_active_route(const struct v4l2_subdev_krouting *routing,
				struct v4l2_subdev_route *route)
{
	if (route)
		++route;
	else
		route = &routing->routes[0];

	for (; route < routing->routes + routing->num_routes; ++route) {
		if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
			continue;

		return route;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_next_active_route);

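/*
 * __v4l2_subdev_next_active_route() is the backing iterator for the
 * for_each_active_route() macro used throughout this file, which walks only
 * the routes that have V4L2_SUBDEV_ROUTE_FL_ACTIVE set:
 *
 *	struct v4l2_subdev_route *route;
 *
 *	for_each_active_route(&state->routing, route)
 *		pr_debug("route %u/%u -> %u/%u\n",
 *			 route->sink_pad, route->sink_stream,
 *			 route->source_pad, route->source_stream);
 *
 * (illustrative only; the pr_debug() is not part of this file).
 */
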
int v4l2_subdev_set_routing_with_fmt(struct v4l2_subdev *sd,
				     struct v4l2_subdev_state *state,
				     struct v4l2_subdev_krouting *routing,
				     const struct v4l2_mbus_framefmt *fmt)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;
	int ret;

	ret = v4l2_subdev_set_routing(sd, state, routing);
	if (ret)
		return ret;

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i)
		stream_configs->configs[i].fmt = *fmt;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing_with_fmt);

struct v4l2_mbus_framefmt *
v4l2_subdev_state_get_stream_format(struct v4l2_subdev_state *state,
				    unsigned int pad, u32 stream)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;

	lockdep_assert_held(state->lock);

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i) {
		if (stream_configs->configs[i].pad == pad &&
		    stream_configs->configs[i].stream == stream)
			return &stream_configs->configs[i].fmt;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_stream_format);

struct v4l2_rect *
v4l2_subdev_state_get_stream_crop(struct v4l2_subdev_state *state,
				  unsigned int pad, u32 stream)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;

	lockdep_assert_held(state->lock);

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i) {
		if (stream_configs->configs[i].pad == pad &&
		    stream_configs->configs[i].stream == stream)
			return &stream_configs->configs[i].crop;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_stream_crop);

struct v4l2_rect *
v4l2_subdev_state_get_stream_compose(struct v4l2_subdev_state *state,
				     unsigned int pad, u32 stream)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;

	lockdep_assert_held(state->lock);

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i) {
		if (stream_configs->configs[i].pad == pad &&
		    stream_configs->configs[i].stream == stream)
			return &stream_configs->configs[i].compose;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_stream_compose);

int v4l2_subdev_routing_find_opposite_end(const struct v4l2_subdev_krouting *routing,
					  u32 pad, u32 stream, u32 *other_pad,
					  u32 *other_stream)
{
	unsigned int i;

	for (i = 0; i < routing->num_routes; ++i) {
		struct v4l2_subdev_route *route = &routing->routes[i];

		if (route->source_pad == pad &&
		    route->source_stream == stream) {
			if (other_pad)
				*other_pad = route->sink_pad;
			if (other_stream)
				*other_stream = route->sink_stream;
			return 0;
		}

		if (route->sink_pad == pad && route->sink_stream == stream) {
			if (other_pad)
				*other_pad = route->source_pad;
			if (other_stream)
				*other_stream = route->source_stream;
			return 0;
		}
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_routing_find_opposite_end);

struct v4l2_mbus_framefmt *
v4l2_subdev_state_get_opposite_stream_format(struct v4l2_subdev_state *state,
					     u32 pad, u32 stream)
{
	u32 other_pad, other_stream;
	int ret;

	ret = v4l2_subdev_routing_find_opposite_end(&state->routing,
						    pad, stream,
						    &other_pad, &other_stream);
	if (ret)
		return NULL;

	return v4l2_subdev_state_get_stream_format(state, other_pad,
						   other_stream);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_opposite_stream_format);

u64 v4l2_subdev_state_xlate_streams(const struct v4l2_subdev_state *state,
				    u32 pad0, u32 pad1, u64 *streams)
{
	const struct v4l2_subdev_krouting *routing = &state->routing;
	struct v4l2_subdev_route *route;
	u64 streams0 = 0;
	u64 streams1 = 0;

	for_each_active_route(routing, route) {
		if (route->sink_pad == pad0 && route->source_pad == pad1 &&
		    (*streams & BIT_ULL(route->sink_stream))) {
			streams0 |= BIT_ULL(route->sink_stream);
			streams1 |= BIT_ULL(route->source_stream);
		}
		if (route->source_pad == pad0 && route->sink_pad == pad1 &&
		    (*streams & BIT_ULL(route->source_stream))) {
			streams0 |= BIT_ULL(route->source_stream);
			streams1 |= BIT_ULL(route->sink_stream);
		}
	}

	*streams = streams0;
	return streams1;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_xlate_streams);

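/*
 * A worked example for v4l2_subdev_state_xlate_streams(): with a single
 * active route sink pad 0 / stream 2 -> source pad 1 / stream 0, calling it
 * with pad0 = 0, pad1 = 1 and *streams = BIT_ULL(2) leaves *streams as
 * BIT_ULL(2) (the subset of the input mask matched on pad0) and returns
 * BIT_ULL(0), the translated mask on pad1. Streams in the input mask with
 * no route between the two pads are dropped from both masks.
 */
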
int v4l2_subdev_routing_validate(struct v4l2_subdev *sd,
				 const struct v4l2_subdev_krouting *routing,
				 enum v4l2_subdev_routing_restriction disallow)
{
	u32 *remote_pads = NULL;
	unsigned int i, j;
	int ret = -EINVAL;

	if (disallow & V4L2_SUBDEV_ROUTING_NO_STREAM_MIX) {
		remote_pads = kcalloc(sd->entity.num_pads, sizeof(*remote_pads),
				      GFP_KERNEL);
		if (!remote_pads)
			return -ENOMEM;

		for (i = 0; i < sd->entity.num_pads; ++i)
			remote_pads[i] = U32_MAX;
	}

	for (i = 0; i < routing->num_routes; ++i) {
		const struct v4l2_subdev_route *route = &routing->routes[i];

		/* Validate the sink and source pad numbers. */
		if (route->sink_pad >= sd->entity.num_pads ||
		    !(sd->entity.pads[route->sink_pad].flags & MEDIA_PAD_FL_SINK)) {
			dev_dbg(sd->dev, "route %u sink (%u) is not a sink pad\n",
				i, route->sink_pad);
			goto out;
		}

		if (route->source_pad >= sd->entity.num_pads ||
		    !(sd->entity.pads[route->source_pad].flags & MEDIA_PAD_FL_SOURCE)) {
			dev_dbg(sd->dev, "route %u source (%u) is not a source pad\n",
				i, route->source_pad);
			goto out;
		}

		/*
		 * V4L2_SUBDEV_ROUTING_NO_STREAM_MIX: Streams on the same pad
		 * may not be routed to streams on different pads.
		 */
		if (disallow & V4L2_SUBDEV_ROUTING_NO_STREAM_MIX) {
			if (remote_pads[route->sink_pad] != U32_MAX &&
			    remote_pads[route->sink_pad] != route->source_pad) {
				dev_dbg(sd->dev,
					"route %u attempts to mix %s streams\n",
					i, "sink");
				goto out;
			}

			if (remote_pads[route->source_pad] != U32_MAX &&
			    remote_pads[route->source_pad] != route->sink_pad) {
				dev_dbg(sd->dev,
					"route %u attempts to mix %s streams\n",
					i, "source");
				goto out;
			}

			remote_pads[route->sink_pad] = route->source_pad;
			remote_pads[route->source_pad] = route->sink_pad;
		}

		for (j = i + 1; j < routing->num_routes; ++j) {
			const struct v4l2_subdev_route *r = &routing->routes[j];

			/*
			 * V4L2_SUBDEV_ROUTING_NO_1_TO_N: No two routes can
			 * originate from the same (sink) stream.
			 */
			if ((disallow & V4L2_SUBDEV_ROUTING_NO_1_TO_N) &&
			    route->sink_pad == r->sink_pad &&
			    route->sink_stream == r->sink_stream) {
				dev_dbg(sd->dev,
					"routes %u and %u originate from same sink (%u/%u)\n",
					i, j, route->sink_pad,
					route->sink_stream);
				goto out;
			}

			/*
			 * V4L2_SUBDEV_ROUTING_NO_N_TO_1: No two routes can end
			 * at the same (source) stream.
			 */
			if ((disallow & V4L2_SUBDEV_ROUTING_NO_N_TO_1) &&
			    route->source_pad == r->source_pad &&
			    route->source_stream == r->source_stream) {
				dev_dbg(sd->dev,
					"routes %u and %u end at same source (%u/%u)\n",
					i, j, route->source_pad,
					route->source_stream);
				goto out;
			}
		}
	}

	ret = 0;

out:
	kfree(remote_pads);
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_routing_validate);

static int v4l2_subdev_enable_streams_fallback(struct v4l2_subdev *sd, u32 pad,
					       u64 streams_mask)
{
	struct device *dev = sd->entity.graph_obj.mdev->dev;
	unsigned int i;
	int ret;

	/*
	 * The subdev doesn't implement pad-based stream enable, fall back
	 * on the .s_stream() operation. This can only be done for subdevs that
	 * have a single source pad, as sd->enabled_streams is global to the
	 * subdev.
	 */
	if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
		return -EOPNOTSUPP;

	for (i = 0; i < sd->entity.num_pads; ++i) {
		if (i != pad && sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)
			return -EOPNOTSUPP;
	}

	if (sd->enabled_streams & streams_mask) {
		dev_dbg(dev, "set of streams %#llx already enabled on %s:%u\n",
			streams_mask, sd->entity.name, pad);
		return -EALREADY;
	}

	/* Start streaming when the first streams are enabled. */
	if (!sd->enabled_streams) {
		ret = v4l2_subdev_call(sd, video, s_stream, 1);
		if (ret)
			return ret;
	}

	sd->enabled_streams |= streams_mask;

	return 0;
}

int v4l2_subdev_enable_streams(struct v4l2_subdev *sd, u32 pad,
			       u64 streams_mask)
{
	struct device *dev = sd->entity.graph_obj.mdev->dev;
	struct v4l2_subdev_state *state;
	u64 found_streams = 0;
	unsigned int i;
	int ret;

	/* A few basic sanity checks first. */
	if (pad >= sd->entity.num_pads)
		return -EINVAL;

	if (!streams_mask)
		return 0;

	/* Fallback on .s_stream() if .enable_streams() isn't available. */
	if (!sd->ops->pad || !sd->ops->pad->enable_streams)
		return v4l2_subdev_enable_streams_fallback(sd, pad,
							   streams_mask);

	state = v4l2_subdev_lock_and_get_active_state(sd);

	/*
	 * Verify that the requested streams exist and that they are not
	 * already enabled.
	 */
	for (i = 0; i < state->stream_configs.num_configs; ++i) {
		struct v4l2_subdev_stream_config *cfg =
			&state->stream_configs.configs[i];

		if (cfg->pad != pad || !(streams_mask & BIT_ULL(cfg->stream)))
			continue;

		found_streams |= BIT_ULL(cfg->stream);

		if (cfg->enabled) {
			dev_dbg(dev, "stream %u already enabled on %s:%u\n",
				cfg->stream, sd->entity.name, pad);
			ret = -EALREADY;
			goto done;
		}
	}

	if (found_streams != streams_mask) {
		dev_dbg(dev, "streams 0x%llx not found on %s:%u\n",
			streams_mask & ~found_streams, sd->entity.name, pad);
		ret = -EINVAL;
		goto done;
	}

	/* Call the .enable_streams() operation. */
	ret = v4l2_subdev_call(sd, pad, enable_streams, state, pad,
			       streams_mask);
	if (ret)
		goto done;

	/* Mark the streams as enabled. */
	for (i = 0; i < state->stream_configs.num_configs; ++i) {
		struct v4l2_subdev_stream_config *cfg =
			&state->stream_configs.configs[i];

		if (cfg->pad == pad && (streams_mask & BIT_ULL(cfg->stream)))
			cfg->enabled = true;
	}

done:
	v4l2_subdev_unlock_state(state);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_enable_streams);

static int v4l2_subdev_disable_streams_fallback(struct v4l2_subdev *sd, u32 pad,
						u64 streams_mask)
{
	struct device *dev = sd->entity.graph_obj.mdev->dev;
	unsigned int i;
	int ret;

	/*
	 * If the subdev doesn't implement pad-based stream enable, fall back
	 * on the .s_stream() operation. This can only be done for subdevs that
	 * have a single source pad, as sd->enabled_streams is global to the
	 * subdev.
	 */
	if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
		return -EOPNOTSUPP;

	for (i = 0; i < sd->entity.num_pads; ++i) {
		if (i != pad && sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)
			return -EOPNOTSUPP;
	}

	if ((sd->enabled_streams & streams_mask) != streams_mask) {
		dev_dbg(dev, "set of streams %#llx already disabled on %s:%u\n",
			streams_mask, sd->entity.name, pad);
		return -EALREADY;
	}

	/* Stop streaming when the last streams are disabled. */
	if (!(sd->enabled_streams & ~streams_mask)) {
		ret = v4l2_subdev_call(sd, video, s_stream, 0);
		if (ret)
			return ret;
	}

	sd->enabled_streams &= ~streams_mask;

	return 0;
}

int v4l2_subdev_disable_streams(struct v4l2_subdev *sd, u32 pad,
				u64 streams_mask)
{
	struct device *dev = sd->entity.graph_obj.mdev->dev;
	struct v4l2_subdev_state *state;
	u64 found_streams = 0;
	unsigned int i;
	int ret;

	/* A few basic sanity checks first. */
	if (pad >= sd->entity.num_pads)
		return -EINVAL;

	if (!streams_mask)
		return 0;

	/* Fallback on .s_stream() if .disable_streams() isn't available. */
	if (!sd->ops->pad || !sd->ops->pad->disable_streams)
		return v4l2_subdev_disable_streams_fallback(sd, pad,
							    streams_mask);

	state = v4l2_subdev_lock_and_get_active_state(sd);

	/*
	 * Verify that the requested streams exist and that they are not
	 * already disabled.
	 */
	for (i = 0; i < state->stream_configs.num_configs; ++i) {
		struct v4l2_subdev_stream_config *cfg =
			&state->stream_configs.configs[i];

		if (cfg->pad != pad || !(streams_mask & BIT_ULL(cfg->stream)))
			continue;

		found_streams |= BIT_ULL(cfg->stream);

		if (!cfg->enabled) {
			dev_dbg(dev, "stream %u already disabled on %s:%u\n",
				cfg->stream, sd->entity.name, pad);
			ret = -EALREADY;
			goto done;
		}
	}

	if (found_streams != streams_mask) {
		dev_dbg(dev, "streams 0x%llx not found on %s:%u\n",
			streams_mask & ~found_streams, sd->entity.name, pad);
		ret = -EINVAL;
		goto done;
	}

	/* Call the .disable_streams() operation. */
	ret = v4l2_subdev_call(sd, pad, disable_streams, state, pad,
			       streams_mask);
	if (ret)
		goto done;

	/* Mark the streams as disabled. */
	for (i = 0; i < state->stream_configs.num_configs; ++i) {
		struct v4l2_subdev_stream_config *cfg =
			&state->stream_configs.configs[i];

		if (cfg->pad == pad && (streams_mask & BIT_ULL(cfg->stream)))
			cfg->enabled = false;
	}

done:
	v4l2_subdev_unlock_state(state);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_disable_streams);

int v4l2_subdev_s_stream_helper(struct v4l2_subdev *sd, int enable)
{
	struct v4l2_subdev_state *state;
	struct v4l2_subdev_route *route;
	struct media_pad *pad;
	u64 source_mask = 0;
	int pad_index = -1;

	/*
	 * Find the source pad. This helper is meant for subdevs that have a
	 * single source pad, so failures shouldn't happen, but catch them
	 * loudly nonetheless as they indicate a driver bug.
	 */
	media_entity_for_each_pad(&sd->entity, pad) {
		if (pad->flags & MEDIA_PAD_FL_SOURCE) {
			pad_index = pad->index;
			break;
		}
	}

	if (WARN_ON(pad_index == -1))
		return -EINVAL;

	/*
	 * As there's a single source pad, just collect all the source streams.
	 */
	state = v4l2_subdev_lock_and_get_active_state(sd);

	for_each_active_route(&state->routing, route)
		source_mask |= BIT_ULL(route->source_stream);

	v4l2_subdev_unlock_state(state);

	if (enable)
		return v4l2_subdev_enable_streams(sd, pad_index, source_mask);
	else
		return v4l2_subdev_disable_streams(sd, pad_index, source_mask);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_s_stream_helper);

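/*
 * v4l2_subdev_s_stream_helper() lets a streams-aware subdev with a single
 * source pad keep exposing the legacy .s_stream() video op on top of
 * .enable_streams()/.disable_streams(). A minimal wiring sketch
 * (hypothetical driver ops):
 *
 *	static const struct v4l2_subdev_video_ops my_video_ops = {
 *		.s_stream = v4l2_subdev_s_stream_helper,
 *	};
 */
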
#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */

#endif /* CONFIG_MEDIA_CONTROLLER */

void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops)
{
	INIT_LIST_HEAD(&sd->list);
	BUG_ON(!ops);
	sd->ops = ops;
	sd->v4l2_dev = NULL;
	sd->flags = 0;
	sd->name[0] = '\0';
	sd->grp_id = 0;
	sd->dev_priv = NULL;
	sd->host_priv = NULL;
#if defined(CONFIG_MEDIA_CONTROLLER)
	sd->entity.name = sd->name;
	sd->entity.obj_type = MEDIA_ENTITY_TYPE_V4L2_SUBDEV;
	sd->entity.function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
#endif
}
EXPORT_SYMBOL(v4l2_subdev_init);

void v4l2_subdev_notify_event(struct v4l2_subdev *sd,
			      const struct v4l2_event *ev)
{
	v4l2_event_queue(sd->devnode, ev);
	v4l2_subdev_notify(sd, V4L2_DEVICE_NOTIFY_EVENT, (void *)ev);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_notify_event);