1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * V4L2 asynchronous subdevice registration API 4 * 5 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de> 6 */ 7 8 #include <linux/debugfs.h> 9 #include <linux/device.h> 10 #include <linux/err.h> 11 #include <linux/i2c.h> 12 #include <linux/list.h> 13 #include <linux/mm.h> 14 #include <linux/module.h> 15 #include <linux/mutex.h> 16 #include <linux/of.h> 17 #include <linux/platform_device.h> 18 #include <linux/seq_file.h> 19 #include <linux/slab.h> 20 #include <linux/types.h> 21 22 #include <media/v4l2-async.h> 23 #include <media/v4l2-device.h> 24 #include <media/v4l2-fwnode.h> 25 #include <media/v4l2-subdev.h> 26 27 #include "v4l2-subdev-priv.h" 28 29 static int v4l2_async_nf_call_bound(struct v4l2_async_notifier *n, 30 struct v4l2_subdev *subdev, 31 struct v4l2_async_connection *asc) 32 { 33 if (!n->ops || !n->ops->bound) 34 return 0; 35 36 return n->ops->bound(n, subdev, asc); 37 } 38 39 static void v4l2_async_nf_call_unbind(struct v4l2_async_notifier *n, 40 struct v4l2_subdev *subdev, 41 struct v4l2_async_connection *asc) 42 { 43 if (!n->ops || !n->ops->unbind) 44 return; 45 46 n->ops->unbind(n, subdev, asc); 47 } 48 49 static int v4l2_async_nf_call_complete(struct v4l2_async_notifier *n) 50 { 51 if (!n->ops || !n->ops->complete) 52 return 0; 53 54 return n->ops->complete(n); 55 } 56 57 static void v4l2_async_nf_call_destroy(struct v4l2_async_notifier *n, 58 struct v4l2_async_connection *asc) 59 { 60 if (!n->ops || !n->ops->destroy) 61 return; 62 63 n->ops->destroy(asc); 64 } 65 66 static bool match_i2c(struct v4l2_async_notifier *notifier, 67 struct v4l2_subdev *sd, 68 struct v4l2_async_match_desc *match) 69 { 70 #if IS_ENABLED(CONFIG_I2C) 71 struct i2c_client *client = i2c_verify_client(sd->dev); 72 73 return client && 74 match->i2c.adapter_id == client->adapter->nr && 75 match->i2c.address == client->addr; 76 #else 77 return false; 78 #endif 79 } 80 81 static struct device *notifier_dev(struct v4l2_async_notifier *notifier) 82 { 83 if (notifier->sd) 84 return notifier->sd->dev; 85 86 if (notifier->v4l2_dev) 87 return notifier->v4l2_dev->dev; 88 89 return NULL; 90 } 91 92 static bool 93 match_fwnode_one(struct v4l2_async_notifier *notifier, 94 struct v4l2_subdev *sd, struct fwnode_handle *sd_fwnode, 95 struct v4l2_async_match_desc *match) 96 { 97 struct fwnode_handle *asd_dev_fwnode; 98 bool ret; 99 100 dev_dbg(notifier_dev(notifier), 101 "v4l2-async: fwnode match: need %pfw, trying %pfw\n", 102 sd_fwnode, match->fwnode); 103 104 if (sd_fwnode == match->fwnode) { 105 dev_dbg(notifier_dev(notifier), 106 "v4l2-async: direct match found\n"); 107 return true; 108 } 109 110 if (!fwnode_graph_is_endpoint(match->fwnode)) { 111 dev_dbg(notifier_dev(notifier), 112 "v4l2-async: direct match not found\n"); 113 return false; 114 } 115 116 asd_dev_fwnode = fwnode_graph_get_port_parent(match->fwnode); 117 118 ret = sd_fwnode == asd_dev_fwnode; 119 120 fwnode_handle_put(asd_dev_fwnode); 121 122 dev_dbg(notifier_dev(notifier), 123 "v4l2-async: device--endpoint match %sfound\n", 124 ret ? "" : "not "); 125 126 return ret; 127 } 128 129 static bool match_fwnode(struct v4l2_async_notifier *notifier, 130 struct v4l2_subdev *sd, 131 struct v4l2_async_match_desc *match) 132 { 133 dev_dbg(notifier_dev(notifier), 134 "v4l2-async: matching for notifier %pfw, sd fwnode %pfw\n", 135 dev_fwnode(notifier_dev(notifier)), sd->fwnode); 136 137 if (!list_empty(&sd->async_subdev_endpoint_list)) { 138 struct v4l2_async_subdev_endpoint *ase; 139 140 dev_dbg(sd->dev, 141 "v4l2-async: endpoint fwnode list available, looking for %pfw\n", 142 match->fwnode); 143 144 list_for_each_entry(ase, &sd->async_subdev_endpoint_list, 145 async_subdev_endpoint_entry) { 146 bool matched = ase->endpoint == match->fwnode; 147 148 dev_dbg(sd->dev, 149 "v4l2-async: endpoint-endpoint match %sfound with %pfw\n", 150 matched ? "" : "not ", ase->endpoint); 151 152 if (matched) 153 return true; 154 } 155 156 dev_dbg(sd->dev, "async: no endpoint matched\n"); 157 158 return false; 159 } 160 161 if (match_fwnode_one(notifier, sd, sd->fwnode, match)) 162 return true; 163 164 /* Also check the secondary fwnode. */ 165 if (IS_ERR_OR_NULL(sd->fwnode->secondary)) 166 return false; 167 168 dev_dbg(notifier_dev(notifier), 169 "v4l2-async: trying secondary fwnode match\n"); 170 171 return match_fwnode_one(notifier, sd, sd->fwnode->secondary, match); 172 } 173 174 static LIST_HEAD(subdev_list); 175 static LIST_HEAD(notifier_list); 176 static DEFINE_MUTEX(list_lock); 177 178 static struct v4l2_async_connection * 179 v4l2_async_find_match(struct v4l2_async_notifier *notifier, 180 struct v4l2_subdev *sd) 181 { 182 bool (*match)(struct v4l2_async_notifier *notifier, 183 struct v4l2_subdev *sd, 184 struct v4l2_async_match_desc *match); 185 struct v4l2_async_connection *asc; 186 187 list_for_each_entry(asc, ¬ifier->waiting_list, asc_entry) { 188 /* bus_type has been verified valid before */ 189 switch (asc->match.type) { 190 case V4L2_ASYNC_MATCH_TYPE_I2C: 191 match = match_i2c; 192 break; 193 case V4L2_ASYNC_MATCH_TYPE_FWNODE: 194 match = match_fwnode; 195 break; 196 default: 197 /* Cannot happen, unless someone breaks us */ 198 WARN_ON(true); 199 return NULL; 200 } 201 202 /* match cannot be NULL here */ 203 if (match(notifier, sd, &asc->match)) 204 return asc; 205 } 206 207 return NULL; 208 } 209 210 /* Compare two async match descriptors for equivalence */ 211 static bool v4l2_async_match_equal(struct v4l2_async_match_desc *match1, 212 struct v4l2_async_match_desc *match2) 213 { 214 if (match1->type != match2->type) 215 return false; 216 217 switch (match1->type) { 218 case V4L2_ASYNC_MATCH_TYPE_I2C: 219 return match1->i2c.adapter_id == match2->i2c.adapter_id && 220 match1->i2c.address == match2->i2c.address; 221 case V4L2_ASYNC_MATCH_TYPE_FWNODE: 222 return match1->fwnode == match2->fwnode; 223 default: 224 break; 225 } 226 227 return false; 228 } 229 230 /* Find the sub-device notifier registered by a sub-device driver. */ 231 static struct v4l2_async_notifier * 232 v4l2_async_find_subdev_notifier(struct v4l2_subdev *sd) 233 { 234 struct v4l2_async_notifier *n; 235 236 list_for_each_entry(n, ¬ifier_list, notifier_entry) 237 if (n->sd == sd) 238 return n; 239 240 return NULL; 241 } 242 243 /* Get v4l2_device related to the notifier if one can be found. */ 244 static struct v4l2_device * 245 v4l2_async_nf_find_v4l2_dev(struct v4l2_async_notifier *notifier) 246 { 247 while (notifier->parent) 248 notifier = notifier->parent; 249 250 return notifier->v4l2_dev; 251 } 252 253 /* 254 * Return true if all child sub-device notifiers are complete, false otherwise. 255 */ 256 static bool 257 v4l2_async_nf_can_complete(struct v4l2_async_notifier *notifier) 258 { 259 struct v4l2_async_connection *asc; 260 261 if (!list_empty(¬ifier->waiting_list)) 262 return false; 263 264 list_for_each_entry(asc, ¬ifier->done_list, asc_entry) { 265 struct v4l2_async_notifier *subdev_notifier = 266 v4l2_async_find_subdev_notifier(asc->sd); 267 268 if (subdev_notifier && 269 !v4l2_async_nf_can_complete(subdev_notifier)) 270 return false; 271 } 272 273 return true; 274 } 275 276 /* 277 * Complete the master notifier if possible. This is done when all async 278 * sub-devices have been bound; v4l2_device is also available then. 279 */ 280 static int 281 v4l2_async_nf_try_complete(struct v4l2_async_notifier *notifier) 282 { 283 struct v4l2_async_notifier *__notifier = notifier; 284 285 /* Quick check whether there are still more sub-devices here. */ 286 if (!list_empty(¬ifier->waiting_list)) 287 return 0; 288 289 if (notifier->sd) 290 dev_dbg(notifier_dev(notifier), 291 "v4l2-async: trying to complete\n"); 292 293 /* Check the entire notifier tree; find the root notifier first. */ 294 while (notifier->parent) 295 notifier = notifier->parent; 296 297 /* This is root if it has v4l2_dev. */ 298 if (!notifier->v4l2_dev) { 299 dev_dbg(notifier_dev(__notifier), 300 "v4l2-async: V4L2 device not available\n"); 301 return 0; 302 } 303 304 /* Is everything ready? */ 305 if (!v4l2_async_nf_can_complete(notifier)) 306 return 0; 307 308 dev_dbg(notifier_dev(__notifier), "v4l2-async: complete\n"); 309 310 return v4l2_async_nf_call_complete(notifier); 311 } 312 313 static int 314 v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier); 315 316 static int v4l2_async_create_ancillary_links(struct v4l2_async_notifier *n, 317 struct v4l2_subdev *sd) 318 { 319 struct media_link *link = NULL; 320 321 #if IS_ENABLED(CONFIG_MEDIA_CONTROLLER) 322 323 if (sd->entity.function != MEDIA_ENT_F_LENS && 324 sd->entity.function != MEDIA_ENT_F_FLASH) 325 return 0; 326 327 link = media_create_ancillary_link(&n->sd->entity, &sd->entity); 328 329 #endif 330 331 return IS_ERR(link) ? PTR_ERR(link) : 0; 332 } 333 334 static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier, 335 struct v4l2_device *v4l2_dev, 336 struct v4l2_subdev *sd, 337 struct v4l2_async_connection *asc) 338 { 339 struct v4l2_async_notifier *subdev_notifier; 340 bool registered = false; 341 int ret; 342 343 if (list_empty(&sd->asc_list)) { 344 ret = v4l2_device_register_subdev(v4l2_dev, sd); 345 if (ret < 0) 346 return ret; 347 registered = true; 348 } 349 350 ret = v4l2_async_nf_call_bound(notifier, sd, asc); 351 if (ret < 0) { 352 if (asc->match.type == V4L2_ASYNC_MATCH_TYPE_FWNODE) 353 dev_dbg(notifier_dev(notifier), 354 "failed binding %pfw (%d)\n", 355 asc->match.fwnode, ret); 356 goto err_unregister_subdev; 357 } 358 359 if (registered) { 360 /* 361 * Depending of the function of the entities involved, we may 362 * want to create links between them (for example between a 363 * sensor and its lens or between a sensor's source pad and the 364 * connected device's sink pad). 365 */ 366 ret = v4l2_async_create_ancillary_links(notifier, sd); 367 if (ret) { 368 if (asc->match.type == V4L2_ASYNC_MATCH_TYPE_FWNODE) 369 dev_dbg(notifier_dev(notifier), 370 "failed creating links for %pfw (%d)\n", 371 asc->match.fwnode, ret); 372 goto err_call_unbind; 373 } 374 } 375 376 list_add(&asc->asc_subdev_entry, &sd->asc_list); 377 asc->sd = sd; 378 379 /* Move from the waiting list to notifier's done */ 380 list_move(&asc->asc_entry, ¬ifier->done_list); 381 382 dev_dbg(notifier_dev(notifier), "v4l2-async: %s bound (ret %d)\n", 383 dev_name(sd->dev), ret); 384 385 /* 386 * See if the sub-device has a notifier. If not, return here. 387 */ 388 subdev_notifier = v4l2_async_find_subdev_notifier(sd); 389 if (!subdev_notifier || subdev_notifier->parent) 390 return 0; 391 392 /* 393 * Proceed with checking for the sub-device notifier's async 394 * sub-devices, and return the result. The error will be handled by the 395 * caller. 396 */ 397 subdev_notifier->parent = notifier; 398 399 return v4l2_async_nf_try_all_subdevs(subdev_notifier); 400 401 err_call_unbind: 402 v4l2_async_nf_call_unbind(notifier, sd, asc); 403 list_del(&asc->asc_subdev_entry); 404 405 err_unregister_subdev: 406 if (registered) 407 v4l2_device_unregister_subdev(sd); 408 409 return ret; 410 } 411 412 /* Test all async sub-devices in a notifier for a match. */ 413 static int 414 v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier) 415 { 416 struct v4l2_device *v4l2_dev = 417 v4l2_async_nf_find_v4l2_dev(notifier); 418 struct v4l2_subdev *sd; 419 420 if (!v4l2_dev) 421 return 0; 422 423 dev_dbg(notifier_dev(notifier), "v4l2-async: trying all sub-devices\n"); 424 425 again: 426 list_for_each_entry(sd, &subdev_list, async_list) { 427 struct v4l2_async_connection *asc; 428 int ret; 429 430 asc = v4l2_async_find_match(notifier, sd); 431 if (!asc) 432 continue; 433 434 dev_dbg(notifier_dev(notifier), 435 "v4l2-async: match found, subdev %s\n", sd->name); 436 437 ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asc); 438 if (ret < 0) 439 return ret; 440 441 /* 442 * v4l2_async_match_notify() may lead to registering a 443 * new notifier and thus changing the async subdevs 444 * list. In order to proceed safely from here, restart 445 * parsing the list from the beginning. 446 */ 447 goto again; 448 } 449 450 return 0; 451 } 452 453 static void v4l2_async_unbind_subdev_one(struct v4l2_async_notifier *notifier, 454 struct v4l2_async_connection *asc) 455 { 456 list_move_tail(&asc->asc_entry, ¬ifier->waiting_list); 457 if (list_is_singular(&asc->asc_subdev_entry)) { 458 v4l2_async_nf_call_unbind(notifier, asc->sd, asc); 459 v4l2_device_unregister_subdev(asc->sd); 460 asc->sd = NULL; 461 } 462 list_del(&asc->asc_subdev_entry); 463 } 464 465 /* Unbind all sub-devices in the notifier tree. */ 466 static void 467 v4l2_async_nf_unbind_all_subdevs(struct v4l2_async_notifier *notifier) 468 { 469 struct v4l2_async_connection *asc, *asc_tmp; 470 471 list_for_each_entry_safe(asc, asc_tmp, ¬ifier->done_list, 472 asc_entry) { 473 struct v4l2_async_notifier *subdev_notifier = 474 v4l2_async_find_subdev_notifier(asc->sd); 475 476 if (subdev_notifier) 477 v4l2_async_nf_unbind_all_subdevs(subdev_notifier); 478 479 v4l2_async_unbind_subdev_one(notifier, asc); 480 } 481 482 notifier->parent = NULL; 483 } 484 485 /* See if an async sub-device can be found in a notifier's lists. */ 486 static bool 487 v4l2_async_nf_has_async_match_entry(struct v4l2_async_notifier *notifier, 488 struct v4l2_async_match_desc *match) 489 { 490 struct v4l2_async_connection *asc; 491 492 list_for_each_entry(asc, ¬ifier->waiting_list, asc_entry) 493 if (v4l2_async_match_equal(&asc->match, match)) 494 return true; 495 496 list_for_each_entry(asc, ¬ifier->done_list, asc_entry) 497 if (v4l2_async_match_equal(&asc->match, match)) 498 return true; 499 500 return false; 501 } 502 503 /* 504 * Find out whether an async sub-device was set up already or whether it exists 505 * in a given notifier. 506 */ 507 static bool 508 v4l2_async_nf_has_async_match(struct v4l2_async_notifier *notifier, 509 struct v4l2_async_match_desc *match) 510 { 511 struct list_head *heads[] = { 512 ¬ifier->waiting_list, 513 ¬ifier->done_list, 514 }; 515 unsigned int i; 516 517 lockdep_assert_held(&list_lock); 518 519 /* Check that an asd is not being added more than once. */ 520 for (i = 0; i < ARRAY_SIZE(heads); i++) { 521 struct v4l2_async_connection *asc; 522 523 list_for_each_entry(asc, heads[i], asc_entry) { 524 if (&asc->match == match) 525 continue; 526 if (v4l2_async_match_equal(&asc->match, match)) 527 return true; 528 } 529 } 530 531 /* Check that an asc does not exist in other notifiers. */ 532 list_for_each_entry(notifier, ¬ifier_list, notifier_entry) 533 if (v4l2_async_nf_has_async_match_entry(notifier, match)) 534 return true; 535 536 return false; 537 } 538 539 static int v4l2_async_nf_match_valid(struct v4l2_async_notifier *notifier, 540 struct v4l2_async_match_desc *match) 541 { 542 struct device *dev = notifier_dev(notifier); 543 544 switch (match->type) { 545 case V4L2_ASYNC_MATCH_TYPE_I2C: 546 case V4L2_ASYNC_MATCH_TYPE_FWNODE: 547 if (v4l2_async_nf_has_async_match(notifier, match)) { 548 dev_dbg(dev, "v4l2-async: match descriptor already listed in a notifier\n"); 549 return -EEXIST; 550 } 551 break; 552 default: 553 dev_err(dev, "v4l2-async: Invalid match type %u on %p\n", 554 match->type, match); 555 return -EINVAL; 556 } 557 558 return 0; 559 } 560 561 void v4l2_async_nf_init(struct v4l2_async_notifier *notifier, 562 struct v4l2_device *v4l2_dev) 563 { 564 INIT_LIST_HEAD(¬ifier->waiting_list); 565 INIT_LIST_HEAD(¬ifier->done_list); 566 notifier->v4l2_dev = v4l2_dev; 567 } 568 EXPORT_SYMBOL(v4l2_async_nf_init); 569 570 void v4l2_async_subdev_nf_init(struct v4l2_async_notifier *notifier, 571 struct v4l2_subdev *sd) 572 { 573 INIT_LIST_HEAD(¬ifier->waiting_list); 574 INIT_LIST_HEAD(¬ifier->done_list); 575 notifier->sd = sd; 576 } 577 EXPORT_SYMBOL_GPL(v4l2_async_subdev_nf_init); 578 579 static int __v4l2_async_nf_register(struct v4l2_async_notifier *notifier) 580 { 581 struct v4l2_async_connection *asc; 582 int ret; 583 584 mutex_lock(&list_lock); 585 586 list_for_each_entry(asc, ¬ifier->waiting_list, asc_entry) { 587 ret = v4l2_async_nf_match_valid(notifier, &asc->match); 588 if (ret) 589 goto err_unlock; 590 } 591 592 ret = v4l2_async_nf_try_all_subdevs(notifier); 593 if (ret < 0) 594 goto err_unbind; 595 596 ret = v4l2_async_nf_try_complete(notifier); 597 if (ret < 0) 598 goto err_unbind; 599 600 /* Keep also completed notifiers on the list */ 601 list_add(¬ifier->notifier_entry, ¬ifier_list); 602 603 mutex_unlock(&list_lock); 604 605 return 0; 606 607 err_unbind: 608 /* 609 * On failure, unbind all sub-devices registered through this notifier. 610 */ 611 v4l2_async_nf_unbind_all_subdevs(notifier); 612 613 err_unlock: 614 mutex_unlock(&list_lock); 615 616 return ret; 617 } 618 619 int v4l2_async_nf_register(struct v4l2_async_notifier *notifier) 620 { 621 int ret; 622 623 if (WARN_ON(!notifier->v4l2_dev == !notifier->sd)) 624 return -EINVAL; 625 626 ret = __v4l2_async_nf_register(notifier); 627 if (ret) 628 notifier->v4l2_dev = NULL; 629 630 return ret; 631 } 632 EXPORT_SYMBOL(v4l2_async_nf_register); 633 634 static void 635 __v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier) 636 { 637 if (!notifier || (!notifier->v4l2_dev && !notifier->sd)) 638 return; 639 640 v4l2_async_nf_unbind_all_subdevs(notifier); 641 642 list_del(¬ifier->notifier_entry); 643 } 644 645 void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier) 646 { 647 mutex_lock(&list_lock); 648 649 __v4l2_async_nf_unregister(notifier); 650 651 mutex_unlock(&list_lock); 652 } 653 EXPORT_SYMBOL(v4l2_async_nf_unregister); 654 655 static void __v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier) 656 { 657 struct v4l2_async_connection *asc, *tmp; 658 659 if (!notifier || !notifier->waiting_list.next) 660 return; 661 662 WARN_ON(!list_empty(¬ifier->done_list)); 663 664 list_for_each_entry_safe(asc, tmp, ¬ifier->waiting_list, asc_entry) { 665 list_del(&asc->asc_entry); 666 v4l2_async_nf_call_destroy(notifier, asc); 667 668 if (asc->match.type == V4L2_ASYNC_MATCH_TYPE_FWNODE) 669 fwnode_handle_put(asc->match.fwnode); 670 671 kfree(asc); 672 } 673 674 notifier->sd = NULL; 675 notifier->v4l2_dev = NULL; 676 } 677 678 void v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier) 679 { 680 mutex_lock(&list_lock); 681 682 __v4l2_async_nf_cleanup(notifier); 683 684 mutex_unlock(&list_lock); 685 } 686 EXPORT_SYMBOL_GPL(v4l2_async_nf_cleanup); 687 688 static void __v4l2_async_nf_add_connection(struct v4l2_async_notifier *notifier, 689 struct v4l2_async_connection *asc) 690 { 691 mutex_lock(&list_lock); 692 693 list_add_tail(&asc->asc_entry, ¬ifier->waiting_list); 694 695 mutex_unlock(&list_lock); 696 } 697 698 struct v4l2_async_connection * 699 __v4l2_async_nf_add_fwnode(struct v4l2_async_notifier *notifier, 700 struct fwnode_handle *fwnode, 701 unsigned int asc_struct_size) 702 { 703 struct v4l2_async_connection *asc; 704 705 asc = kzalloc(asc_struct_size, GFP_KERNEL); 706 if (!asc) 707 return ERR_PTR(-ENOMEM); 708 709 asc->notifier = notifier; 710 asc->match.type = V4L2_ASYNC_MATCH_TYPE_FWNODE; 711 asc->match.fwnode = fwnode_handle_get(fwnode); 712 713 __v4l2_async_nf_add_connection(notifier, asc); 714 715 return asc; 716 } 717 EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode); 718 719 struct v4l2_async_connection * 720 __v4l2_async_nf_add_fwnode_remote(struct v4l2_async_notifier *notif, 721 struct fwnode_handle *endpoint, 722 unsigned int asc_struct_size) 723 { 724 struct v4l2_async_connection *asc; 725 struct fwnode_handle *remote; 726 727 remote = fwnode_graph_get_remote_endpoint(endpoint); 728 if (!remote) 729 return ERR_PTR(-ENOTCONN); 730 731 asc = __v4l2_async_nf_add_fwnode(notif, remote, asc_struct_size); 732 /* 733 * Calling __v4l2_async_nf_add_fwnode grabs a refcount, 734 * so drop the one we got in fwnode_graph_get_remote_port_parent. 735 */ 736 fwnode_handle_put(remote); 737 return asc; 738 } 739 EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode_remote); 740 741 struct v4l2_async_connection * 742 __v4l2_async_nf_add_i2c(struct v4l2_async_notifier *notifier, int adapter_id, 743 unsigned short address, unsigned int asc_struct_size) 744 { 745 struct v4l2_async_connection *asc; 746 747 asc = kzalloc(asc_struct_size, GFP_KERNEL); 748 if (!asc) 749 return ERR_PTR(-ENOMEM); 750 751 asc->notifier = notifier; 752 asc->match.type = V4L2_ASYNC_MATCH_TYPE_I2C; 753 asc->match.i2c.adapter_id = adapter_id; 754 asc->match.i2c.address = address; 755 756 __v4l2_async_nf_add_connection(notifier, asc); 757 758 return asc; 759 } 760 EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_i2c); 761 762 int v4l2_async_subdev_endpoint_add(struct v4l2_subdev *sd, 763 struct fwnode_handle *fwnode) 764 { 765 struct v4l2_async_subdev_endpoint *ase; 766 767 ase = kmalloc(sizeof(*ase), GFP_KERNEL); 768 if (!ase) 769 return -ENOMEM; 770 771 ase->endpoint = fwnode; 772 list_add(&ase->async_subdev_endpoint_entry, 773 &sd->async_subdev_endpoint_list); 774 775 return 0; 776 } 777 EXPORT_SYMBOL_GPL(v4l2_async_subdev_endpoint_add); 778 779 struct v4l2_async_connection * 780 v4l2_async_connection_unique(struct v4l2_subdev *sd) 781 { 782 if (!list_is_singular(&sd->asc_list)) 783 return NULL; 784 785 return list_first_entry(&sd->asc_list, 786 struct v4l2_async_connection, asc_subdev_entry); 787 } 788 EXPORT_SYMBOL_GPL(v4l2_async_connection_unique); 789 790 int v4l2_async_register_subdev(struct v4l2_subdev *sd) 791 { 792 struct v4l2_async_notifier *subdev_notifier; 793 struct v4l2_async_notifier *notifier; 794 struct v4l2_async_connection *asc; 795 int ret; 796 797 INIT_LIST_HEAD(&sd->asc_list); 798 799 /* 800 * No reference taken. The reference is held by the device (struct 801 * v4l2_subdev.dev), and async sub-device does not exist independently 802 * of the device at any point of time. 803 * 804 * The async sub-device shall always be registered for its device node, 805 * not the endpoint node. 806 */ 807 if (!sd->fwnode && sd->dev) { 808 sd->fwnode = dev_fwnode(sd->dev); 809 } else if (fwnode_graph_is_endpoint(sd->fwnode)) { 810 dev_warn(sd->dev, "sub-device fwnode is an endpoint!\n"); 811 return -EINVAL; 812 } 813 814 mutex_lock(&list_lock); 815 816 list_for_each_entry(notifier, ¬ifier_list, notifier_entry) { 817 struct v4l2_device *v4l2_dev = 818 v4l2_async_nf_find_v4l2_dev(notifier); 819 820 if (!v4l2_dev) 821 continue; 822 823 while ((asc = v4l2_async_find_match(notifier, sd))) { 824 ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, 825 asc); 826 if (ret) 827 goto err_unbind; 828 829 ret = v4l2_async_nf_try_complete(notifier); 830 if (ret) 831 goto err_unbind; 832 } 833 } 834 835 /* None matched, wait for hot-plugging */ 836 list_add(&sd->async_list, &subdev_list); 837 838 mutex_unlock(&list_lock); 839 840 return 0; 841 842 err_unbind: 843 /* 844 * Complete failed. Unbind the sub-devices bound through registering 845 * this async sub-device. 846 */ 847 subdev_notifier = v4l2_async_find_subdev_notifier(sd); 848 if (subdev_notifier) 849 v4l2_async_nf_unbind_all_subdevs(subdev_notifier); 850 851 if (asc) 852 v4l2_async_unbind_subdev_one(notifier, asc); 853 854 mutex_unlock(&list_lock); 855 856 return ret; 857 } 858 EXPORT_SYMBOL(v4l2_async_register_subdev); 859 860 void v4l2_async_unregister_subdev(struct v4l2_subdev *sd) 861 { 862 struct v4l2_async_connection *asc, *asc_tmp; 863 864 if (!sd->async_list.next) 865 return; 866 867 v4l2_subdev_put_privacy_led(sd); 868 869 mutex_lock(&list_lock); 870 871 __v4l2_async_nf_unregister(sd->subdev_notifier); 872 __v4l2_async_nf_cleanup(sd->subdev_notifier); 873 kfree(sd->subdev_notifier); 874 sd->subdev_notifier = NULL; 875 876 if (sd->asc_list.next) { 877 list_for_each_entry_safe(asc, asc_tmp, &sd->asc_list, 878 asc_subdev_entry) { 879 list_move(&asc->asc_entry, 880 &asc->notifier->waiting_list); 881 882 v4l2_async_unbind_subdev_one(asc->notifier, asc); 883 list_del(&asc->asc_subdev_entry); 884 } 885 } 886 887 list_del(&sd->async_list); 888 sd->async_list.next = NULL; 889 890 mutex_unlock(&list_lock); 891 } 892 EXPORT_SYMBOL(v4l2_async_unregister_subdev); 893 894 static void print_waiting_match(struct seq_file *s, 895 struct v4l2_async_match_desc *match) 896 { 897 switch (match->type) { 898 case V4L2_ASYNC_MATCH_TYPE_I2C: 899 seq_printf(s, " [i2c] dev=%d-%04x\n", match->i2c.adapter_id, 900 match->i2c.address); 901 break; 902 case V4L2_ASYNC_MATCH_TYPE_FWNODE: { 903 struct fwnode_handle *devnode, *fwnode = match->fwnode; 904 905 devnode = fwnode_graph_is_endpoint(fwnode) ? 906 fwnode_graph_get_port_parent(fwnode) : 907 fwnode_handle_get(fwnode); 908 909 seq_printf(s, " [fwnode] dev=%s, node=%pfw\n", 910 devnode->dev ? dev_name(devnode->dev) : "nil", 911 fwnode); 912 913 fwnode_handle_put(devnode); 914 break; 915 } 916 } 917 } 918 919 static const char * 920 v4l2_async_nf_name(struct v4l2_async_notifier *notifier) 921 { 922 if (notifier->v4l2_dev) 923 return notifier->v4l2_dev->name; 924 else if (notifier->sd) 925 return notifier->sd->name; 926 else 927 return "nil"; 928 } 929 930 static int pending_subdevs_show(struct seq_file *s, void *data) 931 { 932 struct v4l2_async_notifier *notif; 933 struct v4l2_async_connection *asc; 934 935 mutex_lock(&list_lock); 936 937 list_for_each_entry(notif, ¬ifier_list, notifier_entry) { 938 seq_printf(s, "%s:\n", v4l2_async_nf_name(notif)); 939 list_for_each_entry(asc, ¬if->waiting_list, asc_entry) 940 print_waiting_match(s, &asc->match); 941 } 942 943 mutex_unlock(&list_lock); 944 945 return 0; 946 } 947 DEFINE_SHOW_ATTRIBUTE(pending_subdevs); 948 949 static struct dentry *v4l2_async_debugfs_dir; 950 951 static int __init v4l2_async_init(void) 952 { 953 v4l2_async_debugfs_dir = debugfs_create_dir("v4l2-async", NULL); 954 debugfs_create_file("pending_async_subdevices", 0444, 955 v4l2_async_debugfs_dir, NULL, 956 &pending_subdevs_fops); 957 958 return 0; 959 } 960 961 static void __exit v4l2_async_exit(void) 962 { 963 debugfs_remove_recursive(v4l2_async_debugfs_dir); 964 } 965 966 subsys_initcall(v4l2_async_init); 967 module_exit(v4l2_async_exit); 968 969 MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>"); 970 MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>"); 971 MODULE_AUTHOR("Ezequiel Garcia <ezequiel@collabora.com>"); 972 MODULE_LICENSE("GPL"); 973