// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * phy-core.c -- Generic Phy framework.
 *
 * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
 *
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/idr.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>

/* Release handler for devices of the "phy" class (defined below). */
static void phy_release(struct device *dev);
static const struct class phy_class = {
	.name = "phy",
	.dev_release = phy_release,
};

/* Root of the per-phy debugfs hierarchy created in phy_create(). */
static struct dentry *phy_debugfs_root;
/* Protects both phy_provider_list and the phys lookup list below. */
static DEFINE_MUTEX(phy_provider_mutex);
static LIST_HEAD(phy_provider_list);
/* List of struct phy_lookup entries added by phy_create_lookup(). */
static LIST_HEAD(phys);
/* Allocator for the unique id used in each phy's device name. */
static DEFINE_IDA(phy_ida);

/* devres release: drop the consumer reference taken by devm_phy_get(). */
static void devm_phy_release(struct device *dev, void *res)
{
	struct phy *phy = *(struct phy **)res;

	phy_put(dev, phy);
}

/* devres release: unregister a provider registered via the devm_ variant. */
static void devm_phy_provider_release(struct device *dev, void *res)
{
	struct phy_provider *phy_provider = *(struct phy_provider **)res;

	of_phy_provider_unregister(phy_provider);
}

/* devres release: destroy a phy created via devm_phy_create(). */
static void devm_phy_consume(struct device *dev, void *res)
{
	struct phy *phy = *(struct phy **)res;

	phy_destroy(phy);
}

/* devres match: compare the stored phy pointer against @match_data. */
static int devm_phy_match(struct device *dev, void *res, void *match_data)
{
	struct phy **phy = res;

	return *phy == match_data;
}

/**
 * phy_create_lookup() - allocate and register PHY/device association
 * @phy: the phy of the association
 * @con_id: connection ID string on device
 * @dev_id: the device of the association
 *
 * Creates and registers phy_lookup entry.
 */
int phy_create_lookup(struct phy *phy, const char *con_id, const char *dev_id)
{
	struct phy_lookup *pl;

	if (!phy || !dev_id || !con_id)
		return -EINVAL;

	pl = kzalloc(sizeof(*pl), GFP_KERNEL);
	if (!pl)
		return -ENOMEM;

	/*
	 * The strings are stored by reference, not copied: the caller must
	 * keep @con_id and @dev_id alive until phy_remove_lookup().
	 */
	pl->dev_id = dev_id;
	pl->con_id = con_id;
	pl->phy = phy;

	mutex_lock(&phy_provider_mutex);
	list_add_tail(&pl->node, &phys);
	mutex_unlock(&phy_provider_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(phy_create_lookup);

/**
 * phy_remove_lookup() - find and remove PHY/device association
 * @phy: the phy of the association
 * @con_id: connection ID string on device
 * @dev_id: the device of the association
 *
 * Finds and unregisters phy_lookup entry that was created with
 * phy_create_lookup().
 */
void phy_remove_lookup(struct phy *phy, const char *con_id, const char *dev_id)
{
	struct phy_lookup *pl;

	if (!phy || !dev_id || !con_id)
		return;

	mutex_lock(&phy_provider_mutex);
	list_for_each_entry(pl, &phys, node)
		if (pl->phy == phy && !strcmp(pl->dev_id, dev_id) &&
		    !strcmp(pl->con_id, con_id)) {
			list_del(&pl->node);
			kfree(pl);
			break;
		}
	mutex_unlock(&phy_provider_mutex);
}
EXPORT_SYMBOL_GPL(phy_remove_lookup);

/*
 * Find the phy registered for @dev/@con_id in the lookup table.
 * Returns ERR_PTR(-ENODEV) if no matching entry exists.
 */
static struct phy *phy_find(struct device *dev, const char *con_id)
{
	const char *dev_id = dev_name(dev);
	struct phy_lookup *p, *pl = NULL;

	mutex_lock(&phy_provider_mutex);
	list_for_each_entry(p, &phys, node)
		if (!strcmp(p->dev_id, dev_id) && !strcmp(p->con_id, con_id)) {
			pl = p;
			break;
		}
	mutex_unlock(&phy_provider_mutex);

	return pl ? pl->phy : ERR_PTR(-ENODEV);
}

/*
 * Find the provider whose own node, or one of whose child nodes, is @node.
 * Called with phy_provider_mutex held (see _of_phy_get()). Returns
 * ERR_PTR(-EPROBE_DEFER) when no provider has registered yet.
 */
static struct phy_provider *of_phy_provider_lookup(struct device_node *node)
{
	struct phy_provider *phy_provider;

	list_for_each_entry(phy_provider, &phy_provider_list, list) {
		if (phy_provider->dev->of_node == node)
			return phy_provider;

		for_each_child_of_node_scoped(phy_provider->children, child)
			if (child == node)
				return phy_provider;
	}

	return ERR_PTR(-EPROBE_DEFER);
}

/*
 * Asynchronous runtime-PM get on the phy device. Returns 0 for a NULL phy,
 * -ENOTSUPP if runtime PM is not enabled for the phy device.
 */
int phy_pm_runtime_get(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	if (!pm_runtime_enabled(&phy->dev))
		return -ENOTSUPP;

	ret = pm_runtime_get(&phy->dev);
	/* -EINPROGRESS means the async resume is underway, not a failure. */
	if (ret < 0 && ret != -EINPROGRESS)
		pm_runtime_put_noidle(&phy->dev);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_get);

/*
 * Synchronous runtime-PM get on the phy device; drops the usage count
 * again on failure so callers need not clean up.
 */
int phy_pm_runtime_get_sync(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	if (!pm_runtime_enabled(&phy->dev))
		return -ENOTSUPP;

	ret = pm_runtime_get_sync(&phy->dev);
	if (ret < 0)
		pm_runtime_put_sync(&phy->dev);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_get_sync);

/* Asynchronous runtime-PM put; no-op for a NULL or non-runtime-PM phy. */
void phy_pm_runtime_put(struct phy *phy)
{
	if (!phy)
		return;

	if (!pm_runtime_enabled(&phy->dev))
		return;

	pm_runtime_put(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_put);

/* Synchronous runtime-PM put; returns -ENOTSUPP if runtime PM is disabled. */
int phy_pm_runtime_put_sync(struct phy *phy)
{
	if (!phy)
		return 0;

	if (!pm_runtime_enabled(&phy->dev))
		return -ENOTSUPP;

	return pm_runtime_put_sync(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_put_sync);

/**
 * phy_init - phy internal initialization before phy operation
 * @phy: the phy returned by phy_get()
 *
 * Used to allow phy's driver to perform phy internal initialization,
 * such as PLL block powering, clock initialization or anything that
 * is required by the phy to perform the start of operation.
 * Must be called before phy_power_on().
 *
 * Return: %0 if successful, a negative error code otherwise
 */
int phy_init(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	ret = phy_pm_runtime_get_sync(phy);
	if (ret < 0 && ret != -ENOTSUPP)
		return ret;
	ret = 0; /* Override possible ret == -ENOTSUPP */

	mutex_lock(&phy->mutex);
	/* Flag callers that violate the documented init-before-power-on order. */
	if (phy->power_count > phy->init_count)
		dev_warn(&phy->dev, "phy_power_on was called before phy_init\n");

	/* Only the first consumer actually runs the driver's init callback. */
	if (phy->init_count == 0 && phy->ops->init) {
		ret = phy->ops->init(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy init failed --> %d\n", ret);
			goto out;
		}
	}
	++phy->init_count;

out:
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put(phy);
	return ret;
}
EXPORT_SYMBOL_GPL(phy_init);

/**
 * phy_exit - Phy internal un-initialization
 * @phy: the phy returned by phy_get()
 *
 * Must be called after phy_power_off().
 *
 * Return: %0 if successful, a negative error code otherwise
 */
int phy_exit(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	ret = phy_pm_runtime_get_sync(phy);
	if (ret < 0 && ret != -ENOTSUPP)
		return ret;
	ret = 0; /* Override possible ret == -ENOTSUPP */

	mutex_lock(&phy->mutex);
	/* Only the last consumer actually runs the driver's exit callback. */
	if (phy->init_count == 1 && phy->ops->exit) {
		ret = phy->ops->exit(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy exit failed --> %d\n", ret);
			goto out;
		}
	}
	--phy->init_count;

out:
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put(phy);
	return ret;
}
EXPORT_SYMBOL_GPL(phy_exit);

/**
 * phy_power_on - Enable the phy and enter proper operation
 * @phy: the phy returned by phy_get()
 *
 * Must be called after phy_init().
 *
 * Return: %0 if successful, a negative error code otherwise
 */
int phy_power_on(struct phy *phy)
{
	int ret = 0;

	if (!phy)
		goto out;

	/* Optional "phy" supply acquired in phy_create(); may be NULL. */
	if (phy->pwr) {
		ret = regulator_enable(phy->pwr);
		if (ret)
			goto out;
	}

	ret = phy_pm_runtime_get_sync(phy);
	if (ret < 0 && ret != -ENOTSUPP)
		goto err_pm_sync;

	ret = 0; /* Override possible ret == -ENOTSUPP */

	mutex_lock(&phy->mutex);
	/* Only the first consumer actually powers the hardware on. */
	if (phy->power_count == 0 && phy->ops->power_on) {
		ret = phy->ops->power_on(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
			goto err_pwr_on;
		}
	}
	++phy->power_count;
	mutex_unlock(&phy->mutex);
	return 0;

err_pwr_on:
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put_sync(phy);
err_pm_sync:
	if (phy->pwr)
		regulator_disable(phy->pwr);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(phy_power_on);

/**
 * phy_power_off - Disable the phy.
 * @phy: the phy returned by phy_get()
 *
 * Must be called before phy_exit().
 *
 * Return: %0 if successful, a negative error code otherwise
 */
int phy_power_off(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	mutex_lock(&phy->mutex);
	/* Only the last consumer actually powers the hardware off. */
	if (phy->power_count == 1 && phy->ops->power_off) {
		ret = phy->ops->power_off(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy poweroff failed --> %d\n", ret);
			mutex_unlock(&phy->mutex);
			return ret;
		}
	}
	--phy->power_count;
	mutex_unlock(&phy->mutex);
	/* Balance the runtime-PM get and regulator enable from phy_power_on(). */
	phy_pm_runtime_put(phy);

	if (phy->pwr)
		regulator_disable(phy->pwr);

	return 0;
}
EXPORT_SYMBOL_GPL(phy_power_off);

/*
 * Set the phy operating mode (and submode); on success the mode is cached
 * in phy->attrs.mode.
 */
int phy_set_mode_ext(struct phy *phy, enum phy_mode mode, int submode)
{
	int ret = 0;

	if (!phy)
		return 0;

	mutex_lock(&phy->mutex);
	if (phy->ops->set_mode)
		ret = phy->ops->set_mode(phy, mode, submode);
	if (!ret)
		phy->attrs.mode = mode;
	mutex_unlock(&phy->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_set_mode_ext);

/* Set the media type; a no-op (returning 0) if the driver lacks the op. */
int phy_set_media(struct phy *phy, enum phy_media media)
{
	int ret;

	if (!phy || !phy->ops->set_media)
		return 0;

	mutex_lock(&phy->mutex);
	ret = phy->ops->set_media(phy, media);
	mutex_unlock(&phy->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_set_media);

/* Set the link speed; a no-op (returning 0) if the driver lacks the op. */
int phy_set_speed(struct phy *phy, int speed)
{
	int ret;

	if (!phy || !phy->ops->set_speed)
		return 0;

	mutex_lock(&phy->mutex);
	ret = phy->ops->set_speed(phy, speed);
	mutex_unlock(&phy->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_set_speed);

/* Reset the phy via the driver's reset op, holding a runtime-PM reference. */
int phy_reset(struct phy *phy)
{
	int ret;

	if (!phy || !phy->ops->reset)
		return 0;

	ret = phy_pm_runtime_get_sync(phy);
	if (ret < 0 && ret != -ENOTSUPP)
		return ret;

	mutex_lock(&phy->mutex);
	ret = phy->ops->reset(phy);
	mutex_unlock(&phy->mutex);

	phy_pm_runtime_put(phy);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_reset);

/**
 * phy_calibrate() - Tunes the phy hw parameters for current configuration
 * @phy: the phy returned by phy_get()
 *
 * Used to calibrate phy hardware, typically by adjusting some parameters in
 * runtime, which are otherwise lost after host controller reset and cannot
 * be applied in phy_init() or phy_power_on().
 *
 * Return: %0 if successful, a negative error code otherwise
 */
int phy_calibrate(struct phy *phy)
{
	int ret;

	if (!phy || !phy->ops->calibrate)
		return 0;

	mutex_lock(&phy->mutex);
	ret = phy->ops->calibrate(phy);
	mutex_unlock(&phy->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_calibrate);

/**
 * phy_notify_connect() - phy connect notification
 * @phy: the phy returned by phy_get()
 * @port: the port index for connect
 *
 * If the phy needs to get connection status, the callback can be used.
 * Returns: %0 if successful, a negative error code otherwise
 */
int phy_notify_connect(struct phy *phy, int port)
{
	int ret;

	if (!phy || !phy->ops->connect)
		return 0;

	mutex_lock(&phy->mutex);
	ret = phy->ops->connect(phy, port);
	mutex_unlock(&phy->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_notify_connect);

/**
 * phy_notify_disconnect() - phy disconnect notification
 * @phy: the phy returned by phy_get()
 * @port: the port index for disconnect
 *
 * If the phy needs to get connection status, the callback can be used.
 *
 * Returns: %0 if successful, a negative error code otherwise
 */
int phy_notify_disconnect(struct phy *phy, int port)
{
	int ret;

	if (!phy || !phy->ops->disconnect)
		return 0;

	mutex_lock(&phy->mutex);
	ret = phy->ops->disconnect(phy, port);
	mutex_unlock(&phy->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_notify_disconnect);

/**
 * phy_notify_state() - phy state notification
 * @phy: the PHY returned by phy_get()
 * @state: the PHY state
 *
 * Notify the PHY of a state transition. Used to notify and
 * configure the PHY accordingly.
 *
 * Returns: %0 if successful, a negative error code otherwise
 */
int phy_notify_state(struct phy *phy, union phy_notify state)
{
	int ret;

	if (!phy || !phy->ops->notify_phystate)
		return 0;

	mutex_lock(&phy->mutex);
	ret = phy->ops->notify_phystate(phy, state);
	mutex_unlock(&phy->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_notify_state);

/**
 * phy_configure() - Changes the phy parameters
 * @phy: the phy returned by phy_get()
 * @opts: New configuration to apply
 *
 * Used to change the PHY parameters. phy_init() must have been called
 * on the phy. The configuration will be applied on the current phy
 * mode, that can be changed using phy_set_mode().
 *
 * Return: %0 if successful, a negative error code otherwise
 */
int phy_configure(struct phy *phy, union phy_configure_opts *opts)
{
	int ret;

	if (!phy)
		return -EINVAL;

	/* Unlike the notify helpers, a missing op here is a hard error. */
	if (!phy->ops->configure)
		return -EOPNOTSUPP;

	mutex_lock(&phy->mutex);
	ret = phy->ops->configure(phy, opts);
	mutex_unlock(&phy->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_configure);

/**
 * phy_validate() - Checks the phy parameters
 * @phy: the phy returned by phy_get()
 * @mode: phy_mode the configuration is applicable to.
 * @submode: PHY submode the configuration is applicable to.
 * @opts: Configuration to check
 *
 * Used to check that the current set of parameters can be handled by
 * the phy. Implementations are free to tune the parameters passed as
 * arguments if needed by some implementation detail or
 * constraints. It will not change any actual configuration of the
 * PHY, so calling it as many times as deemed fit will have no side
 * effect.
 *
 * Return: %0 if successful, a negative error code otherwise
 */
int phy_validate(struct phy *phy, enum phy_mode mode, int submode,
		 union phy_configure_opts *opts)
{
	int ret;

	if (!phy)
		return -EINVAL;

	if (!phy->ops->validate)
		return -EOPNOTSUPP;

	mutex_lock(&phy->mutex);
	ret = phy->ops->validate(phy, mode, submode, opts);
	mutex_unlock(&phy->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_validate);

/**
 * _of_phy_get() - lookup and obtain a reference to a phy by phandle
 * @np: device_node for which to get the phy
 * @index: the index of the phy
 *
 * Returns the phy associated with the given phandle value,
 * after getting a refcount to it or -ENODEV if there is no such phy or
 * -EPROBE_DEFER if there is a phandle to the phy, but the device is
 * not yet loaded. This function uses of_xlate call back function provided
 * while registering the phy_provider to find the phy instance.
 */
static struct phy *_of_phy_get(struct device_node *np, int index)
{
	int ret;
	struct phy_provider *phy_provider;
	struct phy *phy = NULL;
	struct of_phandle_args args;

	ret = of_parse_phandle_with_args(np, "phys", "#phy-cells",
					 index, &args);
	if (ret)
		return ERR_PTR(-ENODEV);

	/* This phy type handled by the usb-phy subsystem for now */
	if (of_device_is_compatible(args.np, "usb-nop-xceiv")) {
		phy = ERR_PTR(-ENODEV);
		goto out_put_node;
	}

	mutex_lock(&phy_provider_mutex);
	phy_provider = of_phy_provider_lookup(args.np);
	if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) {
		/* Provider may still probe later, so ask the caller to retry. */
		phy = ERR_PTR(-EPROBE_DEFER);
		goto out_unlock;
	}

	if (!of_device_is_available(args.np)) {
		dev_warn(phy_provider->dev, "Requested PHY is disabled\n");
		phy = ERR_PTR(-ENODEV);
		goto out_put_module;
	}

	phy = phy_provider->of_xlate(phy_provider->dev, &args);

out_put_module:
	module_put(phy_provider->owner);

out_unlock:
	mutex_unlock(&phy_provider_mutex);
out_put_node:
	of_node_put(args.np);

	return phy;
}

/**
 * of_phy_get() - lookup and obtain a reference to a phy using a device_node.
 * @np: device_node for which to get the phy
 * @con_id: name of the phy from device's point of view
 *
 * Returns the phy driver, after getting a refcount to it; or
 * -ENODEV if there is no such phy. The caller is responsible for
 * calling of_phy_put() to release that count.
 */
struct phy *of_phy_get(struct device_node *np, const char *con_id)
{
	struct phy *phy = NULL;
	int index = 0;

	if (con_id)
		index = of_property_match_string(np, "phy-names", con_id);

	phy = _of_phy_get(np, index);
	if (IS_ERR(phy))
		return phy;

	if (!try_module_get(phy->ops->owner))
		return ERR_PTR(-EPROBE_DEFER);

	get_device(&phy->dev);

	return phy;
}
EXPORT_SYMBOL_GPL(of_phy_get);

/**
 * of_phy_put() - release the PHY
 * @phy: the phy returned by of_phy_get()
 *
 * Releases a refcount the caller received from of_phy_get().
 */
void of_phy_put(struct phy *phy)
{
	if (!phy || IS_ERR(phy))
		return;

	mutex_lock(&phy->mutex);
	if (phy->ops->release)
		phy->ops->release(phy);
	mutex_unlock(&phy->mutex);

	/* Drop the module and device references taken in of_phy_get(). */
	module_put(phy->ops->owner);
	put_device(&phy->dev);
}
EXPORT_SYMBOL_GPL(of_phy_put);

/**
 * phy_put() - release the PHY
 * @dev: device that wants to release this phy
 * @phy: the phy returned by phy_get()
 *
 * Releases a refcount the caller received from phy_get().
 */
void phy_put(struct device *dev, struct phy *phy)
{
	device_link_remove(dev, &phy->dev);
	of_phy_put(phy);
}
EXPORT_SYMBOL_GPL(phy_put);

/**
 * devm_phy_put() - release the PHY
 * @dev: device that wants to release this phy
 * @phy: the phy returned by devm_phy_get()
 *
 * destroys the devres associated with this phy and invokes phy_put
 * to release the phy.
 */
void devm_phy_put(struct device *dev, struct phy *phy)
{
	int r;

	if (!phy)
		return;

	r = devres_release(dev, devm_phy_release, devm_phy_match, phy);
	dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
}
EXPORT_SYMBOL_GPL(devm_phy_put);

/**
 * of_phy_simple_xlate() - returns the phy instance from phy provider
 * @dev: the PHY provider device (not used here)
 * @args: of_phandle_args
 *
 * Intended to be used by phy provider for the common case where #phy-cells is
 * 0. For other cases where #phy-cells is greater than '0', the phy provider
 * should provide a custom of_xlate function that reads the *args* and returns
 * the appropriate phy.
 */
struct phy *of_phy_simple_xlate(struct device *dev,
				const struct of_phandle_args *args)
{
	struct device *target_dev;

	target_dev = class_find_device_by_of_node(&phy_class, args->np);
	if (!target_dev)
		return ERR_PTR(-ENODEV);

	/*
	 * NOTE(review): the reference taken by class_find_device_by_of_node()
	 * is dropped before returning; presumably the provider guarantees the
	 * phy outlives this lookup and the caller re-acquires a reference
	 * (see of_phy_get()) -- confirm.
	 */
	put_device(target_dev);
	return to_phy(target_dev);
}
EXPORT_SYMBOL_GPL(of_phy_simple_xlate);

/**
 * phy_get() - lookup and obtain a reference to a phy.
 * @dev: device that requests this phy
 * @string: the phy name as given in the dt data or the name of the controller
 * port for non-dt case
 *
 * Returns the phy driver, after getting a refcount to it; or
 * -ENODEV if there is no such phy. The caller is responsible for
 * calling phy_put() to release that count.
 */
struct phy *phy_get(struct device *dev, const char *string)
{
	int index = 0;
	struct phy *phy;
	struct device_link *link;

	if (dev->of_node) {
		/* DT path: resolve via "phys"/"phy-names" properties. */
		if (string)
			index = of_property_match_string(dev->of_node, "phy-names",
							 string);
		else
			index = 0;
		phy = _of_phy_get(dev->of_node, index);
	} else {
		/* Non-DT path: resolve via the phy_create_lookup() table. */
		if (string == NULL) {
			dev_WARN(dev, "missing string\n");
			return ERR_PTR(-EINVAL);
		}
		phy = phy_find(dev, string);
	}
	if (IS_ERR(phy))
		return phy;

	if (!try_module_get(phy->ops->owner))
		return ERR_PTR(-EPROBE_DEFER);

	get_device(&phy->dev);

	/* Link failure is not fatal; it only loses PM/unbind ordering. */
	link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
	if (!link)
		dev_dbg(dev, "failed to create device link to %s\n",
			dev_name(phy->dev.parent));

	return phy;
}
EXPORT_SYMBOL_GPL(phy_get);

/**
 * devm_phy_get() - lookup and obtain a reference to a phy.
 * @dev: device that requests this phy
 * @string: the phy name as given in the dt data or phy device name
 * for non-dt case
 *
 * Gets the phy using phy_get(), and associates a device with it using
 * devres. On driver detach, release function is invoked on the devres data,
 * then, devres data is freed.
 */
struct phy *devm_phy_get(struct device *dev, const char *string)
{
	struct phy **ptr, *phy;

	ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	phy = phy_get(dev, string);
	if (!IS_ERR(phy)) {
		*ptr = phy;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return phy;
}
EXPORT_SYMBOL_GPL(devm_phy_get);

/**
 * devm_phy_optional_get() - lookup and obtain a reference to an optional phy.
 * @dev: device that requests this phy
 * @string: the phy name as given in the dt data or phy device name
 * for non-dt case
 *
 * Gets the phy using phy_get(), and associates a device with it using
 * devres. On driver detach, release function is invoked on the devres
 * data, then, devres data is freed. This differs to devm_phy_get() in
 * that if the phy does not exist, it is not considered an error and
 * -ENODEV will not be returned. Instead the NULL phy is returned,
 * which can be passed to all other phy consumer calls.
 */
struct phy *devm_phy_optional_get(struct device *dev, const char *string)
{
	struct phy *phy = devm_phy_get(dev, string);

	/* "No phy" is fine for an optional phy: map -ENODEV to NULL. */
	if (PTR_ERR(phy) == -ENODEV)
		phy = NULL;

	return phy;
}
EXPORT_SYMBOL_GPL(devm_phy_optional_get);

/**
 * devm_of_phy_get() - lookup and obtain a reference to a phy.
 * @dev: device that requests this phy
 * @np: node containing the phy
 * @con_id: name of the phy from device's point of view
 *
 * Gets the phy using of_phy_get(), and associates a device with it using
 * devres. On driver detach, release function is invoked on the devres data,
 * then, devres data is freed.
 */
struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
			    const char *con_id)
{
	struct phy **ptr, *phy;
	struct device_link *link;

	ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	phy = of_phy_get(np, con_id);
	if (!IS_ERR(phy)) {
		*ptr = phy;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
		return phy;
	}

	/* Link failure is not fatal; it only loses PM/unbind ordering. */
	link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
	if (!link)
		dev_dbg(dev, "failed to create device link to %s\n",
			dev_name(phy->dev.parent));

	return phy;
}
EXPORT_SYMBOL_GPL(devm_of_phy_get);

/**
 * devm_of_phy_optional_get() - lookup and obtain a reference to an optional
 * phy.
 * @dev: device that requests this phy
 * @np: node containing the phy
 * @con_id: name of the phy from device's point of view
 *
 * Gets the phy using of_phy_get(), and associates a device with it using
 * devres. On driver detach, release function is invoked on the devres data,
 * then, devres data is freed. This differs to devm_of_phy_get() in
 * that if the phy does not exist, it is not considered an error and
 * -ENODEV will not be returned. Instead the NULL phy is returned,
 * which can be passed to all other phy consumer calls.
 */
struct phy *devm_of_phy_optional_get(struct device *dev, struct device_node *np,
				     const char *con_id)
{
	struct phy *phy = devm_of_phy_get(dev, np, con_id);

	/* "No phy" is fine for an optional phy: map -ENODEV to NULL. */
	if (PTR_ERR(phy) == -ENODEV)
		phy = NULL;

	/* Any remaining error is real; log it with probe-deferral awareness. */
	if (IS_ERR(phy))
		dev_err_probe(dev, PTR_ERR(phy), "failed to get PHY %pOF:%s",
			      np, con_id);

	return phy;
}
EXPORT_SYMBOL_GPL(devm_of_phy_optional_get);

/**
 * devm_of_phy_get_by_index() - lookup and obtain a reference to a phy by index.
 * @dev: device that requests this phy
 * @np: node containing the phy
 * @index: index of the phy
 *
 * Gets the phy using _of_phy_get(), then gets a refcount to it,
 * and associates a device with it using devres. On driver detach,
 * release function is invoked on the devres data,
 * then, devres data is freed.
 *
 */
struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np,
				     int index)
{
	struct phy **ptr, *phy;
	struct device_link *link;

	ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	phy = _of_phy_get(np, index);
	if (IS_ERR(phy)) {
		devres_free(ptr);
		return phy;
	}

	if (!try_module_get(phy->ops->owner)) {
		devres_free(ptr);
		return ERR_PTR(-EPROBE_DEFER);
	}

	get_device(&phy->dev);

	*ptr = phy;
	devres_add(dev, ptr);

	/* Link failure is not fatal; it only loses PM/unbind ordering. */
	link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
	if (!link)
		dev_dbg(dev, "failed to create device link to %s\n",
			dev_name(phy->dev.parent));

	return phy;
}
EXPORT_SYMBOL_GPL(devm_of_phy_get_by_index);

/**
 * phy_create() - create a new phy
 * @dev: device that is creating the new phy
 * @node: device node of the phy
 * @ops: function pointers for performing phy operations
 *
 * Called to create a phy using phy framework.
 */
struct phy *phy_create(struct device *dev, struct device_node *node,
		       const struct phy_ops *ops)
{
	int ret;
	int id;
	struct phy *phy;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	phy = kzalloc(sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return ERR_PTR(-ENOMEM);

	id = ida_alloc(&phy_ida, GFP_KERNEL);
	if (id < 0) {
		dev_err(dev, "unable to get id\n");
		ret = id;
		goto free_phy;
	}

	device_initialize(&phy->dev);
	/* Per-phy lock class so nested phy mutexes don't trip lockdep. */
	lockdep_register_key(&phy->lockdep_key);
	mutex_init_with_key(&phy->mutex, &phy->lockdep_key);

	phy->dev.class = &phy_class;
	phy->dev.parent = dev;
	phy->dev.of_node = node ?: dev->of_node;
	phy->id = id;
	phy->ops = ops;

	ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id);
	if (ret)
		goto put_dev;

	/* phy-supply */
	phy->pwr = regulator_get_optional(&phy->dev, "phy");
	if (IS_ERR(phy->pwr)) {
		ret = PTR_ERR(phy->pwr);
		if (ret == -EPROBE_DEFER)
			goto put_dev;

		/* Any other error means no optional supply; carry on. */
		phy->pwr = NULL;
	}

	ret = device_add(&phy->dev);
	if (ret)
		goto put_dev;

	if (pm_runtime_enabled(dev)) {
		pm_runtime_enable(&phy->dev);
		pm_runtime_no_callbacks(&phy->dev);
	}

	phy->debugfs = debugfs_create_dir(dev_name(&phy->dev), phy_debugfs_root);

	return phy;

put_dev:
	put_device(&phy->dev); /* calls phy_release() which frees resources */
	return ERR_PTR(ret);

free_phy:
	/* Before device_initialize(): the device core owns nothing yet. */
	kfree(phy);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(phy_create);

/**
 * devm_phy_create() - create a new phy
 * @dev: device that is creating the new phy
 * @node: device node of the phy
 * @ops: function pointers for performing phy operations
 *
 * Creates a new PHY device adding it to the PHY class.
 * While at that, it also associates the device with the phy using devres.
 * On driver detach, release function is invoked on the devres data,
 * then, devres data is freed.
 */
struct phy *devm_phy_create(struct device *dev, struct device_node *node,
			    const struct phy_ops *ops)
{
	struct phy **ptr, *phy;

	ptr = devres_alloc(devm_phy_consume, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	phy = phy_create(dev, node, ops);
	if (!IS_ERR(phy)) {
		*ptr = phy;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return phy;
}
EXPORT_SYMBOL_GPL(devm_phy_create);

/**
 * phy_destroy() - destroy the phy
 * @phy: the phy to be destroyed
 *
 * Called to destroy the phy.
 */
void phy_destroy(struct phy *phy)
{
	pm_runtime_disable(&phy->dev);
	device_unregister(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_destroy);

/**
 * devm_phy_destroy() - destroy the PHY
 * @dev: device that wants to release this phy
 * @phy: the phy returned by devm_phy_get()
 *
 * destroys the devres associated with this phy and invokes phy_destroy
 * to destroy the phy.
 */
void devm_phy_destroy(struct device *dev, struct phy *phy)
{
	int r;

	r = devres_release(dev, devm_phy_consume, devm_phy_match, phy);
	dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
}
EXPORT_SYMBOL_GPL(devm_phy_destroy);

/**
 * __of_phy_provider_register() - create/register phy provider with the framework
 * @dev: struct device of the phy provider
 * @children: device node containing children (if different from dev->of_node)
 * @owner: the module owner containing of_xlate
 * @of_xlate: function pointer to obtain phy instance from phy provider
 *
 * Creates struct phy_provider from dev and of_xlate function pointer.
 * This is used in the case of dt boot for finding the phy instance from
 * phy provider.
1137 * 1138 * If the PHY provider doesn't nest children directly but uses a separate 1139 * child node to contain the individual children, the @children parameter 1140 * can be used to override the default. If NULL, the default (dev->of_node) 1141 * will be used. If non-NULL, the device node must be a child (or further 1142 * descendant) of dev->of_node. Otherwise an ERR_PTR()-encoded -EINVAL 1143 * error code is returned. 1144 */ 1145 struct phy_provider *__of_phy_provider_register(struct device *dev, 1146 struct device_node *children, struct module *owner, 1147 struct phy * (*of_xlate)(struct device *dev, 1148 const struct of_phandle_args *args)) 1149 { 1150 struct phy_provider *phy_provider; 1151 1152 /* 1153 * If specified, the device node containing the children must itself 1154 * be the provider's device node or a child (or further descendant) 1155 * thereof. 1156 */ 1157 if (children) { 1158 struct device_node *parent = of_node_get(children), *next; 1159 1160 while (parent) { 1161 if (parent == dev->of_node) 1162 break; 1163 1164 next = of_get_parent(parent); 1165 of_node_put(parent); 1166 parent = next; 1167 } 1168 1169 if (!parent) 1170 return ERR_PTR(-EINVAL); 1171 1172 of_node_put(parent); 1173 } else { 1174 children = dev->of_node; 1175 } 1176 1177 phy_provider = kzalloc(sizeof(*phy_provider), GFP_KERNEL); 1178 if (!phy_provider) 1179 return ERR_PTR(-ENOMEM); 1180 1181 phy_provider->dev = dev; 1182 phy_provider->children = of_node_get(children); 1183 phy_provider->owner = owner; 1184 phy_provider->of_xlate = of_xlate; 1185 1186 mutex_lock(&phy_provider_mutex); 1187 list_add_tail(&phy_provider->list, &phy_provider_list); 1188 mutex_unlock(&phy_provider_mutex); 1189 1190 return phy_provider; 1191 } 1192 EXPORT_SYMBOL_GPL(__of_phy_provider_register); 1193 1194 /** 1195 * __devm_of_phy_provider_register() - create/register phy provider with the 1196 * framework 1197 * @dev: struct device of the phy provider 1198 * @children: device node containing children 
(if different from dev->of_node) 1199 * @owner: the module owner containing of_xlate 1200 * @of_xlate: function pointer to obtain phy instance from phy provider 1201 * 1202 * Creates struct phy_provider from dev and of_xlate function pointer. 1203 * This is used in the case of dt boot for finding the phy instance from 1204 * phy provider. While at that, it also associates the device with the 1205 * phy provider using devres. On driver detach, release function is invoked 1206 * on the devres data, then, devres data is freed. 1207 */ 1208 struct phy_provider *__devm_of_phy_provider_register(struct device *dev, 1209 struct device_node *children, struct module *owner, 1210 struct phy * (*of_xlate)(struct device *dev, 1211 const struct of_phandle_args *args)) 1212 { 1213 struct phy_provider **ptr, *phy_provider; 1214 1215 ptr = devres_alloc(devm_phy_provider_release, sizeof(*ptr), GFP_KERNEL); 1216 if (!ptr) 1217 return ERR_PTR(-ENOMEM); 1218 1219 phy_provider = __of_phy_provider_register(dev, children, owner, 1220 of_xlate); 1221 if (!IS_ERR(phy_provider)) { 1222 *ptr = phy_provider; 1223 devres_add(dev, ptr); 1224 } else { 1225 devres_free(ptr); 1226 } 1227 1228 return phy_provider; 1229 } 1230 EXPORT_SYMBOL_GPL(__devm_of_phy_provider_register); 1231 1232 /** 1233 * of_phy_provider_unregister() - unregister phy provider from the framework 1234 * @phy_provider: phy provider returned by of_phy_provider_register() 1235 * 1236 * Removes the phy_provider created using of_phy_provider_register(). 
 */
void of_phy_provider_unregister(struct phy_provider *phy_provider)
{
	/* Tolerate ERR_PTR() values so error paths may call this unconditionally. */
	if (IS_ERR(phy_provider))
		return;

	/*
	 * Unlink, drop the child-node reference taken at registration, and
	 * free the provider — all under the provider mutex so no lookup can
	 * observe a half-torn-down entry.
	 */
	mutex_lock(&phy_provider_mutex);
	list_del(&phy_provider->list);
	of_node_put(phy_provider->children);
	kfree(phy_provider);
	mutex_unlock(&phy_provider_mutex);
}
EXPORT_SYMBOL_GPL(of_phy_provider_unregister);

/**
 * devm_of_phy_provider_unregister() - remove phy provider from the framework
 * @dev: struct device of the phy provider
 * @phy_provider: phy provider returned by of_phy_provider_register()
 *
 * destroys the devres associated with this phy provider and invokes
 * of_phy_provider_unregister to unregister the phy provider.
 */
void devm_of_phy_provider_unregister(struct device *dev,
				     struct phy_provider *phy_provider)
{
	int r;

	/*
	 * devres_release() finds the matching devres entry, runs its release
	 * callback (devm_phy_provider_release -> of_phy_provider_unregister)
	 * and frees the devres data; a non-zero return means no entry matched.
	 */
	r = devres_release(dev, devm_phy_provider_release, devm_phy_match,
			   phy_provider);
	dev_WARN_ONCE(dev, r, "couldn't find PHY provider device resource\n");
}
EXPORT_SYMBOL_GPL(devm_of_phy_provider_unregister);

/**
 * phy_release() - release the phy
 * @dev: the dev member within phy
 *
 * When the last reference to the device is removed, it is called
 * from the embedded kobject as release method.
1276 */ 1277 static void phy_release(struct device *dev) 1278 { 1279 struct phy *phy; 1280 1281 phy = to_phy(dev); 1282 dev_vdbg(dev, "releasing '%s'\n", dev_name(dev)); 1283 debugfs_remove_recursive(phy->debugfs); 1284 regulator_put(phy->pwr); 1285 mutex_destroy(&phy->mutex); 1286 lockdep_unregister_key(&phy->lockdep_key); 1287 ida_free(&phy_ida, phy->id); 1288 kfree(phy); 1289 } 1290 1291 static int __init phy_core_init(void) 1292 { 1293 int err; 1294 1295 err = class_register(&phy_class); 1296 if (err) { 1297 pr_err("failed to register phy class"); 1298 return err; 1299 } 1300 1301 phy_debugfs_root = debugfs_create_dir("phy", NULL); 1302 1303 return 0; 1304 } 1305 device_initcall(phy_core_init); 1306 1307 static void __exit phy_core_exit(void) 1308 { 1309 debugfs_remove_recursive(phy_debugfs_root); 1310 class_unregister(&phy_class); 1311 } 1312 module_exit(phy_core_exit); 1313