// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2024 Linaro Ltd.
 */

#include <linux/bug.h>
#include <linux/cleanup.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/pwrseq/consumer.h>
#include <linux/pwrseq/provider.h>
#include <linux/radix-tree.h>
#include <linux/rwsem.h>
#include <linux/slab.h>

/*
 * Power-sequencing framework for linux.
 *
 * This subsystem allows power sequence providers to register a set of targets
 * that consumers may request and power-up/down.
 *
 * Glossary:
 *
 * Unit - a unit is a discrete chunk of a power sequence. For instance one unit
 * may enable a set of regulators, another may enable a specific GPIO. Units
 * can define dependencies in the form of other units that must be enabled
 * before it itself can be.
 *
 * Target - a target is a set of units (composed of the "final" unit and its
 * dependencies) that a consumer selects by its name when requesting a handle
 * to the power sequencer. Via the dependency system, multiple targets may
 * share the same parts of a power sequence but ignore parts that are
 * irrelevant.
 *
 * Descriptor - a handle passed by the pwrseq core to every consumer that
 * serves as the entry point to the provider layer. It ensures coherence
 * between different users and keeps reference counting consistent.
 *
 * Each provider must define a .match() callback whose role is to determine
 * whether a potential consumer is in fact associated with this sequencer.
 * This allows creating abstraction layers on top of regular device-tree
 * resources like regulators, clocks and other nodes connected to the consumer
 * via phandle.
 */

/* Provides the unique IDs used to name pwrseq devices ("pwrseq.%d"). */
static DEFINE_IDA(pwrseq_ida);

/*
 * Protects the device list on the pwrseq bus from concurrent modifications
 * but allows simultaneous read-only access.
 */
static DECLARE_RWSEM(pwrseq_sem);

/**
 * struct pwrseq_unit - Private power-sequence unit data.
 * @ref: Reference count for this object. When it goes to 0, the object is
 *       destroyed.
 * @name: Name of this unit.
 * @list: Link to siblings on the list of all units of a single sequencer.
 * @deps: List of units on which this unit depends.
 * @enable: Callback running the part of the power-on sequence provided by
 *          this unit.
 * @disable: Callback running the part of the power-off sequence provided
 *           by this unit.
 * @enable_count: Current number of users that enabled this unit. May be the
 *                consumer of the power sequencer or other units that depend
 *                on this one.
78 */ 79 struct pwrseq_unit { 80 struct kref ref; 81 const char *name; 82 struct list_head list; 83 struct list_head deps; 84 pwrseq_power_state_func enable; 85 pwrseq_power_state_func disable; 86 unsigned int enable_count; 87 }; 88 89 static struct pwrseq_unit *pwrseq_unit_new(const struct pwrseq_unit_data *data) 90 { 91 struct pwrseq_unit *unit; 92 93 unit = kzalloc(sizeof(*unit), GFP_KERNEL); 94 if (!unit) 95 return NULL; 96 97 unit->name = kstrdup_const(data->name, GFP_KERNEL); 98 if (!unit->name) { 99 kfree(unit); 100 return NULL; 101 } 102 103 kref_init(&unit->ref); 104 INIT_LIST_HEAD(&unit->deps); 105 unit->enable = data->enable; 106 unit->disable = data->disable; 107 108 return unit; 109 } 110 111 static struct pwrseq_unit *pwrseq_unit_get(struct pwrseq_unit *unit) 112 { 113 kref_get(&unit->ref); 114 115 return unit; 116 } 117 118 static void pwrseq_unit_release(struct kref *ref); 119 120 static void pwrseq_unit_put(struct pwrseq_unit *unit) 121 { 122 kref_put(&unit->ref, pwrseq_unit_release); 123 } 124 125 /** 126 * struct pwrseq_unit_dep - Wrapper around a reference to the unit structure 127 * allowing to keep it on multiple dependency lists 128 * in different units. 129 * @list: Siblings on the list. 130 * @unit: Address of the referenced unit. 
131 */ 132 struct pwrseq_unit_dep { 133 struct list_head list; 134 struct pwrseq_unit *unit; 135 }; 136 137 static struct pwrseq_unit_dep *pwrseq_unit_dep_new(struct pwrseq_unit *unit) 138 { 139 struct pwrseq_unit_dep *dep; 140 141 dep = kzalloc(sizeof(*dep), GFP_KERNEL); 142 if (!dep) 143 return NULL; 144 145 dep->unit = unit; 146 147 return dep; 148 } 149 150 static void pwrseq_unit_dep_free(struct pwrseq_unit_dep *ref) 151 { 152 pwrseq_unit_put(ref->unit); 153 kfree(ref); 154 } 155 156 static void pwrseq_unit_free_deps(struct list_head *list) 157 { 158 struct pwrseq_unit_dep *dep, *next; 159 160 list_for_each_entry_safe(dep, next, list, list) { 161 list_del(&dep->list); 162 pwrseq_unit_dep_free(dep); 163 } 164 } 165 166 static void pwrseq_unit_release(struct kref *ref) 167 { 168 struct pwrseq_unit *unit = container_of(ref, struct pwrseq_unit, ref); 169 170 pwrseq_unit_free_deps(&unit->deps); 171 list_del(&unit->list); 172 kfree_const(unit->name); 173 kfree(unit); 174 } 175 176 /** 177 * struct pwrseq_target - Private power-sequence target data. 178 * @list: Siblings on the list of all targets exposed by a power sequencer. 179 * @name: Name of the target. 180 * @unit: Final unit for this target. 181 * @post_enable: Callback run after the target unit has been enabled, *after* 182 * the state lock has been released. It's useful for implementing 183 * boot-up delays without blocking other users from powering up 184 * using the same power sequencer. 
 */
struct pwrseq_target {
	struct list_head list;
	const char *name;
	struct pwrseq_unit *unit;
	pwrseq_power_state_func post_enable;
};

/*
 * Allocate a new target described by @data. The final unit (@unit) is set
 * up separately by the caller. Returns NULL on allocation failure.
 */
static struct pwrseq_target *
pwrseq_target_new(const struct pwrseq_target_data *data)
{
	struct pwrseq_target *target;

	target = kzalloc(sizeof(*target), GFP_KERNEL);
	if (!target)
		return NULL;

	target->name = kstrdup_const(data->name, GFP_KERNEL);
	if (!target->name)  {
		kfree(target);
		return NULL;
	}

	target->post_enable = data->post_enable;

	return target;
}

/*
 * Free @target and drop its reference to the final unit. Must only be
 * called once target->unit points at a valid unit.
 */
static void pwrseq_target_free(struct pwrseq_target *target)
{
	pwrseq_unit_put(target->unit);
	kfree_const(target->name);
	kfree(target);
}

/**
 * struct pwrseq_device - Private power sequencing data.
 * @dev: Device struct associated with this sequencer.
 * @id: Device ID.
 * @owner: Prevents removal of active power sequencing providers.
 * @rw_lock: Protects the device from being unregistered while in use.
 * @state_lock: Prevents multiple users running the power sequence at the same
 *              time.
 * @match: Power sequencer matching callback.
 * @targets: List of targets exposed by this sequencer.
 * @units: List of all units supported by this sequencer.
 */
struct pwrseq_device {
	struct device dev;
	int id;
	struct module *owner;
	struct rw_semaphore rw_lock;
	struct mutex state_lock;
	pwrseq_match_func match;
	struct list_head targets;
	struct list_head units;
};

static struct pwrseq_device *to_pwrseq_device(struct device *dev)
{
	return container_of(dev, struct pwrseq_device, dev);
}

/* Take a reference to the embedded struct device. */
static struct pwrseq_device *pwrseq_device_get(struct pwrseq_device *pwrseq)
{
	get_device(&pwrseq->dev);

	return pwrseq;
}

/* Drop the device reference; pwrseq_release() runs when it hits zero. */
static void pwrseq_device_put(struct pwrseq_device *pwrseq)
{
	put_device(&pwrseq->dev);
}

/**
 * struct pwrseq_desc - Wraps access to the pwrseq_device and ensures that one
 *                      user cannot break the reference counting for others.
 * @pwrseq: Reference to the power sequencing device.
 * @target: Reference to the target this descriptor allows to control.
 * @powered_on: Power state set by the holder of the descriptor (not necessarily
 *              corresponding to the actual power state of the device).
267 */ 268 struct pwrseq_desc { 269 struct pwrseq_device *pwrseq; 270 struct pwrseq_target *target; 271 bool powered_on; 272 }; 273 274 static const struct bus_type pwrseq_bus = { 275 .name = "pwrseq", 276 }; 277 278 static void pwrseq_release(struct device *dev) 279 { 280 struct pwrseq_device *pwrseq = to_pwrseq_device(dev); 281 struct pwrseq_target *target, *pos; 282 283 list_for_each_entry_safe(target, pos, &pwrseq->targets, list) { 284 list_del(&target->list); 285 pwrseq_target_free(target); 286 } 287 288 mutex_destroy(&pwrseq->state_lock); 289 ida_free(&pwrseq_ida, pwrseq->id); 290 kfree(pwrseq); 291 } 292 293 static const struct device_type pwrseq_device_type = { 294 .name = "power_sequencer", 295 .release = pwrseq_release, 296 }; 297 298 static int pwrseq_check_unit_deps(const struct pwrseq_unit_data *data, 299 struct radix_tree_root *visited_units) 300 { 301 const struct pwrseq_unit_data *tmp, **cur; 302 int ret; 303 304 ret = radix_tree_insert(visited_units, (unsigned long)data, 305 (void *)data); 306 if (ret) 307 return ret; 308 309 for (cur = data->deps; cur && *cur; cur++) { 310 tmp = radix_tree_lookup(visited_units, (unsigned long)*cur); 311 if (tmp) { 312 WARN(1, "Circular dependency in power sequencing flow detected!\n"); 313 return -EINVAL; 314 } 315 316 ret = pwrseq_check_unit_deps(*cur, visited_units); 317 if (ret) 318 return ret; 319 } 320 321 return 0; 322 } 323 324 static int pwrseq_check_target_deps(const struct pwrseq_target_data *data) 325 { 326 struct radix_tree_root visited_units; 327 struct radix_tree_iter iter; 328 void __rcu **slot; 329 int ret; 330 331 if (!data->unit) 332 return -EINVAL; 333 334 INIT_RADIX_TREE(&visited_units, GFP_KERNEL); 335 ret = pwrseq_check_unit_deps(data->unit, &visited_units); 336 radix_tree_for_each_slot(slot, &visited_units, &iter, 0) 337 radix_tree_delete(&visited_units, iter.index); 338 339 return ret; 340 } 341 342 static int pwrseq_unit_setup_deps(const struct pwrseq_unit_data **data, 343 struct list_head 
*dep_list, 344 struct list_head *unit_list, 345 struct radix_tree_root *processed_units); 346 347 static struct pwrseq_unit * 348 pwrseq_unit_setup(const struct pwrseq_unit_data *data, 349 struct list_head *unit_list, 350 struct radix_tree_root *processed_units) 351 { 352 struct pwrseq_unit *unit; 353 int ret; 354 355 unit = radix_tree_lookup(processed_units, (unsigned long)data); 356 if (unit) 357 return pwrseq_unit_get(unit); 358 359 unit = pwrseq_unit_new(data); 360 if (!unit) 361 return ERR_PTR(-ENOMEM); 362 363 if (data->deps) { 364 ret = pwrseq_unit_setup_deps(data->deps, &unit->deps, 365 unit_list, processed_units); 366 if (ret) { 367 pwrseq_unit_put(unit); 368 return ERR_PTR(ret); 369 } 370 } 371 372 ret = radix_tree_insert(processed_units, (unsigned long)data, unit); 373 if (ret) { 374 pwrseq_unit_put(unit); 375 return ERR_PTR(ret); 376 } 377 378 list_add_tail(&unit->list, unit_list); 379 380 return unit; 381 } 382 383 static int pwrseq_unit_setup_deps(const struct pwrseq_unit_data **data, 384 struct list_head *dep_list, 385 struct list_head *unit_list, 386 struct radix_tree_root *processed_units) 387 { 388 const struct pwrseq_unit_data *pos; 389 struct pwrseq_unit_dep *dep; 390 struct pwrseq_unit *unit; 391 int i; 392 393 for (i = 0; data[i]; i++) { 394 pos = data[i]; 395 396 unit = pwrseq_unit_setup(pos, unit_list, processed_units); 397 if (IS_ERR(unit)) 398 return PTR_ERR(unit); 399 400 dep = pwrseq_unit_dep_new(unit); 401 if (!dep) { 402 pwrseq_unit_put(unit); 403 return -ENOMEM; 404 } 405 406 list_add_tail(&dep->list, dep_list); 407 } 408 409 return 0; 410 } 411 412 static int pwrseq_do_setup_targets(const struct pwrseq_target_data **data, 413 struct pwrseq_device *pwrseq, 414 struct radix_tree_root *processed_units) 415 { 416 const struct pwrseq_target_data *pos; 417 struct pwrseq_target *target; 418 int ret, i; 419 420 for (i = 0; data[i]; i++) { 421 pos = data[i]; 422 423 ret = pwrseq_check_target_deps(pos); 424 if (ret) 425 return ret; 426 427 
target = pwrseq_target_new(pos); 428 if (!target) 429 return -ENOMEM; 430 431 target->unit = pwrseq_unit_setup(pos->unit, &pwrseq->units, 432 processed_units); 433 if (IS_ERR(target->unit)) { 434 ret = PTR_ERR(target->unit); 435 pwrseq_target_free(target); 436 return ret; 437 } 438 439 list_add_tail(&target->list, &pwrseq->targets); 440 } 441 442 return 0; 443 } 444 445 static int pwrseq_setup_targets(const struct pwrseq_target_data **targets, 446 struct pwrseq_device *pwrseq) 447 { 448 struct radix_tree_root processed_units; 449 struct radix_tree_iter iter; 450 void __rcu **slot; 451 int ret; 452 453 INIT_RADIX_TREE(&processed_units, GFP_KERNEL); 454 ret = pwrseq_do_setup_targets(targets, pwrseq, &processed_units); 455 radix_tree_for_each_slot(slot, &processed_units, &iter, 0) 456 radix_tree_delete(&processed_units, iter.index); 457 458 return ret; 459 } 460 461 /** 462 * pwrseq_device_register() - Register a new power sequencer. 463 * @config: Configuration of the new power sequencing device. 464 * 465 * The config structure is only used during the call and can be freed after 466 * the function returns. The config structure *must* have the parent device 467 * as well as the match() callback and at least one target set. 468 * 469 * Returns: 470 * Returns the address of the new pwrseq device or ERR_PTR() on failure. 
 */
struct pwrseq_device *
pwrseq_device_register(const struct pwrseq_config *config)
{
	struct pwrseq_device *pwrseq;
	int ret, id;

	if (!config->parent || !config->match || !config->targets ||
	    !config->targets[0])
		return ERR_PTR(-EINVAL);

	pwrseq = kzalloc(sizeof(*pwrseq), GFP_KERNEL);
	if (!pwrseq)
		return ERR_PTR(-ENOMEM);

	pwrseq->dev.type = &pwrseq_device_type;
	pwrseq->dev.bus = &pwrseq_bus;
	pwrseq->dev.parent = config->parent;
	device_set_node(&pwrseq->dev, dev_fwnode(config->parent));
	dev_set_drvdata(&pwrseq->dev, config->drvdata);

	id = ida_alloc(&pwrseq_ida, GFP_KERNEL);
	if (id < 0) {
		kfree(pwrseq);
		return ERR_PTR(id);
	}

	pwrseq->id = id;

	/*
	 * From this point onwards the device's release() callback is
	 * responsible for freeing resources.
	 */
	device_initialize(&pwrseq->dev);

	ret = dev_set_name(&pwrseq->dev, "pwrseq.%d", pwrseq->id);
	if (ret)
		goto err_put_pwrseq;

	pwrseq->owner = config->owner ?: THIS_MODULE;
	pwrseq->match = config->match;

	init_rwsem(&pwrseq->rw_lock);
	mutex_init(&pwrseq->state_lock);
	INIT_LIST_HEAD(&pwrseq->targets);
	INIT_LIST_HEAD(&pwrseq->units);

	ret = pwrseq_setup_targets(config->targets, pwrseq);
	if (ret)
		goto err_put_pwrseq;

	/* Writer lock: device_add() modifies the bus device list. */
	scoped_guard(rwsem_write, &pwrseq_sem) {
		ret = device_add(&pwrseq->dev);
		if (ret)
			goto err_put_pwrseq;
	}

	return pwrseq;

err_put_pwrseq:
	pwrseq_device_put(pwrseq);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(pwrseq_device_register);

/**
 * pwrseq_device_unregister() - Unregister the power sequencer.
 * @pwrseq: Power sequencer to unregister.
 */
void pwrseq_device_unregister(struct pwrseq_device *pwrseq)
{
	struct device *dev = &pwrseq->dev;
	struct pwrseq_target *target;

	scoped_guard(mutex, &pwrseq->state_lock) {
		/* Taken for writing: no consumer can use the device now. */
		guard(rwsem_write)(&pwrseq->rw_lock);

		list_for_each_entry(target, &pwrseq->targets, list)
			WARN(target->unit->enable_count,
			     "REMOVING POWER SEQUENCER WITH ACTIVE USERS\n");

		guard(rwsem_write)(&pwrseq_sem);

		device_del(dev);
	}

	pwrseq_device_put(pwrseq);
}
EXPORT_SYMBOL_GPL(pwrseq_device_unregister);

/* devres action wrapper around pwrseq_device_unregister(). */
static void devm_pwrseq_device_unregister(void *data)
{
	struct pwrseq_device *pwrseq = data;

	pwrseq_device_unregister(pwrseq);
}

/**
 * devm_pwrseq_device_register() - Managed variant of pwrseq_device_register().
 * @dev: Managing device.
 * @config: Configuration of the new power sequencing device.
 *
 * Returns:
 * Returns the address of the new pwrseq device or ERR_PTR() on failure.
 */
struct pwrseq_device *
devm_pwrseq_device_register(struct device *dev,
			    const struct pwrseq_config *config)
{
	struct pwrseq_device *pwrseq;
	int ret;

	pwrseq = pwrseq_device_register(config);
	if (IS_ERR(pwrseq))
		return pwrseq;

	ret = devm_add_action_or_reset(dev, devm_pwrseq_device_unregister,
				       pwrseq);
	if (ret)
		return ERR_PTR(ret);

	return pwrseq;
}
EXPORT_SYMBOL_GPL(devm_pwrseq_device_register);

/**
 * pwrseq_device_get_drvdata() - Get the driver private data associated with
 *                               this sequencer.
 * @pwrseq: Power sequencer object.
 *
 * Returns:
 * Address of the private driver data.
603 */ 604 void *pwrseq_device_get_drvdata(struct pwrseq_device *pwrseq) 605 { 606 return dev_get_drvdata(&pwrseq->dev); 607 } 608 EXPORT_SYMBOL_GPL(pwrseq_device_get_drvdata); 609 610 struct pwrseq_match_data { 611 struct pwrseq_desc *desc; 612 struct device *dev; 613 const char *target; 614 }; 615 616 static int pwrseq_match_device(struct device *pwrseq_dev, void *data) 617 { 618 struct pwrseq_device *pwrseq = to_pwrseq_device(pwrseq_dev); 619 struct pwrseq_match_data *match_data = data; 620 struct pwrseq_target *target; 621 int ret; 622 623 lockdep_assert_held_read(&pwrseq_sem); 624 625 guard(rwsem_read)(&pwrseq->rw_lock); 626 if (!device_is_registered(&pwrseq->dev)) 627 return 0; 628 629 ret = pwrseq->match(pwrseq, match_data->dev); 630 if (ret <= 0) 631 return ret; 632 633 /* We got the matching device, let's find the right target. */ 634 list_for_each_entry(target, &pwrseq->targets, list) { 635 if (strcmp(target->name, match_data->target)) 636 continue; 637 638 match_data->desc->target = target; 639 } 640 641 /* 642 * This device does not have this target. No point in deferring as it 643 * will not get a new target dynamically later. 644 */ 645 if (!match_data->desc->target) 646 return -ENOENT; 647 648 if (!try_module_get(pwrseq->owner)) 649 return -EPROBE_DEFER; 650 651 match_data->desc->pwrseq = pwrseq_device_get(pwrseq); 652 653 return 1; 654 } 655 656 /** 657 * pwrseq_get() - Get the power sequencer associated with this device. 658 * @dev: Device for which to get the sequencer. 659 * @target: Name of the target exposed by the sequencer this device wants to 660 * reach. 661 * 662 * Returns: 663 * New power sequencer descriptor for use by the consumer driver or ERR_PTR() 664 * on failure. 
 */
struct pwrseq_desc *pwrseq_get(struct device *dev, const char *target)
{
	struct pwrseq_match_data match_data;
	int ret;

	struct pwrseq_desc *desc __free(kfree) = kzalloc(sizeof(*desc),
							 GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	match_data.desc = desc;
	match_data.dev = dev;
	match_data.target = target;

	/* Keep providers from (un)registering while we scan the bus. */
	guard(rwsem_read)(&pwrseq_sem);

	ret = bus_for_each_dev(&pwrseq_bus, NULL, &match_data,
			       pwrseq_match_device);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret == 0)
		/* No device matched. */
		return ERR_PTR(-EPROBE_DEFER);

	return_ptr(desc);
}
EXPORT_SYMBOL_GPL(pwrseq_get);

/**
 * pwrseq_put() - Release the power sequencer descriptor.
 * @desc: Descriptor to release.
 */
void pwrseq_put(struct pwrseq_desc *desc)
{
	struct pwrseq_device *pwrseq;

	if (!desc)
		return;

	pwrseq = desc->pwrseq;

	/* Don't leave the device powered on by a vanishing user. */
	if (desc->powered_on)
		pwrseq_power_off(desc);

	kfree(desc);
	/* Only now may the provider module go away. */
	module_put(pwrseq->owner);
	pwrseq_device_put(pwrseq);
}
EXPORT_SYMBOL_GPL(pwrseq_put);

/* devres action wrapper around pwrseq_put(). */
static void devm_pwrseq_put(void *data)
{
	struct pwrseq_desc *desc = data;

	pwrseq_put(desc);
}

/**
 * devm_pwrseq_get() - Managed variant of pwrseq_get().
 * @dev: Device for which to get the sequencer and which also manages its
 *       lifetime.
 * @target: Name of the target exposed by the sequencer this device wants to
 *          reach.
 *
 * Returns:
 * New power sequencer descriptor for use by the consumer driver or ERR_PTR()
 * on failure.
 */
struct pwrseq_desc *devm_pwrseq_get(struct device *dev, const char *target)
{
	struct pwrseq_desc *desc;
	int ret;

	desc = pwrseq_get(dev, target);
	if (IS_ERR(desc))
		return desc;

	ret = devm_add_action_or_reset(dev, devm_pwrseq_put, desc);
	if (ret)
		return ERR_PTR(ret);

	return desc;
}
EXPORT_SYMBOL_GPL(devm_pwrseq_get);

static int pwrseq_unit_enable(struct pwrseq_device *pwrseq,
			      struct pwrseq_unit *target);
static int pwrseq_unit_disable(struct pwrseq_device *pwrseq,
			       struct pwrseq_unit *target);

/*
 * Enable every dependency on @list in order. On failure, roll back by
 * disabling the dependencies already enabled, in reverse order.
 */
static int pwrseq_unit_enable_deps(struct pwrseq_device *pwrseq,
				   struct list_head *list)
{
	struct pwrseq_unit_dep *pos;
	int ret = 0;

	list_for_each_entry(pos, list, list) {
		ret = pwrseq_unit_enable(pwrseq, pos->unit);
		if (ret) {
			list_for_each_entry_continue_reverse(pos, list, list)
				pwrseq_unit_disable(pwrseq, pos->unit);
			break;
		}
	}

	return ret;
}

/*
 * Disable every dependency on @list in reverse order. On failure, attempt
 * to re-enable the dependencies already disabled.
 */
static int pwrseq_unit_disable_deps(struct pwrseq_device *pwrseq,
				    struct list_head *list)
{
	struct pwrseq_unit_dep *pos;
	int ret = 0;

	list_for_each_entry_reverse(pos, list, list) {
		ret = pwrseq_unit_disable(pwrseq, pos->unit);
		if (ret) {
			list_for_each_entry_continue(pos, list, list)
				pwrseq_unit_enable(pwrseq, pos->unit);
			break;
		}
	}

	return ret;
}

/*
 * Enable @unit, recursively enabling its dependencies first. Units already
 * enabled by another user just get their enable count bumped.
 *
 * Must be called with the device's rw_lock held for reading and state_lock
 * held (both asserted below).
 */
static int pwrseq_unit_enable(struct pwrseq_device *pwrseq,
			      struct pwrseq_unit *unit)
{
	int ret;

	lockdep_assert_held_read(&pwrseq->rw_lock);
	lockdep_assert_held(&pwrseq->state_lock);

	if (unit->enable_count != 0) {
		unit->enable_count++;
		return 0;
	}

	ret = pwrseq_unit_enable_deps(pwrseq, &unit->deps);
	if (ret) {
		dev_err(&pwrseq->dev,
			"Failed to enable dependencies before power-on for target '%s': %d\n",
			unit->name, ret);
		return ret;
	}

	if (unit->enable) {
		ret = unit->enable(pwrseq);
		if (ret) {
			dev_err(&pwrseq->dev,
				"Failed to enable target '%s': %d\n",
				unit->name, ret);
			pwrseq_unit_disable_deps(pwrseq, &unit->deps);
			return ret;
		}
	}

	unit->enable_count++;

	return 0;
}

/*
 * Disable @unit and then its dependencies once the last user is gone.
 * Units still in use by someone else just get their enable count dropped.
 *
 * Must be called with the device's rw_lock held for reading and state_lock
 * held (both asserted below).
 */
static int pwrseq_unit_disable(struct pwrseq_device *pwrseq,
			       struct pwrseq_unit *unit)
{
	int ret;

	lockdep_assert_held_read(&pwrseq->rw_lock);
	lockdep_assert_held(&pwrseq->state_lock);

	if (unit->enable_count == 0) {
		WARN(1, "Unmatched power-off for target '%s'\n",
		     unit->name);
		return -EBUSY;
	}

	if (unit->enable_count != 1) {
		unit->enable_count--;
		return 0;
	}

	if (unit->disable) {
		ret = unit->disable(pwrseq);
		if (ret) {
			dev_err(&pwrseq->dev,
				"Failed to disable target '%s': %d\n",
				unit->name, ret);
			return ret;
		}
	}

	ret = pwrseq_unit_disable_deps(pwrseq, &unit->deps);
	if (ret) {
		dev_err(&pwrseq->dev,
			"Failed to disable dependencies after power-off for target '%s': %d\n",
			unit->name, ret);
		/* Best effort: try to bring the unit itself back up. */
		if (unit->enable)
			unit->enable(pwrseq);
		return ret;
	}

	unit->enable_count--;

	return 0;
}

/**
 * pwrseq_power_on() - Issue a power-on request on behalf of the consumer
 *                     device.
 * @desc: Descriptor referencing the power sequencer.
 *
 * This function tells the power sequencer that the consumer wants to be
 * powered-up. The sequencer may already have powered-up the device in which
 * case the function returns 0. If the power-up sequence is already in
 * progress, the function will block until it's done and return 0. If this is
 * the first request, the device will be powered up.
 *
 * Returns:
 * 0 on success, negative error number on failure.
886 */ 887 int pwrseq_power_on(struct pwrseq_desc *desc) 888 { 889 struct pwrseq_device *pwrseq; 890 struct pwrseq_target *target; 891 struct pwrseq_unit *unit; 892 int ret; 893 894 might_sleep(); 895 896 if (!desc || desc->powered_on) 897 return 0; 898 899 pwrseq = desc->pwrseq; 900 target = desc->target; 901 unit = target->unit; 902 903 guard(rwsem_read)(&pwrseq->rw_lock); 904 if (!device_is_registered(&pwrseq->dev)) 905 return -ENODEV; 906 907 scoped_guard(mutex, &pwrseq->state_lock) { 908 ret = pwrseq_unit_enable(pwrseq, unit); 909 if (!ret) 910 desc->powered_on = true; 911 } 912 913 if (target->post_enable) { 914 ret = target->post_enable(pwrseq); 915 if (ret) { 916 pwrseq_unit_disable(pwrseq, unit); 917 desc->powered_on = false; 918 } 919 } 920 921 return ret; 922 } 923 EXPORT_SYMBOL_GPL(pwrseq_power_on); 924 925 /** 926 * pwrseq_power_off() - Issue a power-off request on behalf of the consumer 927 * device. 928 * @desc: Descriptor referencing the power sequencer. 929 * 930 * This undoes the effects of pwrseq_power_on(). It issues a power-off request 931 * on behalf of the consumer and when the last remaining user does so, the 932 * power-down sequence will be started. If one is in progress, the function 933 * will block until it's complete and then return. 934 * 935 * Returns: 936 * 0 on success, negative error number on failure. 
 */
int pwrseq_power_off(struct pwrseq_desc *desc)
{
	struct pwrseq_device *pwrseq;
	struct pwrseq_unit *unit;
	int ret;

	might_sleep();

	if (!desc || !desc->powered_on)
		return 0;

	pwrseq = desc->pwrseq;
	unit = desc->target->unit;

	guard(rwsem_read)(&pwrseq->rw_lock);
	if (!device_is_registered(&pwrseq->dev))
		return -ENODEV;

	guard(mutex)(&pwrseq->state_lock);

	ret = pwrseq_unit_disable(pwrseq, unit);
	if (!ret)
		desc->powered_on = false;

	return ret;
}
EXPORT_SYMBOL_GPL(pwrseq_power_off);

#if IS_ENABLED(CONFIG_DEBUG_FS)

/* Helper context for locating the seq_file element at a given position. */
struct pwrseq_debugfs_count_ctx {
	struct device *dev;
	loff_t index;
};

/* Bus iterator: remember the device, stop (return 1) once index hits zero. */
static int pwrseq_debugfs_seq_count(struct device *dev, void *data)
{
	struct pwrseq_debugfs_count_ctx *ctx = data;

	ctx->dev = dev;

	return ctx->index-- ? 0 : 1;
}

static void *pwrseq_debugfs_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct pwrseq_debugfs_count_ctx ctx;

	ctx.dev = NULL;
	ctx.index = *pos;

	/*
	 * We're holding the lock for the entire printout so no need to fiddle
	 * with device reference count.
	 */
	down_read(&pwrseq_sem);

	bus_for_each_dev(&pwrseq_bus, NULL, &ctx, pwrseq_debugfs_seq_count);
	/* index == 0 here means we walked past the last device. */
	if (!ctx.index)
		return NULL;

	return ctx.dev;
}

static void *pwrseq_debugfs_seq_next(struct seq_file *seq, void *data,
				     loff_t *pos)
{
	struct device *curr = data;

	++*pos;

	struct device *next __free(put_device) =
			bus_find_next_device(&pwrseq_bus, curr);
	return next;
}

static void pwrseq_debugfs_seq_show_target(struct seq_file *seq,
					   struct pwrseq_target *target)
{
	seq_printf(seq, " target: [%s] (target unit: [%s])\n",
		   target->name, target->unit->name);
}

static void pwrseq_debugfs_seq_show_unit(struct seq_file *seq,
					 struct pwrseq_unit *unit)
{
	struct pwrseq_unit_dep *ref;

	seq_printf(seq, " unit: [%s] - enable count: %u\n",
		   unit->name, unit->enable_count);

	if (list_empty(&unit->deps))
		return;

	seq_puts(seq, " dependencies:\n");
	list_for_each_entry(ref, &unit->deps, list)
		seq_printf(seq, " [%s]\n", ref->unit->name);
}

static int pwrseq_debugfs_seq_show(struct seq_file *seq, void *data)
{
	struct device *dev = data;
	struct pwrseq_device *pwrseq = to_pwrseq_device(dev);
	struct pwrseq_target *target;
	struct pwrseq_unit *unit;

	seq_printf(seq, "%s:\n", dev_name(dev));

	seq_puts(seq, " targets:\n");
	list_for_each_entry(target, &pwrseq->targets, list)
		pwrseq_debugfs_seq_show_target(seq, target);

	seq_puts(seq, " units:\n");
	list_for_each_entry(unit, &pwrseq->units, list)
		pwrseq_debugfs_seq_show_unit(seq, unit);

	return 0;
}

static void pwrseq_debugfs_seq_stop(struct seq_file *seq, void *data)
{
	/* Pairs with down_read() in pwrseq_debugfs_seq_start(). */
	up_read(&pwrseq_sem);
}

static const struct seq_operations pwrseq_debugfs_sops = {
	.start = pwrseq_debugfs_seq_start,
	.next = pwrseq_debugfs_seq_next,
	.show = pwrseq_debugfs_seq_show,
	.stop = pwrseq_debugfs_seq_stop,
};
DEFINE_SEQ_ATTRIBUTE(pwrseq_debugfs);

static struct dentry *pwrseq_debugfs_dentry;

#endif /* CONFIG_DEBUG_FS */

static int __init pwrseq_init(void)
{
	int ret;

	ret = bus_register(&pwrseq_bus);
	if (ret) {
		pr_err("Failed to register the power sequencer bus\n");
		return ret;
	}

#if IS_ENABLED(CONFIG_DEBUG_FS)
	pwrseq_debugfs_dentry = debugfs_create_file("pwrseq", 0444, NULL, NULL,
						    &pwrseq_debugfs_fops);
#endif /* CONFIG_DEBUG_FS */

	return 0;
}
subsys_initcall(pwrseq_init);

static void __exit pwrseq_exit(void)
{
#if IS_ENABLED(CONFIG_DEBUG_FS)
	debugfs_remove_recursive(pwrseq_debugfs_dentry);
#endif /* CONFIG_DEBUG_FS */

	bus_unregister(&pwrseq_bus);
}
module_exit(pwrseq_exit);

MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>");
MODULE_DESCRIPTION("Power Sequencing subsystem core");
MODULE_LICENSE("GPL");