// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2024 Linaro Ltd.
 */

#include <linux/bug.h>
#include <linux/cleanup.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/pwrseq/consumer.h>
#include <linux/pwrseq/provider.h>
#include <linux/radix-tree.h>
#include <linux/rwsem.h>
#include <linux/slab.h>

/*
 * Power-sequencing framework for Linux.
 *
 * This subsystem allows power sequence providers to register a set of targets
 * that consumers may request and power up or down.
 *
 * Glossary:
 *
 * Unit - a unit is a discrete chunk of a power sequence. For instance, one
 * unit may enable a set of regulators, another may enable a specific GPIO.
 * Units can define dependencies in the form of other units that must be
 * enabled before the unit itself can be.
 *
 * Target - a target is a set of units (composed of the "final" unit and its
 * dependencies) that a consumer selects by its name when requesting a handle
 * to the power sequencer. Via the dependency system, multiple targets may
 * share the same parts of a power sequence but ignore parts that are
 * irrelevant.
 *
 * Descriptor - a handle passed by the pwrseq core to every consumer that
 * serves as the entry point to the provider layer. It ensures coherence
 * between different users and keeps reference counting consistent.
 *
 * Each provider must define a .match() callback whose role is to determine
 * whether a potential consumer is in fact associated with this sequencer.
 * This allows creating abstraction layers on top of regular device-tree
 * resources like regulators, clocks and other nodes connected to the consumer
 * via phandle.
 */

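/*
 * As a rough illustration of the glossary above, a provider typically
 * describes its sequence with static unit and target tables. The structures
 * come from <linux/pwrseq/provider.h>; the unit names and callbacks below are
 * made up for the example:
 *
 *	static const struct pwrseq_unit_data foo_vregs_unit = {
 *		.name = "regulators-enable",
 *		.enable = foo_vregs_enable,
 *		.disable = foo_vregs_disable,
 *	};
 *
 *	static const struct pwrseq_unit_data *foo_radio_unit_deps[] = {
 *		&foo_vregs_unit,
 *		NULL
 *	};
 *
 *	static const struct pwrseq_unit_data foo_radio_unit = {
 *		.name = "radio-enable",
 *		.deps = foo_radio_unit_deps,
 *		.enable = foo_radio_enable,
 *		.disable = foo_radio_disable,
 *	};
 *
 *	static const struct pwrseq_target_data foo_radio_target = {
 *		.name = "radio",
 *		.unit = &foo_radio_unit,
 *		.post_enable = foo_radio_post_enable,
 *	};
 *
 * A consumer requesting the "radio" target makes the core walk the dependency
 * list and enable "regulators-enable" before "radio-enable".
 */
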
static DEFINE_IDA(pwrseq_ida);

/*
 * Protects the device list on the pwrseq bus from concurrent modifications
 * but allows simultaneous read-only access.
 */
static DECLARE_RWSEM(pwrseq_sem);

/**
 * struct pwrseq_unit - Private power-sequence unit data.
 * @ref: Reference count for this object. When it goes to 0, the object is
 *       destroyed.
 * @name: Name of this unit.
 * @list: Link to siblings on the list of all units of a single sequencer.
 * @deps: List of units on which this unit depends.
 * @enable: Callback running the part of the power-on sequence provided by
 *          this unit.
 * @disable: Callback running the part of the power-off sequence provided
 *           by this unit.
 * @enable_count: Current number of users that enabled this unit. May be the
 *                consumer of the power sequencer or other units that depend
 *                on this one.
 */
struct pwrseq_unit {
	struct kref ref;
	const char *name;
	struct list_head list;
	struct list_head deps;
	pwrseq_power_state_func enable;
	pwrseq_power_state_func disable;
	unsigned int enable_count;
};

static struct pwrseq_unit *pwrseq_unit_new(const struct pwrseq_unit_data *data)
{
	struct pwrseq_unit *unit;

	unit = kzalloc(sizeof(*unit), GFP_KERNEL);
	if (!unit)
		return NULL;

	unit->name = kstrdup_const(data->name, GFP_KERNEL);
	if (!unit->name) {
		kfree(unit);
		return NULL;
	}

	kref_init(&unit->ref);
	INIT_LIST_HEAD(&unit->deps);
	unit->enable = data->enable;
	unit->disable = data->disable;

	return unit;
}

static struct pwrseq_unit *pwrseq_unit_get(struct pwrseq_unit *unit)
{
	kref_get(&unit->ref);

	return unit;
}

static void pwrseq_unit_release(struct kref *ref);

static void pwrseq_unit_put(struct pwrseq_unit *unit)
{
	kref_put(&unit->ref, pwrseq_unit_release);
}

/**
 * struct pwrseq_unit_dep - Wrapper around a reference to the unit structure
 *                          that allows keeping it on multiple dependency
 *                          lists in different units.
 * @list: Siblings on the list.
 * @unit: Address of the referenced unit.
 */
struct pwrseq_unit_dep {
	struct list_head list;
	struct pwrseq_unit *unit;
};

static struct pwrseq_unit_dep *pwrseq_unit_dep_new(struct pwrseq_unit *unit)
{
	struct pwrseq_unit_dep *dep;

	dep = kzalloc(sizeof(*dep), GFP_KERNEL);
	if (!dep)
		return NULL;

	dep->unit = unit;

	return dep;
}

static void pwrseq_unit_dep_free(struct pwrseq_unit_dep *ref)
{
	pwrseq_unit_put(ref->unit);
	kfree(ref);
}

static void pwrseq_unit_free_deps(struct list_head *list)
{
	struct pwrseq_unit_dep *dep, *next;

	list_for_each_entry_safe(dep, next, list, list) {
		list_del(&dep->list);
		pwrseq_unit_dep_free(dep);
	}
}

static void pwrseq_unit_release(struct kref *ref)
{
	struct pwrseq_unit *unit = container_of(ref, struct pwrseq_unit, ref);

	pwrseq_unit_free_deps(&unit->deps);
	list_del(&unit->list);
	kfree_const(unit->name);
	kfree(unit);
}

/**
 * struct pwrseq_target - Private power-sequence target data.
 * @list: Siblings on the list of all targets exposed by a power sequencer.
 * @name: Name of the target.
 * @unit: Final unit for this target.
 * @post_enable: Callback run after the target unit has been enabled, *after*
 *               the state lock has been released. It's useful for implementing
 *               boot-up delays without blocking other users from powering up
 *               using the same power sequencer.
 */
struct pwrseq_target {
	struct list_head list;
	const char *name;
	struct pwrseq_unit *unit;
	pwrseq_power_state_func post_enable;
};

static struct pwrseq_target *
pwrseq_target_new(const struct pwrseq_target_data *data)
{
	struct pwrseq_target *target;

	target = kzalloc(sizeof(*target), GFP_KERNEL);
	if (!target)
		return NULL;

	target->name = kstrdup_const(data->name, GFP_KERNEL);
	if (!target->name) {
		kfree(target);
		return NULL;
	}

	target->post_enable = data->post_enable;

	return target;
}

static void pwrseq_target_free(struct pwrseq_target *target)
{
	if (!IS_ERR_OR_NULL(target->unit))
		pwrseq_unit_put(target->unit);
	kfree_const(target->name);
	kfree(target);
}

/**
 * struct pwrseq_device - Private power sequencing data.
 * @dev: Device struct associated with this sequencer.
 * @id: Device ID.
 * @owner: Prevents removal of active power sequencing providers.
 * @rw_lock: Protects the device from being unregistered while in use.
 * @state_lock: Prevents multiple users running the power sequence at the same
 *              time.
 * @match: Power sequencer matching callback.
 * @targets: List of targets exposed by this sequencer.
 * @units: List of all units supported by this sequencer.
 */
struct pwrseq_device {
	struct device dev;
	int id;
	struct module *owner;
	struct rw_semaphore rw_lock;
	struct mutex state_lock;
	pwrseq_match_func match;
	struct list_head targets;
	struct list_head units;
};

static struct pwrseq_device *to_pwrseq_device(struct device *dev)
{
	return container_of(dev, struct pwrseq_device, dev);
}

static struct pwrseq_device *pwrseq_device_get(struct pwrseq_device *pwrseq)
{
	get_device(&pwrseq->dev);

	return pwrseq;
}

static void pwrseq_device_put(struct pwrseq_device *pwrseq)
{
	put_device(&pwrseq->dev);
}

/**
 * struct pwrseq_desc - Wraps access to the pwrseq_device and ensures that one
 *                      user cannot break the reference counting for others.
 * @pwrseq: Reference to the power sequencing device.
 * @target: Reference to the target this descriptor allows to control.
 * @powered_on: Power state set by the holder of the descriptor (not necessarily
 *              corresponding to the actual power state of the device).
 */
struct pwrseq_desc {
	struct pwrseq_device *pwrseq;
	struct pwrseq_target *target;
	bool powered_on;
};

static const struct bus_type pwrseq_bus = {
	.name = "pwrseq",
};

static void pwrseq_release(struct device *dev)
{
	struct pwrseq_device *pwrseq = to_pwrseq_device(dev);
	struct pwrseq_target *target, *pos;

	list_for_each_entry_safe(target, pos, &pwrseq->targets, list) {
		list_del(&target->list);
		pwrseq_target_free(target);
	}

	mutex_destroy(&pwrseq->state_lock);
	ida_free(&pwrseq_ida, pwrseq->id);
	kfree(pwrseq);
}

static const struct device_type pwrseq_device_type = {
	.name = "power_sequencer",
	.release = pwrseq_release,
};

static int pwrseq_check_unit_deps(const struct pwrseq_unit_data *data,
				  struct radix_tree_root *visited_units)
{
	const struct pwrseq_unit_data *tmp, **cur;
	int ret;

	ret = radix_tree_insert(visited_units, (unsigned long)data,
				(void *)data);
	if (ret)
		return ret;

	for (cur = data->deps; cur && *cur; cur++) {
		tmp = radix_tree_lookup(visited_units, (unsigned long)*cur);
		if (tmp) {
			WARN(1, "Circular dependency in power sequencing flow detected!\n");
			return -EINVAL;
		}

		ret = pwrseq_check_unit_deps(*cur, visited_units);
		if (ret)
			return ret;
	}

	return 0;
}

static int pwrseq_check_target_deps(const struct pwrseq_target_data *data)
{
	struct radix_tree_root visited_units;
	struct radix_tree_iter iter;
	void __rcu **slot;
	int ret;

	if (!data->unit)
		return -EINVAL;

	INIT_RADIX_TREE(&visited_units, GFP_KERNEL);
	ret = pwrseq_check_unit_deps(data->unit, &visited_units);
	radix_tree_for_each_slot(slot, &visited_units, &iter, 0)
		radix_tree_delete(&visited_units, iter.index);

	return ret;
}

static int pwrseq_unit_setup_deps(const struct pwrseq_unit_data **data,
				  struct list_head *dep_list,
				  struct list_head *unit_list,
				  struct radix_tree_root *processed_units);

static struct pwrseq_unit *
pwrseq_unit_setup(const struct pwrseq_unit_data *data,
		  struct list_head *unit_list,
		  struct radix_tree_root *processed_units)
{
	struct pwrseq_unit *unit;
	int ret;

	unit = radix_tree_lookup(processed_units, (unsigned long)data);
	if (unit)
		return pwrseq_unit_get(unit);

	unit = pwrseq_unit_new(data);
	if (!unit)
		return ERR_PTR(-ENOMEM);

	if (data->deps) {
		ret = pwrseq_unit_setup_deps(data->deps, &unit->deps,
					     unit_list, processed_units);
		if (ret) {
			pwrseq_unit_put(unit);
			return ERR_PTR(ret);
		}
	}

	ret = radix_tree_insert(processed_units, (unsigned long)data, unit);
	if (ret) {
		pwrseq_unit_put(unit);
		return ERR_PTR(ret);
	}

	list_add_tail(&unit->list, unit_list);

	return unit;
}

static int pwrseq_unit_setup_deps(const struct pwrseq_unit_data **data,
				  struct list_head *dep_list,
				  struct list_head *unit_list,
				  struct radix_tree_root *processed_units)
{
	const struct pwrseq_unit_data *pos;
	struct pwrseq_unit_dep *dep;
	struct pwrseq_unit *unit;
	int i;

	for (i = 0; data[i]; i++) {
		pos = data[i];

		unit = pwrseq_unit_setup(pos, unit_list, processed_units);
		if (IS_ERR(unit))
			return PTR_ERR(unit);

		dep = pwrseq_unit_dep_new(unit);
		if (!dep) {
			pwrseq_unit_put(unit);
			return -ENOMEM;
		}

		list_add_tail(&dep->list, dep_list);
	}

	return 0;
}

static int pwrseq_do_setup_targets(const struct pwrseq_target_data **data,
				   struct pwrseq_device *pwrseq,
				   struct radix_tree_root *processed_units)
{
	const struct pwrseq_target_data *pos;
	struct pwrseq_target *target;
	int ret, i;

	for (i = 0; data[i]; i++) {
		pos = data[i];

		ret = pwrseq_check_target_deps(pos);
		if (ret)
			return ret;

		target = pwrseq_target_new(pos);
		if (!target)
			return -ENOMEM;

		target->unit = pwrseq_unit_setup(pos->unit, &pwrseq->units,
						 processed_units);
		if (IS_ERR(target->unit)) {
			ret = PTR_ERR(target->unit);
			pwrseq_target_free(target);
			return ret;
		}

		list_add_tail(&target->list, &pwrseq->targets);
	}

	return 0;
}

static int pwrseq_setup_targets(const struct pwrseq_target_data **targets,
				struct pwrseq_device *pwrseq)
{
	struct radix_tree_root processed_units;
	struct radix_tree_iter iter;
	void __rcu **slot;
	int ret;

	INIT_RADIX_TREE(&processed_units, GFP_KERNEL);
	ret = pwrseq_do_setup_targets(targets, pwrseq, &processed_units);
	radix_tree_for_each_slot(slot, &processed_units, &iter, 0)
		radix_tree_delete(&processed_units, iter.index);

	return ret;
}

/**
 * pwrseq_device_register() - Register a new power sequencer.
 * @config: Configuration of the new power sequencing device.
 *
 * The config structure is only used during the call and can be freed after
 * the function returns. It *must* contain the parent device as well as the
 * match() callback and at least one target.
 *
 * Returns:
 * Returns the address of the new pwrseq device or ERR_PTR() on failure.
 */
struct pwrseq_device *
pwrseq_device_register(const struct pwrseq_config *config)
{
	struct pwrseq_device *pwrseq;
	int ret, id;

	if (!config->parent || !config->match || !config->targets ||
	    !config->targets[0])
		return ERR_PTR(-EINVAL);

	pwrseq = kzalloc(sizeof(*pwrseq), GFP_KERNEL);
	if (!pwrseq)
		return ERR_PTR(-ENOMEM);

	pwrseq->dev.type = &pwrseq_device_type;
	pwrseq->dev.bus = &pwrseq_bus;
	pwrseq->dev.parent = config->parent;
	device_set_node(&pwrseq->dev, dev_fwnode(config->parent));
	dev_set_drvdata(&pwrseq->dev, config->drvdata);

	id = ida_alloc(&pwrseq_ida, GFP_KERNEL);
	if (id < 0) {
		kfree(pwrseq);
		return ERR_PTR(id);
	}

	pwrseq->id = id;

	/*
	 * From this point onwards the device's release() callback is
	 * responsible for freeing resources.
	 */
	device_initialize(&pwrseq->dev);

	ret = dev_set_name(&pwrseq->dev, "pwrseq.%d", pwrseq->id);
	if (ret)
		goto err_put_pwrseq;

	pwrseq->owner = config->owner ?: THIS_MODULE;
	pwrseq->match = config->match;

	init_rwsem(&pwrseq->rw_lock);
	mutex_init(&pwrseq->state_lock);
	INIT_LIST_HEAD(&pwrseq->targets);
	INIT_LIST_HEAD(&pwrseq->units);

	ret = pwrseq_setup_targets(config->targets, pwrseq);
	if (ret)
		goto err_put_pwrseq;

	scoped_guard(rwsem_write, &pwrseq_sem) {
		ret = device_add(&pwrseq->dev);
		if (ret)
			goto err_put_pwrseq;
	}

	return pwrseq;

err_put_pwrseq:
	pwrseq_device_put(pwrseq);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(pwrseq_device_register);
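
/*
 * A minimal provider registration sketch, assuming the hypothetical unit and
 * target tables from the example near the top of this file. The match
 * callback and foo_device_is_mine() helper are made up; pwrseq_config,
 * pwrseq_device_get_drvdata() and devm_pwrseq_device_register() are the real
 * interfaces of this framework:
 *
 *	static int foo_pwrseq_match(struct pwrseq_device *pwrseq,
 *				    struct device *dev)
 *	{
 *		struct foo_pwrseq_data *ddata = pwrseq_device_get_drvdata(pwrseq);
 *
 *		// Inspect 'dev' (e.g. the phandles in its firmware node) and
 *		// return 1 if this sequencer powers it up, 0 if it does not,
 *		// or a negative error number.
 *		return foo_device_is_mine(ddata, dev) ? 1 : 0;
 *	}
 *
 *	static int foo_pwrseq_probe(struct platform_device *pdev)
 *	{
 *		static const struct pwrseq_target_data *foo_targets[] = {
 *			&foo_radio_target,
 *			NULL
 *		};
 *		const struct pwrseq_config config = {
 *			.parent = &pdev->dev,
 *			.owner = THIS_MODULE,
 *			.match = foo_pwrseq_match,
 *			.targets = foo_targets,
 *		};
 *		struct pwrseq_device *pwrseq;
 *
 *		pwrseq = devm_pwrseq_device_register(&pdev->dev, &config);
 *		return PTR_ERR_OR_ZERO(pwrseq);
 *	}
 */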

/**
 * pwrseq_device_unregister() - Unregister the power sequencer.
 * @pwrseq: Power sequencer to unregister.
 */
void pwrseq_device_unregister(struct pwrseq_device *pwrseq)
{
	struct device *dev = &pwrseq->dev;
	struct pwrseq_target *target;

	scoped_guard(mutex, &pwrseq->state_lock) {
		guard(rwsem_write)(&pwrseq->rw_lock);

		list_for_each_entry(target, &pwrseq->targets, list)
			WARN(target->unit->enable_count,
			     "REMOVING POWER SEQUENCER WITH ACTIVE USERS\n");

		guard(rwsem_write)(&pwrseq_sem);

		device_del(dev);
	}

	pwrseq_device_put(pwrseq);
}
EXPORT_SYMBOL_GPL(pwrseq_device_unregister);

static void devm_pwrseq_device_unregister(void *data)
{
	struct pwrseq_device *pwrseq = data;

	pwrseq_device_unregister(pwrseq);
}

/**
 * devm_pwrseq_device_register() - Managed variant of pwrseq_device_register().
 * @dev: Managing device.
 * @config: Configuration of the new power sequencing device.
 *
 * Returns:
 * Returns the address of the new pwrseq device or ERR_PTR() on failure.
 */
struct pwrseq_device *
devm_pwrseq_device_register(struct device *dev,
			    const struct pwrseq_config *config)
{
	struct pwrseq_device *pwrseq;
	int ret;

	pwrseq = pwrseq_device_register(config);
	if (IS_ERR(pwrseq))
		return pwrseq;

	ret = devm_add_action_or_reset(dev, devm_pwrseq_device_unregister,
				       pwrseq);
	if (ret)
		return ERR_PTR(ret);

	return pwrseq;
}
EXPORT_SYMBOL_GPL(devm_pwrseq_device_register);

/**
 * pwrseq_device_get_drvdata() - Get the driver private data associated with
 *                               this sequencer.
 * @pwrseq: Power sequencer object.
 *
 * Returns:
 * Address of the private driver data.
 */
void *pwrseq_device_get_drvdata(struct pwrseq_device *pwrseq)
{
	return dev_get_drvdata(&pwrseq->dev);
}
EXPORT_SYMBOL_GPL(pwrseq_device_get_drvdata);
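
/*
 * A short, assumed example of how a provider's unit callback would typically
 * retrieve its private data set via pwrseq_config.drvdata; the foo_pwrseq_data
 * structure and its regulator fields are hypothetical:
 *
 *	static int foo_vregs_enable(struct pwrseq_device *pwrseq)
 *	{
 *		struct foo_pwrseq_data *ddata = pwrseq_device_get_drvdata(pwrseq);
 *
 *		return regulator_bulk_enable(ddata->num_vregs, ddata->vregs);
 *	}
 */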

struct pwrseq_match_data {
	struct pwrseq_desc *desc;
	struct device *dev;
	const char *target;
};

static int pwrseq_match_device(struct device *pwrseq_dev, void *data)
{
	struct pwrseq_device *pwrseq = to_pwrseq_device(pwrseq_dev);
	struct pwrseq_match_data *match_data = data;
	struct pwrseq_target *target;
	int ret;

	lockdep_assert_held_read(&pwrseq_sem);

	guard(rwsem_read)(&pwrseq->rw_lock);
	if (!device_is_registered(&pwrseq->dev))
		return 0;

	ret = pwrseq->match(pwrseq, match_data->dev);
	if (ret <= 0)
		return ret;

	/* We got the matching device, let's find the right target. */
	list_for_each_entry(target, &pwrseq->targets, list) {
		if (strcmp(target->name, match_data->target))
			continue;

		match_data->desc->target = target;
	}

	/*
	 * This device does not have this target. No point in deferring as it
	 * will not get a new target dynamically later.
	 */
	if (!match_data->desc->target)
		return -ENOENT;

	if (!try_module_get(pwrseq->owner))
		return -EPROBE_DEFER;

	match_data->desc->pwrseq = pwrseq_device_get(pwrseq);

	return 1;
}

/**
 * pwrseq_get() - Get the power sequencer associated with this device.
 * @dev: Device for which to get the sequencer.
 * @target: Name of the target exposed by the sequencer this device wants to
 *          reach.
 *
 * Returns:
 * New power sequencer descriptor for use by the consumer driver or ERR_PTR()
 * on failure.
 */
struct pwrseq_desc *pwrseq_get(struct device *dev, const char *target)
{
	struct pwrseq_match_data match_data;
	int ret;

	struct pwrseq_desc *desc __free(kfree) = kzalloc(sizeof(*desc),
							 GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	match_data.desc = desc;
	match_data.dev = dev;
	match_data.target = target;

	guard(rwsem_read)(&pwrseq_sem);

	ret = bus_for_each_dev(&pwrseq_bus, NULL, &match_data,
			       pwrseq_match_device);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret == 0)
		/* No device matched. */
		return ERR_PTR(-EPROBE_DEFER);

	return_ptr(desc);
}
EXPORT_SYMBOL_GPL(pwrseq_get);
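
/*
 * A sketch of the consumer side, assuming a hypothetical driver whose device
 * is matched by the provider above and which wants the "radio" target:
 *
 *	static int foo_radio_probe(struct platform_device *pdev)
 *	{
 *		struct pwrseq_desc *pwrseq;
 *
 *		pwrseq = devm_pwrseq_get(&pdev->dev, "radio");
 *		if (IS_ERR(pwrseq))
 *			return dev_err_probe(&pdev->dev, PTR_ERR(pwrseq),
 *					     "Failed to get the power sequencer\n");
 *
 *		return pwrseq_power_on(pwrseq);
 *	}
 */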

/**
 * pwrseq_put() - Release the power sequencer descriptor.
 * @desc: Descriptor to release.
 */
void pwrseq_put(struct pwrseq_desc *desc)
{
	struct pwrseq_device *pwrseq;

	if (!desc)
		return;

	pwrseq = desc->pwrseq;

	if (desc->powered_on)
		pwrseq_power_off(desc);

	kfree(desc);
	module_put(pwrseq->owner);
	pwrseq_device_put(pwrseq);
}
EXPORT_SYMBOL_GPL(pwrseq_put);

static void devm_pwrseq_put(void *data)
{
	struct pwrseq_desc *desc = data;

	pwrseq_put(desc);
}

/**
 * devm_pwrseq_get() - Managed variant of pwrseq_get().
 * @dev: Device for which to get the sequencer and which also manages its
 *       lifetime.
 * @target: Name of the target exposed by the sequencer this device wants to
 *          reach.
 *
 * Returns:
 * New power sequencer descriptor for use by the consumer driver or ERR_PTR()
 * on failure.
 */
struct pwrseq_desc *devm_pwrseq_get(struct device *dev, const char *target)
{
	struct pwrseq_desc *desc;
	int ret;

	desc = pwrseq_get(dev, target);
	if (IS_ERR(desc))
		return desc;

	ret = devm_add_action_or_reset(dev, devm_pwrseq_put, desc);
	if (ret)
		return ERR_PTR(ret);

	return desc;
}
EXPORT_SYMBOL_GPL(devm_pwrseq_get);

static int pwrseq_unit_enable(struct pwrseq_device *pwrseq,
			      struct pwrseq_unit *target);
static int pwrseq_unit_disable(struct pwrseq_device *pwrseq,
			       struct pwrseq_unit *target);

static int pwrseq_unit_enable_deps(struct pwrseq_device *pwrseq,
				   struct list_head *list)
{
	struct pwrseq_unit_dep *pos;
	int ret = 0;

	list_for_each_entry(pos, list, list) {
		ret = pwrseq_unit_enable(pwrseq, pos->unit);
		if (ret) {
			list_for_each_entry_continue_reverse(pos, list, list)
				pwrseq_unit_disable(pwrseq, pos->unit);
			break;
		}
	}

	return ret;
}

static int pwrseq_unit_disable_deps(struct pwrseq_device *pwrseq,
				    struct list_head *list)
{
	struct pwrseq_unit_dep *pos;
	int ret = 0;

	list_for_each_entry_reverse(pos, list, list) {
		ret = pwrseq_unit_disable(pwrseq, pos->unit);
		if (ret) {
			list_for_each_entry_continue(pos, list, list)
				pwrseq_unit_enable(pwrseq, pos->unit);
			break;
		}
	}

	return ret;
}

static int pwrseq_unit_enable(struct pwrseq_device *pwrseq,
			      struct pwrseq_unit *unit)
{
	int ret;

	lockdep_assert_held_read(&pwrseq->rw_lock);
	lockdep_assert_held(&pwrseq->state_lock);

	if (unit->enable_count != 0) {
		unit->enable_count++;
		return 0;
	}

	ret = pwrseq_unit_enable_deps(pwrseq, &unit->deps);
	if (ret) {
		dev_err(&pwrseq->dev,
			"Failed to enable dependencies before power-on for target '%s': %d\n",
			unit->name, ret);
		return ret;
	}

	if (unit->enable) {
		ret = unit->enable(pwrseq);
		if (ret) {
			dev_err(&pwrseq->dev,
				"Failed to enable target '%s': %d\n",
				unit->name, ret);
			pwrseq_unit_disable_deps(pwrseq, &unit->deps);
			return ret;
		}
	}

	unit->enable_count++;

	return 0;
}

static int pwrseq_unit_disable(struct pwrseq_device *pwrseq,
			       struct pwrseq_unit *unit)
{
	int ret;

	lockdep_assert_held_read(&pwrseq->rw_lock);
	lockdep_assert_held(&pwrseq->state_lock);

	if (unit->enable_count == 0) {
		WARN(1, "Unmatched power-off for target '%s'\n",
		     unit->name);
		return -EBUSY;
	}

	if (unit->enable_count != 1) {
		unit->enable_count--;
		return 0;
	}

	if (unit->disable) {
		ret = unit->disable(pwrseq);
		if (ret) {
			dev_err(&pwrseq->dev,
				"Failed to disable target '%s': %d\n",
				unit->name, ret);
			return ret;
		}
	}

	ret = pwrseq_unit_disable_deps(pwrseq, &unit->deps);
	if (ret) {
		dev_err(&pwrseq->dev,
			"Failed to disable dependencies after power-off for target '%s': %d\n",
			unit->name, ret);
		if (unit->enable)
			unit->enable(pwrseq);
		return ret;
	}

	unit->enable_count--;

	return 0;
}

/**
 * pwrseq_power_on() - Issue a power-on request on behalf of the consumer
 *                     device.
 * @desc: Descriptor referencing the power sequencer.
 *
 * This function tells the power sequencer that the consumer wants to be
 * powered up. The sequencer may already have powered up the device, in which
 * case the function returns 0. If the power-up sequence is already in
 * progress, the function will block until it's done and return 0. If this is
 * the first request, the device will be powered up.
 *
 * Returns:
 * 0 on success, negative error number on failure.
 */
int pwrseq_power_on(struct pwrseq_desc *desc)
{
	struct pwrseq_device *pwrseq;
	struct pwrseq_target *target;
	struct pwrseq_unit *unit;
	int ret;

	might_sleep();

	if (!desc || desc->powered_on)
		return 0;

	pwrseq = desc->pwrseq;
	target = desc->target;
	unit = target->unit;

	guard(rwsem_read)(&pwrseq->rw_lock);
	if (!device_is_registered(&pwrseq->dev))
		return -ENODEV;

	scoped_guard(mutex, &pwrseq->state_lock) {
		ret = pwrseq_unit_enable(pwrseq, unit);
		if (!ret)
			desc->powered_on = true;
	}

	if (ret)
		return ret;

	if (target->post_enable) {
		ret = target->post_enable(pwrseq);
		if (ret) {
			scoped_guard(mutex, &pwrseq->state_lock) {
				pwrseq_unit_disable(pwrseq, unit);
				desc->powered_on = false;
			}
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(pwrseq_power_on);
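
/*
 * Continuing the assumed consumer example: the descriptor tracks its own
 * power state, so paired calls from suspend/resume (or remove) paths are
 * safe and repeated calls in the same state are no-ops; struct foo_radio is
 * hypothetical:
 *
 *	static int foo_radio_suspend(struct device *dev)
 *	{
 *		struct foo_radio *radio = dev_get_drvdata(dev);
 *
 *		return pwrseq_power_off(radio->pwrseq);
 *	}
 *
 *	static int foo_radio_resume(struct device *dev)
 *	{
 *		struct foo_radio *radio = dev_get_drvdata(dev);
 *
 *		return pwrseq_power_on(radio->pwrseq);
 *	}
 */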

/**
 * pwrseq_power_off() - Issue a power-off request on behalf of the consumer
 *                      device.
 * @desc: Descriptor referencing the power sequencer.
 *
 * This undoes the effects of pwrseq_power_on(). It issues a power-off request
 * on behalf of the consumer and when the last remaining user does so, the
 * power-down sequence will be started. If one is in progress, the function
 * will block until it's complete and then return.
 *
 * Returns:
 * 0 on success, negative error number on failure.
 */
int pwrseq_power_off(struct pwrseq_desc *desc)
{
	struct pwrseq_device *pwrseq;
	struct pwrseq_unit *unit;
	int ret;

	might_sleep();

	if (!desc || !desc->powered_on)
		return 0;

	pwrseq = desc->pwrseq;
	unit = desc->target->unit;

	guard(rwsem_read)(&pwrseq->rw_lock);
	if (!device_is_registered(&pwrseq->dev))
		return -ENODEV;

	guard(mutex)(&pwrseq->state_lock);

	ret = pwrseq_unit_disable(pwrseq, unit);
	if (!ret)
		desc->powered_on = false;

	return ret;
}
EXPORT_SYMBOL_GPL(pwrseq_power_off);

#if IS_ENABLED(CONFIG_DEBUG_FS)

struct pwrseq_debugfs_count_ctx {
	struct device *dev;
	loff_t index;
};

static int pwrseq_debugfs_seq_count(struct device *dev, void *data)
{
	struct pwrseq_debugfs_count_ctx *ctx = data;

	ctx->dev = dev;

	return ctx->index-- ? 0 : 1;
}

static void *pwrseq_debugfs_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct pwrseq_debugfs_count_ctx ctx;

	ctx.dev = NULL;
	ctx.index = *pos;

	/*
	 * We're holding the lock for the entire printout so no need to fiddle
	 * with device reference count.
	 */
	down_read(&pwrseq_sem);

	bus_for_each_dev(&pwrseq_bus, NULL, &ctx, pwrseq_debugfs_seq_count);
	if (!ctx.index)
		return NULL;

	return ctx.dev;
}

static void *pwrseq_debugfs_seq_next(struct seq_file *seq, void *data,
				     loff_t *pos)
{
	struct device *curr = data;

	++*pos;

	struct device *next __free(put_device) =
			bus_find_next_device(&pwrseq_bus, curr);
	return next;
}

static void pwrseq_debugfs_seq_show_target(struct seq_file *seq,
					   struct pwrseq_target *target)
{
	seq_printf(seq, " target: [%s] (target unit: [%s])\n",
		   target->name, target->unit->name);
}

static void pwrseq_debugfs_seq_show_unit(struct seq_file *seq,
					 struct pwrseq_unit *unit)
{
	struct pwrseq_unit_dep *ref;

	seq_printf(seq, " unit: [%s] - enable count: %u\n",
		   unit->name, unit->enable_count);

	if (list_empty(&unit->deps))
		return;

	seq_puts(seq, " dependencies:\n");
	list_for_each_entry(ref, &unit->deps, list)
		seq_printf(seq, " [%s]\n", ref->unit->name);
}

static int pwrseq_debugfs_seq_show(struct seq_file *seq, void *data)
{
	struct device *dev = data;
	struct pwrseq_device *pwrseq = to_pwrseq_device(dev);
	struct pwrseq_target *target;
	struct pwrseq_unit *unit;

	seq_printf(seq, "%s:\n", dev_name(dev));

	seq_puts(seq, " targets:\n");
	list_for_each_entry(target, &pwrseq->targets, list)
		pwrseq_debugfs_seq_show_target(seq, target);

	seq_puts(seq, " units:\n");
	list_for_each_entry(unit, &pwrseq->units, list)
		pwrseq_debugfs_seq_show_unit(seq, unit);

	return 0;
}

static void pwrseq_debugfs_seq_stop(struct seq_file *seq, void *data)
{
	up_read(&pwrseq_sem);
}

static const struct seq_operations pwrseq_debugfs_sops = {
	.start = pwrseq_debugfs_seq_start,
	.next = pwrseq_debugfs_seq_next,
	.show = pwrseq_debugfs_seq_show,
	.stop = pwrseq_debugfs_seq_stop,
};
DEFINE_SEQ_ATTRIBUTE(pwrseq_debugfs);

static struct dentry *pwrseq_debugfs_dentry;

#endif /* CONFIG_DEBUG_FS */

static int __init pwrseq_init(void)
{
	int ret;

	ret = bus_register(&pwrseq_bus);
	if (ret) {
		pr_err("Failed to register the power sequencer bus\n");
		return ret;
	}

#if IS_ENABLED(CONFIG_DEBUG_FS)
	pwrseq_debugfs_dentry = debugfs_create_file("pwrseq", 0444, NULL, NULL,
						    &pwrseq_debugfs_fops);
#endif /* CONFIG_DEBUG_FS */

	return 0;
}
subsys_initcall(pwrseq_init);

static void __exit pwrseq_exit(void)
{
#if IS_ENABLED(CONFIG_DEBUG_FS)
	debugfs_remove_recursive(pwrseq_debugfs_dentry);
#endif /* CONFIG_DEBUG_FS */

	bus_unregister(&pwrseq_bus);
}
module_exit(pwrseq_exit);

MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>");
MODULE_DESCRIPTION("Power Sequencing subsystem core");
MODULE_LICENSE("GPL");
1107