xref: /linux/drivers/power/sequencing/core.c (revision 69050f8d6d075dc01af7a5f2f550a8067510366f)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2024 Linaro Ltd.
4  */
5 
6 #include <linux/bug.h>
7 #include <linux/cleanup.h>
8 #include <linux/debugfs.h>
9 #include <linux/device.h>
10 #include <linux/err.h>
11 #include <linux/export.h>
12 #include <linux/idr.h>
13 #include <linux/kernel.h>
14 #include <linux/kref.h>
15 #include <linux/list.h>
16 #include <linux/lockdep.h>
17 #include <linux/module.h>
18 #include <linux/mutex.h>
19 #include <linux/property.h>
20 #include <linux/pwrseq/consumer.h>
21 #include <linux/pwrseq/provider.h>
22 #include <linux/radix-tree.h>
23 #include <linux/rwsem.h>
24 #include <linux/slab.h>
25 
26 /*
27  * Power-sequencing framework for linux.
28  *
29  * This subsystem allows power sequence providers to register a set of targets
30  * that consumers may request and power-up/down.
31  *
32  * Glossary:
33  *
 * Unit - a unit is a discrete chunk of a power sequence. For instance one unit
35  * may enable a set of regulators, another may enable a specific GPIO. Units
36  * can define dependencies in the form of other units that must be enabled
37  * before it itself can be.
38  *
39  * Target - a target is a set of units (composed of the "final" unit and its
40  * dependencies) that a consumer selects by its name when requesting a handle
41  * to the power sequencer. Via the dependency system, multiple targets may
42  * share the same parts of a power sequence but ignore parts that are
43  * irrelevant.
44  *
45  * Descriptor - a handle passed by the pwrseq core to every consumer that
46  * serves as the entry point to the provider layer. It ensures coherence
47  * between different users and keeps reference counting consistent.
48  *
49  * Each provider must define a .match() callback whose role is to determine
50  * whether a potential consumer is in fact associated with this sequencer.
51  * This allows creating abstraction layers on top of regular device-tree
52  * resources like regulators, clocks and other nodes connected to the consumer
53  * via phandle.
54  */
55 
56 static DEFINE_IDA(pwrseq_ida);
57 
58 /*
59  * Protects the device list on the pwrseq bus from concurrent modifications
60  * but allows simultaneous read-only access.
61  */
62 static DECLARE_RWSEM(pwrseq_sem);
63 
/**
 * struct pwrseq_unit - Private power-sequence unit data.
 * @ref: Reference count for this object. When it goes to 0, the object is
 *       destroyed.
 * @name: Name of this unit.
 * @list: Link to siblings on the list of all units of a single sequencer.
 * @deps: List of units on which this unit depends.
 * @enable: Callback running the part of the power-on sequence provided by
 *          this unit.
 * @disable: Callback running the part of the power-off sequence provided
 *           by this unit.
 * @enable_count: Current number of users that enabled this unit. May be the
 *                consumer of the power sequencer or other units that depend
 *                on this one.
 */
struct pwrseq_unit {
	struct kref ref;
	const char *name;
	struct list_head list;
	struct list_head deps;
	pwrseq_power_state_func enable;
	pwrseq_power_state_func disable;
	unsigned int enable_count;
};
88 
89 static struct pwrseq_unit *pwrseq_unit_new(const struct pwrseq_unit_data *data)
90 {
91 	struct pwrseq_unit *unit;
92 
93 	unit = kzalloc_obj(*unit, GFP_KERNEL);
94 	if (!unit)
95 		return NULL;
96 
97 	unit->name = kstrdup_const(data->name, GFP_KERNEL);
98 	if (!unit->name) {
99 		kfree(unit);
100 		return NULL;
101 	}
102 
103 	kref_init(&unit->ref);
104 	INIT_LIST_HEAD(&unit->deps);
105 	unit->enable = data->enable;
106 	unit->disable = data->disable;
107 
108 	return unit;
109 }
110 
111 static struct pwrseq_unit *pwrseq_unit_get(struct pwrseq_unit *unit)
112 {
113 	kref_get(&unit->ref);
114 
115 	return unit;
116 }
117 
static void pwrseq_unit_release(struct kref *ref);

/* Drop a reference to @unit; the last put frees it via pwrseq_unit_release(). */
static void pwrseq_unit_put(struct pwrseq_unit *unit)
{
	kref_put(&unit->ref, pwrseq_unit_release);
}
124 
/**
 * struct pwrseq_unit_dep - Wrapper around a reference to the unit structure
 *                          allowing to keep it on multiple dependency lists
 *                          in different units.
 * @list: Siblings on the list.
 * @unit: Address of the referenced unit. The wrapper owns a reference to it
 *        which is dropped in pwrseq_unit_dep_free().
 */
struct pwrseq_unit_dep {
	struct list_head list;
	struct pwrseq_unit *unit;
};
136 
137 static struct pwrseq_unit_dep *pwrseq_unit_dep_new(struct pwrseq_unit *unit)
138 {
139 	struct pwrseq_unit_dep *dep;
140 
141 	dep = kzalloc_obj(*dep, GFP_KERNEL);
142 	if (!dep)
143 		return NULL;
144 
145 	dep->unit = unit;
146 
147 	return dep;
148 }
149 
150 static void pwrseq_unit_dep_free(struct pwrseq_unit_dep *ref)
151 {
152 	pwrseq_unit_put(ref->unit);
153 	kfree(ref);
154 }
155 
156 static void pwrseq_unit_free_deps(struct list_head *list)
157 {
158 	struct pwrseq_unit_dep *dep, *next;
159 
160 	list_for_each_entry_safe(dep, next, list, list) {
161 		list_del(&dep->list);
162 		pwrseq_unit_dep_free(dep);
163 	}
164 }
165 
/* Kref release callback: tear down the unit once the last reference is gone. */
static void pwrseq_unit_release(struct kref *ref)
{
	struct pwrseq_unit *unit = container_of(ref, struct pwrseq_unit, ref);

	pwrseq_unit_free_deps(&unit->deps);
	list_del(&unit->list);
	kfree_const(unit->name);
	kfree(unit);
}
175 
/**
 * struct pwrseq_target - Private power-sequence target data.
 * @list: Siblings on the list of all targets exposed by a power sequencer.
 * @name: Name of the target.
 * @unit: Final unit for this target. The target holds a reference to it
 *        which is dropped in pwrseq_target_free().
 * @post_enable: Callback run after the target unit has been enabled, *after*
 *               the state lock has been released. It's useful for implementing
 *               boot-up delays without blocking other users from powering up
 *               using the same power sequencer.
 */
struct pwrseq_target {
	struct list_head list;
	const char *name;
	struct pwrseq_unit *unit;
	pwrseq_power_state_func post_enable;
};
192 
193 static struct pwrseq_target *
194 pwrseq_target_new(const struct pwrseq_target_data *data)
195 {
196 	struct pwrseq_target *target;
197 
198 	target = kzalloc_obj(*target, GFP_KERNEL);
199 	if (!target)
200 		return NULL;
201 
202 	target->name = kstrdup_const(data->name, GFP_KERNEL);
203 	if (!target->name) {
204 		kfree(target);
205 		return NULL;
206 	}
207 
208 	target->post_enable = data->post_enable;
209 
210 	return target;
211 }
212 
/*
 * Free a target. target->unit may still be NULL (not yet assigned) or an
 * ERR_PTR left behind by a failed pwrseq_unit_setup() call, hence the check.
 */
static void pwrseq_target_free(struct pwrseq_target *target)
{
	if (!IS_ERR_OR_NULL(target->unit))
		pwrseq_unit_put(target->unit);
	kfree_const(target->name);
	kfree(target);
}
220 
/**
 * struct pwrseq_device - Private power sequencing data.
 * @dev: Device struct associated with this sequencer.
 * @id: Device ID, used to build the "pwrseq.%d" device name.
 * @owner: Prevents removal of active power sequencing providers.
 * @rw_lock: Protects the device from being unregistered while in use.
 * @state_lock: Prevents multiple users running the power sequence at the same
 *              time.
 * @match: Power sequencer matching callback.
 * @targets: List of targets exposed by this sequencer.
 * @units: List of all units supported by this sequencer.
 */
struct pwrseq_device {
	struct device dev;
	int id;
	struct module *owner;
	struct rw_semaphore rw_lock;
	struct mutex state_lock;
	pwrseq_match_func match;
	struct list_head targets;
	struct list_head units;
};
243 
/* Convert a generic device pointer to its pwrseq_device container. */
static struct pwrseq_device *to_pwrseq_device(struct device *dev)
{
	return container_of(dev, struct pwrseq_device, dev);
}
248 
/* Take a reference to the underlying struct device and return @pwrseq. */
static struct pwrseq_device *pwrseq_device_get(struct pwrseq_device *pwrseq)
{
	get_device(&pwrseq->dev);

	return pwrseq;
}
255 
/* Drop a reference to the underlying struct device. */
static void pwrseq_device_put(struct pwrseq_device *pwrseq)
{
	put_device(&pwrseq->dev);
}
260 
/**
 * struct pwrseq_desc - Wraps access to the pwrseq_device and ensures that one
 *                      user cannot break the reference counting for others.
 * @pwrseq: Reference to the power sequencing device.
 * @target: Reference to the target this descriptor allows to control.
 * @powered_on: Power state set by the holder of the descriptor (not necessarily
 *              corresponding to the actual power state of the device). Updated
 *              with the sequencer's state_lock held.
 */
struct pwrseq_desc {
	struct pwrseq_device *pwrseq;
	struct pwrseq_target *target;
	bool powered_on;
};
274 
/* All power sequencer devices live on this bus. */
static const struct bus_type pwrseq_bus = {
	.name = "pwrseq",
};
278 
/*
 * Device release callback: frees all remaining targets (which in turn drop
 * their unit references), destroys the state lock, returns the ID to the
 * IDA and frees the sequencer object itself.
 */
static void pwrseq_release(struct device *dev)
{
	struct pwrseq_device *pwrseq = to_pwrseq_device(dev);
	struct pwrseq_target *target, *pos;

	list_for_each_entry_safe(target, pos, &pwrseq->targets, list) {
		list_del(&target->list);
		pwrseq_target_free(target);
	}

	mutex_destroy(&pwrseq->state_lock);
	ida_free(&pwrseq_ida, pwrseq->id);
	kfree(pwrseq);
}
293 
/* Device type of power sequencers; ties in the release callback. */
static const struct device_type pwrseq_device_type = {
	.name = "power_sequencer",
	.release = pwrseq_release,
};
298 
/*
 * Recursively walk the dependency graph of @data, inserting every visited
 * unit into @visited_units keyed by its address. A unit encountered twice
 * on the way down is treated as a circular dependency.
 *
 * Returns 0 on success, -EINVAL on a detected cycle or a negative error
 * number on radix-tree insertion failure.
 */
static int pwrseq_check_unit_deps(const struct pwrseq_unit_data *data,
				  struct radix_tree_root *visited_units)
{
	const struct pwrseq_unit_data *tmp, **cur;
	int ret;

	ret = radix_tree_insert(visited_units, (unsigned long)data,
				(void *)data);
	if (ret)
		return ret;

	for (cur = data->deps; cur && *cur; cur++) {
		tmp = radix_tree_lookup(visited_units, (unsigned long)*cur);
		if (tmp) {
			WARN(1, "Circular dependency in power sequencing flow detected!\n");
			return -EINVAL;
		}

		ret = pwrseq_check_unit_deps(*cur, visited_units);
		if (ret)
			return ret;
	}

	return 0;
}
324 
/*
 * Validate that the dependency graph of @data's final unit contains no
 * cycles. The visited-units tree is only needed for the duration of the
 * check and is emptied before returning.
 */
static int pwrseq_check_target_deps(const struct pwrseq_target_data *data)
{
	struct radix_tree_root visited_units;
	struct radix_tree_iter iter;
	void __rcu **slot;
	int ret;

	if (!data->unit)
		return -EINVAL;

	INIT_RADIX_TREE(&visited_units, GFP_KERNEL);
	ret = pwrseq_check_unit_deps(data->unit, &visited_units);
	radix_tree_for_each_slot(slot, &visited_units, &iter, 0)
		radix_tree_delete(&visited_units, iter.index);

	return ret;
}
342 
343 static int pwrseq_unit_setup_deps(const struct pwrseq_unit_data **data,
344 				  struct list_head *dep_list,
345 				  struct list_head *unit_list,
346 				  struct radix_tree_root *processed_units);
347 
/*
 * Create (or reuse) the pwrseq_unit corresponding to @data, recursively
 * setting up its dependencies first. @processed_units maps unit-data
 * pointers to units created earlier so that units shared between targets
 * are instantiated only once - subsequent lookups just take another
 * reference.
 *
 * On success the new unit is appended to @unit_list and returned with a
 * reference held; an ERR_PTR() is returned on failure.
 */
static struct pwrseq_unit *
pwrseq_unit_setup(const struct pwrseq_unit_data *data,
		  struct list_head *unit_list,
		  struct radix_tree_root *processed_units)
{
	struct pwrseq_unit *unit;
	int ret;

	unit = radix_tree_lookup(processed_units, (unsigned long)data);
	if (unit)
		return pwrseq_unit_get(unit);

	unit = pwrseq_unit_new(data);
	if (!unit)
		return ERR_PTR(-ENOMEM);

	if (data->deps) {
		ret = pwrseq_unit_setup_deps(data->deps, &unit->deps,
					     unit_list, processed_units);
		if (ret) {
			pwrseq_unit_put(unit);
			return ERR_PTR(ret);
		}
	}

	ret = radix_tree_insert(processed_units, (unsigned long)data, unit);
	if (ret) {
		pwrseq_unit_put(unit);
		return ERR_PTR(ret);
	}

	list_add_tail(&unit->list, unit_list);

	return unit;
}
383 
/*
 * Instantiate every unit in the NULL-terminated @data array and link it
 * into @dep_list through a pwrseq_unit_dep wrapper. Each wrapper takes over
 * the unit reference returned by pwrseq_unit_setup().
 */
static int pwrseq_unit_setup_deps(const struct pwrseq_unit_data **data,
				  struct list_head *dep_list,
				  struct list_head *unit_list,
				  struct radix_tree_root *processed_units)
{
	const struct pwrseq_unit_data *pos;
	struct pwrseq_unit_dep *dep;
	struct pwrseq_unit *unit;
	int i;

	for (i = 0; data[i]; i++) {
		pos = data[i];

		unit = pwrseq_unit_setup(pos, unit_list, processed_units);
		if (IS_ERR(unit))
			return PTR_ERR(unit);

		dep = pwrseq_unit_dep_new(unit);
		if (!dep) {
			pwrseq_unit_put(unit);
			return -ENOMEM;
		}

		list_add_tail(&dep->list, dep_list);
	}

	return 0;
}
412 
/*
 * Create a pwrseq_target (with its full unit graph) for every entry of the
 * NULL-terminated @data array and add it to the sequencer's target list.
 * Each target's dependency graph is first validated for cycles.
 */
static int pwrseq_do_setup_targets(const struct pwrseq_target_data **data,
				   struct pwrseq_device *pwrseq,
				   struct radix_tree_root *processed_units)
{
	const struct pwrseq_target_data *pos;
	struct pwrseq_target *target;
	int ret, i;

	for (i = 0; data[i]; i++) {
		pos = data[i];

		ret = pwrseq_check_target_deps(pos);
		if (ret)
			return ret;

		target = pwrseq_target_new(pos);
		if (!target)
			return -ENOMEM;

		target->unit = pwrseq_unit_setup(pos->unit, &pwrseq->units,
						 processed_units);
		if (IS_ERR(target->unit)) {
			ret = PTR_ERR(target->unit);
			pwrseq_target_free(target);
			return ret;
		}

		list_add_tail(&target->list, &pwrseq->targets);
	}

	return 0;
}
445 
/*
 * Wrapper around pwrseq_do_setup_targets() that manages the lifetime of the
 * radix tree used to deduplicate units shared between targets. The tree
 * holds borrowed pointers only and is emptied before returning.
 */
static int pwrseq_setup_targets(const struct pwrseq_target_data **targets,
				struct pwrseq_device *pwrseq)
{
	struct radix_tree_root processed_units;
	struct radix_tree_iter iter;
	void __rcu **slot;
	int ret;

	INIT_RADIX_TREE(&processed_units, GFP_KERNEL);
	ret = pwrseq_do_setup_targets(targets, pwrseq, &processed_units);
	radix_tree_for_each_slot(slot, &processed_units, &iter, 0)
		radix_tree_delete(&processed_units, iter.index);

	return ret;
}
461 
462 /**
463  * pwrseq_device_register() - Register a new power sequencer.
464  * @config: Configuration of the new power sequencing device.
465  *
466  * The config structure is only used during the call and can be freed after
467  * the function returns. The config structure *must* have the parent device
468  * as well as the match() callback and at least one target set.
469  *
470  * Returns:
471  * Returns the address of the new pwrseq device or ERR_PTR() on failure.
472  */
struct pwrseq_device *
pwrseq_device_register(const struct pwrseq_config *config)
{
	struct pwrseq_device *pwrseq;
	int ret, id;

	/* The parent, the match() callback and one target are mandatory. */
	if (!config->parent || !config->match || !config->targets ||
	    !config->targets[0])
		return ERR_PTR(-EINVAL);

	pwrseq = kzalloc(sizeof(*pwrseq), GFP_KERNEL);
	if (!pwrseq)
		return ERR_PTR(-ENOMEM);

	pwrseq->dev.type = &pwrseq_device_type;
	pwrseq->dev.bus = &pwrseq_bus;
	pwrseq->dev.parent = config->parent;
	device_set_node(&pwrseq->dev, dev_fwnode(config->parent));
	dev_set_drvdata(&pwrseq->dev, config->drvdata);

	id = ida_alloc(&pwrseq_ida, GFP_KERNEL);
	if (id < 0) {
		kfree(pwrseq);
		return ERR_PTR(id);
	}

	pwrseq->id = id;

	/*
	 * From this point onwards the device's release() callback is
	 * responsible for freeing resources.
	 */
	device_initialize(&pwrseq->dev);

	ret = dev_set_name(&pwrseq->dev, "pwrseq.%d", pwrseq->id);
	if (ret)
		goto err_put_pwrseq;

	pwrseq->owner = config->owner ?: THIS_MODULE;
	pwrseq->match = config->match;

	init_rwsem(&pwrseq->rw_lock);
	mutex_init(&pwrseq->state_lock);
	INIT_LIST_HEAD(&pwrseq->targets);
	INIT_LIST_HEAD(&pwrseq->units);

	ret = pwrseq_setup_targets(config->targets, pwrseq);
	if (ret)
		goto err_put_pwrseq;

	/* Add the device under the bus writer lock (see pwrseq_sem). */
	scoped_guard(rwsem_write, &pwrseq_sem) {
		ret = device_add(&pwrseq->dev);
		if (ret)
			goto err_put_pwrseq;
	}

	return pwrseq;

err_put_pwrseq:
	pwrseq_device_put(pwrseq);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(pwrseq_device_register);
536 
537 /**
538  * pwrseq_device_unregister() - Unregister the power sequencer.
539  * @pwrseq: Power sequencer to unregister.
540  */
void pwrseq_device_unregister(struct pwrseq_device *pwrseq)
{
	struct device *dev = &pwrseq->dev;
	struct pwrseq_target *target;

	scoped_guard(mutex, &pwrseq->state_lock) {
		/* Lock out consumers while the device is going away. */
		guard(rwsem_write)(&pwrseq->rw_lock);

		list_for_each_entry(target, &pwrseq->targets, list)
			WARN(target->unit->enable_count,
			     "REMOVING POWER SEQUENCER WITH ACTIVE USERS\n");

		guard(rwsem_write)(&pwrseq_sem);

		device_del(dev);
	}

	/* Drop the initial reference; release() runs when the last one is gone. */
	pwrseq_device_put(pwrseq);
}
EXPORT_SYMBOL_GPL(pwrseq_device_unregister);
561 
/* devm action wrapper around pwrseq_device_unregister(). */
static void devm_pwrseq_device_unregister(void *data)
{
	struct pwrseq_device *pwrseq = data;

	pwrseq_device_unregister(pwrseq);
}
568 
569 /**
570  * devm_pwrseq_device_register() - Managed variant of pwrseq_device_register().
571  * @dev: Managing device.
572  * @config: Configuration of the new power sequencing device.
573  *
574  * Returns:
575  * Returns the address of the new pwrseq device or ERR_PTR() on failure.
576  */
struct pwrseq_device *
devm_pwrseq_device_register(struct device *dev,
			    const struct pwrseq_config *config)
{
	struct pwrseq_device *pwrseq;
	int ret;

	pwrseq = pwrseq_device_register(config);
	if (IS_ERR(pwrseq))
		return pwrseq;

	/* Unregister automatically when @dev is detached. */
	ret = devm_add_action_or_reset(dev, devm_pwrseq_device_unregister,
				       pwrseq);
	if (ret)
		return ERR_PTR(ret);

	return pwrseq;
}
EXPORT_SYMBOL_GPL(devm_pwrseq_device_register);
596 
/**
 * pwrseq_device_get_drvdata() - Get the driver private data associated with
 *                               this sequencer.
 * @pwrseq: Power sequencer object.
 *
 * Returns:
 * Address of the private driver data (the @drvdata pointer supplied in the
 * registration config).
 */
void *pwrseq_device_get_drvdata(struct pwrseq_device *pwrseq)
{
	return dev_get_drvdata(&pwrseq->dev);
}
EXPORT_SYMBOL_GPL(pwrseq_device_get_drvdata);
610 
/*
 * Context passed to pwrseq_match_device() while iterating over the bus:
 * @desc is the descriptor to fill in on a successful match, @dev the
 * consumer looking for its sequencer and @target the name of the target
 * the consumer wants to reach.
 */
struct pwrseq_match_data {
	struct pwrseq_desc *desc;
	struct device *dev;
	const char *target;
};
616 
/*
 * bus_for_each_dev() callback: check whether @pwrseq_dev is the sequencer
 * associated with the consumer described in @data and - if so - look up the
 * requested target and fill in the descriptor.
 *
 * Returns PWRSEQ_MATCH_OK when matched, PWRSEQ_NO_MATCH to keep iterating
 * or a negative error number (which also aborts the bus iteration).
 */
static int pwrseq_match_device(struct device *pwrseq_dev, void *data)
{
	struct pwrseq_device *pwrseq = to_pwrseq_device(pwrseq_dev);
	struct pwrseq_match_data *match_data = data;
	struct pwrseq_target *target;
	int ret;

	lockdep_assert_held_read(&pwrseq_sem);

	guard(rwsem_read)(&pwrseq->rw_lock);
	/* Skip devices that are concurrently being unregistered. */
	if (!device_is_registered(&pwrseq->dev))
		return 0;

	ret = pwrseq->match(pwrseq, match_data->dev);
	if (ret == PWRSEQ_NO_MATCH || ret < 0)
		return ret;

	/* We got the matching device, let's find the right target. */
	list_for_each_entry(target, &pwrseq->targets, list) {
		if (strcmp(target->name, match_data->target))
			continue;

		match_data->desc->target = target;
	}

	/*
	 * This device does not have this target. No point in deferring as it
	 * will not get a new target dynamically later.
	 */
	if (!match_data->desc->target)
		return -ENOENT;

	/* Pin the provider module for as long as the descriptor is held. */
	if (!try_module_get(pwrseq->owner))
		return -EPROBE_DEFER;

	match_data->desc->pwrseq = pwrseq_device_get(pwrseq);

	return PWRSEQ_MATCH_OK;
}
656 
657 /**
658  * pwrseq_get() - Get the power sequencer associated with this device.
659  * @dev: Device for which to get the sequencer.
660  * @target: Name of the target exposed by the sequencer this device wants to
661  *          reach.
662  *
663  * Returns:
664  * New power sequencer descriptor for use by the consumer driver or ERR_PTR()
665  * on failure.
666  */
struct pwrseq_desc *pwrseq_get(struct device *dev, const char *target)
{
	struct pwrseq_match_data match_data;
	int ret;

	/* Freed automatically on error paths; kept alive via return_ptr(). */
	struct pwrseq_desc *desc __free(kfree) = kzalloc_obj(*desc, GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	match_data.desc = desc;
	match_data.dev = dev;
	match_data.target = target;

	guard(rwsem_read)(&pwrseq_sem);

	ret = bus_for_each_dev(&pwrseq_bus, NULL, &match_data,
			       pwrseq_match_device);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret == PWRSEQ_NO_MATCH)
		/* No device matched; the provider may still show up later. */
		return ERR_PTR(-EPROBE_DEFER);

	return_ptr(desc);
}
EXPORT_SYMBOL_GPL(pwrseq_get);
693 
694 /**
695  * pwrseq_put() - Release the power sequencer descriptor.
696  * @desc: Descriptor to release.
697  */
void pwrseq_put(struct pwrseq_desc *desc)
{
	struct pwrseq_device *pwrseq;

	if (!desc)
		return;

	/* Save the device pointer - desc is freed before the final puts. */
	pwrseq = desc->pwrseq;

	if (desc->powered_on)
		pwrseq_power_off(desc);

	kfree(desc);
	module_put(pwrseq->owner);
	pwrseq_device_put(pwrseq);
}
EXPORT_SYMBOL_GPL(pwrseq_put);
715 
/* devm action wrapper around pwrseq_put(). */
static void devm_pwrseq_put(void *data)
{
	struct pwrseq_desc *desc = data;

	pwrseq_put(desc);
}
722 
723 /**
724  * devm_pwrseq_get() - Managed variant of pwrseq_get().
725  * @dev: Device for which to get the sequencer and which also manages its
726  *       lifetime.
727  * @target: Name of the target exposed by the sequencer this device wants to
728  *          reach.
729  *
730  * Returns:
731  * New power sequencer descriptor for use by the consumer driver or ERR_PTR()
732  * on failure.
733  */
struct pwrseq_desc *devm_pwrseq_get(struct device *dev, const char *target)
{
	struct pwrseq_desc *desc;
	int ret;

	desc = pwrseq_get(dev, target);
	if (IS_ERR(desc))
		return desc;

	/* Release the descriptor automatically when @dev is detached. */
	ret = devm_add_action_or_reset(dev, devm_pwrseq_put, desc);
	if (ret)
		return ERR_PTR(ret);

	return desc;
}
EXPORT_SYMBOL_GPL(devm_pwrseq_get);
750 
751 static int pwrseq_unit_enable(struct pwrseq_device *pwrseq,
752 			      struct pwrseq_unit *target);
753 static int pwrseq_unit_disable(struct pwrseq_device *pwrseq,
754 			       struct pwrseq_unit *target);
755 
/*
 * Enable all dependencies on @list in order. If enabling one of them
 * fails, those already enabled are disabled again in reverse order before
 * the error is propagated.
 */
static int pwrseq_unit_enable_deps(struct pwrseq_device *pwrseq,
				   struct list_head *list)
{
	struct pwrseq_unit_dep *pos;
	int ret = 0;

	list_for_each_entry(pos, list, list) {
		ret = pwrseq_unit_enable(pwrseq, pos->unit);
		if (ret) {
			/* Roll back the units enabled so far. */
			list_for_each_entry_continue_reverse(pos, list, list)
				pwrseq_unit_disable(pwrseq, pos->unit);
			break;
		}
	}

	return ret;
}
773 
/*
 * Disable all dependencies on @list in reverse order. If disabling one of
 * them fails, those already disabled are re-enabled again (best effort -
 * the re-enable return values are ignored) before the error is propagated.
 */
static int pwrseq_unit_disable_deps(struct pwrseq_device *pwrseq,
				    struct list_head *list)
{
	struct pwrseq_unit_dep *pos;
	int ret = 0;

	list_for_each_entry_reverse(pos, list, list) {
		ret = pwrseq_unit_disable(pwrseq, pos->unit);
		if (ret) {
			list_for_each_entry_continue(pos, list, list)
				pwrseq_unit_enable(pwrseq, pos->unit);
			break;
		}
	}

	return ret;
}
791 
/*
 * Enable @unit: its dependencies first, then its own enable() callback.
 * Units are use-counted, so only the first user actually runs the sequence;
 * later calls merely bump @enable_count.
 *
 * Context: pwrseq->rw_lock must be held for reading and pwrseq->state_lock
 * must be held.
 */
static int pwrseq_unit_enable(struct pwrseq_device *pwrseq,
			      struct pwrseq_unit *unit)
{
	int ret;

	lockdep_assert_held_read(&pwrseq->rw_lock);
	lockdep_assert_held(&pwrseq->state_lock);

	/* Already enabled by another user - just take another reference. */
	if (unit->enable_count != 0) {
		unit->enable_count++;
		return 0;
	}

	ret = pwrseq_unit_enable_deps(pwrseq, &unit->deps);
	if (ret) {
		dev_err(&pwrseq->dev,
			"Failed to enable dependencies before power-on for target '%s': %d\n",
			unit->name, ret);
		return ret;
	}

	if (unit->enable) {
		ret = unit->enable(pwrseq);
		if (ret) {
			dev_err(&pwrseq->dev,
				"Failed to enable target '%s': %d\n",
				unit->name, ret);
			/* Roll back the dependencies enabled above. */
			pwrseq_unit_disable_deps(pwrseq, &unit->deps);
			return ret;
		}
	}

	unit->enable_count++;

	return 0;
}
828 
/*
 * Disable @unit once its last user is gone: the unit's own disable()
 * callback runs first, followed by its dependencies in reverse order. If
 * disabling the dependencies fails, the unit itself is re-enabled as a
 * best-effort rollback.
 *
 * Context: pwrseq->rw_lock must be held for reading and pwrseq->state_lock
 * must be held.
 */
static int pwrseq_unit_disable(struct pwrseq_device *pwrseq,
			       struct pwrseq_unit *unit)
{
	int ret;

	lockdep_assert_held_read(&pwrseq->rw_lock);
	lockdep_assert_held(&pwrseq->state_lock);

	/* More power-offs than power-ons indicates a consumer bug. */
	if (unit->enable_count == 0) {
		WARN(1, "Unmatched power-off for target '%s'\n",
		     unit->name);
		return -EBUSY;
	}

	/* Other users still need this unit - just drop one reference. */
	if (unit->enable_count != 1) {
		unit->enable_count--;
		return 0;
	}

	if (unit->disable) {
		ret = unit->disable(pwrseq);
		if (ret) {
			dev_err(&pwrseq->dev,
				"Failed to disable target '%s': %d\n",
				unit->name, ret);
			return ret;
		}
	}

	ret = pwrseq_unit_disable_deps(pwrseq, &unit->deps);
	if (ret) {
		dev_err(&pwrseq->dev,
			"Failed to disable dependencies after power-off for target '%s': %d\n",
			unit->name, ret);
		/* Best effort: bring the unit itself back up. */
		if (unit->enable)
			unit->enable(pwrseq);
		return ret;
	}

	unit->enable_count--;

	return 0;
}
872 
873 /**
874  * pwrseq_power_on() - Issue a power-on request on behalf of the consumer
875  *                     device.
876  * @desc: Descriptor referencing the power sequencer.
877  *
878  * This function tells the power sequencer that the consumer wants to be
879  * powered-up. The sequencer may already have powered-up the device in which
880  * case the function returns 0. If the power-up sequence is already in
881  * progress, the function will block until it's done and return 0. If this is
882  * the first request, the device will be powered up.
883  *
884  * Returns:
885  * 0 on success, negative error number on failure.
886  */
887 int pwrseq_power_on(struct pwrseq_desc *desc)
888 {
889 	struct pwrseq_device *pwrseq;
890 	struct pwrseq_target *target;
891 	struct pwrseq_unit *unit;
892 	int ret;
893 
894 	might_sleep();
895 
896 	if (!desc || desc->powered_on)
897 		return 0;
898 
899 	pwrseq = desc->pwrseq;
900 	target = desc->target;
901 	unit = target->unit;
902 
903 	guard(rwsem_read)(&pwrseq->rw_lock);
904 	if (!device_is_registered(&pwrseq->dev))
905 		return -ENODEV;
906 
907 	scoped_guard(mutex, &pwrseq->state_lock) {
908 		ret = pwrseq_unit_enable(pwrseq, unit);
909 		if (!ret)
910 			desc->powered_on = true;
911 	}
912 
913 	if (target->post_enable) {
914 		ret = target->post_enable(pwrseq);
915 		if (ret) {
916 			scoped_guard(mutex, &pwrseq->state_lock) {
917 				pwrseq_unit_disable(pwrseq, unit);
918 				desc->powered_on = false;
919 			}
920 		}
921 	}
922 
923 	return ret;
924 }
925 EXPORT_SYMBOL_GPL(pwrseq_power_on);
926 
927 /**
928  * pwrseq_power_off() - Issue a power-off request on behalf of the consumer
929  *                      device.
930  * @desc: Descriptor referencing the power sequencer.
931  *
932  * This undoes the effects of pwrseq_power_on(). It issues a power-off request
933  * on behalf of the consumer and when the last remaining user does so, the
934  * power-down sequence will be started. If one is in progress, the function
935  * will block until it's complete and then return.
936  *
937  * Returns:
938  * 0 on success, negative error number on failure.
939  */
int pwrseq_power_off(struct pwrseq_desc *desc)
{
	struct pwrseq_device *pwrseq;
	struct pwrseq_unit *unit;
	int ret;

	might_sleep();

	/* A power-off without a prior power-on on this descriptor is a no-op. */
	if (!desc || !desc->powered_on)
		return 0;

	pwrseq = desc->pwrseq;
	unit = desc->target->unit;

	guard(rwsem_read)(&pwrseq->rw_lock);
	if (!device_is_registered(&pwrseq->dev))
		return -ENODEV;

	guard(mutex)(&pwrseq->state_lock);

	ret = pwrseq_unit_disable(pwrseq, unit);
	if (!ret)
		desc->powered_on = false;

	return ret;
}
EXPORT_SYMBOL_GPL(pwrseq_power_off);
967 
968 #if IS_ENABLED(CONFIG_DEBUG_FS)
969 
/*
 * Context for locating the device at a given seq_file position: @index is
 * the number of devices left to skip, @dev the last device visited.
 */
struct pwrseq_debugfs_count_ctx {
	struct device *dev;
	loff_t index;
};
974 
/*
 * bus_for_each_dev() callback: remember the last visited device and stop
 * the iteration (return 1) once ctx->index has counted down to zero.
 */
static int pwrseq_debugfs_seq_count(struct device *dev, void *data)
{
	struct pwrseq_debugfs_count_ctx *ctx = data;

	ctx->dev = dev;

	return ctx->index-- ? 0 : 1;
}
983 
/* seq_file start: take the bus lock and find the device at position *pos. */
static void *pwrseq_debugfs_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct pwrseq_debugfs_count_ctx ctx;

	ctx.dev = NULL;
	ctx.index = *pos;

	/*
	 * We're holding the lock for the entire printout so no need to fiddle
	 * with device reference count.
	 */
	down_read(&pwrseq_sem);

	bus_for_each_dev(&pwrseq_bus, NULL, &ctx, pwrseq_debugfs_seq_count);
	/* Index fully consumed without a hit: *pos is past the last device. */
	if (!ctx.index)
		return NULL;

	return ctx.dev;
}
1003 
/*
 * seq_file next: advance to the next device on the pwrseq bus. The
 * reference returned by bus_find_next_device() is dropped immediately
 * (via __free(put_device)) since pwrseq_sem - held for the whole printout
 * (see pwrseq_debugfs_seq_start()) - makes refcounting unnecessary here.
 */
static void *pwrseq_debugfs_seq_next(struct seq_file *seq, void *data,
				     loff_t *pos)
{
	struct device *curr = data;

	++*pos;

	struct device *next __free(put_device) =
			bus_find_next_device(&pwrseq_bus, curr);
	return next;
}
1015 
/* Print a one-line summary of a single target. */
static void pwrseq_debugfs_seq_show_target(struct seq_file *seq,
					   struct pwrseq_target *target)
{
	seq_printf(seq, "    target: [%s] (target unit: [%s])\n",
		   target->name, target->unit->name);
}
1022 
1023 static void pwrseq_debugfs_seq_show_unit(struct seq_file *seq,
1024 					 struct pwrseq_unit *unit)
1025 {
1026 	struct pwrseq_unit_dep *ref;
1027 
1028 	seq_printf(seq, "    unit: [%s] - enable count: %u\n",
1029 		   unit->name, unit->enable_count);
1030 
1031 	if (list_empty(&unit->deps))
1032 		return;
1033 
1034 	seq_puts(seq, "      dependencies:\n");
1035 	list_for_each_entry(ref, &unit->deps, list)
1036 		seq_printf(seq, "        [%s]\n", ref->unit->name);
1037 }
1038 
/* Print the name, all targets and all units of a single power sequencer. */
static int pwrseq_debugfs_seq_show(struct seq_file *seq, void *data)
{
	struct device *dev = data;
	struct pwrseq_device *pwrseq = to_pwrseq_device(dev);
	struct pwrseq_target *target;
	struct pwrseq_unit *unit;

	seq_printf(seq, "%s:\n", dev_name(dev));

	seq_puts(seq, "  targets:\n");
	list_for_each_entry(target, &pwrseq->targets, list)
		pwrseq_debugfs_seq_show_target(seq, target);

	seq_puts(seq, "  units:\n");
	list_for_each_entry(unit, &pwrseq->units, list)
		pwrseq_debugfs_seq_show_unit(seq, unit);

	return 0;
}
1058 
/* seq_file stop: drop the bus lock taken in pwrseq_debugfs_seq_start(). */
static void pwrseq_debugfs_seq_stop(struct seq_file *seq, void *data)
{
	up_read(&pwrseq_sem);
}
1063 
/* seq_file operations iterating over all devices on the pwrseq bus. */
static const struct seq_operations pwrseq_debugfs_sops = {
	.start = pwrseq_debugfs_seq_start,
	.next = pwrseq_debugfs_seq_next,
	.show = pwrseq_debugfs_seq_show,
	.stop = pwrseq_debugfs_seq_stop,
};
DEFINE_SEQ_ATTRIBUTE(pwrseq_debugfs);
1071 
1072 static struct dentry *pwrseq_debugfs_dentry;
1073 
1074 #endif /* CONFIG_DEBUG_FS */
1075 
/* Subsystem init: register the bus and (optionally) the debugfs file. */
static int __init pwrseq_init(void)
{
	int ret;

	ret = bus_register(&pwrseq_bus);
	if (ret) {
		pr_err("Failed to register the power sequencer bus\n");
		return ret;
	}

#if IS_ENABLED(CONFIG_DEBUG_FS)
	/* Purely diagnostic; a debugfs failure is deliberately not fatal. */
	pwrseq_debugfs_dentry = debugfs_create_file("pwrseq", 0444, NULL, NULL,
						    &pwrseq_debugfs_fops);
#endif  /* CONFIG_DEBUG_FS */

	return 0;
}
subsys_initcall(pwrseq_init);
1094 
/* Module teardown: remove the debugfs file, then unregister the bus. */
static void __exit pwrseq_exit(void)
{
#if IS_ENABLED(CONFIG_DEBUG_FS)
	debugfs_remove_recursive(pwrseq_debugfs_dentry);
#endif  /* CONFIG_DEBUG_FS */

	bus_unregister(&pwrseq_bus);
}
module_exit(pwrseq_exit);
1104 
1105 MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>");
1106 MODULE_DESCRIPTION("Power Sequencing subsystem core");
1107 MODULE_LICENSE("GPL");
1108