xref: /linux/drivers/pmdomain/core.c (revision ca677196a91f6869169ef31252c00ceec6ac0754)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/domain.c - Common code related to device power domains.
4  *
5  * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
6  */
7 #define pr_fmt(fmt) "PM: " fmt
8 
9 #include <linux/delay.h>
10 #include <linux/idr.h>
11 #include <linux/kernel.h>
12 #include <linux/io.h>
13 #include <linux/platform_device.h>
14 #include <linux/pm_opp.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/pm_domain.h>
17 #include <linux/pm_qos.h>
18 #include <linux/pm_clock.h>
19 #include <linux/slab.h>
20 #include <linux/err.h>
21 #include <linux/sched.h>
22 #include <linux/suspend.h>
23 #include <linux/export.h>
24 #include <linux/cpu.h>
25 #include <linux/debugfs.h>
26 
27 /* Provides a unique ID for each genpd device */
28 static DEFINE_IDA(genpd_ida);
29 
30 #define GENPD_RETRY_MAX_MS	250		/* Approximate */
31 
32 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
33 ({								\
34 	type (*__routine)(struct device *__d); 			\
35 	type __ret = (type)0;					\
36 								\
37 	__routine = genpd->dev_ops.callback; 			\
38 	if (__routine) {					\
39 		__ret = __routine(dev); 			\
40 	}							\
41 	__ret;							\
42 })
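/*
 * Illustrative note (not from the original source): GENPD_DEV_CALLBACK()
 * dispatches to an optional per-domain device callback and evaluates to the
 * callback's return value, or to (type)0 when the callback is unset. For
 * example, genpd_stop_dev() below expands roughly to:
 *
 *	int (*__routine)(struct device *__d) = genpd->dev_ops.stop;
 *	int __ret = 0;
 *
 *	if (__routine)
 *		__ret = __routine(dev);
 *	__ret;	<- value of the ({ ... }) statement expression
 */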
43 
44 static LIST_HEAD(gpd_list);
45 static DEFINE_MUTEX(gpd_list_lock);
46 
47 struct genpd_lock_ops {
48 	void (*lock)(struct generic_pm_domain *genpd);
49 	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
50 	int (*lock_interruptible)(struct generic_pm_domain *genpd);
51 	void (*unlock)(struct generic_pm_domain *genpd);
52 };
53 
54 static void genpd_lock_mtx(struct generic_pm_domain *genpd)
55 {
56 	mutex_lock(&genpd->mlock);
57 }
58 
59 static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
60 					int depth)
61 {
62 	mutex_lock_nested(&genpd->mlock, depth);
63 }
64 
65 static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
66 {
67 	return mutex_lock_interruptible(&genpd->mlock);
68 }
69 
70 static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
71 {
72 	return mutex_unlock(&genpd->mlock);
73 }
74 
75 static const struct genpd_lock_ops genpd_mtx_ops = {
76 	.lock = genpd_lock_mtx,
77 	.lock_nested = genpd_lock_nested_mtx,
78 	.lock_interruptible = genpd_lock_interruptible_mtx,
79 	.unlock = genpd_unlock_mtx,
80 };
81 
82 static void genpd_lock_spin(struct generic_pm_domain *genpd)
83 	__acquires(&genpd->slock)
84 {
85 	unsigned long flags;
86 
87 	spin_lock_irqsave(&genpd->slock, flags);
88 	genpd->lock_flags = flags;
89 }
90 
91 static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
92 					int depth)
93 	__acquires(&genpd->slock)
94 {
95 	unsigned long flags;
96 
97 	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
98 	genpd->lock_flags = flags;
99 }
100 
101 static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
102 	__acquires(&genpd->slock)
103 {
104 	unsigned long flags;
105 
106 	spin_lock_irqsave(&genpd->slock, flags);
107 	genpd->lock_flags = flags;
108 	return 0;
109 }
110 
111 static void genpd_unlock_spin(struct generic_pm_domain *genpd)
112 	__releases(&genpd->slock)
113 {
114 	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
115 }
116 
117 static const struct genpd_lock_ops genpd_spin_ops = {
118 	.lock = genpd_lock_spin,
119 	.lock_nested = genpd_lock_nested_spin,
120 	.lock_interruptible = genpd_lock_interruptible_spin,
121 	.unlock = genpd_unlock_spin,
122 };
123 
124 static void genpd_lock_raw_spin(struct generic_pm_domain *genpd)
125 	__acquires(&genpd->raw_slock)
126 {
127 	unsigned long flags;
128 
129 	raw_spin_lock_irqsave(&genpd->raw_slock, flags);
130 	genpd->raw_lock_flags = flags;
131 }
132 
133 static void genpd_lock_nested_raw_spin(struct generic_pm_domain *genpd,
134 					int depth)
135 	__acquires(&genpd->raw_slock)
136 {
137 	unsigned long flags;
138 
139 	raw_spin_lock_irqsave_nested(&genpd->raw_slock, flags, depth);
140 	genpd->raw_lock_flags = flags;
141 }
142 
143 static int genpd_lock_interruptible_raw_spin(struct generic_pm_domain *genpd)
144 	__acquires(&genpd->raw_slock)
145 {
146 	unsigned long flags;
147 
148 	raw_spin_lock_irqsave(&genpd->raw_slock, flags);
149 	genpd->raw_lock_flags = flags;
150 	return 0;
151 }
152 
153 static void genpd_unlock_raw_spin(struct generic_pm_domain *genpd)
154 	__releases(&genpd->raw_slock)
155 {
156 	raw_spin_unlock_irqrestore(&genpd->raw_slock, genpd->raw_lock_flags);
157 }
158 
159 static const struct genpd_lock_ops genpd_raw_spin_ops = {
160 	.lock = genpd_lock_raw_spin,
161 	.lock_nested = genpd_lock_nested_raw_spin,
162 	.lock_interruptible = genpd_lock_interruptible_raw_spin,
163 	.unlock = genpd_unlock_raw_spin,
164 };
165 
166 #define genpd_lock(p)			p->lock_ops->lock(p)
167 #define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
168 #define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
169 #define genpd_unlock(p)			p->lock_ops->unlock(p)
170 
171 #define genpd_status_on(genpd)		(genpd->status == GENPD_STATE_ON)
172 #define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
173 #define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
174 #define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
175 #define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
176 #define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
177 #define genpd_is_opp_table_fw(genpd)	(genpd->flags & GENPD_FLAG_OPP_TABLE_FW)
178 #define genpd_is_dev_name_fw(genpd)	(genpd->flags & GENPD_FLAG_DEV_NAME_FW)
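/*
 * Illustrative sketch (the selection code lives outside this excerpt): a
 * genpd picks its lock_ops once, at init time, based on its flags, roughly
 * like:
 *
 *	if (genpd_is_irq_safe(genpd)) {
 *		spin_lock_init(&genpd->slock);
 *		genpd->lock_ops = &genpd_spin_ops;
 *	} else {
 *		mutex_init(&genpd->mlock);
 *		genpd->lock_ops = &genpd_mtx_ops;
 *	}
 *
 * (A raw-spinlock variant also exists, cf. genpd_raw_spin_ops.) All callers
 * then go through the genpd_lock*() wrappers above and never touch
 * mlock/slock directly.
 */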
179 
180 static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
181 		const struct generic_pm_domain *genpd)
182 {
183 	bool ret;
184 
185 	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
186 
187 	/*
188 	 * Warn once if an IRQ safe device is attached to a domain whose
189 	 * callbacks are allowed to sleep. This indicates a suboptimal
190 	 * configuration for PM, but it doesn't matter for an always-on domain.
191 	 */
192 	if (genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd))
193 		return ret;
194 
195 	if (ret)
196 		dev_warn_once(dev, "PM domain %s will not be powered off\n",
197 			      dev_name(&genpd->dev));
198 
199 	return ret;
200 }
201 
202 static int genpd_runtime_suspend(struct device *dev);
203 
204 /*
205  * Get the generic PM domain for a particular struct device.
206  * This validates the struct device pointer, the PM domain pointer,
207  * and checks that the PM domain pointer is a real generic PM domain.
208  * Any failure results in NULL being returned.
209  */
210 static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
211 {
212 	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
213 		return NULL;
214 
215 	/* A genpd always has its ->runtime_suspend() callback assigned. */
216 	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
217 		return pd_to_genpd(dev->pm_domain);
218 
219 	return NULL;
220 }
221 
222 /*
223  * This should only be used where we are certain that the pm_domain
224  * attached to the device is a genpd domain.
225  */
226 static struct generic_pm_domain *dev_to_genpd(struct device *dev)
227 {
228 	if (IS_ERR_OR_NULL(dev->pm_domain))
229 		return ERR_PTR(-EINVAL);
230 
231 	return pd_to_genpd(dev->pm_domain);
232 }
233 
234 struct device *dev_to_genpd_dev(struct device *dev)
235 {
236 	struct generic_pm_domain *genpd = dev_to_genpd(dev);
237 
238 	if (IS_ERR(genpd))
239 		return ERR_CAST(genpd);
240 
241 	return &genpd->dev;
242 }
243 
244 static int genpd_stop_dev(const struct generic_pm_domain *genpd,
245 			  struct device *dev)
246 {
247 	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
248 }
249 
250 static int genpd_start_dev(const struct generic_pm_domain *genpd,
251 			   struct device *dev)
252 {
253 	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
254 }
255 
256 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
257 {
258 	bool ret = false;
259 
260 	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
261 		ret = !!atomic_dec_and_test(&genpd->sd_count);
262 
263 	return ret;
264 }
265 
266 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
267 {
268 	atomic_inc(&genpd->sd_count);
269 	smp_mb__after_atomic();
270 }
271 
272 #ifdef CONFIG_DEBUG_FS
273 static struct dentry *genpd_debugfs_dir;
274 
275 static void genpd_debug_add(struct generic_pm_domain *genpd);
276 
277 static void genpd_debug_remove(struct generic_pm_domain *genpd)
278 {
279 	if (!genpd_debugfs_dir)
280 		return;
281 
282 	debugfs_lookup_and_remove(dev_name(&genpd->dev), genpd_debugfs_dir);
283 }
284 
285 static void genpd_update_accounting(struct generic_pm_domain *genpd)
286 {
287 	u64 delta, now;
288 
289 	now = ktime_get_mono_fast_ns();
290 	if (now <= genpd->accounting_time)
291 		return;
292 
293 	delta = now - genpd->accounting_time;
294 
295 	/*
296 	 * This is called right after a status change. If genpd->status is
297 	 * now active, the time since the last update was spent in an idle
298 	 * state, so update the idle time; and vice versa.
299 	 */
300 	if (genpd->status == GENPD_STATE_ON)
301 		genpd->states[genpd->state_idx].idle_time += delta;
302 	else
303 		genpd->on_time += delta;
304 
305 	genpd->accounting_time = now;
306 }
307 
308 static void genpd_reflect_residency(struct generic_pm_domain *genpd)
309 {
310 	struct genpd_governor_data *gd = genpd->gd;
311 	struct genpd_power_state *state, *next_state;
312 	unsigned int state_idx;
313 	s64 sleep_ns, target_ns;
314 
315 	if (!gd || !gd->reflect_residency)
316 		return;
317 
318 	sleep_ns = ktime_to_ns(ktime_sub(ktime_get(), gd->last_enter));
319 	state_idx = genpd->state_idx;
320 	state = &genpd->states[state_idx];
321 	target_ns = state->power_off_latency_ns + state->residency_ns;
322 
323 	if (sleep_ns < target_ns) {
324 		state->above++;
325 	} else if (state_idx < (genpd->state_count - 1)) {
326 		next_state = &genpd->states[state_idx + 1];
327 		target_ns = next_state->power_off_latency_ns +
328 			next_state->residency_ns;
329 
330 		if (sleep_ns >= target_ns)
331 			state->below++;
332 	}
333 
334 	gd->reflect_residency = false;
335 }
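/*
 * Illustrative example with hypothetical numbers: assume state 0 has
 * power_off_latency_ns = 100000 and residency_ns = 500000, giving
 * target_ns = 600000. A sleep of 400000 ns bumps state 0's "above"
 * counter (the state was too deep to pay off), while a sleep of
 * 2000000 ns bumps "below" if the next-deeper state's target of, say,
 * 1500000 ns would also have been met (a deeper state was available).
 */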
336 #else
337 static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
338 static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
339 static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
340 static inline void genpd_reflect_residency(struct generic_pm_domain *genpd) {}
341 #endif
342 
343 static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
344 					   unsigned int state)
345 {
346 	struct generic_pm_domain_data *pd_data;
347 	struct pm_domain_data *pdd;
348 	struct gpd_link *link;
349 
350 	/* New requested state is same as Max requested state */
351 	/* New requested state is the same as the max requested state */
352 		return state;
353 
354 	/* New requested state is higher than Max requested state */
355 	/* New requested state is higher than the max requested state */
356 		return state;
357 
358 	/* Traverse all devices within the domain */
359 	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
360 		pd_data = to_gpd_data(pdd);
361 
362 		if (pd_data->performance_state > state)
363 			state = pd_data->performance_state;
364 	}
365 
366 	/*
367 	 * Traverse all sub-domains within the domain. This can be
368 	 * done without any additional locking as the link->performance_state
369 	 * field is protected by the parent genpd->lock, which is already taken.
370 	 *
371 	 * Also note that link->performance_state (subdomain's performance state
372 	 * requirement to parent domain) is different from
373 	 * link->child->performance_state (current performance state requirement
374 	 * of the devices/sub-domains of the subdomain) and so can have a
375 	 * different value.
376 	 *
377 	 * Note that we also take vote from powered-off sub-domains into account
378 	 * as the same is done for devices right now.
379 	 */
380 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
381 		if (link->performance_state > state)
382 			state = link->performance_state;
383 	}
384 
385 	return state;
386 }
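/*
 * Illustrative example with hypothetical votes: with the domain at
 * performance state 7, devices voting 3 and 5, and a subdomain link voting
 * 6, dropping the max requested vote re-evaluates the domain to
 * max(3, 5, 6) = 6, while any new vote at or above the current state is
 * returned immediately without walking the lists.
 */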
387 
388 static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
389 					 struct generic_pm_domain *parent,
390 					 unsigned int pstate)
391 {
392 	if (!parent->set_performance_state)
393 		return pstate;
394 
395 	return dev_pm_opp_xlate_performance_state(genpd->opp_table,
396 						  parent->opp_table,
397 						  pstate);
398 }
399 
400 static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
401 					unsigned int state, int depth);
402 
403 static void _genpd_rollback_parent_state(struct gpd_link *link, int depth)
404 {
405 	struct generic_pm_domain *parent = link->parent;
406 	int parent_state;
407 
408 	genpd_lock_nested(parent, depth + 1);
409 
410 	parent_state = link->prev_performance_state;
411 	link->performance_state = parent_state;
412 
413 	parent_state = _genpd_reeval_performance_state(parent, parent_state);
414 	if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
415 		pr_err("%s: Failed to roll back to %d performance state\n",
416 		       parent->name, parent_state);
417 	}
418 
419 	genpd_unlock(parent);
420 }
421 
422 static int _genpd_set_parent_state(struct generic_pm_domain *genpd,
423 				   struct gpd_link *link,
424 				   unsigned int state, int depth)
425 {
426 	struct generic_pm_domain *parent = link->parent;
427 	int parent_state, ret;
428 
429 	/* Find parent's performance state */
430 	ret = genpd_xlate_performance_state(genpd, parent, state);
431 	if (unlikely(ret < 0))
432 		return ret;
433 
434 	parent_state = ret;
435 
436 	genpd_lock_nested(parent, depth + 1);
437 
438 	link->prev_performance_state = link->performance_state;
439 	link->performance_state = parent_state;
440 
441 	parent_state = _genpd_reeval_performance_state(parent, parent_state);
442 	ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
443 	if (ret)
444 		link->performance_state = link->prev_performance_state;
445 
446 	genpd_unlock(parent);
447 
448 	return ret;
449 }
450 
451 static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
452 					unsigned int state, int depth)
453 {
454 	struct gpd_link *link = NULL;
455 	int ret;
456 
457 	if (state == genpd->performance_state)
458 		return 0;
459 
460 	/* When scaling up, propagate to parents first in normal order */
461 	if (state > genpd->performance_state) {
462 		list_for_each_entry(link, &genpd->child_links, child_node) {
463 			ret = _genpd_set_parent_state(genpd, link, state, depth);
464 			if (ret)
465 				goto rollback_parents_up;
466 		}
467 	}
468 
469 	if (genpd->set_performance_state) {
470 		ret = genpd->set_performance_state(genpd, state);
471 		if (ret) {
472 			if (link)
473 				goto rollback_parents_up;
474 			return ret;
475 		}
476 	}
477 
478 	/* When scaling down, propagate to parents last in reverse order */
479 	if (state < genpd->performance_state) {
480 		list_for_each_entry_reverse(link, &genpd->child_links, child_node) {
481 			ret = _genpd_set_parent_state(genpd, link, state, depth);
482 			if (ret)
483 				goto rollback_parents_down;
484 		}
485 	}
486 
487 	genpd->performance_state = state;
488 	return 0;
489 
490 rollback_parents_up:
491 	list_for_each_entry_continue_reverse(link, &genpd->child_links, child_node)
492 		_genpd_rollback_parent_state(link, depth);
493 	return ret;
494 rollback_parents_down:
495 	list_for_each_entry_continue(link, &genpd->child_links, child_node)
496 		_genpd_rollback_parent_state(link, depth);
497 	return ret;
498 }
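/*
 * Note on ordering: the propagation above ensures a parent never runs at a
 * lower performance state than a child requires. When scaling up, parents
 * are raised before the domain itself; when scaling down, the domain is
 * lowered first and the parents follow in reverse order. E.g. (hypothetical
 * two-level hierarchy), going 2 -> 5 applies parent(5) then child(5), while
 * going 5 -> 2 applies child(2) then parent(2).
 */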
499 
500 static int genpd_set_performance_state(struct device *dev, unsigned int state)
501 {
502 	struct generic_pm_domain *genpd = dev_to_genpd(dev);
503 	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
504 	unsigned int prev_state;
505 	int ret;
506 
507 	prev_state = gpd_data->performance_state;
508 	if (prev_state == state)
509 		return 0;
510 
511 	gpd_data->performance_state = state;
512 	state = _genpd_reeval_performance_state(genpd, state);
513 
514 	ret = _genpd_set_performance_state(genpd, state, 0);
515 	if (ret)
516 		gpd_data->performance_state = prev_state;
517 
518 	return ret;
519 }
520 
521 static int genpd_drop_performance_state(struct device *dev)
522 {
523 	unsigned int prev_state = dev_gpd_data(dev)->performance_state;
524 
525 	if (!genpd_set_performance_state(dev, 0))
526 		return prev_state;
527 
528 	return 0;
529 }
530 
531 static void genpd_restore_performance_state(struct device *dev,
532 					    unsigned int state)
533 {
534 	if (state)
535 		genpd_set_performance_state(dev, state);
536 }
537 
538 static int genpd_dev_pm_set_performance_state(struct device *dev,
539 					      unsigned int state)
540 {
541 	struct generic_pm_domain *genpd = dev_to_genpd(dev);
542 	int ret = 0;
543 
544 	genpd_lock(genpd);
545 	if (pm_runtime_suspended(dev)) {
546 		dev_gpd_data(dev)->rpm_pstate = state;
547 	} else {
548 		ret = genpd_set_performance_state(dev, state);
549 		if (!ret)
550 			dev_gpd_data(dev)->rpm_pstate = 0;
551 	}
552 	genpd_unlock(genpd);
553 
554 	return ret;
555 }
556 
557 /**
558  * dev_pm_genpd_set_performance_state - Set performance state of device's power
559  * domain.
560  *
561  * @dev: Device for which the performance-state needs to be set.
562  * @state: Target performance state of the device. This can be set to 0 when
563  *	   the device doesn't have any performance state constraints left (and
564  *	   so the device no longer participates in determining the target
565  *	   performance state of the genpd).
566  *
567  * It is assumed that the users guarantee that the genpd wouldn't be detached
568  * while this routine is getting called.
569  *
570  * Returns 0 on success and negative error values on failures.
571  */
572 int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
573 {
574 	struct generic_pm_domain *genpd;
575 
576 	genpd = dev_to_genpd_safe(dev);
577 	if (!genpd)
578 		return -ENODEV;
579 
580 	if (WARN_ON(!dev->power.subsys_data ||
581 		     !dev->power.subsys_data->domain_data))
582 		return -EINVAL;
583 
584 	return genpd_dev_pm_set_performance_state(dev, state);
585 }
586 EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
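/*
 * Illustrative usage from a hypothetical consumer driver (not from the
 * original source):
 *
 *	static int foo_boost(struct device *dev, bool boost)
 *	{
 *		return dev_pm_genpd_set_performance_state(dev, boost ? 3 : 0);
 *	}
 *
 * The vote is aggregated with those of all other devices and subdomains, so
 * the domain may run at a higher state than this device requested.
 */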
587 
588 /**
589  * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
590  *
591  * @dev: Device to handle
592  * @next: impending interrupt/wakeup for the device
593  *
594  * Allow devices to inform the PM framework of the next wakeup. It's assumed
595  * that the users guarantee that the genpd wouldn't be detached while this
596  * routine is getting called. Additionally, it's also assumed that @dev isn't
597  * runtime suspended (RPM_SUSPENDED).
598  *
599  * Although devices are expected to update the next_wakeup after the end of
600  * their use case as well, it is possible the devices themselves may not know
601  * about that, so stale @next will be ignored when powering off the domain.
602  */
603 void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
604 {
605 	struct generic_pm_domain *genpd;
606 	struct gpd_timing_data *td;
607 
608 	genpd = dev_to_genpd_safe(dev);
609 	if (!genpd)
610 		return;
611 
612 	td = to_gpd_data(dev->power.subsys_data->domain_data)->td;
613 	if (td)
614 		td->next_wakeup = next;
615 }
616 EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
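/*
 * Illustrative usage (hypothetical driver): a device that knows its next
 * interrupt fires in ~2 ms can hint the governor accordingly:
 *
 *	dev_pm_genpd_set_next_wakeup(dev, ktime_add_us(ktime_get(), 2000));
 *
 * A governor may then avoid entering an idle state whose residency cannot
 * be met before the expected wakeup.
 */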
617 
618 /**
619  * dev_pm_genpd_get_next_hrtimer - Return the next_hrtimer for the genpd
620  * @dev: A device that is attached to the genpd.
621  *
622  * This routine should typically be called for a device at the point when a
623  * GENPD_NOTIFY_PRE_OFF notification has been sent for it.
624  *
625  * Returns the aggregated value of the genpd's next hrtimer or KTIME_MAX if no
626  * valid value has been set.
627  */
628 ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
629 {
630 	struct generic_pm_domain *genpd;
631 
632 	genpd = dev_to_genpd_safe(dev);
633 	if (!genpd)
634 		return KTIME_MAX;
635 
636 	if (genpd->gd)
637 		return genpd->gd->next_hrtimer;
638 
639 	return KTIME_MAX;
640 }
641 EXPORT_SYMBOL_GPL(dev_pm_genpd_get_next_hrtimer);
642 
643 /**
644  * dev_pm_genpd_synced_poweroff - Next power off should be synchronous
645  *
646  * @dev: A device that is attached to the genpd.
647  *
648  * Allows a consumer of the genpd to notify the provider that the next power off
649  * should be synchronous.
650  *
651  * It is assumed that the users guarantee that the genpd wouldn't be detached
652  * while this routine is getting called.
653  */
654 void dev_pm_genpd_synced_poweroff(struct device *dev)
655 {
656 	struct generic_pm_domain *genpd;
657 
658 	genpd = dev_to_genpd_safe(dev);
659 	if (!genpd)
660 		return;
661 
662 	genpd_lock(genpd);
663 	genpd->synced_poweroff = true;
664 	genpd_unlock(genpd);
665 }
666 EXPORT_SYMBOL_GPL(dev_pm_genpd_synced_poweroff);
667 
668 /**
669  * dev_pm_genpd_set_hwmode() - Set the HW mode for the device and its PM domain.
670  *
671  * @dev: Device for which the HW-mode should be changed.
672  * @enable: Value to set or unset the HW-mode.
673  *
674  * Some PM domains can rely on HW signals to control the power for a device. To
675  * allow a consumer driver to switch the behaviour for its device at runtime,
676  * which may be beneficial from a latency or energy point of view, this function
677  * may be called.
678  *
679  * It is assumed that the users guarantee that the genpd wouldn't be detached
680  * while this routine is getting called.
681  *
682  * Return: Returns 0 on success and negative error values on failures.
683  */
684 int dev_pm_genpd_set_hwmode(struct device *dev, bool enable)
685 {
686 	struct generic_pm_domain *genpd;
687 	int ret = 0;
688 
689 	genpd = dev_to_genpd_safe(dev);
690 	if (!genpd)
691 		return -ENODEV;
692 
693 	if (!genpd->set_hwmode_dev)
694 		return -EOPNOTSUPP;
695 
696 	genpd_lock(genpd);
697 
698 	if (dev_gpd_data(dev)->hw_mode == enable)
699 		goto out;
700 
701 	ret = genpd->set_hwmode_dev(genpd, dev, enable);
702 	if (!ret)
703 		dev_gpd_data(dev)->hw_mode = enable;
704 
705 out:
706 	genpd_unlock(genpd);
707 	return ret;
708 }
709 EXPORT_SYMBOL_GPL(dev_pm_genpd_set_hwmode);
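/*
 * Illustrative usage (hypothetical driver):
 *
 *	err = dev_pm_genpd_set_hwmode(dev, true);
 *	if (err == -EOPNOTSUPP)
 *		dev_dbg(dev, "PM domain has no HW mode control\n");
 *	else if (err)
 *		dev_warn(dev, "failed to enable HW mode: %d\n", err);
 */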
710 
711 /**
712  * dev_pm_genpd_get_hwmode() - Get the HW mode setting for the device.
713  *
714  * @dev: Device for which the current HW-mode setting should be fetched.
715  *
716  * This helper function allows consumer drivers to fetch the current HW mode
717  * setting for its device.
718  *
719  * It is assumed that the users guarantee that the genpd wouldn't be detached
720  * while this routine is getting called.
721  *
722  * Return: Returns the HW mode setting of device from SW cached hw_mode.
723  */
724 bool dev_pm_genpd_get_hwmode(struct device *dev)
725 {
726 	return dev_gpd_data(dev)->hw_mode;
727 }
728 EXPORT_SYMBOL_GPL(dev_pm_genpd_get_hwmode);
729 
730 /**
731  * dev_pm_genpd_rpm_always_on() - Control if the PM domain can be powered off.
732  *
733  * @dev: Device for which the PM domain may need to stay on.
734  * @on: Value to set or unset for the condition.
735  *
736  * For some use cases, a consumer driver requires its device to remain powered
737  * on from the PM domain perspective during runtime. This function allows the
738  * behaviour to be dynamically controlled for a device attached to a genpd.
739  *
740  * It is assumed that the users guarantee that the genpd wouldn't be detached
741  * while this routine is getting called.
742  *
743  * Return: Returns 0 on success and negative error values on failures.
744  */
745 int dev_pm_genpd_rpm_always_on(struct device *dev, bool on)
746 {
747 	struct generic_pm_domain *genpd;
748 
749 	genpd = dev_to_genpd_safe(dev);
750 	if (!genpd)
751 		return -ENODEV;
752 
753 	genpd_lock(genpd);
754 	dev_gpd_data(dev)->rpm_always_on = on;
755 	genpd_unlock(genpd);
756 
757 	return 0;
758 }
759 EXPORT_SYMBOL_GPL(dev_pm_genpd_rpm_always_on);
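/*
 * Illustrative usage (hypothetical driver): unlike the static
 * GENPD_FLAG_RPM_ALWAYS_ON, this constraint can be toggled at runtime:
 *
 *	dev_pm_genpd_rpm_always_on(dev, true);
 *	foo_do_work_that_must_not_lose_power();
 *	dev_pm_genpd_rpm_always_on(dev, false);
 *
 * While set, genpd_power_off() below returns -EBUSY for the domain.
 */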
760 
761 /**
762  * pm_genpd_inc_rejected() - Adjust the rejected/usage counts for an idle-state.
763  *
764  * @genpd: The PM domain the idle-state belongs to.
765  * @state_idx: The index of the idle-state that failed.
766  *
767  * In some special cases the ->power_off() callback is asynchronously powering
768  * off the PM domain, which means it may return zero to indicate success even
769  * though the actual power-off could fail. To account for this correctly in
770  * the rejected/usage counts for the idle-state statistics, users can call this
771  * function to adjust the values.
772  *
773  * It is assumed that the users guarantee that the genpd doesn't get removed
774  * while this routine is getting called.
775  */
776 void pm_genpd_inc_rejected(struct generic_pm_domain *genpd,
777 			   unsigned int state_idx)
778 {
779 	genpd_lock(genpd);
780 	genpd->states[state_idx].rejected++;
781 	genpd->states[state_idx].usage--;
782 	genpd_unlock(genpd);
783 }
784 EXPORT_SYMBOL_GPL(pm_genpd_inc_rejected);
785 
786 static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
787 {
788 	unsigned int state_idx = genpd->state_idx;
789 	ktime_t time_start;
790 	s64 elapsed_ns;
791 	int ret;
792 
793 	/* Notify consumers that we are about to power on. */
794 	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
795 					     GENPD_NOTIFY_PRE_ON,
796 					     GENPD_NOTIFY_OFF, NULL);
797 	ret = notifier_to_errno(ret);
798 	if (ret)
799 		return ret;
800 
801 	if (!genpd->power_on)
802 		goto out;
803 
804 	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
805 	if (!timed) {
806 		ret = genpd->power_on(genpd);
807 		if (ret)
808 			goto err;
809 
810 		goto out;
811 	}
812 
813 	time_start = ktime_get();
814 	ret = genpd->power_on(genpd);
815 	if (ret)
816 		goto err;
817 
818 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
819 	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
820 		goto out;
821 
822 	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
823 	genpd->gd->max_off_time_changed = true;
824 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
825 		 dev_name(&genpd->dev), "on", elapsed_ns);
826 
827 out:
828 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
829 	genpd->synced_poweroff = false;
830 	return 0;
831 err:
832 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
833 				NULL);
834 	return ret;
835 }
836 
837 static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
838 {
839 	unsigned int state_idx = genpd->state_idx;
840 	ktime_t time_start;
841 	s64 elapsed_ns;
842 	int ret;
843 
844 	/* Notify consumers that we are about to power off. */
845 	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
846 					     GENPD_NOTIFY_PRE_OFF,
847 					     GENPD_NOTIFY_ON, NULL);
848 	ret = notifier_to_errno(ret);
849 	if (ret)
850 		return ret;
851 
852 	if (!genpd->power_off)
853 		goto out;
854 
855 	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
856 	if (!timed) {
857 		ret = genpd->power_off(genpd);
858 		if (ret)
859 			goto busy;
860 
861 		goto out;
862 	}
863 
864 	time_start = ktime_get();
865 	ret = genpd->power_off(genpd);
866 	if (ret)
867 		goto busy;
868 
869 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
870 	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
871 		goto out;
872 
873 	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
874 	genpd->gd->max_off_time_changed = true;
875 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
876 		 dev_name(&genpd->dev), "off", elapsed_ns);
877 
878 out:
879 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
880 				NULL);
881 	return 0;
882 busy:
883 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
884 	return ret;
885 }
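/*
 * Note on the notifier protocol used by the two helpers above: power-on
 * sends GENPD_NOTIFY_PRE_ON, then GENPD_NOTIFY_ON on success or
 * GENPD_NOTIFY_OFF on failure; power-off sends GENPD_NOTIFY_PRE_OFF, then
 * GENPD_NOTIFY_OFF on success or GENPD_NOTIFY_ON if the domain stayed on.
 * A consumer registered with dev_pm_genpd_add_notifier() could save and
 * restore context like this (foo_save_context()/foo_restore_context() are
 * hypothetical helpers):
 *
 *	static int foo_pd_notify(struct notifier_block *nb,
 *				 unsigned long action, void *data)
 *	{
 *		if (action == GENPD_NOTIFY_PRE_OFF)
 *			foo_save_context();
 *		else if (action == GENPD_NOTIFY_ON)
 *			foo_restore_context();
 *		return NOTIFY_OK;
 *	}
 */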
886 
887 /**
888  * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
889  * @genpd: PM domain to power off.
890  *
891  * Queue up the execution of genpd_power_off() unless it's already been done
892  * before.
893  */
894 static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
895 {
896 	queue_work(pm_wq, &genpd->power_off_work);
897 }
898 
899 /**
900  * genpd_power_off - Remove power from a given PM domain.
901  * @genpd: PM domain to power down.
902  * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
903  * RPM status of the related device is in an intermediate state, not yet turned
904  * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
905  * be RPM_SUSPENDED, while it tries to power off the PM domain.
906  * @depth: nesting count for lockdep.
907  *
908  * If all of the @genpd's devices have been suspended and all of its subdomains
909  * have been powered down, remove power from @genpd.
910  */
911 static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
912 			   unsigned int depth)
913 {
914 	struct pm_domain_data *pdd;
915 	struct gpd_link *link;
916 	unsigned int not_suspended = 0;
917 	int ret;
918 
919 	/*
920 	 * Do not try to power off the domain in the following situations:
921 	 * (1) The domain is already in the "power off" state.
922 	 * (2) System suspend is in progress.
923 	 */
924 	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
925 		return 0;
926 
927 	/*
928 	 * Abort power off for the PM domain in the following situations:
929 	 * (1) The domain is configured as always on.
930 	 * (2) When the domain has a subdomain being powered on.
931 	 */
932 	if (genpd_is_always_on(genpd) ||
933 			genpd_is_rpm_always_on(genpd) ||
934 			atomic_read(&genpd->sd_count) > 0)
935 		return -EBUSY;
936 
937 	/*
938 	 * The children must be in their deepest (powered-off) states to allow
939 	 * the parent to be powered off. Note that there's no need for
940 	 * additional locking, as powering on a child requires the parent's
941 	 * lock to be acquired first.
942 	 */
943 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
944 		struct generic_pm_domain *child = link->child;
945 		if (child->state_idx < child->state_count - 1)
946 			return -EBUSY;
947 	}
948 
949 	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
950 		/*
951 		 * Do not allow PM domain to be powered off, when an IRQ safe
952 		 * device is part of a non-IRQ safe domain.
953 		 */
954 		if (!pm_runtime_suspended(pdd->dev) ||
955 			irq_safe_dev_in_sleep_domain(pdd->dev, genpd))
956 			not_suspended++;
957 
958 		/* The device may need its PM domain to stay powered on. */
959 		if (to_gpd_data(pdd)->rpm_always_on)
960 			return -EBUSY;
961 	}
962 
963 	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
964 		return -EBUSY;
965 
966 	if (genpd->gov && genpd->gov->power_down_ok) {
967 		if (!genpd->gov->power_down_ok(&genpd->domain))
968 			return -EAGAIN;
969 	}
970 
971 	/* Default to shallowest state. */
972 	if (!genpd->gov)
973 		genpd->state_idx = 0;
974 
975 	/* Don't power off, if a child domain is waiting to power on. */
976 	if (atomic_read(&genpd->sd_count) > 0)
977 		return -EBUSY;
978 
979 	ret = _genpd_power_off(genpd, true);
980 	if (ret) {
981 		genpd->states[genpd->state_idx].rejected++;
982 		return ret;
983 	}
984 
985 	genpd->status = GENPD_STATE_OFF;
986 	genpd_update_accounting(genpd);
987 	genpd->states[genpd->state_idx].usage++;
988 
989 	list_for_each_entry(link, &genpd->child_links, child_node) {
990 		genpd_sd_counter_dec(link->parent);
991 		genpd_lock_nested(link->parent, depth + 1);
992 		genpd_power_off(link->parent, false, depth + 1);
993 		genpd_unlock(link->parent);
994 	}
995 
996 	return 0;
997 }
998 
999 /**
1000  * genpd_power_on - Restore power to a given PM domain and its parents.
1001  * @genpd: PM domain to power up.
1002  * @depth: nesting count for lockdep.
1003  *
1004  * Restore power to @genpd and all of its parents so that it is possible to
1005  * resume a device belonging to it.
1006  */
1007 static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
1008 {
1009 	struct gpd_link *link;
1010 	int ret = 0;
1011 
1012 	if (genpd_status_on(genpd))
1013 		return 0;
1014 
1015 	/* Reflect over the entered idle-states residency for debugfs. */
1016 	genpd_reflect_residency(genpd);
1017 
1018 	/*
1019 	 * The list is guaranteed not to change while the loop below is being
1020 	 * executed, unless one of the parents' .power_on() callbacks fiddles
1021 	 * with it.
1022 	 */
1023 	list_for_each_entry(link, &genpd->child_links, child_node) {
1024 		struct generic_pm_domain *parent = link->parent;
1025 
1026 		genpd_sd_counter_inc(parent);
1027 
1028 		genpd_lock_nested(parent, depth + 1);
1029 		ret = genpd_power_on(parent, depth + 1);
1030 		genpd_unlock(parent);
1031 
1032 		if (ret) {
1033 			genpd_sd_counter_dec(parent);
1034 			goto err;
1035 		}
1036 	}
1037 
1038 	ret = _genpd_power_on(genpd, true);
1039 	if (ret)
1040 		goto err;
1041 
1042 	genpd->status = GENPD_STATE_ON;
1043 	genpd_update_accounting(genpd);
1044 
1045 	return 0;
1046 
1047  err:
1048 	list_for_each_entry_continue_reverse(link,
1049 					&genpd->child_links,
1050 					child_node) {
1051 		genpd_sd_counter_dec(link->parent);
1052 		genpd_lock_nested(link->parent, depth + 1);
1053 		genpd_power_off(link->parent, false, depth + 1);
1054 		genpd_unlock(link->parent);
1055 	}
1056 
1057 	return ret;
1058 }
1059 
1060 static int genpd_dev_pm_start(struct device *dev)
1061 {
1062 	struct generic_pm_domain *genpd = dev_to_genpd(dev);
1063 
1064 	return genpd_start_dev(genpd, dev);
1065 }
1066 
1067 static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
1068 				     unsigned long val, void *ptr)
1069 {
1070 	struct generic_pm_domain_data *gpd_data;
1071 	struct device *dev;
1072 
1073 	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
1074 	dev = gpd_data->base.dev;
1075 
1076 	for (;;) {
1077 		struct generic_pm_domain *genpd = ERR_PTR(-ENODATA);
1078 		struct pm_domain_data *pdd;
1079 		struct gpd_timing_data *td;
1080 
1081 		spin_lock_irq(&dev->power.lock);
1082 
1083 		pdd = dev->power.subsys_data ?
1084 				dev->power.subsys_data->domain_data : NULL;
1085 		if (pdd) {
1086 			td = to_gpd_data(pdd)->td;
1087 			if (td) {
1088 				td->constraint_changed = true;
1089 				genpd = dev_to_genpd(dev);
1090 			}
1091 		}
1092 
1093 		spin_unlock_irq(&dev->power.lock);
1094 
1095 		if (!IS_ERR(genpd)) {
1096 			genpd_lock(genpd);
1097 			genpd->gd->max_off_time_changed = true;
1098 			genpd_unlock(genpd);
1099 		}
1100 
1101 		dev = dev->parent;
1102 		if (!dev || dev->power.ignore_children)
1103 			break;
1104 	}
1105 
1106 	return NOTIFY_DONE;
1107 }
1108 
1109 /**
1110  * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
1111  * @work: Work structure used for scheduling the execution of this function.
1112  */
1113 static void genpd_power_off_work_fn(struct work_struct *work)
1114 {
1115 	struct generic_pm_domain *genpd;
1116 
1117 	genpd = container_of(work, struct generic_pm_domain, power_off_work);
1118 
1119 	genpd_lock(genpd);
1120 	genpd_power_off(genpd, false, 0);
1121 	genpd_unlock(genpd);
1122 }
1123 
1124 /**
1125  * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
1126  * @dev: Device to handle.
1127  */
1128 static int __genpd_runtime_suspend(struct device *dev)
1129 {
1130 	int (*cb)(struct device *__dev);
1131 
1132 	if (dev->type && dev->type->pm)
1133 		cb = dev->type->pm->runtime_suspend;
1134 	else if (dev->class && dev->class->pm)
1135 		cb = dev->class->pm->runtime_suspend;
1136 	else if (dev->bus && dev->bus->pm)
1137 		cb = dev->bus->pm->runtime_suspend;
1138 	else
1139 		cb = NULL;
1140 
1141 	if (!cb && dev->driver && dev->driver->pm)
1142 		cb = dev->driver->pm->runtime_suspend;
1143 
1144 	return cb ? cb(dev) : 0;
1145 }
1146 
1147 /**
1148  * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
1149  * @dev: Device to handle.
1150  */
1151 static int __genpd_runtime_resume(struct device *dev)
1152 {
1153 	int (*cb)(struct device *__dev);
1154 
1155 	if (dev->type && dev->type->pm)
1156 		cb = dev->type->pm->runtime_resume;
1157 	else if (dev->class && dev->class->pm)
1158 		cb = dev->class->pm->runtime_resume;
1159 	else if (dev->bus && dev->bus->pm)
1160 		cb = dev->bus->pm->runtime_resume;
1161 	else
1162 		cb = NULL;
1163 
1164 	if (!cb && dev->driver && dev->driver->pm)
1165 		cb = dev->driver->pm->runtime_resume;
1166 
1167 	return cb ? cb(dev) : 0;
1168 }
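/*
 * Note: the two walkers above mirror the runtime PM core's callback
 * selection order: device type, then class, then bus, falling back to the
 * driver's own callback only when none of those layers provides one.
 */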
1169 
1170 /**
1171  * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
1172  * @dev: Device to suspend.
1173  *
1174  * Carry out a runtime suspend of a device under the assumption that its
1175  * pm_domain field points to the domain member of an object of type
1176  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
1177  */
1178 static int genpd_runtime_suspend(struct device *dev)
1179 {
1180 	struct generic_pm_domain *genpd;
1181 	bool (*suspend_ok)(struct device *__dev);
1182 	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
1183 	struct gpd_timing_data *td = gpd_data->td;
1184 	bool runtime_pm = pm_runtime_enabled(dev);
1185 	ktime_t time_start = 0;
1186 	s64 elapsed_ns;
1187 	int ret;
1188 
1189 	dev_dbg(dev, "%s()\n", __func__);
1190 
1191 	genpd = dev_to_genpd(dev);
1192 	if (IS_ERR(genpd))
1193 		return -EINVAL;
1194 
1195 	/*
1196 	 * A runtime PM centric subsystem/driver may re-use the runtime PM
1197 	 * callbacks for purposes other than runtime PM. In those scenarios
1198 	 * runtime PM is disabled. Under these circumstances, we shall skip
1199 	 * validating/measuring the PM QoS latency.
1200 	 */
1201 	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
1202 	if (runtime_pm && suspend_ok && !suspend_ok(dev))
1203 		return -EBUSY;
1204 
1205 	/* Measure suspend latency. */
1206 	if (td && runtime_pm)
1207 		time_start = ktime_get();
1208 
1209 	ret = __genpd_runtime_suspend(dev);
1210 	if (ret)
1211 		return ret;
1212 
1213 	ret = genpd_stop_dev(genpd, dev);
1214 	if (ret) {
1215 		__genpd_runtime_resume(dev);
1216 		return ret;
1217 	}
1218 
1219 	/* Update suspend latency value if the measured time exceeds it. */
1220 	if (td && runtime_pm) {
1221 		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
1222 		if (elapsed_ns > td->suspend_latency_ns) {
1223 			td->suspend_latency_ns = elapsed_ns;
1224 			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
1225 				elapsed_ns);
1226 			genpd->gd->max_off_time_changed = true;
1227 			td->constraint_changed = true;
1228 		}
1229 	}
1230 
1231 	/*
1232 	 * If power.irq_safe is set, this routine may be run with
1233 	 * IRQs disabled, so suspend only if the PM domain is also irq_safe.
1234 	 */
1235 	if (irq_safe_dev_in_sleep_domain(dev, genpd))
1236 		return 0;
1237 
1238 	genpd_lock(genpd);
1239 	genpd_power_off(genpd, true, 0);
1240 	gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
1241 	genpd_unlock(genpd);
1242 
1243 	return 0;
1244 }
1245 
1246 /**
1247  * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
1248  * @dev: Device to resume.
1249  *
1250  * Carry out a runtime resume of a device under the assumption that its
1251  * pm_domain field points to the domain member of an object of type
1252  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
1253  */
1254 static int genpd_runtime_resume(struct device *dev)
1255 {
1256 	struct generic_pm_domain *genpd;
1257 	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
1258 	struct gpd_timing_data *td = gpd_data->td;
1259 	bool timed = td && pm_runtime_enabled(dev);
1260 	ktime_t time_start = 0;
1261 	s64 elapsed_ns;
1262 	int ret;
1263 
1264 	dev_dbg(dev, "%s()\n", __func__);
1265 
1266 	genpd = dev_to_genpd(dev);
1267 	if (IS_ERR(genpd))
1268 		return -EINVAL;
1269 
1270 	/*
1271 	 * As we don't power off a non-IRQ-safe domain that holds
1272 	 * an IRQ-safe device, we don't need to restore power to it.
1273 	 */
1274 	if (irq_safe_dev_in_sleep_domain(dev, genpd))
1275 		goto out;
1276 
1277 	genpd_lock(genpd);
1278 	genpd_restore_performance_state(dev, gpd_data->rpm_pstate);
1279 	ret = genpd_power_on(genpd, 0);
1280 	genpd_unlock(genpd);
1281 
1282 	if (ret)
1283 		return ret;
1284 
1285  out:
1286 	/* Measure resume latency. */
1287 	if (timed)
1288 		time_start = ktime_get();
1289 
1290 	ret = genpd_start_dev(genpd, dev);
1291 	if (ret)
1292 		goto err_poweroff;
1293 
1294 	ret = __genpd_runtime_resume(dev);
1295 	if (ret)
1296 		goto err_stop;
1297 
1298 	/* Update resume latency value if the measured time exceeds it. */
1299 	if (timed) {
1300 		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
1301 		if (elapsed_ns > td->resume_latency_ns) {
1302 			td->resume_latency_ns = elapsed_ns;
1303 			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
1304 				elapsed_ns);
1305 			genpd->gd->max_off_time_changed = true;
1306 			td->constraint_changed = true;
1307 		}
1308 	}
1309 
1310 	return 0;
1311 
1312 err_stop:
1313 	genpd_stop_dev(genpd, dev);
1314 err_poweroff:
1315 	if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
1316 		genpd_lock(genpd);
1317 		genpd_power_off(genpd, true, 0);
1318 		gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
1319 		genpd_unlock(genpd);
1320 	}
1321 
1322 	return ret;
1323 }
1324 
1325 static bool pd_ignore_unused;
1326 static int __init pd_ignore_unused_setup(char *__unused)
1327 {
1328 	pd_ignore_unused = true;
1329 	return 1;
1330 }
1331 __setup("pd_ignore_unused", pd_ignore_unused_setup);
1332 
1333 /**
1334  * genpd_power_off_unused - Power off all PM domains with no devices in use.
1335  */
1336 static int __init genpd_power_off_unused(void)
1337 {
1338 	struct generic_pm_domain *genpd;
1339 
1340 	if (pd_ignore_unused) {
1341 		pr_warn("genpd: Not disabling unused power domains\n");
1342 		return 0;
1343 	}
1344 
1345 	pr_info("genpd: Disabling unused power domains\n");
1346 	mutex_lock(&gpd_list_lock);
1347 
1348 	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
1349 		genpd_queue_power_off_work(genpd);
1350 
1351 	mutex_unlock(&gpd_list_lock);
1352 
1353 	return 0;
1354 }
1355 late_initcall_sync(genpd_power_off_unused);
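/*
 * Note: booting with "pd_ignore_unused" on the kernel command line (see the
 * __setup() above) skips this late-initcall power-off, which can help when
 * debugging a platform where a domain is wrongly assumed to be unused.
 */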
1356 
1357 #ifdef CONFIG_PM_SLEEP
1358 
1359 /**
1360  * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
1361  * @genpd: PM domain to power off, if possible.
1362  * @use_lock: use the lock.
1363  * @depth: nesting count for lockdep.
1364  *
1365  * Check if the given PM domain can be powered off (during system suspend or
1366  * hibernation) and do that if so.  Also, in that case propagate to its parents.
1367  *
1368  * This function is only called in "noirq" and "syscore" stages of system power
1369  * transitions. The "noirq" callbacks may be executed asynchronously, thus in
1370  * these cases the lock must be held.
1371  */
1372 static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
1373 				 unsigned int depth)
1374 {
1375 	struct gpd_link *link;
1376 
1377 	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
1378 		return;
1379 
1380 	if (genpd->suspended_count != genpd->device_count
1381 	    || atomic_read(&genpd->sd_count) > 0)
1382 		return;
1383 
1384 	/* Check that the children are in their deepest (powered-off) state. */
1385 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
1386 		struct generic_pm_domain *child = link->child;
1387 		if (child->state_idx < child->state_count - 1)
1388 			return;
1389 	}
1390 
1391 	/* Choose the deepest state when suspending */
1392 	genpd->state_idx = genpd->state_count - 1;
1393 	if (_genpd_power_off(genpd, false)) {
1394 		genpd->states[genpd->state_idx].rejected++;
1395 		return;
1396 	}
1397 
1398 	genpd->states[genpd->state_idx].usage++;
1399 
1400 	genpd->status = GENPD_STATE_OFF;
1401 
1402 	list_for_each_entry(link, &genpd->child_links, child_node) {
1403 		genpd_sd_counter_dec(link->parent);
1404 
1405 		if (use_lock)
1406 			genpd_lock_nested(link->parent, depth + 1);
1407 
1408 		genpd_sync_power_off(link->parent, use_lock, depth + 1);
1409 
1410 		if (use_lock)
1411 			genpd_unlock(link->parent);
1412 	}
1413 }
1414 
1415 /**
1416  * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
1417  * @genpd: PM domain to power on.
1418  * @use_lock: use the lock.
1419  * @depth: nesting count for lockdep.
1420  *
1421  * This function is only called in "noirq" and "syscore" stages of system power
1422  * transitions. The "noirq" callbacks may be executed asynchronously, thus in
1423  * these cases the lock must be held.
1424  */
1425 static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
1426 				unsigned int depth)
1427 {
1428 	struct gpd_link *link;
1429 
1430 	if (genpd_status_on(genpd))
1431 		return;
1432 
1433 	list_for_each_entry(link, &genpd->child_links, child_node) {
1434 		genpd_sd_counter_inc(link->parent);
1435 
1436 		if (use_lock)
1437 			genpd_lock_nested(link->parent, depth + 1);
1438 
1439 		genpd_sync_power_on(link->parent, use_lock, depth + 1);
1440 
1441 		if (use_lock)
1442 			genpd_unlock(link->parent);
1443 	}
1444 
1445 	_genpd_power_on(genpd, false);
1446 	genpd->status = GENPD_STATE_ON;
1447 }
1448 
1449 /**
1450  * genpd_prepare - Start power transition of a device in a PM domain.
1451  * @dev: Device to start the transition of.
1452  *
1453  * Start a power transition of a device (during a system-wide power transition)
1454  * under the assumption that its pm_domain field points to the domain member of
1455  * an object of type struct generic_pm_domain representing a PM domain
1456  * consisting of I/O devices.
1457  */
1458 static int genpd_prepare(struct device *dev)
1459 {
1460 	struct generic_pm_domain *genpd;
1461 	int ret;
1462 
1463 	dev_dbg(dev, "%s()\n", __func__);
1464 
1465 	genpd = dev_to_genpd(dev);
1466 	if (IS_ERR(genpd))
1467 		return -EINVAL;
1468 
1469 	genpd_lock(genpd);
1470 	genpd->prepared_count++;
1471 	genpd_unlock(genpd);
1472 
1473 	ret = pm_generic_prepare(dev);
1474 	if (ret < 0) {
1475 		genpd_lock(genpd);
1476 
1477 		genpd->prepared_count--;
1478 
1479 		genpd_unlock(genpd);
1480 	}
1481 
1482 	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
1483 	return ret >= 0 ? 0 : ret;
1484 }
1485 
1486 /**
1487  * genpd_finish_suspend - Completion of suspend or hibernation of device in an
1488  *   I/O pm domain.
1489  * @dev: Device to suspend.
1490  * @suspend_noirq: Generic suspend_noirq callback.
1491  * @resume_noirq: Generic resume_noirq callback.
1492  *
1493  * Stop the device and remove power from the domain if all devices in it have
1494  * been stopped.
1495  */
1496 static int genpd_finish_suspend(struct device *dev,
1497 				int (*suspend_noirq)(struct device *dev),
1498 				int (*resume_noirq)(struct device *dev))
1499 {
1500 	struct generic_pm_domain *genpd;
1501 	int ret = 0;
1502 
1503 	genpd = dev_to_genpd(dev);
1504 	if (IS_ERR(genpd))
1505 		return -EINVAL;
1506 
1507 	ret = suspend_noirq(dev);
1508 	if (ret)
1509 		return ret;
1510 
1511 	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
1512 		return 0;
1513 
1514 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1515 	    !pm_runtime_status_suspended(dev)) {
1516 		ret = genpd_stop_dev(genpd, dev);
1517 		if (ret) {
1518 			resume_noirq(dev);
1519 			return ret;
1520 		}
1521 	}
1522 
1523 	genpd_lock(genpd);
1524 	genpd->suspended_count++;
1525 	genpd_sync_power_off(genpd, true, 0);
1526 	genpd_unlock(genpd);
1527 
1528 	return 0;
1529 }
1530 
1531 /**
1532  * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
1533  * @dev: Device to suspend.
1534  *
1535  * Stop the device and remove power from the domain if all devices in it have
1536  * been stopped.
1537  */
1538 static int genpd_suspend_noirq(struct device *dev)
1539 {
1540 	dev_dbg(dev, "%s()\n", __func__);
1541 
1542 	return genpd_finish_suspend(dev,
1543 				    pm_generic_suspend_noirq,
1544 				    pm_generic_resume_noirq);
1545 }
1546 
1547 /**
1548  * genpd_finish_resume - Completion of resume of device in an I/O PM domain.
1549  * @dev: Device to resume.
1550  * @resume_noirq: Generic resume_noirq callback.
1551  *
1552  * Restore power to the device's PM domain, if necessary, and start the device.
1553  */
1554 static int genpd_finish_resume(struct device *dev,
1555 			       int (*resume_noirq)(struct device *dev))
1556 {
1557 	struct generic_pm_domain *genpd;
1558 	int ret;
1559 
1560 	dev_dbg(dev, "%s()\n", __func__);
1561 
1562 	genpd = dev_to_genpd(dev);
1563 	if (IS_ERR(genpd))
1564 		return -EINVAL;
1565 
1566 	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
1567 		return resume_noirq(dev);
1568 
1569 	genpd_lock(genpd);
1570 	genpd_sync_power_on(genpd, true, 0);
1571 	genpd->suspended_count--;
1572 	genpd_unlock(genpd);
1573 
1574 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1575 	    !pm_runtime_status_suspended(dev)) {
1576 		ret = genpd_start_dev(genpd, dev);
1577 		if (ret)
1578 			return ret;
1579 	}
1580 
1581 	return resume_noirq(dev);
1582 }
1583 
1584 /**
1585  * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
1586  * @dev: Device to resume.
1587  *
1588  * Restore power to the device's PM domain, if necessary, and start the device.
1589  */
1590 static int genpd_resume_noirq(struct device *dev)
1591 {
1592 	dev_dbg(dev, "%s()\n", __func__);
1593 
1594 	return genpd_finish_resume(dev, pm_generic_resume_noirq);
1595 }
1596 
1597 /**
1598  * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
1599  * @dev: Device to freeze.
1600  *
1601  * Carry out a late freeze of a device under the assumption that its
1602  * pm_domain field points to the domain member of an object of type
1603  * struct generic_pm_domain representing a power domain consisting of I/O
1604  * devices.
1605  */
1606 static int genpd_freeze_noirq(struct device *dev)
1607 {
1608 	dev_dbg(dev, "%s()\n", __func__);
1609 
1610 	return genpd_finish_suspend(dev,
1611 				    pm_generic_freeze_noirq,
1612 				    pm_generic_thaw_noirq);
1613 }
1614 
1615 /**
1616  * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
1617  * @dev: Device to thaw.
1618  *
1619  * Start the device, unless power has been removed from the domain already
1620  * before the system transition.
1621  */
1622 static int genpd_thaw_noirq(struct device *dev)
1623 {
1624 	dev_dbg(dev, "%s()\n", __func__);
1625 
1626 	return genpd_finish_resume(dev, pm_generic_thaw_noirq);
1627 }
1628 
1629 /**
1630  * genpd_poweroff_noirq - Completion of hibernation of device in an
1631  *   I/O PM domain.
1632  * @dev: Device to poweroff.
1633  *
1634  * Stop the device and remove power from the domain if all devices in it have
1635  * been stopped.
1636  */
1637 static int genpd_poweroff_noirq(struct device *dev)
1638 {
1639 	dev_dbg(dev, "%s()\n", __func__);
1640 
1641 	return genpd_finish_suspend(dev,
1642 				    pm_generic_poweroff_noirq,
1643 				    pm_generic_restore_noirq);
1644 }
1645 
1646 /**
1647  * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
1648  * @dev: Device to resume.
1649  *
1650  * Make sure the domain will be in the same power state as before the
1651  * hibernation the system is resuming from and start the device if necessary.
1652  */
1653 static int genpd_restore_noirq(struct device *dev)
1654 {
1655 	dev_dbg(dev, "%s()\n", __func__);
1656 
1657 	return genpd_finish_resume(dev, pm_generic_restore_noirq);
1658 }
1659 
1660 /**
1661  * genpd_complete - Complete power transition of a device in a power domain.
1662  * @dev: Device to complete the transition of.
1663  *
1664  * Complete a power transition of a device (during a system-wide power
1665  * transition) under the assumption that its pm_domain field points to the
1666  * domain member of an object of type struct generic_pm_domain representing
1667  * a power domain consisting of I/O devices.
1668  */
1669 static void genpd_complete(struct device *dev)
1670 {
1671 	struct generic_pm_domain *genpd;
1672 
1673 	dev_dbg(dev, "%s()\n", __func__);
1674 
1675 	genpd = dev_to_genpd(dev);
1676 	if (IS_ERR(genpd))
1677 		return;
1678 
1679 	pm_generic_complete(dev);
1680 
1681 	genpd_lock(genpd);
1682 
1683 	genpd->prepared_count--;
1684 	if (!genpd->prepared_count)
1685 		genpd_queue_power_off_work(genpd);
1686 
1687 	genpd_unlock(genpd);
1688 }
1689 
1690 static void genpd_switch_state(struct device *dev, bool suspend)
1691 {
1692 	struct generic_pm_domain *genpd;
1693 	bool use_lock;
1694 
1695 	genpd = dev_to_genpd_safe(dev);
1696 	if (!genpd)
1697 		return;
1698 
1699 	use_lock = genpd_is_irq_safe(genpd);
1700 
1701 	if (use_lock)
1702 		genpd_lock(genpd);
1703 
1704 	if (suspend) {
1705 		genpd->suspended_count++;
1706 		genpd_sync_power_off(genpd, use_lock, 0);
1707 	} else {
1708 		genpd_sync_power_on(genpd, use_lock, 0);
1709 		genpd->suspended_count--;
1710 	}
1711 
1712 	if (use_lock)
1713 		genpd_unlock(genpd);
1714 }
1715 
1716 /**
1717  * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
1718  * @dev: The device that is attached to the genpd, that can be suspended.
1719  *
1720  * This routine should typically be called for a device that needs to be
1721  * suspended during the syscore suspend phase. It may also be called during
1722  * suspend-to-idle to suspend a corresponding CPU device that is attached to a
1723  * genpd.
1724  */
1725 void dev_pm_genpd_suspend(struct device *dev)
1726 {
1727 	genpd_switch_state(dev, true);
1728 }
1729 EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);
1730 
1731 /**
1732  * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
1733  * @dev: The device that is attached to the genpd, which needs to be resumed.
1734  *
1735  * This routine should typically be called for a device that needs to be resumed
1736  * during the syscore resume phase. It may also be called during suspend-to-idle
1737  * to resume a corresponding CPU device that is attached to a genpd.
1738  */
1739 void dev_pm_genpd_resume(struct device *dev)
1740 {
1741 	genpd_switch_state(dev, false);
1742 }
1743 EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);
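/*
 * Illustrative pairing (hypothetical syscore user; foo_dev is a made-up
 * device pointer): the two helpers above are typically called symmetrically
 * around a low-power transition:
 *
 *	static int foo_syscore_suspend(void)
 *	{
 *		dev_pm_genpd_suspend(foo_dev);
 *		return 0;
 *	}
 *
 *	static void foo_syscore_resume(void)
 *	{
 *		dev_pm_genpd_resume(foo_dev);
 *	}
 */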
1744 
1745 #else /* !CONFIG_PM_SLEEP */
1746 
1747 #define genpd_prepare		NULL
1748 #define genpd_suspend_noirq	NULL
1749 #define genpd_resume_noirq	NULL
1750 #define genpd_freeze_noirq	NULL
1751 #define genpd_thaw_noirq	NULL
1752 #define genpd_poweroff_noirq	NULL
1753 #define genpd_restore_noirq	NULL
1754 #define genpd_complete		NULL
1755 
1756 #endif /* CONFIG_PM_SLEEP */
1757 
1758 static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
1759 							   bool has_governor)
1760 {
1761 	struct generic_pm_domain_data *gpd_data;
1762 	struct gpd_timing_data *td;
1763 	int ret;
1764 
1765 	ret = dev_pm_get_subsys_data(dev);
1766 	if (ret)
1767 		return ERR_PTR(ret);
1768 
1769 	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1770 	if (!gpd_data) {
1771 		ret = -ENOMEM;
1772 		goto err_put;
1773 	}
1774 
1775 	gpd_data->base.dev = dev;
1776 	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1777 
1778 	/* Allocate data used by a governor. */
1779 	if (has_governor) {
1780 		td = kzalloc(sizeof(*td), GFP_KERNEL);
1781 		if (!td) {
1782 			ret = -ENOMEM;
1783 			goto err_free;
1784 		}
1785 
1786 		td->constraint_changed = true;
1787 		td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
1788 		td->next_wakeup = KTIME_MAX;
1789 		gpd_data->td = td;
1790 	}
1791 
1792 	spin_lock_irq(&dev->power.lock);
1793 
1794 	if (dev->power.subsys_data->domain_data)
1795 		ret = -EINVAL;
1796 	else
1797 		dev->power.subsys_data->domain_data = &gpd_data->base;
1798 
1799 	spin_unlock_irq(&dev->power.lock);
1800 
1801 	if (ret)
1802 		goto err_free;
1803 
1804 	return gpd_data;
1805 
1806  err_free:
1807 	kfree(gpd_data->td);
1808 	kfree(gpd_data);
1809  err_put:
1810 	dev_pm_put_subsys_data(dev);
1811 	return ERR_PTR(ret);
1812 }
1813 
1814 static void genpd_free_dev_data(struct device *dev,
1815 				struct generic_pm_domain_data *gpd_data)
1816 {
1817 	spin_lock_irq(&dev->power.lock);
1818 
1819 	dev->power.subsys_data->domain_data = NULL;
1820 
1821 	spin_unlock_irq(&dev->power.lock);
1822 
1823 	dev_pm_opp_clear_config(gpd_data->opp_token);
1824 	kfree(gpd_data->td);
1825 	kfree(gpd_data);
1826 	dev_pm_put_subsys_data(dev);
1827 }
1828 
1829 static void genpd_update_cpumask(struct generic_pm_domain *genpd,
1830 				 int cpu, bool set, unsigned int depth)
1831 {
1832 	struct gpd_link *link;
1833 
1834 	if (!genpd_is_cpu_domain(genpd))
1835 		return;
1836 
1837 	list_for_each_entry(link, &genpd->child_links, child_node) {
1838 		struct generic_pm_domain *parent = link->parent;
1839 
1840 		genpd_lock_nested(parent, depth + 1);
1841 		genpd_update_cpumask(parent, cpu, set, depth + 1);
1842 		genpd_unlock(parent);
1843 	}
1844 
1845 	if (set)
1846 		cpumask_set_cpu(cpu, genpd->cpus);
1847 	else
1848 		cpumask_clear_cpu(cpu, genpd->cpus);
1849 }
1850 
1851 static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
1852 {
1853 	if (cpu >= 0)
1854 		genpd_update_cpumask(genpd, cpu, true, 0);
1855 }
1856 
1857 static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
1858 {
1859 	if (cpu >= 0)
1860 		genpd_update_cpumask(genpd, cpu, false, 0);
1861 }
1862 
1863 static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
1864 {
1865 	int cpu;
1866 
1867 	if (!genpd_is_cpu_domain(genpd))
1868 		return -1;
1869 
1870 	for_each_possible_cpu(cpu) {
1871 		if (get_cpu_device(cpu) == dev)
1872 			return cpu;
1873 	}
1874 
1875 	return -1;
1876 }
1877 
1878 static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1879 			    struct device *base_dev)
1880 {
1881 	struct genpd_governor_data *gd = genpd->gd;
1882 	struct generic_pm_domain_data *gpd_data;
1883 	int ret;
1884 
1885 	dev_dbg(dev, "%s()\n", __func__);
1886 
1887 	gpd_data = genpd_alloc_dev_data(dev, gd);
1888 	if (IS_ERR(gpd_data))
1889 		return PTR_ERR(gpd_data);
1890 
1891 	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);
1892 
1893 	gpd_data->hw_mode = genpd->get_hwmode_dev ? genpd->get_hwmode_dev(genpd, dev) : false;
1894 
1895 	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1896 	if (ret)
1897 		goto out;
1898 
1899 	genpd_lock(genpd);
1900 
1901 	genpd_set_cpumask(genpd, gpd_data->cpu);
1902 
1903 	genpd->device_count++;
1904 	if (gd)
1905 		gd->max_off_time_changed = true;
1906 
1907 	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1908 
1909 	genpd_unlock(genpd);
1910 	dev_pm_domain_set(dev, &genpd->domain);
1911  out:
1912 	if (ret)
1913 		genpd_free_dev_data(dev, gpd_data);
1914 	else
1915 		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
1916 					DEV_PM_QOS_RESUME_LATENCY);
1917 
1918 	return ret;
1919 }
1920 
1921 /**
1922  * pm_genpd_add_device - Add a device to an I/O PM domain.
1923  * @genpd: PM domain to add the device to.
1924  * @dev: Device to be added.
1925  */
1926 int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
1927 {
1928 	int ret;
1929 
1930 	if (!genpd || !dev)
1931 		return -EINVAL;
1932 
1933 	mutex_lock(&gpd_list_lock);
1934 	ret = genpd_add_device(genpd, dev, dev);
1935 	mutex_unlock(&gpd_list_lock);
1936 
1937 	return ret;
1938 }
1939 EXPORT_SYMBOL_GPL(pm_genpd_add_device);
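
/*
 * Example (minimal sketch, hypothetical names): a driver that manages its own
 * PM domain object can add and remove devices directly:
 *
 *	ret = pm_genpd_add_device(&foo_pd, &pdev->dev);
 *	if (ret)
 *		return ret;
 *
 *	// and on the remove path:
 *	// pm_genpd_remove_device(&pdev->dev);
 */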
1940 
1941 static int genpd_remove_device(struct generic_pm_domain *genpd,
1942 			       struct device *dev)
1943 {
1944 	struct generic_pm_domain_data *gpd_data;
1945 	struct pm_domain_data *pdd;
1946 	int ret = 0;
1947 
1948 	dev_dbg(dev, "%s()\n", __func__);
1949 
1950 	pdd = dev->power.subsys_data->domain_data;
1951 	gpd_data = to_gpd_data(pdd);
1952 	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
1953 				   DEV_PM_QOS_RESUME_LATENCY);
1954 
1955 	genpd_lock(genpd);
1956 
1957 	if (genpd->prepared_count > 0) {
1958 		ret = -EAGAIN;
1959 		goto out;
1960 	}
1961 
1962 	genpd->device_count--;
1963 	if (genpd->gd)
1964 		genpd->gd->max_off_time_changed = true;
1965 
1966 	genpd_clear_cpumask(genpd, gpd_data->cpu);
1967 
1968 	list_del_init(&pdd->list_node);
1969 
1970 	genpd_unlock(genpd);
1971 
1972 	dev_pm_domain_set(dev, NULL);
1973 
1974 	if (genpd->detach_dev)
1975 		genpd->detach_dev(genpd, dev);
1976 
1977 	genpd_free_dev_data(dev, gpd_data);
1978 
1979 	return 0;
1980 
1981  out:
1982 	genpd_unlock(genpd);
1983 	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);
1984 
1985 	return ret;
1986 }
1987 
1988 /**
1989  * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1990  * @dev: Device to be removed.
1991  */
1992 int pm_genpd_remove_device(struct device *dev)
1993 {
1994 	struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);
1995 
1996 	if (!genpd)
1997 		return -EINVAL;
1998 
1999 	return genpd_remove_device(genpd, dev);
2000 }
2001 EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
2002 
2003 /**
2004  * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
2005  *
2006  * @dev: Device that should be associated with the notifier
2007  * @nb: The notifier block to register
2008  *
2009  * Users may call this function to add a genpd power on/off notifier for an
2010  * attached @dev. Only one notifier per device is allowed. The notifier is
2011  * sent when genpd is powering on/off the PM domain.
2012  *
2013  * It is assumed that the user guarantees that the genpd won't be detached
2014  * while this routine is being called.
2015  *
2016  * Returns 0 on success and negative error values on failures.
2017  */
2018 int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
2019 {
2020 	struct generic_pm_domain *genpd;
2021 	struct generic_pm_domain_data *gpd_data;
2022 	int ret;
2023 
2024 	genpd = dev_to_genpd_safe(dev);
2025 	if (!genpd)
2026 		return -ENODEV;
2027 
2028 	if (WARN_ON(!dev->power.subsys_data ||
2029 		     !dev->power.subsys_data->domain_data))
2030 		return -EINVAL;
2031 
2032 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
2033 	if (gpd_data->power_nb)
2034 		return -EEXIST;
2035 
2036 	genpd_lock(genpd);
2037 	ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
2038 	genpd_unlock(genpd);
2039 
2040 	if (ret) {
2041 		dev_warn(dev, "failed to add notifier for PM domain %s\n",
2042 			 dev_name(&genpd->dev));
2043 		return ret;
2044 	}
2045 
2046 	gpd_data->power_nb = nb;
2047 	return 0;
2048 }
2049 EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
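
/*
 * Example (illustrative sketch): a consumer that must save/restore context
 * around power transitions of its PM domain. The foo_* helpers are
 * hypothetical.
 *
 *	static int foo_genpd_notifier(struct notifier_block *nb,
 *				      unsigned long action, void *data)
 *	{
 *		switch (action) {
 *		case GENPD_NOTIFY_PRE_OFF:
 *			foo_save_context();
 *			break;
 *		case GENPD_NOTIFY_ON:
 *			foo_restore_context();
 *			break;
 *		default:
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	// During probe, once the device is attached to its genpd:
 *	// foo->nb.notifier_call = foo_genpd_notifier;
 *	// ret = dev_pm_genpd_add_notifier(dev, &foo->nb);
 */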
2050 
2051 /**
2052  * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
2053  *
2054  * @dev: Device that is associated with the notifier
2055  *
2056  * Users may call this function to remove a genpd power on/off notifier for an
2057  * attached @dev.
2058  *
2059  * It is assumed that the user guarantees that the genpd won't be detached
2060  * while this routine is being called.
2061  *
2062  * Returns 0 on success and negative error values on failures.
2063  */
2064 int dev_pm_genpd_remove_notifier(struct device *dev)
2065 {
2066 	struct generic_pm_domain *genpd;
2067 	struct generic_pm_domain_data *gpd_data;
2068 	int ret;
2069 
2070 	genpd = dev_to_genpd_safe(dev);
2071 	if (!genpd)
2072 		return -ENODEV;
2073 
2074 	if (WARN_ON(!dev->power.subsys_data ||
2075 		     !dev->power.subsys_data->domain_data))
2076 		return -EINVAL;
2077 
2078 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
2079 	if (!gpd_data->power_nb)
2080 		return -ENODEV;
2081 
2082 	genpd_lock(genpd);
2083 	ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
2084 					    gpd_data->power_nb);
2085 	genpd_unlock(genpd);
2086 
2087 	if (ret) {
2088 		dev_warn(dev, "failed to remove notifier for PM domain %s\n",
2089 			 dev_name(&genpd->dev));
2090 		return ret;
2091 	}
2092 
2093 	gpd_data->power_nb = NULL;
2094 	return 0;
2095 }
2096 EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);
2097 
2098 static int genpd_add_subdomain(struct generic_pm_domain *genpd,
2099 			       struct generic_pm_domain *subdomain)
2100 {
2101 	struct gpd_link *link, *itr;
2102 	int ret = 0;
2103 
2104 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
2105 	    || genpd == subdomain)
2106 		return -EINVAL;
2107 
2108 	/*
2109 	 * If the domain can be powered on/off in an IRQ safe
2110 	 * context, ensure that the subdomain can also be
2111 	 * powered on/off in that context.
2112 	 */
2113 	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
2114 		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
2115 		     dev_name(&genpd->dev), subdomain->name);
2116 		return -EINVAL;
2117 	}
2118 
2119 	link = kzalloc(sizeof(*link), GFP_KERNEL);
2120 	if (!link)
2121 		return -ENOMEM;
2122 
2123 	genpd_lock(subdomain);
2124 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
2125 
2126 	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
2127 		ret = -EINVAL;
2128 		goto out;
2129 	}
2130 
2131 	list_for_each_entry(itr, &genpd->parent_links, parent_node) {
2132 		if (itr->child == subdomain && itr->parent == genpd) {
2133 			ret = -EINVAL;
2134 			goto out;
2135 		}
2136 	}
2137 
2138 	link->parent = genpd;
2139 	list_add_tail(&link->parent_node, &genpd->parent_links);
2140 	link->child = subdomain;
2141 	list_add_tail(&link->child_node, &subdomain->child_links);
2142 	if (genpd_status_on(subdomain))
2143 		genpd_sd_counter_inc(genpd);
2144 
2145  out:
2146 	genpd_unlock(genpd);
2147 	genpd_unlock(subdomain);
2148 	if (ret)
2149 		kfree(link);
2150 	return ret;
2151 }
2152 
2153 /**
2154  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2155  * @genpd: Leader PM domain to add the subdomain to.
2156  * @subdomain: Subdomain to be added.
2157  */
2158 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
2159 			   struct generic_pm_domain *subdomain)
2160 {
2161 	int ret;
2162 
2163 	mutex_lock(&gpd_list_lock);
2164 	ret = genpd_add_subdomain(genpd, subdomain);
2165 	mutex_unlock(&gpd_list_lock);
2166 
2167 	return ret;
2168 }
2169 EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
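
/*
 * Example (minimal sketch, hypothetical domains): making a "gpu" domain a
 * subdomain of a shared "core" domain, so the parent is kept powered while
 * the subdomain is on:
 *
 *	ret = pm_genpd_add_subdomain(&core_pd, &gpu_pd);
 *	if (ret)
 *		return ret;
 *
 *	// undone later with:
 *	// pm_genpd_remove_subdomain(&core_pd, &gpu_pd);
 */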
2170 
2171 /**
2172  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
2173  * @genpd: Leader PM domain to remove the subdomain from.
2174  * @subdomain: Subdomain to be removed.
2175  */
2176 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
2177 			      struct generic_pm_domain *subdomain)
2178 {
2179 	struct gpd_link *l, *link;
2180 	int ret = -EINVAL;
2181 
2182 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
2183 		return -EINVAL;
2184 
2185 	genpd_lock(subdomain);
2186 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
2187 
2188 	if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
2189 		pr_warn("%s: unable to remove subdomain %s\n",
2190 			dev_name(&genpd->dev), subdomain->name);
2191 		ret = -EBUSY;
2192 		goto out;
2193 	}
2194 
2195 	list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
2196 		if (link->child != subdomain)
2197 			continue;
2198 
2199 		list_del(&link->parent_node);
2200 		list_del(&link->child_node);
2201 		kfree(link);
2202 		if (genpd_status_on(subdomain))
2203 			genpd_sd_counter_dec(genpd);
2204 
2205 		ret = 0;
2206 		break;
2207 	}
2208 
2209 out:
2210 	genpd_unlock(genpd);
2211 	genpd_unlock(subdomain);
2212 
2213 	return ret;
2214 }
2215 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
2216 
2217 static void genpd_free_default_power_state(struct genpd_power_state *states,
2218 					   unsigned int state_count)
2219 {
2220 	kfree(states);
2221 }
2222 
2223 static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
2224 {
2225 	struct genpd_power_state *state;
2226 
2227 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2228 	if (!state)
2229 		return -ENOMEM;
2230 
2231 	genpd->states = state;
2232 	genpd->state_count = 1;
2233 	genpd->free_states = genpd_free_default_power_state;
2234 
2235 	return 0;
2236 }
2237 
2238 static void genpd_provider_release(struct device *dev)
2239 {
2240 	/* nothing to be done here */
2241 }
2242 
2243 static int genpd_alloc_data(struct generic_pm_domain *genpd)
2244 {
2245 	struct genpd_governor_data *gd = NULL;
2246 	int ret;
2247 
2248 	if (genpd_is_cpu_domain(genpd) &&
2249 	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
2250 		return -ENOMEM;
2251 
2252 	if (genpd->gov) {
2253 		gd = kzalloc(sizeof(*gd), GFP_KERNEL);
2254 		if (!gd) {
2255 			ret = -ENOMEM;
2256 			goto free;
2257 		}
2258 
2259 		gd->max_off_time_ns = -1;
2260 		gd->max_off_time_changed = true;
2261 		gd->next_wakeup = KTIME_MAX;
2262 		gd->next_hrtimer = KTIME_MAX;
2263 	}
2264 
2265 	/* Use only one "off" state if there were no states declared */
2266 	if (genpd->state_count == 0) {
2267 		ret = genpd_set_default_power_state(genpd);
2268 		if (ret)
2269 			goto free;
2270 	}
2271 
2272 	genpd->gd = gd;
2273 	device_initialize(&genpd->dev);
2274 	genpd->dev.release = genpd_provider_release;
2275 
2276 	if (!genpd_is_dev_name_fw(genpd)) {
2277 		dev_set_name(&genpd->dev, "%s", genpd->name);
2278 	} else {
2279 		ret = ida_alloc(&genpd_ida, GFP_KERNEL);
2280 		if (ret < 0)
2281 			goto put;
2282 
2283 		genpd->device_id = ret;
2284 		dev_set_name(&genpd->dev, "%s_%u", genpd->name, genpd->device_id);
2285 	}
2286 
2287 	return 0;
2288 put:
2289 	put_device(&genpd->dev);
2290 	if (genpd->free_states == genpd_free_default_power_state) {
2291 		kfree(genpd->states);
2292 		genpd->states = NULL;
2293 	}
2294 free:
2295 	if (genpd_is_cpu_domain(genpd))
2296 		free_cpumask_var(genpd->cpus);
2297 	kfree(gd);
2298 	return ret;
2299 }
2300 
2301 static void genpd_free_data(struct generic_pm_domain *genpd)
2302 {
2303 	put_device(&genpd->dev);
2304 	if (genpd->device_id != -ENXIO)
2305 		ida_free(&genpd_ida, genpd->device_id);
2306 	if (genpd_is_cpu_domain(genpd))
2307 		free_cpumask_var(genpd->cpus);
2308 	if (genpd->free_states)
2309 		genpd->free_states(genpd->states, genpd->state_count);
2310 	kfree(genpd->gd);
2311 }
2312 
2313 static void genpd_lock_init(struct generic_pm_domain *genpd)
2314 {
2315 	if (genpd_is_cpu_domain(genpd)) {
2316 		raw_spin_lock_init(&genpd->raw_slock);
2317 		genpd->lock_ops = &genpd_raw_spin_ops;
2318 	} else if (genpd_is_irq_safe(genpd)) {
2319 		spin_lock_init(&genpd->slock);
2320 		genpd->lock_ops = &genpd_spin_ops;
2321 	} else {
2322 		mutex_init(&genpd->mlock);
2323 		genpd->lock_ops = &genpd_mtx_ops;
2324 	}
2325 }
2326 
2327 /**
2328  * pm_genpd_init - Initialize a generic I/O PM domain object.
2329  * @genpd: PM domain object to initialize.
2330  * @gov: PM domain governor to associate with the domain (may be NULL).
2331  * @is_off: Initial value of the domain's power_is_off field.
2332  *
2333  * Returns 0 on successful initialization, else a negative error code.
2334  */
2335 int pm_genpd_init(struct generic_pm_domain *genpd,
2336 		  struct dev_power_governor *gov, bool is_off)
2337 {
2338 	int ret;
2339 
2340 	if (IS_ERR_OR_NULL(genpd))
2341 		return -EINVAL;
2342 
2343 	INIT_LIST_HEAD(&genpd->parent_links);
2344 	INIT_LIST_HEAD(&genpd->child_links);
2345 	INIT_LIST_HEAD(&genpd->dev_list);
2346 	RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
2347 	genpd_lock_init(genpd);
2348 	genpd->gov = gov;
2349 	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
2350 	atomic_set(&genpd->sd_count, 0);
2351 	genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
2352 	genpd->device_count = 0;
2353 	genpd->provider = NULL;
2354 	genpd->device_id = -ENXIO;
2355 	genpd->has_provider = false;
2356 	genpd->accounting_time = ktime_get_mono_fast_ns();
2357 	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
2358 	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
2359 	genpd->domain.ops.prepare = genpd_prepare;
2360 	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
2361 	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
2362 	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
2363 	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
2364 	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
2365 	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
2366 	genpd->domain.ops.complete = genpd_complete;
2367 	genpd->domain.start = genpd_dev_pm_start;
2368 	genpd->domain.set_performance_state = genpd_dev_pm_set_performance_state;
2369 
2370 	if (genpd->flags & GENPD_FLAG_PM_CLK) {
2371 		genpd->dev_ops.stop = pm_clk_suspend;
2372 		genpd->dev_ops.start = pm_clk_resume;
2373 	}
2374 
2375 	/* The always-on governor works better with the corresponding flag. */
2376 	if (gov == &pm_domain_always_on_gov)
2377 		genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
2378 
2379 	/* Always-on domains must be powered on at initialization. */
2380 	if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
2381 			!genpd_status_on(genpd)) {
2382 		pr_err("always-on PM domain %s is not on\n", genpd->name);
2383 		return -EINVAL;
2384 	}
2385 
2386 	/* Multiple states but no governor doesn't make sense. */
2387 	if (!gov && genpd->state_count > 1)
2388 		pr_warn("%s: no governor for states\n", genpd->name);
2389 
2390 	ret = genpd_alloc_data(genpd);
2391 	if (ret)
2392 		return ret;
2393 
2394 	mutex_lock(&gpd_list_lock);
2395 	list_add(&genpd->gpd_list_node, &gpd_list);
2396 	mutex_unlock(&gpd_list_lock);
2397 	genpd_debug_add(genpd);
2398 
2399 	return 0;
2400 }
2401 EXPORT_SYMBOL_GPL(pm_genpd_init);
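
/*
 * Example (illustrative sketch): a provider driver typically defines a
 * generic_pm_domain, fills in its power on/off callbacks and registers it
 * with pm_genpd_init(). The foo_* names and register pokes are hypothetical.
 *
 *	static int foo_pd_power_on(struct generic_pm_domain *pd)
 *	{
 *		// flip the hardware power switch on
 *		return 0;
 *	}
 *
 *	static int foo_pd_power_off(struct generic_pm_domain *pd)
 *	{
 *		// flip the hardware power switch off
 *		return 0;
 *	}
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "foo-pd",
 *		.power_on = foo_pd_power_on,
 *		.power_off = foo_pd_power_off,
 *	};
 *
 *	// register the domain, starting in the powered-off state:
 *	// ret = pm_genpd_init(&foo_pd, NULL, true);
 */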
2402 
2403 static int genpd_remove(struct generic_pm_domain *genpd)
2404 {
2405 	struct gpd_link *l, *link;
2406 
2407 	if (IS_ERR_OR_NULL(genpd))
2408 		return -EINVAL;
2409 
2410 	genpd_lock(genpd);
2411 
2412 	if (genpd->has_provider) {
2413 		genpd_unlock(genpd);
2414 		pr_err("Provider present, unable to remove %s\n", dev_name(&genpd->dev));
2415 		return -EBUSY;
2416 	}
2417 
2418 	if (!list_empty(&genpd->parent_links) || genpd->device_count) {
2419 		genpd_unlock(genpd);
2420 		pr_err("%s: unable to remove %s\n", __func__, dev_name(&genpd->dev));
2421 		return -EBUSY;
2422 	}
2423 
2424 	list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
2425 		list_del(&link->parent_node);
2426 		list_del(&link->child_node);
2427 		kfree(link);
2428 	}
2429 
2430 	list_del(&genpd->gpd_list_node);
2431 	genpd_unlock(genpd);
2432 	genpd_debug_remove(genpd);
2433 	cancel_work_sync(&genpd->power_off_work);
2434 	genpd_free_data(genpd);
2435 
2436 	pr_debug("%s: removed %s\n", __func__, dev_name(&genpd->dev));
2437 
2438 	return 0;
2439 }
2440 
2441 /**
2442  * pm_genpd_remove - Remove a generic I/O PM domain
2443  * @genpd: Pointer to PM domain that is to be removed.
2444  *
2445  * To remove the PM domain, this function:
2446  *  - Removes the PM domain as a subdomain to any parent domains,
2447  *    if it was added.
2448  *  - Removes the PM domain from the list of registered PM domains.
2449  *
2450  * The PM domain will only be removed if the associated provider has
2451  * been removed, it is not a parent to any other PM domain, and it has
2452  * no devices associated with it.
2453  */
2454 int pm_genpd_remove(struct generic_pm_domain *genpd)
2455 {
2456 	int ret;
2457 
2458 	mutex_lock(&gpd_list_lock);
2459 	ret = genpd_remove(genpd);
2460 	mutex_unlock(&gpd_list_lock);
2461 
2462 	return ret;
2463 }
2464 EXPORT_SYMBOL_GPL(pm_genpd_remove);
2465 
2466 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
2467 
2468 /*
2469  * Device Tree based PM domain providers.
2470  *
2471  * The code below implements generic device tree based PM domain providers that
2472  * bind device tree nodes with generic PM domains registered in the system.
2473  *
2474  * Any driver that registers generic PM domains and needs to support binding of
2475  * devices to these domains is supposed to register a PM domain provider, which
2476  * maps a PM domain specifier retrieved from the device tree to a PM domain.
2477  *
2478  * Two simple mapping functions have been provided for convenience:
2479  *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
2480  *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
2481  *    index.
2482  */
2483 
2484 /**
2485  * struct of_genpd_provider - PM domain provider registration structure
2486  * @link: Entry in global list of PM domain providers
2487  * @node: Pointer to device tree node of PM domain provider
2488  * @xlate: Provider-specific xlate callback mapping a set of specifier cells
2489  *         into a PM domain.
2490  * @data: context pointer to be passed into @xlate callback
2491  */
2492 struct of_genpd_provider {
2493 	struct list_head link;
2494 	struct device_node *node;
2495 	genpd_xlate_t xlate;
2496 	void *data;
2497 };
2498 
2499 /* List of registered PM domain providers. */
2500 static LIST_HEAD(of_genpd_providers);
2501 /* Mutex to protect the list above. */
2502 static DEFINE_MUTEX(of_genpd_mutex);
2503 
2504 /**
2505  * genpd_xlate_simple() - Xlate function for direct node-domain mapping
2506  * @genpdspec: OF phandle args to map into a PM domain
2507  * @data: xlate function private data - pointer to struct generic_pm_domain
2508  *
2509  * This is a generic xlate function that can be used to model PM domains that
2510  * have their own device tree nodes. The private data of the xlate function
2511  * needs to be a valid pointer to struct generic_pm_domain.
2512  */
2513 static struct generic_pm_domain *genpd_xlate_simple(
2514 					const struct of_phandle_args *genpdspec,
2515 					void *data)
2516 {
2517 	return data;
2518 }
2519 
2520 /**
2521  * genpd_xlate_onecell() - Xlate function using a single index.
2522  * @genpdspec: OF phandle args to map into a PM domain
2523  * @data: xlate function private data - pointer to struct genpd_onecell_data
2524  *
2525  * This is a generic xlate function that can be used to model simple PM domain
2526  * controllers that have one device tree node and provide multiple PM domains.
2527  * A single cell is used as an index into an array of PM domains specified in
2528  * the genpd_onecell_data struct when registering the provider.
2529  */
2530 static struct generic_pm_domain *genpd_xlate_onecell(
2531 					const struct of_phandle_args *genpdspec,
2532 					void *data)
2533 {
2534 	struct genpd_onecell_data *genpd_data = data;
2535 	unsigned int idx = genpdspec->args[0];
2536 
2537 	if (genpdspec->args_count != 1)
2538 		return ERR_PTR(-EINVAL);
2539 
2540 	if (idx >= genpd_data->num_domains) {
2541 		pr_err("%s: invalid domain index %u\n", __func__, idx);
2542 		return ERR_PTR(-EINVAL);
2543 	}
2544 
2545 	if (!genpd_data->domains[idx])
2546 		return ERR_PTR(-ENOENT);
2547 
2548 	return genpd_data->domains[idx];
2549 }
2550 
2551 /**
2552  * genpd_add_provider() - Register a PM domain provider for a node
2553  * @np: Device node pointer associated with the PM domain provider.
2554  * @xlate: Callback for decoding PM domain from phandle arguments.
2555  * @data: Context pointer for @xlate callback.
2556  */
2557 static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
2558 			      void *data)
2559 {
2560 	struct of_genpd_provider *cp;
2561 
2562 	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2563 	if (!cp)
2564 		return -ENOMEM;
2565 
2566 	cp->node = of_node_get(np);
2567 	cp->data = data;
2568 	cp->xlate = xlate;
2569 	fwnode_dev_initialized(&np->fwnode, true);
2570 
2571 	mutex_lock(&of_genpd_mutex);
2572 	list_add(&cp->link, &of_genpd_providers);
2573 	mutex_unlock(&of_genpd_mutex);
2574 	pr_debug("Added domain provider from %pOF\n", np);
2575 
2576 	return 0;
2577 }
2578 
2579 static bool genpd_present(const struct generic_pm_domain *genpd)
2580 {
2581 	bool ret = false;
2582 	const struct generic_pm_domain *gpd;
2583 
2584 	mutex_lock(&gpd_list_lock);
2585 	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2586 		if (gpd == genpd) {
2587 			ret = true;
2588 			break;
2589 		}
2590 	}
2591 	mutex_unlock(&gpd_list_lock);
2592 
2593 	return ret;
2594 }
2595 
2596 /**
2597  * of_genpd_add_provider_simple() - Register a simple PM domain provider
2598  * @np: Device node pointer associated with the PM domain provider.
2599  * @genpd: Pointer to PM domain associated with the PM domain provider.
2600  */
2601 int of_genpd_add_provider_simple(struct device_node *np,
2602 				 struct generic_pm_domain *genpd)
2603 {
2604 	int ret;
2605 
2606 	if (!np || !genpd)
2607 		return -EINVAL;
2608 
2609 	if (!genpd_present(genpd))
2610 		return -EINVAL;
2611 
2612 	genpd->dev.of_node = np;
2613 
2614 	/* Parse genpd OPP table */
2615 	if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
2616 		ret = dev_pm_opp_of_add_table(&genpd->dev);
2617 		if (ret)
2618 			return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n");
2619 
2620 		/*
2621 		 * Save table for faster processing while setting performance
2622 		 * state.
2623 		 */
2624 		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2625 		WARN_ON(IS_ERR(genpd->opp_table));
2626 	}
2627 
2628 	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
2629 	if (ret) {
2630 		if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
2631 			dev_pm_opp_put_opp_table(genpd->opp_table);
2632 			dev_pm_opp_of_remove_table(&genpd->dev);
2633 		}
2634 
2635 		return ret;
2636 	}
2637 
2638 	genpd->provider = &np->fwnode;
2639 	genpd->has_provider = true;
2640 
2641 	return 0;
2642 }
2643 EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
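
/*
 * Example (minimal sketch): a probe path registering a single domain as a
 * 1:1 provider for its own DT node (hypothetical foo_pd from a sketch above):
 *
 *	ret = pm_genpd_init(&foo_pd, NULL, true);
 *	if (ret)
 *		return ret;
 *
 *	ret = of_genpd_add_provider_simple(pdev->dev.of_node, &foo_pd);
 *	if (ret)
 *		pm_genpd_remove(&foo_pd);
 *	return ret;
 */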
2644 
2645 /**
2646  * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
2647  * @np: Device node pointer associated with the PM domain provider.
2648  * @data: Pointer to the data associated with the PM domain provider.
2649  */
2650 int of_genpd_add_provider_onecell(struct device_node *np,
2651 				  struct genpd_onecell_data *data)
2652 {
2653 	struct generic_pm_domain *genpd;
2654 	unsigned int i;
2655 	int ret = -EINVAL;
2656 
2657 	if (!np || !data)
2658 		return -EINVAL;
2659 
2660 	if (!data->xlate)
2661 		data->xlate = genpd_xlate_onecell;
2662 
2663 	for (i = 0; i < data->num_domains; i++) {
2664 		genpd = data->domains[i];
2665 
2666 		if (!genpd)
2667 			continue;
2668 		if (!genpd_present(genpd))
2669 			goto error;
2670 
2671 		genpd->dev.of_node = np;
2672 
2673 		/* Parse genpd OPP table */
2674 		if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
2675 			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
2676 			if (ret) {
2677 				dev_err_probe(&genpd->dev, ret,
2678 					      "Failed to add OPP table for index %d\n", i);
2679 				goto error;
2680 			}
2681 
2682 			/*
2683 			 * Save table for faster processing while setting
2684 			 * performance state.
2685 			 */
2686 			genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2687 			WARN_ON(IS_ERR(genpd->opp_table));
2688 		}
2689 
2690 		genpd->provider = &np->fwnode;
2691 		genpd->has_provider = true;
2692 	}
2693 
2694 	ret = genpd_add_provider(np, data->xlate, data);
2695 	if (ret < 0)
2696 		goto error;
2697 
2698 	return 0;
2699 
2700 error:
2701 	while (i--) {
2702 		genpd = data->domains[i];
2703 
2704 		if (!genpd)
2705 			continue;
2706 
2707 		genpd->provider = NULL;
2708 		genpd->has_provider = false;
2709 
2710 		if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
2711 			dev_pm_opp_put_opp_table(genpd->opp_table);
2712 			dev_pm_opp_of_remove_table(&genpd->dev);
2713 		}
2714 	}
2715 
2716 	return ret;
2717 }
2718 EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
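
/*
 * Example (illustrative sketch, hypothetical domains): a provider exposing
 * several PM domains from a single DT node, selected by one specifier cell:
 *
 *	static struct generic_pm_domain *foo_domains[] = {
 *		[0] = &foo_pd_a,
 *		[1] = &foo_pd_b,
 *	};
 *
 *	static struct genpd_onecell_data foo_onecell_data = {
 *		.domains = foo_domains,
 *		.num_domains = ARRAY_SIZE(foo_domains),
 *	};
 *
 *	// after pm_genpd_init() of each domain:
 *	// ret = of_genpd_add_provider_onecell(np, &foo_onecell_data);
 */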
2719 
2720 /**
2721  * of_genpd_del_provider() - Remove a previously registered PM domain provider
2722  * @np: Device node pointer associated with the PM domain provider
2723  */
2724 void of_genpd_del_provider(struct device_node *np)
2725 {
2726 	struct of_genpd_provider *cp, *tmp;
2727 	struct generic_pm_domain *gpd;
2728 
2729 	mutex_lock(&gpd_list_lock);
2730 	mutex_lock(&of_genpd_mutex);
2731 	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
2732 		if (cp->node == np) {
2733 			/*
2734 			 * For each PM domain associated with the
2735 			 * provider, set the 'has_provider' to false
2736 			 * so that the PM domain can be safely removed.
2737 			 */
2738 			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2739 				if (gpd->provider == &np->fwnode) {
2740 					gpd->has_provider = false;
2741 
2742 					if (genpd_is_opp_table_fw(gpd) || !gpd->set_performance_state)
2743 						continue;
2744 
2745 					dev_pm_opp_put_opp_table(gpd->opp_table);
2746 					dev_pm_opp_of_remove_table(&gpd->dev);
2747 				}
2748 			}
2749 
2750 			fwnode_dev_initialized(&cp->node->fwnode, false);
2751 			list_del(&cp->link);
2752 			of_node_put(cp->node);
2753 			kfree(cp);
2754 			break;
2755 		}
2756 	}
2757 	mutex_unlock(&of_genpd_mutex);
2758 	mutex_unlock(&gpd_list_lock);
2759 }
2760 EXPORT_SYMBOL_GPL(of_genpd_del_provider);
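
/*
 * Example (minimal sketch): the remove path of a provider undoes the
 * registration before removing its domains (hypothetical foo_pd):
 *
 *	of_genpd_del_provider(pdev->dev.of_node);
 *	pm_genpd_remove(&foo_pd);
 */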
2761 
2762 /**
2763  * genpd_get_from_provider() - Look-up PM domain
2764  * @genpdspec: OF phandle args to use for look-up
2765  *
2766  * Looks for a PM domain provider under the node specified by @genpdspec and if
2767  * found, uses the xlate function of the provider to map phandle args to a PM
2768  * domain.
2769  *
2770  * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2771  * on failure.
2772  */
2773 static struct generic_pm_domain *genpd_get_from_provider(
2774 					const struct of_phandle_args *genpdspec)
2775 {
2776 	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2777 	struct of_genpd_provider *provider;
2778 
2779 	if (!genpdspec)
2780 		return ERR_PTR(-EINVAL);
2781 
2782 	mutex_lock(&of_genpd_mutex);
2783 
2784 	/* Check if we have such a provider in our list */
2785 	list_for_each_entry(provider, &of_genpd_providers, link) {
2786 		if (provider->node == genpdspec->np)
2787 			genpd = provider->xlate(genpdspec, provider->data);
2788 		if (!IS_ERR(genpd))
2789 			break;
2790 	}
2791 
2792 	mutex_unlock(&of_genpd_mutex);
2793 
2794 	return genpd;
2795 }
2796 
2797 /**
2798  * of_genpd_add_device() - Add a device to an I/O PM domain
2799  * @genpdspec: OF phandle args to use for PM domain look-up
2800  * @dev: Device to be added.
2801  *
2802  * Looks up an I/O PM domain based upon the phandle args provided and adds
2803  * the device to the PM domain. Returns a negative error code on failure.
2804  */
2805 int of_genpd_add_device(const struct of_phandle_args *genpdspec, struct device *dev)
2806 {
2807 	struct generic_pm_domain *genpd;
2808 	int ret;
2809 
2810 	if (!dev)
2811 		return -EINVAL;
2812 
2813 	mutex_lock(&gpd_list_lock);
2814 
2815 	genpd = genpd_get_from_provider(genpdspec);
2816 	if (IS_ERR(genpd)) {
2817 		ret = PTR_ERR(genpd);
2818 		goto out;
2819 	}
2820 
2821 	ret = genpd_add_device(genpd, dev, dev);
2822 
2823 out:
2824 	mutex_unlock(&gpd_list_lock);
2825 
2826 	return ret;
2827 }
2828 EXPORT_SYMBOL_GPL(of_genpd_add_device);
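
/*
 * Example (minimal sketch): platform code can add a device using a hand-built
 * specifier pointing at a provider node (hypothetical provider_np):
 *
 *	struct of_phandle_args pd_args = {
 *		.np = provider_np,
 *		.args_count = 1,
 *		.args = { 0 },	// domain index 0
 *	};
 *
 *	ret = of_genpd_add_device(&pd_args, dev);
 */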
2829 
2830 /**
2831  * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2832  * @parent_spec: OF phandle args to use for parent PM domain look-up
2833  * @subdomain_spec: OF phandle args to use for subdomain look-up
2834  *
2835  * Looks up a parent PM domain and a subdomain based upon the phandle args
2836  * provided and adds the subdomain to the parent PM domain. Returns a
2837  * negative error code on failure.
2838  */
2839 int of_genpd_add_subdomain(const struct of_phandle_args *parent_spec,
2840 			   const struct of_phandle_args *subdomain_spec)
2841 {
2842 	struct generic_pm_domain *parent, *subdomain;
2843 	int ret;
2844 
2845 	mutex_lock(&gpd_list_lock);
2846 
2847 	parent = genpd_get_from_provider(parent_spec);
2848 	if (IS_ERR(parent)) {
2849 		ret = PTR_ERR(parent);
2850 		goto out;
2851 	}
2852 
2853 	subdomain = genpd_get_from_provider(subdomain_spec);
2854 	if (IS_ERR(subdomain)) {
2855 		ret = PTR_ERR(subdomain);
2856 		goto out;
2857 	}
2858 
2859 	ret = genpd_add_subdomain(parent, subdomain);
2860 
2861 out:
2862 	mutex_unlock(&gpd_list_lock);
2863 
2864 	return ret == -ENOENT ? -EPROBE_DEFER : ret;
2865 }
2866 EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
2867 
2868 /**
2869  * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
2870  * @parent_spec: OF phandle args to use for parent PM domain look-up
2871  * @subdomain_spec: OF phandle args to use for subdomain look-up
2872  *
2873  * Looks up a parent PM domain and a subdomain based upon the phandle args
2874  * provided and removes the subdomain from the parent PM domain. Returns a
2875  * negative error code on failure.
2876  */
2877 int of_genpd_remove_subdomain(const struct of_phandle_args *parent_spec,
2878 			      const struct of_phandle_args *subdomain_spec)
2879 {
2880 	struct generic_pm_domain *parent, *subdomain;
2881 	int ret;
2882 
2883 	mutex_lock(&gpd_list_lock);
2884 
2885 	parent = genpd_get_from_provider(parent_spec);
2886 	if (IS_ERR(parent)) {
2887 		ret = PTR_ERR(parent);
2888 		goto out;
2889 	}
2890 
2891 	subdomain = genpd_get_from_provider(subdomain_spec);
2892 	if (IS_ERR(subdomain)) {
2893 		ret = PTR_ERR(subdomain);
2894 		goto out;
2895 	}
2896 
2897 	ret = pm_genpd_remove_subdomain(parent, subdomain);
2898 
2899 out:
2900 	mutex_unlock(&gpd_list_lock);
2901 
2902 	return ret;
2903 }
2904 EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
2905 
2906 /**
2907  * of_genpd_remove_last - Remove the last PM domain registered for a provider
2908  * @np: Pointer to device node associated with provider
2909  *
2910  * Find the last PM domain that was added by a particular provider and
2911  * remove this PM domain from the list of PM domains. The provider is
2912  * identified by the device node @np that is passed. The PM domain will
2913  * only be removed if the provider associated with the domain has been
2914  * removed.
2915  *
2916  * Returns a valid pointer to struct generic_pm_domain on success or
2917  * ERR_PTR() on failure.
2918  */
2919 struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
2920 {
2921 	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
2922 	int ret;
2923 
2924 	if (IS_ERR_OR_NULL(np))
2925 		return ERR_PTR(-EINVAL);
2926 
2927 	mutex_lock(&gpd_list_lock);
2928 	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
2929 		if (gpd->provider == &np->fwnode) {
2930 			ret = genpd_remove(gpd);
2931 			genpd = ret ? ERR_PTR(ret) : gpd;
2932 			break;
2933 		}
2934 	}
2935 	mutex_unlock(&gpd_list_lock);
2936 
2937 	return genpd;
2938 }
2939 EXPORT_SYMBOL_GPL(of_genpd_remove_last);
2940 
2941 static void genpd_release_dev(struct device *dev)
2942 {
2943 	of_node_put(dev->of_node);
2944 	kfree(dev);
2945 }
2946 
2947 static const struct bus_type genpd_bus_type = {
2948 	.name		= "genpd",
2949 };
2950 
2951 /**
2952  * genpd_dev_pm_detach - Detach a device from its PM domain.
2953  * @dev: Device to detach.
2954  * @power_off: Currently not used
2955  *
2956  * Try to locate the generic PM domain that the device was attached to
2957  * previously. If such is found, the device is detached from it.
2958  */
2959 static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2960 {
2961 	struct generic_pm_domain *pd;
2962 	unsigned int i;
2963 	int ret = 0;
2964 
2965 	pd = dev_to_genpd(dev);
2966 	if (IS_ERR(pd))
2967 		return;
2968 
2969 	dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2970 
2971 	/* Drop the default performance state */
2972 	if (dev_gpd_data(dev)->default_pstate) {
2973 		dev_pm_genpd_set_performance_state(dev, 0);
2974 		dev_gpd_data(dev)->default_pstate = 0;
2975 	}
2976 
2977 	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2978 		ret = genpd_remove_device(pd, dev);
2979 		if (ret != -EAGAIN)
2980 			break;
2981 
2982 		mdelay(i);
2983 		cond_resched();
2984 	}
2985 
2986 	if (ret < 0) {
2987 		dev_err(dev, "failed to remove from PM domain %s: %d\n",
2988 			pd->name, ret);
2989 		return;
2990 	}
2991 
2992 	/* Check if PM domain can be powered off after removing this device. */
2993 	genpd_queue_power_off_work(pd);
2994 
2995 	/* Unregister the device if it was created by genpd. */
2996 	if (dev->bus == &genpd_bus_type)
2997 		device_unregister(dev);
2998 }
2999 
3000 static void genpd_dev_pm_sync(struct device *dev)
3001 {
3002 	struct generic_pm_domain *pd;
3003 
3004 	pd = dev_to_genpd(dev);
3005 	if (IS_ERR(pd))
3006 		return;
3007 
3008 	genpd_queue_power_off_work(pd);
3009 }
3010 
3011 static int genpd_set_required_opp_dev(struct device *dev,
3012 				      struct device *base_dev)
3013 {
3014 	struct dev_pm_opp_config config = {
3015 		.required_dev = dev,
3016 	};
3017 	int ret;
3018 
3019 	/* Limit support to non-providers for now. */
3020 	if (of_property_present(base_dev->of_node, "#power-domain-cells"))
3021 		return 0;
3022 
3023 	if (!dev_pm_opp_of_has_required_opp(base_dev))
3024 		return 0;
3025 
3026 	ret = dev_pm_opp_set_config(base_dev, &config);
3027 	if (ret < 0)
3028 		return ret;
3029 
3030 	dev_gpd_data(dev)->opp_token = ret;
3031 	return 0;
3032 }
3033 
3034 static int genpd_set_required_opp(struct device *dev, unsigned int index)
3035 {
3036 	int ret, pstate;
3037 
3038 	/* Set the default performance state */
3039 	pstate = of_get_required_opp_performance_state(dev->of_node, index);
3040 	if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) {
3041 		ret = pstate;
3042 		goto err;
3043 	} else if (pstate > 0) {
3044 		ret = dev_pm_genpd_set_performance_state(dev, pstate);
3045 		if (ret)
3046 			goto err;
3047 		dev_gpd_data(dev)->default_pstate = pstate;
3048 	}
3049 
3050 	return 0;
3051 err:
3052 	dev_err(dev, "failed to set required performance state for power-domain %s: %d\n",
3053 		dev_to_genpd(dev)->name, ret);
3054 	return ret;
3055 }
3056 
3057 static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
3058 				 unsigned int index, unsigned int num_domains,
3059 				 bool power_on)
3060 {
3061 	struct of_phandle_args pd_args;
3062 	struct generic_pm_domain *pd;
3063 	int ret;
3064 
3065 	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
3066 				"#power-domain-cells", index, &pd_args);
3067 	if (ret < 0)
3068 		return ret;
3069 
3070 	mutex_lock(&gpd_list_lock);
3071 	pd = genpd_get_from_provider(&pd_args);
3072 	of_node_put(pd_args.np);
3073 	if (IS_ERR(pd)) {
3074 		mutex_unlock(&gpd_list_lock);
3075 		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
3076 			__func__, PTR_ERR(pd));
3077 		return driver_deferred_probe_check_state(base_dev);
3078 	}
3079 
3080 	dev_dbg(dev, "adding to PM domain %s\n", pd->name);
3081 
3082 	ret = genpd_add_device(pd, dev, base_dev);
3083 	mutex_unlock(&gpd_list_lock);
3084 
3085 	if (ret < 0)
3086 		return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name);
3087 
3088 	dev->pm_domain->detach = genpd_dev_pm_detach;
3089 	dev->pm_domain->sync = genpd_dev_pm_sync;
3090 
3091 	/*
3092 	 * For a single PM domain the index of the required OPP must be zero, so
3093 	 * let's try to assign a required dev in that case. In the multiple PM
3094 	 * domains case, we need platform code to specify the index.
3095 	 */
3096 	if (num_domains == 1) {
3097 		ret = genpd_set_required_opp_dev(dev, base_dev);
3098 		if (ret)
3099 			goto err;
3100 	}
3101 
3102 	ret = genpd_set_required_opp(dev, index);
3103 	if (ret)
3104 		goto err;
3105 
3106 	if (power_on) {
3107 		genpd_lock(pd);
3108 		ret = genpd_power_on(pd, 0);
3109 		genpd_unlock(pd);
3110 	}
3111 
3112 	if (ret) {
3113 		/* Drop the default performance state */
3114 		if (dev_gpd_data(dev)->default_pstate) {
3115 			dev_pm_genpd_set_performance_state(dev, 0);
3116 			dev_gpd_data(dev)->default_pstate = 0;
3117 		}
3118 
3119 		genpd_remove_device(pd, dev);
3120 		return -EPROBE_DEFER;
3121 	}
3122 
3123 	return 1;
3124 
3125 err:
3126 	genpd_remove_device(pd, dev);
3127 	return ret;
3128 }
3129 
3130 /**
3131  * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
3132  * @dev: Device to attach.
3133  *
3134  * Parses the device's OF node to find a PM domain specifier. If such is
3135  * found, attaches the device to the retrieved pm_domain ops.
3136  *
3137  * Returns 1 when a PM domain has successfully been attached, 0 when the device
3138  * doesn't need a PM domain or when multiple power-domains exist for it, else a
3139  * negative error code. Note that if a power-domain exists for the device, but
3140  * it cannot be found or turned on, -EPROBE_DEFER is returned to ensure that the
3141  * device is not probed and that probing is retried later.
3142  */
3143 int genpd_dev_pm_attach(struct device *dev)
3144 {
3145 	if (!dev->of_node)
3146 		return 0;
3147 
3148 	/*
3149 	 * Devices with multiple PM domains must be attached separately, as we
3150 	 * can only attach one PM domain per device.
3151 	 */
3152 	if (of_count_phandle_with_args(dev->of_node, "power-domains",
3153 				       "#power-domain-cells") != 1)
3154 		return 0;
3155 
3156 	return __genpd_dev_pm_attach(dev, dev, 0, 1, true);
3157 }
3158 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
3159 
3160 /**
3161  * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
3162  * @dev: The device used to lookup the PM domain.
3163  * @index: The index of the PM domain.
3164  *
3165  * Parses the device's OF node to find a PM domain specifier at the provided
3166  * @index. If such is found, creates a virtual device and attaches it to the
3167  * retrieved pm_domain ops. To deal with detaching of the virtual device, the
3168  * ->detach() callback in struct dev_pm_domain is set to genpd_dev_pm_detach().
3169  *
3170  * Returns the created virtual device if a PM domain was successfully attached,
3171  * NULL when the device doesn't need a PM domain, else an ERR_PTR() in case of
3172  * failures. If a power-domain exists for the device, but cannot be found or
3173  * turned on, ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device is
3174  * not probed and that probing is retried later.
3175  */
3176 struct device *genpd_dev_pm_attach_by_id(struct device *dev,
3177 					 unsigned int index)
3178 {
3179 	struct device *virt_dev;
3180 	int num_domains;
3181 	int ret;
3182 
3183 	if (!dev->of_node)
3184 		return NULL;
3185 
3186 	/* Verify that the index is within a valid range. */
3187 	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
3188 						 "#power-domain-cells");
3189 	if (index >= num_domains)
3190 		return NULL;
3191 
3192 	/* Allocate and register device on the genpd bus. */
3193 	virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
3194 	if (!virt_dev)
3195 		return ERR_PTR(-ENOMEM);
3196 
3197 	dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
3198 	virt_dev->bus = &genpd_bus_type;
3199 	virt_dev->release = genpd_release_dev;
3200 	virt_dev->of_node = of_node_get(dev->of_node);
3201 
3202 	ret = device_register(virt_dev);
3203 	if (ret) {
3204 		put_device(virt_dev);
3205 		return ERR_PTR(ret);
3206 	}
3207 
3208 	/* Try to attach the device to the PM domain at the specified index. */
3209 	ret = __genpd_dev_pm_attach(virt_dev, dev, index, num_domains, false);
3210 	if (ret < 1) {
3211 		device_unregister(virt_dev);
3212 		return ret ? ERR_PTR(ret) : NULL;
3213 	}
3214 
3215 	pm_runtime_enable(virt_dev);
3216 	genpd_queue_power_off_work(dev_to_genpd(virt_dev));
3217 
3218 	return virt_dev;
3219 }
3220 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
3221 
3222 /**
3223  * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
3224  * @dev: The device used to lookup the PM domain.
3225  * @name: The name of the PM domain.
3226  *
3227  * Parses the device's OF node to find a PM domain specifier using the
3228  * power-domain-names DT property. For further description see
3229  * genpd_dev_pm_attach_by_id().
3230  */
3231 struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
3232 {
3233 	int index;
3234 
3235 	if (!dev->of_node)
3236 		return NULL;
3237 
3238 	index = of_property_match_string(dev->of_node, "power-domain-names",
3239 					 name);
3240 	if (index < 0)
3241 		return NULL;
3242 
3243 	return genpd_dev_pm_attach_by_id(dev, index);
3244 }
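
/*
 * Example (illustrative sketch): a driver for a device with multiple PM
 * domains usually goes through dev_pm_domain_attach_by_name(), which ends up
 * here. The "perf" domain name is hypothetical; the device link ties the
 * virtual device's runtime PM state to the consumer:
 *
 *	struct device *pd_dev;
 *	struct device_link *link;
 *
 *	pd_dev = dev_pm_domain_attach_by_name(dev, "perf");
 *	if (IS_ERR_OR_NULL(pd_dev))
 *		return pd_dev ? PTR_ERR(pd_dev) : -ENODEV;
 *
 *	link = device_link_add(dev, pd_dev,
 *			       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
 *	if (!link) {
 *		dev_pm_domain_detach(pd_dev, false);
 *		return -EINVAL;
 *	}
 */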
3245 
3246 static const struct of_device_id idle_state_match[] = {
3247 	{ .compatible = "domain-idle-state", },
3248 	{ }
3249 };
3250 
3251 static int genpd_parse_state(struct genpd_power_state *genpd_state,
3252 				    struct device_node *state_node)
3253 {
3254 	int err;
3255 	u32 residency;
3256 	u32 entry_latency, exit_latency;
3257 
3258 	err = of_property_read_u32(state_node, "entry-latency-us",
3259 						&entry_latency);
3260 	if (err) {
3261 		pr_debug(" * %pOF missing entry-latency-us property\n",
3262 			 state_node);
3263 		return -EINVAL;
3264 	}
3265 
3266 	err = of_property_read_u32(state_node, "exit-latency-us",
3267 						&exit_latency);
3268 	if (err) {
3269 		pr_debug(" * %pOF missing exit-latency-us property\n",
3270 			 state_node);
3271 		return -EINVAL;
3272 	}
3273 
3274 	err = of_property_read_u32(state_node, "min-residency-us", &residency);
3275 	if (!err)
3276 		genpd_state->residency_ns = 1000LL * residency;
3277 
3278 	of_property_read_string(state_node, "idle-state-name", &genpd_state->name);
3279 
3280 	genpd_state->power_on_latency_ns = 1000LL * exit_latency;
3281 	genpd_state->power_off_latency_ns = 1000LL * entry_latency;
3282 	genpd_state->fwnode = &state_node->fwnode;
3283 
3284 	return 0;
3285 }
3286 
3287 static int genpd_iterate_idle_states(struct device_node *dn,
3288 				     struct genpd_power_state *states)
3289 {
3290 	int ret;
3291 	struct of_phandle_iterator it;
3292 	struct device_node *np;
3293 	int i = 0;
3294 
3295 	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
3296 	if (ret <= 0)
3297 		return ret == -ENOENT ? 0 : ret;
3298 
3299 	/* Loop over the phandles until all the requested entries are found */
3300 	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
3301 		np = it.node;
3302 		if (!of_match_node(idle_state_match, np))
3303 			continue;
3304 
3305 		if (!of_device_is_available(np))
3306 			continue;
3307 
3308 		if (states) {
3309 			ret = genpd_parse_state(&states[i], np);
3310 			if (ret) {
3311 				pr_err("Parsing idle state node %pOF failed with err %d\n",
3312 				       np, ret);
3313 				of_node_put(np);
3314 				return ret;
3315 			}
3316 		}
3317 		i++;
3318 	}
3319 
3320 	return i;
3321 }
3322 
3323 /**
3324  * of_genpd_parse_idle_states: Return array of idle states for the genpd.
3325  *
3326  * @dn: The genpd device node
3327  * @states: The pointer to which the state array will be saved.
3328  * @n: The count of elements in the array returned from this function.
3329  *
3330  * The device states parsed from the OF node are returned in @states; the
3331  * memory for them is allocated by this function and the caller is responsible
3332  * for freeing it after use. Returns 0 on success, including when zero
3333  * compatible domain idle states are found, and a negative error code on errors.
3334  */
3335 int of_genpd_parse_idle_states(struct device_node *dn,
3336 			struct genpd_power_state **states, int *n)
3337 {
3338 	struct genpd_power_state *st;
3339 	int ret;
3340 
3341 	ret = genpd_iterate_idle_states(dn, NULL);
3342 	if (ret < 0)
3343 		return ret;
3344 
3345 	if (!ret) {
3346 		*states = NULL;
3347 		*n = 0;
3348 		return 0;
3349 	}
3350 
3351 	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
3352 	if (!st)
3353 		return -ENOMEM;
3354 
3355 	ret = genpd_iterate_idle_states(dn, st);
3356 	if (ret <= 0) {
3357 		kfree(st);
3358 		return ret < 0 ? ret : -EINVAL;
3359 	}
3360 
3361 	*states = st;
3362 	*n = ret;
3363 
3364 	return 0;
3365 }
3366 EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
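
/*
 * Example (minimal sketch): a provider hooking the parsed idle states into a
 * domain before registering it (hypothetical foo_pd):
 *
 *	struct genpd_power_state *states;
 *	int nr_states, ret;
 *
 *	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
 *	if (ret)
 *		return ret;
 *
 *	if (nr_states) {
 *		foo_pd.states = states;
 *		foo_pd.state_count = nr_states;
 *	}
 *
 *	// then register with pm_genpd_init(&foo_pd, ...), remembering that
 *	// freeing the states array remains the caller's responsibility.
 */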
3367 
3368 static int __init genpd_bus_init(void)
3369 {
3370 	return bus_register(&genpd_bus_type);
3371 }
3372 core_initcall(genpd_bus_init);
3373 
3374 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
3375 
3376 
3377 /***        debugfs support        ***/
3378 
3379 #ifdef CONFIG_DEBUG_FS
3380 /*
3381  * TODO: This function is a slightly modified version of rtpm_status_show
3382  * from sysfs.c, so generalize it.
3383  */
3384 static void rtpm_status_str(struct seq_file *s, struct device *dev)
3385 {
3386 	static const char * const status_lookup[] = {
3387 		[RPM_ACTIVE] = "active",
3388 		[RPM_RESUMING] = "resuming",
3389 		[RPM_SUSPENDED] = "suspended",
3390 		[RPM_SUSPENDING] = "suspending"
3391 	};
3392 	const char *p = "";
3393 
3394 	if (dev->power.runtime_error)
3395 		p = "error";
3396 	else if (dev->power.disable_depth)
3397 		p = "unsupported";
3398 	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
3399 		p = status_lookup[dev->power.runtime_status];
3400 	else
3401 		WARN_ON(1);
3402 
3403 	seq_printf(s, "%-26s  ", p);
3404 }
3405 
3406 static void perf_status_str(struct seq_file *s, struct device *dev)
3407 {
3408 	struct generic_pm_domain_data *gpd_data;
3409 
3410 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
3411 
3412 	seq_printf(s, "%-10u  ", gpd_data->performance_state);
3413 }
3414 
3415 static void mode_status_str(struct seq_file *s, struct device *dev)
3416 {
3417 	struct generic_pm_domain_data *gpd_data;
3418 
3419 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
3420 
3421 	seq_printf(s, "%2s", gpd_data->hw_mode ? "HW" : "SW");
3422 }
3423 
3424 static int genpd_summary_one(struct seq_file *s,
3425 			struct generic_pm_domain *genpd)
3426 {
3427 	static const char * const status_lookup[] = {
3428 		[GENPD_STATE_ON] = "on",
3429 		[GENPD_STATE_OFF] = "off"
3430 	};
3431 	struct pm_domain_data *pm_data;
3432 	struct gpd_link *link;
3433 	char state[16];
3434 	int ret;
3435 
3436 	ret = genpd_lock_interruptible(genpd);
3437 	if (ret)
3438 		return -ERESTARTSYS;
3439 
3440 	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
3441 		goto exit;
3442 	if (!genpd_status_on(genpd))
3443 		snprintf(state, sizeof(state), "%s-%u",
3444 			 status_lookup[genpd->status], genpd->state_idx);
3445 	else
3446 		snprintf(state, sizeof(state), "%s",
3447 			 status_lookup[genpd->status]);
3448 	seq_printf(s, "%-30s  %-30s  %u", dev_name(&genpd->dev), state, genpd->performance_state);
3449 
3450 	/*
3451 	 * Modifications on the list require holding locks on both
3452 	 * parent and child, so we are safe.
3453 	 * Also the device name is immutable.
3454 	 */
3455 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
3456 		if (list_is_first(&link->parent_node, &genpd->parent_links))
3457 			seq_printf(s, "\n%48s", " ");
3458 		seq_printf(s, "%s", link->child->name);
3459 		if (!list_is_last(&link->parent_node, &genpd->parent_links))
3460 			seq_puts(s, ", ");
3461 	}
3462 
3463 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3464 		seq_printf(s, "\n    %-30s  ", dev_name(pm_data->dev));
3465 		rtpm_status_str(s, pm_data->dev);
3466 		perf_status_str(s, pm_data->dev);
3467 		mode_status_str(s, pm_data->dev);
3468 	}
3469 
3470 	seq_puts(s, "\n");
3471 exit:
3472 	genpd_unlock(genpd);
3473 
3474 	return 0;
3475 }
3476 
3477 static int summary_show(struct seq_file *s, void *data)
3478 {
3479 	struct generic_pm_domain *genpd;
3480 	int ret = 0;
3481 
3482 	seq_puts(s, "domain                          status          children        performance\n");
3483 	seq_puts(s, "    /device                         runtime status                  managed by\n");
3484 	seq_puts(s, "------------------------------------------------------------------------------\n");
3485 
3486 	ret = mutex_lock_interruptible(&gpd_list_lock);
3487 	if (ret)
3488 		return -ERESTARTSYS;
3489 
3490 	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
3491 		ret = genpd_summary_one(s, genpd);
3492 		if (ret)
3493 			break;
3494 	}
3495 	mutex_unlock(&gpd_list_lock);
3496 
3497 	return ret;
3498 }
3499 
3500 static int status_show(struct seq_file *s, void *data)
3501 {
3502 	static const char * const status_lookup[] = {
3503 		[GENPD_STATE_ON] = "on",
3504 		[GENPD_STATE_OFF] = "off"
3505 	};
3506 
3507 	struct generic_pm_domain *genpd = s->private;
3508 	int ret = 0;
3509 
3510 	ret = genpd_lock_interruptible(genpd);
3511 	if (ret)
3512 		return -ERESTARTSYS;
3513 
3514 	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
3515 		goto exit;
3516 
3517 	if (genpd->status == GENPD_STATE_OFF)
3518 		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
3519 			genpd->state_idx);
3520 	else
3521 		seq_printf(s, "%s\n", status_lookup[genpd->status]);
3522 exit:
3523 	genpd_unlock(genpd);
3524 	return ret;
3525 }
3526 
3527 static int sub_domains_show(struct seq_file *s, void *data)
3528 {
3529 	struct generic_pm_domain *genpd = s->private;
3530 	struct gpd_link *link;
3531 	int ret = 0;
3532 
3533 	ret = genpd_lock_interruptible(genpd);
3534 	if (ret)
3535 		return -ERESTARTSYS;
3536 
3537 	list_for_each_entry(link, &genpd->parent_links, parent_node)
3538 		seq_printf(s, "%s\n", link->child->name);
3539 
3540 	genpd_unlock(genpd);
3541 	return ret;
3542 }
3543 
3544 static int idle_states_show(struct seq_file *s, void *data)
3545 {
3546 	struct generic_pm_domain *genpd = s->private;
3547 	u64 now, delta, idle_time = 0;
3548 	unsigned int i;
3549 	int ret = 0;
3550 
3551 	ret = genpd_lock_interruptible(genpd);
3552 	if (ret)
3553 		return -ERESTARTSYS;
3554 
3555 	seq_puts(s, "State          Time Spent(ms) Usage      Rejected   Above      Below\n");
3556 
3557 	for (i = 0; i < genpd->state_count; i++) {
3558 		struct genpd_power_state *state = &genpd->states[i];
3559 		char state_name[15];
3560 
3561 		idle_time += state->idle_time;
3562 
3563 		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3564 			now = ktime_get_mono_fast_ns();
3565 			if (now > genpd->accounting_time) {
3566 				delta = now - genpd->accounting_time;
3567 				idle_time += delta;
3568 			}
3569 		}
3570 
3571 		if (!state->name)
3572 			snprintf(state_name, ARRAY_SIZE(state_name), "S%-13d", i);
3573 
3574 		do_div(idle_time, NSEC_PER_MSEC);
3575 		seq_printf(s, "%-14s %-14llu %-10llu %-10llu %-10llu %llu\n",
3576 			   state->name ?: state_name, idle_time,
3577 			   state->usage, state->rejected, state->above,
3578 			   state->below);
3579 	}
3580 
3581 	genpd_unlock(genpd);
3582 	return ret;
3583 }
3584 
3585 static int active_time_show(struct seq_file *s, void *data)
3586 {
3587 	struct generic_pm_domain *genpd = s->private;
3588 	u64 now, on_time, delta = 0;
3589 	int ret = 0;
3590 
3591 	ret = genpd_lock_interruptible(genpd);
3592 	if (ret)
3593 		return -ERESTARTSYS;
3594 
3595 	if (genpd->status == GENPD_STATE_ON) {
3596 		now = ktime_get_mono_fast_ns();
3597 		if (now > genpd->accounting_time)
3598 			delta = now - genpd->accounting_time;
3599 	}
3600 
3601 	on_time = genpd->on_time + delta;
3602 	do_div(on_time, NSEC_PER_MSEC);
3603 	seq_printf(s, "%llu ms\n", on_time);
3604 
3605 	genpd_unlock(genpd);
3606 	return ret;
3607 }
3608 
3609 static int total_idle_time_show(struct seq_file *s, void *data)
3610 {
3611 	struct generic_pm_domain *genpd = s->private;
3612 	u64 now, delta, total = 0;
3613 	unsigned int i;
3614 	int ret = 0;
3615 
3616 	ret = genpd_lock_interruptible(genpd);
3617 	if (ret)
3618 		return -ERESTARTSYS;
3619 
3620 	for (i = 0; i < genpd->state_count; i++) {
3621 		total += genpd->states[i].idle_time;
3622 
3623 		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3624 			now = ktime_get_mono_fast_ns();
3625 			if (now > genpd->accounting_time) {
3626 				delta = now - genpd->accounting_time;
3627 				total += delta;
3628 			}
3629 		}
3630 	}
3631 
3632 	do_div(total, NSEC_PER_MSEC);
3633 	seq_printf(s, "%llu ms\n", total);
3634 
3635 	genpd_unlock(genpd);
3636 	return ret;
3637 }
3638 
3639 
3640 static int devices_show(struct seq_file *s, void *data)
3641 {
3642 	struct generic_pm_domain *genpd = s->private;
3643 	struct pm_domain_data *pm_data;
3644 	int ret = 0;
3645 
3646 	ret = genpd_lock_interruptible(genpd);
3647 	if (ret)
3648 		return -ERESTARTSYS;
3649 
3650 	list_for_each_entry(pm_data, &genpd->dev_list, list_node)
3651 		seq_printf(s, "%s\n", dev_name(pm_data->dev));
3652 
3653 	genpd_unlock(genpd);
3654 	return ret;
3655 }
3656 
3657 static int perf_state_show(struct seq_file *s, void *data)
3658 {
3659 	struct generic_pm_domain *genpd = s->private;
3660 
3661 	if (genpd_lock_interruptible(genpd))
3662 		return -ERESTARTSYS;
3663 
3664 	seq_printf(s, "%u\n", genpd->performance_state);
3665 
3666 	genpd_unlock(genpd);
3667 	return 0;
3668 }
3669 
3670 DEFINE_SHOW_ATTRIBUTE(summary);
3671 DEFINE_SHOW_ATTRIBUTE(status);
3672 DEFINE_SHOW_ATTRIBUTE(sub_domains);
3673 DEFINE_SHOW_ATTRIBUTE(idle_states);
3674 DEFINE_SHOW_ATTRIBUTE(active_time);
3675 DEFINE_SHOW_ATTRIBUTE(total_idle_time);
3676 DEFINE_SHOW_ATTRIBUTE(devices);
3677 DEFINE_SHOW_ATTRIBUTE(perf_state);
3678 
3679 static void genpd_debug_add(struct generic_pm_domain *genpd)
3680 {
3681 	struct dentry *d;
3682 
3683 	if (!genpd_debugfs_dir)
3684 		return;
3685 
3686 	d = debugfs_create_dir(dev_name(&genpd->dev), genpd_debugfs_dir);
3687 
3688 	debugfs_create_file("current_state", 0444,
3689 			    d, genpd, &status_fops);
3690 	debugfs_create_file("sub_domains", 0444,
3691 			    d, genpd, &sub_domains_fops);
3692 	debugfs_create_file("idle_states", 0444,
3693 			    d, genpd, &idle_states_fops);
3694 	debugfs_create_file("active_time", 0444,
3695 			    d, genpd, &active_time_fops);
3696 	debugfs_create_file("total_idle_time", 0444,
3697 			    d, genpd, &total_idle_time_fops);
3698 	debugfs_create_file("devices", 0444,
3699 			    d, genpd, &devices_fops);
3700 	if (genpd->set_performance_state)
3701 		debugfs_create_file("perf_state", 0444,
3702 				    d, genpd, &perf_state_fops);
3703 }
3704 
3705 static int __init genpd_debug_init(void)
3706 {
3707 	struct generic_pm_domain *genpd;
3708 
3709 	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
3710 
3711 	debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
3712 			    NULL, &summary_fops);
3713 
3714 	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
3715 		genpd_debug_add(genpd);
3716 
3717 	return 0;
3718 }
3719 late_initcall(genpd_debug_init);
3720 
3721 static void __exit genpd_debug_exit(void)
3722 {
3723 	debugfs_remove_recursive(genpd_debugfs_dir);
3724 }
3725 __exitcall(genpd_debug_exit);
3726 #endif /* CONFIG_DEBUG_FS */
3727