// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/pmdomain/core.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 */
#define pr_fmt(fmt) "PM: " fmt

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

#define GENPD_RETRY_MAX_MS	250		/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	}							\
	__ret;							\
})
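
/*
 * For example (illustrative expansion only), GENPD_DEV_CALLBACK(genpd, int,
 * stop, dev) evaluates genpd->dev_ops.stop and, if the callback is set,
 * invokes it with @dev; otherwise the statement expression yields (int)0.
 */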

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

struct genpd_lock_ops {
	void (*lock)(struct generic_pm_domain *genpd);
	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
	int (*lock_interruptible)(struct generic_pm_domain *genpd);
	void (*unlock)(struct generic_pm_domain *genpd);
};

static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
	mutex_lock(&genpd->mlock);
}

static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
					int depth)
{
	mutex_lock_nested(&genpd->mlock, depth);
}

static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
	return mutex_lock_interruptible(&genpd->mlock);
}

static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->mlock);
}

static const struct genpd_lock_ops genpd_mtx_ops = {
	.lock = genpd_lock_mtx,
	.lock_nested = genpd_lock_nested_mtx,
	.lock_interruptible = genpd_lock_interruptible_mtx,
	.unlock = genpd_unlock_mtx,
};

static void genpd_lock_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
}

static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
					int depth)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
	genpd->lock_flags = flags;
}

static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
	return 0;
}

static void genpd_unlock_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->slock)
{
	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}

static const struct genpd_lock_ops genpd_spin_ops = {
	.lock = genpd_lock_spin,
	.lock_nested = genpd_lock_nested_spin,
	.lock_interruptible = genpd_lock_interruptible_spin,
	.unlock = genpd_unlock_spin,
};

#define genpd_lock(p)			p->lock_ops->lock(p)
#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
#define genpd_unlock(p)			p->lock_ops->unlock(p)
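
/*
 * A minimal provider-side sketch (illustrative, not part of the genpd core):
 * a provider whose domain must be powered on/off from atomic context sets
 * GENPD_FLAG_IRQ_SAFE before pm_genpd_init(), which makes the core pick
 * genpd_spin_ops above instead of genpd_mtx_ops. The name "foo_pd" and the
 * empty callback are assumptions.
 */
static int __maybe_unused foo_pd_power_on(struct generic_pm_domain *pd)
{
	/* Program hardware without sleeping; runs under the spinlock. */
	return 0;
}

static struct generic_pm_domain foo_pd __maybe_unused = {
	.name = "foo_pd",
	.flags = GENPD_FLAG_IRQ_SAFE,
	.power_on = foo_pd_power_on,
};
/* pm_genpd_init(&foo_pd, NULL, true) would then select the spinlock ops. */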

#define genpd_status_on(genpd)		(genpd->status == GENPD_STATE_ON)
#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
#define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
#define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
#define genpd_is_opp_table_fw(genpd)	(genpd->flags & GENPD_FLAG_OPP_TABLE_FW)

static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
		const struct generic_pm_domain *genpd)
{
	bool ret;

	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);

	/*
	 * Warn once if an IRQ safe device is attached to a domain whose
	 * callbacks are allowed to sleep. This indicates a suboptimal
	 * configuration for PM, but it doesn't matter for an always-on domain.
	 */
	if (genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd))
		return ret;

	if (ret)
		dev_warn_once(dev, "PM domain %s will not be powered off\n",
				genpd->name);

	return ret;
}

static int genpd_runtime_suspend(struct device *dev);

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	/* A genpd always has its ->runtime_suspend() callback assigned. */
	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
		return pd_to_genpd(dev->pm_domain);

	return NULL;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

struct device *dev_to_genpd_dev(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);

	if (IS_ERR(genpd))
		return ERR_CAST(genpd);

	return &genpd->dev;
}

static int genpd_stop_dev(const struct generic_pm_domain *genpd,
			  struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(const struct generic_pm_domain *genpd,
			   struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

#ifdef CONFIG_DEBUG_FS
static struct dentry *genpd_debugfs_dir;

static void genpd_debug_add(struct generic_pm_domain *genpd);

static void genpd_debug_remove(struct generic_pm_domain *genpd)
{
	if (!genpd_debugfs_dir)
		return;

	debugfs_lookup_and_remove(genpd->name, genpd_debugfs_dir);
}

static void genpd_update_accounting(struct generic_pm_domain *genpd)
{
	u64 delta, now;

	now = ktime_get_mono_fast_ns();
	if (now <= genpd->accounting_time)
		return;

	delta = now - genpd->accounting_time;

	/*
	 * If genpd->status is active, we have just transitioned out of the
	 * off state, so the elapsed time was spent in the current idle
	 * state; otherwise we have just powered off, so it was spent on.
	 */
	if (genpd->status == GENPD_STATE_ON)
		genpd->states[genpd->state_idx].idle_time += delta;
	else
		genpd->on_time += delta;

	genpd->accounting_time = now;
}
#else
static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif

static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
					   unsigned int state)
{
	struct generic_pm_domain_data *pd_data;
	struct pm_domain_data *pdd;
	struct gpd_link *link;

	/* The new requested state is the same as the max requested state. */
	if (state == genpd->performance_state)
		return state;

	/* The new requested state is higher than the max requested state. */
	if (state > genpd->performance_state)
		return state;

	/* Traverse all devices within the domain */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		pd_data = to_gpd_data(pdd);

		if (pd_data->performance_state > state)
			state = pd_data->performance_state;
	}

	/*
	 * Traverse all sub-domains within the domain. This can be
	 * done without any additional locking as the link->performance_state
	 * field is protected by the parent genpd->lock, which is already taken.
	 *
	 * Also note that link->performance_state (the subdomain's performance
	 * state requirement to the parent domain) is different from
	 * link->child->performance_state (the current performance state
	 * requirement of the devices/sub-domains of the subdomain) and so can
	 * have a different value.
	 *
	 * Note that we also take votes from powered-off sub-domains into
	 * account, as the same is done for devices right now.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		if (link->performance_state > state)
			state = link->performance_state;
	}

	return state;
}
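
/*
 * Worked example (hypothetical numbers): assume the currently aggregated
 * genpd->performance_state is 5 and a device lowers its vote so the new
 * request is 3. Since 3 < 5, the remaining device votes (say 2 and 3) and
 * the subdomain link votes (say 4) are scanned above, and the function
 * returns the new aggregate, 4.
 */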

static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
					 struct generic_pm_domain *parent,
					 unsigned int pstate)
{
	if (!parent->set_performance_state)
		return pstate;

	return dev_pm_opp_xlate_performance_state(genpd->opp_table,
						  parent->opp_table,
						  pstate);
}

static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state, int depth);

static void _genpd_rollback_parent_state(struct gpd_link *link, int depth)
{
	struct generic_pm_domain *parent = link->parent;
	int parent_state;

	genpd_lock_nested(parent, depth + 1);

	parent_state = link->prev_performance_state;
	link->performance_state = parent_state;

	parent_state = _genpd_reeval_performance_state(parent, parent_state);
	if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
		pr_err("%s: Failed to roll back to %d performance state\n",
		       parent->name, parent_state);
	}

	genpd_unlock(parent);
}

static int _genpd_set_parent_state(struct generic_pm_domain *genpd,
				   struct gpd_link *link,
				   unsigned int state, int depth)
{
	struct generic_pm_domain *parent = link->parent;
	int parent_state, ret;

	/* Find parent's performance state */
	ret = genpd_xlate_performance_state(genpd, parent, state);
	if (unlikely(ret < 0))
		return ret;

	parent_state = ret;

	genpd_lock_nested(parent, depth + 1);

	link->prev_performance_state = link->performance_state;
	link->performance_state = parent_state;

	parent_state = _genpd_reeval_performance_state(parent, parent_state);
	ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
	if (ret)
		link->performance_state = link->prev_performance_state;

	genpd_unlock(parent);

	return ret;
}

static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state, int depth)
{
	struct gpd_link *link = NULL;
	int ret;

	if (state == genpd->performance_state)
		return 0;

	/* When scaling up, propagate to parents first in normal order */
	if (state > genpd->performance_state) {
		list_for_each_entry(link, &genpd->child_links, child_node) {
			ret = _genpd_set_parent_state(genpd, link, state, depth);
			if (ret)
				goto rollback_parents_up;
		}
	}

	if (genpd->set_performance_state) {
		ret = genpd->set_performance_state(genpd, state);
		if (ret) {
			if (link)
				goto rollback_parents_up;
			return ret;
		}
	}

	/* When scaling down, propagate to parents last in reverse order */
	if (state < genpd->performance_state) {
		list_for_each_entry_reverse(link, &genpd->child_links, child_node) {
			ret = _genpd_set_parent_state(genpd, link, state, depth);
			if (ret)
				goto rollback_parents_down;
		}
	}

	genpd->performance_state = state;
	return 0;

rollback_parents_up:
	list_for_each_entry_continue_reverse(link, &genpd->child_links, child_node)
		_genpd_rollback_parent_state(link, depth);
	return ret;
rollback_parents_down:
	list_for_each_entry_continue(link, &genpd->child_links, child_node)
		_genpd_rollback_parent_state(link, depth);
	return ret;
}

static int genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	unsigned int prev_state;
	int ret;

	prev_state = gpd_data->performance_state;
	if (prev_state == state)
		return 0;

	gpd_data->performance_state = state;
	state = _genpd_reeval_performance_state(genpd, state);

	ret = _genpd_set_performance_state(genpd, state, 0);
	if (ret)
		gpd_data->performance_state = prev_state;

	return ret;
}

static int genpd_drop_performance_state(struct device *dev)
{
	unsigned int prev_state = dev_gpd_data(dev)->performance_state;

	if (!genpd_set_performance_state(dev, 0))
		return prev_state;

	return 0;
}

static void genpd_restore_performance_state(struct device *dev,
					    unsigned int state)
{
	if (state)
		genpd_set_performance_state(dev, state);
}

static int genpd_dev_pm_set_performance_state(struct device *dev,
					      unsigned int state)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);
	int ret = 0;

	genpd_lock(genpd);
	if (pm_runtime_suspended(dev)) {
		dev_gpd_data(dev)->rpm_pstate = state;
	} else {
		ret = genpd_set_performance_state(dev, state);
		if (!ret)
			dev_gpd_data(dev)->rpm_pstate = 0;
	}
	genpd_unlock(genpd);

	return ret;
}

/**
 * dev_pm_genpd_set_performance_state - Set performance state of device's power
 * domain.
 *
 * @dev: Device for which the performance state needs to be set.
 * @state: Target performance state of the device. This can be set as 0 when
 *	   the device doesn't have any performance state constraints left
 *	   (and so the device no longer participates in determining the
 *	   target performance state of the genpd).
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data))
		return -EINVAL;

	return genpd_dev_pm_set_performance_state(dev, state);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
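
/*
 * Consumer-side sketch (illustrative, not part of the genpd core): a driver
 * attached to a genpd votes for a performance state while active and drops
 * its vote when idling. The state value 2 is an assumption; real drivers
 * typically derive it from an OPP table, e.g. via
 * dev_pm_opp_get_required_pstate().
 */
static int __maybe_unused foo_drv_runtime_resume(struct device *dev)
{
	return dev_pm_genpd_set_performance_state(dev, 2);
}

static int __maybe_unused foo_drv_runtime_suspend(struct device *dev)
{
	/* A zero vote removes this device from the aggregation. */
	return dev_pm_genpd_set_performance_state(dev, 0);
}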

/**
 * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
 *
 * @dev: Device to handle
 * @next: impending interrupt/wakeup for the device
 *
 * Allow devices to inform the PM framework of the next wakeup. It's assumed
 * that the users guarantee that the genpd wouldn't be detached while this
 * routine is getting called. Additionally, it's also assumed that @dev isn't
 * runtime suspended (RPM_SUSPENDED).
 * Although devices are expected to update the next_wakeup after the end of
 * their usecase as well, it is possible the devices themselves may not know
 * about that, so a stale @next will be ignored when powering off the domain.
 */
void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	td = to_gpd_data(dev->power.subsys_data->domain_data)->td;
	if (td)
		td->next_wakeup = next;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
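
/*
 * Consumer-side sketch (illustrative): a driver that knows its next
 * interrupt fires in roughly 10 ms can share that with the governor before
 * letting the domain power off.
 */
static void __maybe_unused foo_drv_prepare_idle(struct device *dev)
{
	dev_pm_genpd_set_next_wakeup(dev, ktime_add_ms(ktime_get(), 10));
}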

/**
 * dev_pm_genpd_get_next_hrtimer - Return the next_hrtimer for the genpd
 * @dev: A device that is attached to the genpd.
 *
 * This routine should typically be called for a device, at the point of when a
 * GENPD_NOTIFY_PRE_OFF notification has been sent for it.
 *
 * Returns the aggregated value of the genpd's next hrtimer or KTIME_MAX if no
 * valid value has been set.
 */
ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return KTIME_MAX;

	if (genpd->gd)
		return genpd->gd->next_hrtimer;

	return KTIME_MAX;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_get_next_hrtimer);
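
/*
 * Sketch (illustrative): a power notifier that, upon GENPD_NOTIFY_PRE_OFF,
 * inspects the domain's aggregated next hrtimer to judge whether a deep
 * state is worthwhile. The foo_dev pointer (a device attached to the genpd)
 * is an assumption; the notifier must have been registered with
 * dev_pm_genpd_add_notifier(), see further below.
 */
static struct device *foo_dev;

static int __maybe_unused foo_pd_notifier(struct notifier_block *nb,
					  unsigned long action, void *data)
{
	if (action == GENPD_NOTIFY_PRE_OFF &&
	    dev_pm_genpd_get_next_hrtimer(foo_dev) == KTIME_MAX) {
		/* No wakeup scheduled; a deep state is likely worthwhile. */
	}

	return NOTIFY_OK;
}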

/**
 * dev_pm_genpd_synced_poweroff - Next power off should be synchronous
 *
 * @dev: A device that is attached to the genpd.
 *
 * Allows a consumer of the genpd to notify the provider that the next power off
 * should be synchronous.
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 */
void dev_pm_genpd_synced_poweroff(struct device *dev)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	genpd_lock(genpd);
	genpd->synced_poweroff = true;
	genpd_unlock(genpd);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_synced_poweroff);
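
/*
 * Sketch (illustrative): a consumer that must not have its final power-off
 * deferred by the provider can request a synchronous power-off right before
 * dropping its last runtime PM reference.
 */
static void __maybe_unused foo_drv_final_suspend(struct device *dev)
{
	dev_pm_genpd_synced_poweroff(dev);
	pm_runtime_put_sync(dev);
}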

static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power on. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_ON,
					     GENPD_NOTIFY_OFF, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_on)
		goto out;

	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
	if (!timed) {
		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		goto err;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		goto out;

	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->gd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	genpd->synced_poweroff = false;
	return 0;
err:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return ret;
}

static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power off. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_OFF,
					     GENPD_NOTIFY_ON, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_off)
		goto out;

	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
	if (!timed) {
		ret = genpd->power_off(genpd);
		if (ret)
			goto busy;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret)
		goto busy;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		goto out;

	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->gd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return 0;
busy:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	return ret;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_power_off() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * genpd_power_off - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
 * RPM status of the related device is in an intermediate state, not yet turned
 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
 * be RPM_SUSPENDED, while it tries to power off the PM domain.
 * @depth: nesting count for lockdep.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
			   unsigned int depth)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;
	int ret;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
		return 0;

	/*
	 * Abort power off for the PM domain in the following situations:
	 * (1) The domain is configured as always on.
	 * (2) When the domain has a subdomain being powered on.
	 */
	if (genpd_is_always_on(genpd) ||
			genpd_is_rpm_always_on(genpd) ||
			atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	/*
	 * The children must be in their deepest (powered-off) states to allow
	 * the parent to be powered off. Note that there's no need for
	 * additional locking, as powering on a child requires the parent's
	 * lock to be acquired first.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct generic_pm_domain *child = link->child;
		if (child->state_idx < child->state_count - 1)
			return -EBUSY;
	}

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		/*
		 * Do not allow PM domain to be powered off, when an IRQ safe
		 * device is part of a non-IRQ safe domain.
		 */
		if (!pm_runtime_suspended(pdd->dev) ||
			irq_safe_dev_in_sleep_domain(pdd->dev, genpd))
			not_suspended++;
	}

	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	/* Default to shallowest state. */
	if (!genpd->gov)
		genpd->state_idx = 0;

	/* Don't power off, if a child domain is waiting to power on. */
	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	ret = _genpd_power_off(genpd, true);
	if (ret) {
		genpd->states[genpd->state_idx].rejected++;
		return ret;
	}

	genpd->status = GENPD_STATE_OFF;
	genpd_update_accounting(genpd);
	genpd->states[genpd->state_idx].usage++;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return 0;
}

/**
 * genpd_power_on - Restore power to a given PM domain and its parents.
 * @genpd: PM domain to power up.
 * @depth: nesting count for lockdep.
 *
 * Restore power to @genpd and all of its parents so that it is possible to
 * resume a device belonging to it.
 */
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd_status_on(genpd))
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the parents' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_sd_counter_inc(parent);

		genpd_lock_nested(parent, depth + 1);
		ret = genpd_power_on(parent, depth + 1);
		genpd_unlock(parent);

		if (ret) {
			genpd_sd_counter_dec(parent);
			goto err;
		}
	}

	ret = _genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GENPD_STATE_ON;
	genpd_update_accounting(genpd);

	return 0;

 err:
	list_for_each_entry_continue_reverse(link,
					&genpd->child_links,
					child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return ret;
}

static int genpd_dev_pm_start(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);

	return genpd_start_dev(genpd, dev);
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd = ERR_PTR(-ENODATA);
		struct pm_domain_data *pdd;
		struct gpd_timing_data *td;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd) {
			td = to_gpd_data(pdd)->td;
			if (td) {
				td->constraint_changed = true;
				genpd = dev_to_genpd(dev);
			}
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			genpd_lock(genpd);
			genpd->gd->max_off_time_changed = true;
			genpd_unlock(genpd);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_lock(genpd);
	genpd_power_off(genpd, false, 0);
	genpd_unlock(genpd);
}

/**
 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_resume(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*suspend_ok)(struct device *__dev);
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	struct gpd_timing_data *td = gpd_data->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start = 0;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for other purposes than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
	if (runtime_pm && suspend_ok && !suspend_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	if (td && runtime_pm)
		time_start = ktime_get();

	ret = __genpd_runtime_suspend(dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		__genpd_runtime_resume(dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (td && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->gd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine may be run with
	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
	 */
	if (irq_safe_dev_in_sleep_domain(dev, genpd))
		return 0;

	genpd_lock(genpd);
	genpd_power_off(genpd, true, 0);
	gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	struct gpd_timing_data *td = gpd_data->td;
	bool timed = td && pm_runtime_enabled(dev);
	ktime_t time_start = 0;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * As we don't power off a non-IRQ-safe domain that holds an IRQ safe
	 * device, we don't need to restore power to it.
	 */
	if (irq_safe_dev_in_sleep_domain(dev, genpd))
		goto out;

	genpd_lock(genpd);
	genpd_restore_performance_state(dev, gpd_data->rpm_pstate);
	ret = genpd_power_on(genpd, 0);
	genpd_unlock(genpd);

	if (ret)
		return ret;

 out:
	/* Measure resume latency. */
	if (timed)
		time_start = ktime_get();

	ret = genpd_start_dev(genpd, dev);
	if (ret)
		goto err_poweroff;

	ret = __genpd_runtime_resume(dev);
	if (ret)
		goto err_stop;

	/* Update resume latency value if the measured time exceeds it. */
	if (timed) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->gd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;

err_stop:
	genpd_stop_dev(genpd, dev);
err_poweroff:
	if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
		genpd_lock(genpd);
		genpd_power_off(genpd, true, 0);
		gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
		genpd_unlock(genpd);
	}

	return ret;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);

/**
 * genpd_power_off_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_power_off_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	pr_info("genpd: Disabling unused power domains\n");
	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall_sync(genpd_power_off_unused);
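
/*
 * Debugging note: booting with "pd_ignore_unused" on the kernel command line
 * makes the initcall above skip queuing the power-off work, leaving
 * otherwise-unused PM domains powered on. This can help when bringing up a
 * platform whose domain consumers are not all wired up yet.
 */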

#ifdef CONFIG_PM_SLEEP

/**
 * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
 * @genpd: PM domain to power off, if possible.
 * @use_lock: whether to use the genpd locks when walking the domain topology.
 * @depth: nesting count for lockdep.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its parents.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
				 unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	/* Check that the children are in their deepest (powered-off) state. */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct generic_pm_domain *child = link->child;
		if (child->state_idx < child->state_count - 1)
			return;
	}

	/* Choose the deepest state when suspending */
	genpd->state_idx = genpd->state_count - 1;
	if (_genpd_power_off(genpd, false)) {
		genpd->states[genpd->state_idx].rejected++;
		return;
	}

	genpd->states[genpd->state_idx].usage++;
	genpd->status = GENPD_STATE_OFF;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_off(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}
}

/**
 * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
 * @genpd: PM domain to power on.
 * @use_lock: whether to use the genpd locks when walking the domain topology.
 * @depth: nesting count for lockdep.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
				unsigned int depth)
{
	struct gpd_link *link;

	if (genpd_status_on(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_inc(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_on(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}

	_genpd_power_on(genpd, false);
	genpd->status = GENPD_STATE_ON;
}

/**
 * genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	genpd_lock(genpd);
	genpd->prepared_count++;
	genpd_unlock(genpd);

	ret = pm_generic_prepare(dev);
	if (ret < 0) {
		genpd_lock(genpd);

		genpd->prepared_count--;

		genpd_unlock(genpd);
	}

	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
	return ret >= 0 ? 0 : ret;
}

/**
 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to suspend.
 * @suspend_noirq: Generic suspend_noirq callback.
 * @resume_noirq: Generic resume_noirq callback.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_finish_suspend(struct device *dev,
				int (*suspend_noirq)(struct device *dev),
				int (*resume_noirq)(struct device *dev))
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	ret = suspend_noirq(dev);
	if (ret)
		return ret;

	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
		return 0;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_stop_dev(genpd, dev);
		if (ret) {
			resume_noirq(dev);
			return ret;
		}
	}

	genpd_lock(genpd);
	genpd->suspended_count++;
	genpd_sync_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_suspend_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev,
				    pm_generic_suspend_noirq,
				    pm_generic_resume_noirq);
}

/**
 * genpd_finish_resume - Completion of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 * @resume_noirq: Generic resume_noirq callback.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int genpd_finish_resume(struct device *dev,
			       int (*resume_noirq)(struct device *dev))
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
		return resume_noirq(dev);

	genpd_lock(genpd);
	genpd_sync_power_on(genpd, true, 0);
	genpd->suspended_count--;
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return resume_noirq(dev);
}

/**
 * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int genpd_resume_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_resume(dev, pm_generic_resume_noirq);
}

/**
 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int genpd_freeze_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev,
				    pm_generic_freeze_noirq,
				    pm_generic_thaw_noirq);
}

/**
 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int genpd_thaw_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_resume(dev, pm_generic_thaw_noirq);
}

/**
 * genpd_poweroff_noirq - Completion of hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to poweroff.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_poweroff_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev,
				    pm_generic_poweroff_noirq,
				    pm_generic_restore_noirq);
}

/**
 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int genpd_restore_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_resume(dev, pm_generic_restore_noirq);
}

/**
 * genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	pm_generic_complete(dev);

	genpd_lock(genpd);

	genpd->prepared_count--;
	if (!genpd->prepared_count)
		genpd_queue_power_off_work(genpd);

	genpd_unlock(genpd);
}

static void genpd_switch_state(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;
	bool use_lock;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	use_lock = genpd_is_irq_safe(genpd);

	if (use_lock)
		genpd_lock(genpd);

	if (suspend) {
		genpd->suspended_count++;
		genpd_sync_power_off(genpd, use_lock, 0);
	} else {
		genpd_sync_power_on(genpd, use_lock, 0);
		genpd->suspended_count--;
	}

	if (use_lock)
		genpd_unlock(genpd);
}

/**
 * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
 * @dev: The device that is attached to the genpd, that can be suspended.
 *
 * This routine should typically be called for a device that needs to be
 * suspended during the syscore suspend phase. It may also be called during
 * suspend-to-idle to suspend a corresponding CPU device that is attached to a
 * genpd.
 */
void dev_pm_genpd_suspend(struct device *dev)
{
	genpd_switch_state(dev, true);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);

/**
 * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
 * @dev: The device that is attached to the genpd, which needs to be resumed.
 *
 * This routine should typically be called for a device that needs to be resumed
 * during the syscore resume phase. It may also be called during suspend-to-idle
 * to resume a corresponding CPU device that is attached to a genpd.
 */
void dev_pm_genpd_resume(struct device *dev)
{
	genpd_switch_state(dev, false);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);
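
/*
 * Sketch (illustrative, would need <linux/syscore_ops.h>): a syscore-stage
 * user, e.g. a timer that must run until the very last point of suspend,
 * can drive its genpd from syscore ops. The foo_timer_dev pointer is an
 * assumption.
 */
static struct device *foo_timer_dev;

static int foo_timer_syscore_suspend(void)
{
	dev_pm_genpd_suspend(foo_timer_dev);
	return 0;
}

static void foo_timer_syscore_resume(void)
{
	dev_pm_genpd_resume(foo_timer_dev);
}

static struct syscore_ops foo_timer_syscore_ops __maybe_unused = {
	.suspend = foo_timer_syscore_suspend,
	.resume = foo_timer_syscore_resume,
};
/* register_syscore_ops(&foo_timer_syscore_ops) would hook these up. */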

#else /* !CONFIG_PM_SLEEP */

#define genpd_prepare		NULL
#define genpd_suspend_noirq	NULL
#define genpd_resume_noirq	NULL
#define genpd_freeze_noirq	NULL
#define genpd_thaw_noirq	NULL
#define genpd_poweroff_noirq	NULL
#define genpd_restore_noirq	NULL
#define genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
							   bool has_governor)
{
	struct generic_pm_domain_data *gpd_data;
	struct gpd_timing_data *td;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	gpd_data->base.dev = dev;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	/* Allocate data used by a governor. */
	if (has_governor) {
		td = kzalloc(sizeof(*td), GFP_KERNEL);
		if (!td) {
			ret = -ENOMEM;
			goto err_free;
		}

		td->constraint_changed = true;
		td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
		td->next_wakeup = KTIME_MAX;
		gpd_data->td = td;
	}

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data)
		ret = -EINVAL;
	else
		dev->power.subsys_data->domain_data = &gpd_data->base;

	spin_unlock_irq(&dev->power.lock);

	if (ret)
		goto err_free;

	return gpd_data;

 err_free:
	kfree(gpd_data->td);
	kfree(gpd_data);
 err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	spin_lock_irq(&dev->power.lock);

	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data->td);
	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

static void genpd_update_cpumask(struct generic_pm_domain *genpd,
				 int cpu, bool set, unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_is_cpu_domain(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_lock_nested(parent, depth + 1);
		genpd_update_cpumask(parent, cpu, set, depth + 1);
		genpd_unlock(parent);
	}

	if (set)
		cpumask_set_cpu(cpu, genpd->cpus);
	else
		cpumask_clear_cpu(cpu, genpd->cpus);
}

static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, true, 0);
}

static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, false, 0);
}

static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
{
	int cpu;

	if (!genpd_is_cpu_domain(genpd))
		return -1;

	for_each_possible_cpu(cpu) {
		if (get_cpu_device(cpu) == dev)
			return cpu;
	}

	return -1;
}

static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			    struct device *base_dev)
{
	struct genpd_governor_data *gd = genpd->gd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	gpd_data = genpd_alloc_dev_data(dev, gd);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd_lock(genpd);

	genpd_set_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, &genpd->domain);

	genpd->device_count++;
	if (gd)
		gd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

	genpd_unlock(genpd);
 out:
	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
					DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}

/**
 * pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 */
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
	int ret;

	if (!genpd || !dev)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_device(genpd, dev, dev);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_device);
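
/*
 * Provider-side sketch (illustrative): registering a purely logical domain
 * and hooking a consumer device into it without devicetree. The names are
 * assumptions and error handling is minimal.
 */
static struct generic_pm_domain bar_pd = {
	.name = "bar_pd",
	/* .power_on/.power_off may be left NULL for a gating-only domain. */
};

static int __maybe_unused bar_pd_setup(struct device *consumer)
{
	/* Initialize as initially powered off, with no governor. */
	int ret = pm_genpd_init(&bar_pd, NULL, true);

	if (ret)
		return ret;

	return pm_genpd_add_device(&bar_pd, consumer);
}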

static int genpd_remove_device(struct generic_pm_domain *genpd,
			       struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
				   DEV_PM_QOS_RESUME_LATENCY);

	genpd_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	if (genpd->gd)
		genpd->gd->max_off_time_changed = true;

	genpd_clear_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, NULL);

	list_del_init(&pdd->list_node);

	genpd_unlock(genpd);

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	genpd_unlock(genpd);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);

	if (!genpd)
		return -EINVAL;

	return genpd_remove_device(genpd, dev);
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_device);

/**
 * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
 *
 * @dev: Device that should be associated with the notifier
 * @nb: The notifier block to register
 *
 * Users may call this function to add a genpd power on/off notifier for an
 * attached @dev. Only one notifier per device is allowed. The notifier is
 * invoked when genpd is powering on/off the PM domain.
 *
 * It is assumed that the user guarantees that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data))
		return -EINVAL;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	if (gpd_data->power_nb)
		return -EEXIST;

	genpd_lock(genpd);
	ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
	genpd_unlock(genpd);

	if (ret) {
		dev_warn(dev, "failed to add notifier for PM domain %s\n",
			 genpd->name);
		return ret;
	}

	gpd_data->power_nb = nb;
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
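
/*
 * Sketch (illustrative): registering a power on/off notifier from a driver's
 * probe, using a callback shaped like foo_pd_notifier further above. Only
 * one notifier per device is allowed.
 */
static struct notifier_block foo_nb __maybe_unused = {
	.notifier_call = foo_pd_notifier,
};
/* In probe, after attaching: dev_pm_genpd_add_notifier(dev, &foo_nb); */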

/**
 * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
 *
 * @dev: Device that is associated with the notifier
 *
 * Users may call this function to remove a genpd power on/off notifier for an
 * attached @dev.
 *
 * It is assumed that the user guarantees that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_remove_notifier(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data))
		return -EINVAL;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	if (!gpd_data->power_nb)
		return -ENODEV;

	genpd_lock(genpd);
	ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
					    gpd_data->power_nb);
	genpd_unlock(genpd);

	if (ret) {
		dev_warn(dev, "failed to remove notifier for PM domain %s\n",
			 genpd->name);
		return ret;
	}

	gpd_data->power_nb = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);
1891 
1892 static int genpd_add_subdomain(struct generic_pm_domain *genpd,
1893 			       struct generic_pm_domain *subdomain)
1894 {
1895 	struct gpd_link *link, *itr;
1896 	int ret = 0;
1897 
1898 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1899 	    || genpd == subdomain)
1900 		return -EINVAL;
1901 
	/*
	 * A subdomain that is power managed in an IRQ safe context may
	 * need its parent to be powered on/off in that same context, so
	 * reject a non-IRQ-safe parent for an IRQ-safe subdomain.
	 */
1907 	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
1908 		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
1909 				genpd->name, subdomain->name);
1910 		return -EINVAL;
1911 	}
1912 
1913 	link = kzalloc(sizeof(*link), GFP_KERNEL);
1914 	if (!link)
1915 		return -ENOMEM;
1916 
1917 	genpd_lock(subdomain);
1918 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1919 
1920 	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
1921 		ret = -EINVAL;
1922 		goto out;
1923 	}
1924 
1925 	list_for_each_entry(itr, &genpd->parent_links, parent_node) {
1926 		if (itr->child == subdomain && itr->parent == genpd) {
1927 			ret = -EINVAL;
1928 			goto out;
1929 		}
1930 	}
1931 
1932 	link->parent = genpd;
1933 	list_add_tail(&link->parent_node, &genpd->parent_links);
1934 	link->child = subdomain;
1935 	list_add_tail(&link->child_node, &subdomain->child_links);
1936 	if (genpd_status_on(subdomain))
1937 		genpd_sd_counter_inc(genpd);
1938 
1939  out:
1940 	genpd_unlock(genpd);
1941 	genpd_unlock(subdomain);
1942 	if (ret)
1943 		kfree(link);
1944 	return ret;
1945 }
1946 
1947 /**
1948  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1949  * @genpd: Leader PM domain to add the subdomain to.
1950  * @subdomain: Subdomain to be added.
1951  */
1952 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1953 			   struct generic_pm_domain *subdomain)
1954 {
1955 	int ret;
1956 
1957 	mutex_lock(&gpd_list_lock);
1958 	ret = genpd_add_subdomain(genpd, subdomain);
1959 	mutex_unlock(&gpd_list_lock);
1960 
1961 	return ret;
1962 }
1963 EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
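
/*
 * Example (illustrative sketch): a provider building a two-level
 * hierarchy in which a hypothetical "core" domain is a child of a "top"
 * domain. Powering on the child then keeps the parent on via the
 * sd_count reference taken above. Both domains are assumed to have been
 * set up with pm_genpd_init() already.
 *
 *	ret = pm_genpd_add_subdomain(&top_pd, &core_pd);
 *	if (ret)
 *		goto err_subdomain;
 *
 * Note the IRQ-safety rule enforced in genpd_add_subdomain(): if core_pd
 * has GENPD_FLAG_IRQ_SAFE set, top_pd must be IRQ safe as well.
 */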
1964 
1965 /**
1966  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1967  * @genpd: Leader PM domain to remove the subdomain from.
1968  * @subdomain: Subdomain to be removed.
1969  */
1970 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1971 			      struct generic_pm_domain *subdomain)
1972 {
1973 	struct gpd_link *l, *link;
1974 	int ret = -EINVAL;
1975 
1976 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1977 		return -EINVAL;
1978 
1979 	genpd_lock(subdomain);
1980 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1981 
1982 	if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
1983 		pr_warn("%s: unable to remove subdomain %s\n",
1984 			genpd->name, subdomain->name);
1985 		ret = -EBUSY;
1986 		goto out;
1987 	}
1988 
1989 	list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
1990 		if (link->child != subdomain)
1991 			continue;
1992 
1993 		list_del(&link->parent_node);
1994 		list_del(&link->child_node);
1995 		kfree(link);
1996 		if (genpd_status_on(subdomain))
1997 			genpd_sd_counter_dec(genpd);
1998 
1999 		ret = 0;
2000 		break;
2001 	}
2002 
2003 out:
2004 	genpd_unlock(genpd);
2005 	genpd_unlock(subdomain);
2006 
2007 	return ret;
2008 }
2009 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
2010 
2011 static void genpd_free_default_power_state(struct genpd_power_state *states,
2012 					   unsigned int state_count)
2013 {
2014 	kfree(states);
2015 }
2016 
2017 static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
2018 {
2019 	struct genpd_power_state *state;
2020 
2021 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2022 	if (!state)
2023 		return -ENOMEM;
2024 
2025 	genpd->states = state;
2026 	genpd->state_count = 1;
2027 	genpd->free_states = genpd_free_default_power_state;
2028 
2029 	return 0;
2030 }
2031 
2032 static int genpd_alloc_data(struct generic_pm_domain *genpd)
2033 {
2034 	struct genpd_governor_data *gd = NULL;
2035 	int ret;
2036 
2037 	if (genpd_is_cpu_domain(genpd) &&
2038 	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
2039 		return -ENOMEM;
2040 
2041 	if (genpd->gov) {
2042 		gd = kzalloc(sizeof(*gd), GFP_KERNEL);
2043 		if (!gd) {
2044 			ret = -ENOMEM;
2045 			goto free;
2046 		}
2047 
2048 		gd->max_off_time_ns = -1;
2049 		gd->max_off_time_changed = true;
2050 		gd->next_wakeup = KTIME_MAX;
2051 		gd->next_hrtimer = KTIME_MAX;
2052 	}
2053 
	/* Use a single "off" state if no states were declared. */
2055 	if (genpd->state_count == 0) {
2056 		ret = genpd_set_default_power_state(genpd);
2057 		if (ret)
2058 			goto free;
2059 	}
2060 
2061 	genpd->gd = gd;
2062 	return 0;
2063 
2064 free:
2065 	if (genpd_is_cpu_domain(genpd))
2066 		free_cpumask_var(genpd->cpus);
2067 	kfree(gd);
2068 	return ret;
2069 }
2070 
2071 static void genpd_free_data(struct generic_pm_domain *genpd)
2072 {
2073 	if (genpd_is_cpu_domain(genpd))
2074 		free_cpumask_var(genpd->cpus);
2075 	if (genpd->free_states)
2076 		genpd->free_states(genpd->states, genpd->state_count);
2077 	kfree(genpd->gd);
2078 }
2079 
2080 static void genpd_lock_init(struct generic_pm_domain *genpd)
2081 {
2082 	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
2083 		spin_lock_init(&genpd->slock);
2084 		genpd->lock_ops = &genpd_spin_ops;
2085 	} else {
2086 		mutex_init(&genpd->mlock);
2087 		genpd->lock_ops = &genpd_mtx_ops;
2088 	}
2089 }
2090 
2091 /**
2092  * pm_genpd_init - Initialize a generic I/O PM domain object.
2093  * @genpd: PM domain object to initialize.
2094  * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial power state of the domain (true if initially off).
2096  *
2097  * Returns 0 on successful initialization, else a negative error code.
2098  */
2099 int pm_genpd_init(struct generic_pm_domain *genpd,
2100 		  struct dev_power_governor *gov, bool is_off)
2101 {
2102 	int ret;
2103 
2104 	if (IS_ERR_OR_NULL(genpd))
2105 		return -EINVAL;
2106 
2107 	INIT_LIST_HEAD(&genpd->parent_links);
2108 	INIT_LIST_HEAD(&genpd->child_links);
2109 	INIT_LIST_HEAD(&genpd->dev_list);
2110 	RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
2111 	genpd_lock_init(genpd);
2112 	genpd->gov = gov;
2113 	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
2114 	atomic_set(&genpd->sd_count, 0);
2115 	genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
2116 	genpd->device_count = 0;
2117 	genpd->provider = NULL;
2118 	genpd->has_provider = false;
2119 	genpd->accounting_time = ktime_get_mono_fast_ns();
2120 	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
2121 	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
2122 	genpd->domain.ops.prepare = genpd_prepare;
2123 	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
2124 	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
2125 	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
2126 	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
2127 	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
2128 	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
2129 	genpd->domain.ops.complete = genpd_complete;
2130 	genpd->domain.start = genpd_dev_pm_start;
2131 	genpd->domain.set_performance_state = genpd_dev_pm_set_performance_state;
2132 
2133 	if (genpd->flags & GENPD_FLAG_PM_CLK) {
2134 		genpd->dev_ops.stop = pm_clk_suspend;
2135 		genpd->dev_ops.start = pm_clk_resume;
2136 	}
2137 
2138 	/* The always-on governor works better with the corresponding flag. */
2139 	if (gov == &pm_domain_always_on_gov)
2140 		genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
2141 
2142 	/* Always-on domains must be powered on at initialization. */
2143 	if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
2144 			!genpd_status_on(genpd)) {
2145 		pr_err("always-on PM domain %s is not on\n", genpd->name);
2146 		return -EINVAL;
2147 	}
2148 
2149 	/* Multiple states but no governor doesn't make sense. */
2150 	if (!gov && genpd->state_count > 1)
2151 		pr_warn("%s: no governor for states\n", genpd->name);
2152 
2153 	ret = genpd_alloc_data(genpd);
2154 	if (ret)
2155 		return ret;
2156 
2157 	device_initialize(&genpd->dev);
2158 	dev_set_name(&genpd->dev, "%s", genpd->name);
2159 
2160 	mutex_lock(&gpd_list_lock);
2161 	list_add(&genpd->gpd_list_node, &gpd_list);
2162 	mutex_unlock(&gpd_list_lock);
2163 	genpd_debug_add(genpd);
2164 
2165 	return 0;
2166 }
2167 EXPORT_SYMBOL_GPL(pm_genpd_init);
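
/*
 * Example (illustrative sketch): minimal genpd setup by a provider
 * driver. The foo_* names are hypothetical; real providers fill in
 * .power_on/.power_off with the register accesses for their hardware.
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "foo",
 *		.power_on = foo_pd_power_on,
 *		.power_off = foo_pd_power_off,
 *	};
 *
 *	ret = pm_genpd_init(&foo_pd, &simple_qos_governor, true);
 *
 * This registers the domain as initially off, using the simple QoS
 * governor. Passing is_off == true for a domain flagged
 * GENPD_FLAG_ALWAYS_ON would instead fail with -EINVAL, per the check
 * above.
 */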
2168 
2169 static int genpd_remove(struct generic_pm_domain *genpd)
2170 {
2171 	struct gpd_link *l, *link;
2172 
2173 	if (IS_ERR_OR_NULL(genpd))
2174 		return -EINVAL;
2175 
2176 	genpd_lock(genpd);
2177 
2178 	if (genpd->has_provider) {
2179 		genpd_unlock(genpd);
2180 		pr_err("Provider present, unable to remove %s\n", genpd->name);
2181 		return -EBUSY;
2182 	}
2183 
2184 	if (!list_empty(&genpd->parent_links) || genpd->device_count) {
2185 		genpd_unlock(genpd);
2186 		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
2187 		return -EBUSY;
2188 	}
2189 
2190 	list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
2191 		list_del(&link->parent_node);
2192 		list_del(&link->child_node);
2193 		kfree(link);
2194 	}
2195 
2196 	list_del(&genpd->gpd_list_node);
2197 	genpd_unlock(genpd);
2198 	genpd_debug_remove(genpd);
2199 	cancel_work_sync(&genpd->power_off_work);
2200 	genpd_free_data(genpd);
2201 
2202 	pr_debug("%s: removed %s\n", __func__, genpd->name);
2203 
2204 	return 0;
2205 }
2206 
2207 /**
2208  * pm_genpd_remove - Remove a generic I/O PM domain
2209  * @genpd: Pointer to PM domain that is to be removed.
2210  *
2211  * To remove the PM domain, this function:
 *  - Removes the PM domain as a subdomain from any parent domains,
 *    if it was added as one.
 *  - Removes the PM domain from the list of registered PM domains.
 *
 * The PM domain will only be removed if the associated provider has
 * been removed, it is not a parent of any other PM domain, and it has
 * no devices associated with it.
2219  */
2220 int pm_genpd_remove(struct generic_pm_domain *genpd)
2221 {
2222 	int ret;
2223 
2224 	mutex_lock(&gpd_list_lock);
2225 	ret = genpd_remove(genpd);
2226 	mutex_unlock(&gpd_list_lock);
2227 
2228 	return ret;
2229 }
2230 EXPORT_SYMBOL_GPL(pm_genpd_remove);
2231 
2232 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
2233 
2234 /*
2235  * Device Tree based PM domain providers.
2236  *
2237  * The code below implements generic device tree based PM domain providers that
2238  * bind device tree nodes with generic PM domains registered in the system.
2239  *
2240  * Any driver that registers generic PM domains and needs to support binding of
2241  * devices to these domains is supposed to register a PM domain provider, which
2242  * maps a PM domain specifier retrieved from the device tree to a PM domain.
2243  *
2244  * Two simple mapping functions have been provided for convenience:
2245  *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
2246  *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
2247  *    index.
2248  */
2249 
2250 /**
2251  * struct of_genpd_provider - PM domain provider registration structure
2252  * @link: Entry in global list of PM domain providers
2253  * @node: Pointer to device tree node of PM domain provider
2254  * @xlate: Provider-specific xlate callback mapping a set of specifier cells
2255  *         into a PM domain.
2256  * @data: context pointer to be passed into @xlate callback
2257  */
2258 struct of_genpd_provider {
2259 	struct list_head link;
2260 	struct device_node *node;
2261 	genpd_xlate_t xlate;
2262 	void *data;
2263 };
2264 
2265 /* List of registered PM domain providers. */
2266 static LIST_HEAD(of_genpd_providers);
2267 /* Mutex to protect the list above. */
2268 static DEFINE_MUTEX(of_genpd_mutex);
2269 
2270 /**
2271  * genpd_xlate_simple() - Xlate function for direct node-domain mapping
2272  * @genpdspec: OF phandle args to map into a PM domain
2273  * @data: xlate function private data - pointer to struct generic_pm_domain
2274  *
2275  * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of the xlate function
 * needs to be a valid pointer to struct generic_pm_domain.
2278  */
2279 static struct generic_pm_domain *genpd_xlate_simple(
2280 					const struct of_phandle_args *genpdspec,
2281 					void *data)
2282 {
2283 	return data;
2284 }
2285 
2286 /**
2287  * genpd_xlate_onecell() - Xlate function using a single index.
2288  * @genpdspec: OF phandle args to map into a PM domain
2289  * @data: xlate function private data - pointer to struct genpd_onecell_data
2290  *
2291  * This is a generic xlate function that can be used to model simple PM domain
2292  * controllers that have one device tree node and provide multiple PM domains.
2293  * A single cell is used as an index into an array of PM domains specified in
2294  * the genpd_onecell_data struct when registering the provider.
2295  */
2296 static struct generic_pm_domain *genpd_xlate_onecell(
2297 					const struct of_phandle_args *genpdspec,
2298 					void *data)
2299 {
2300 	struct genpd_onecell_data *genpd_data = data;
2301 	unsigned int idx = genpdspec->args[0];
2302 
2303 	if (genpdspec->args_count != 1)
2304 		return ERR_PTR(-EINVAL);
2305 
2306 	if (idx >= genpd_data->num_domains) {
2307 		pr_err("%s: invalid domain index %u\n", __func__, idx);
2308 		return ERR_PTR(-EINVAL);
2309 	}
2310 
2311 	if (!genpd_data->domains[idx])
2312 		return ERR_PTR(-ENOENT);
2313 
2314 	return genpd_data->domains[idx];
2315 }
2316 
2317 /**
2318  * genpd_add_provider() - Register a PM domain provider for a node
2319  * @np: Device node pointer associated with the PM domain provider.
2320  * @xlate: Callback for decoding PM domain from phandle arguments.
2321  * @data: Context pointer for @xlate callback.
2322  */
2323 static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
2324 			      void *data)
2325 {
2326 	struct of_genpd_provider *cp;
2327 
2328 	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2329 	if (!cp)
2330 		return -ENOMEM;
2331 
2332 	cp->node = of_node_get(np);
2333 	cp->data = data;
2334 	cp->xlate = xlate;
2335 	fwnode_dev_initialized(&np->fwnode, true);
2336 
2337 	mutex_lock(&of_genpd_mutex);
2338 	list_add(&cp->link, &of_genpd_providers);
2339 	mutex_unlock(&of_genpd_mutex);
2340 	pr_debug("Added domain provider from %pOF\n", np);
2341 
2342 	return 0;
2343 }
2344 
2345 static bool genpd_present(const struct generic_pm_domain *genpd)
2346 {
2347 	bool ret = false;
2348 	const struct generic_pm_domain *gpd;
2349 
2350 	mutex_lock(&gpd_list_lock);
2351 	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2352 		if (gpd == genpd) {
2353 			ret = true;
2354 			break;
2355 		}
2356 	}
2357 	mutex_unlock(&gpd_list_lock);
2358 
2359 	return ret;
2360 }
2361 
2362 /**
2363  * of_genpd_add_provider_simple() - Register a simple PM domain provider
2364  * @np: Device node pointer associated with the PM domain provider.
2365  * @genpd: Pointer to PM domain associated with the PM domain provider.
2366  */
2367 int of_genpd_add_provider_simple(struct device_node *np,
2368 				 struct generic_pm_domain *genpd)
2369 {
2370 	int ret;
2371 
2372 	if (!np || !genpd)
2373 		return -EINVAL;
2374 
2375 	if (!genpd_present(genpd))
2376 		return -EINVAL;
2377 
2378 	genpd->dev.of_node = np;
2379 
2380 	/* Parse genpd OPP table */
2381 	if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
2382 		ret = dev_pm_opp_of_add_table(&genpd->dev);
2383 		if (ret)
2384 			return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n");
2385 
2386 		/*
2387 		 * Save table for faster processing while setting performance
2388 		 * state.
2389 		 */
2390 		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2391 		WARN_ON(IS_ERR(genpd->opp_table));
2392 	}
2393 
2394 	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
2395 	if (ret) {
2396 		if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
2397 			dev_pm_opp_put_opp_table(genpd->opp_table);
2398 			dev_pm_opp_of_remove_table(&genpd->dev);
2399 		}
2400 
2401 		return ret;
2402 	}
2403 
2404 	genpd->provider = &np->fwnode;
2405 	genpd->has_provider = true;
2406 
2407 	return 0;
2408 }
2409 EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
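
/*
 * Example (illustrative sketch): a platform driver registering a single
 * PM domain for its DT node. foo_pd is hypothetical and must have been
 * initialized with pm_genpd_init() first, since registration checks
 * genpd_present().
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = pm_genpd_init(&foo_pd, NULL, true);
 *		if (ret)
 *			return ret;
 *
 *		return of_genpd_add_provider_simple(pdev->dev.of_node,
 *						    &foo_pd);
 *	}
 */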
2410 
2411 /**
2412  * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
2413  * @np: Device node pointer associated with the PM domain provider.
2414  * @data: Pointer to the data associated with the PM domain provider.
2415  */
2416 int of_genpd_add_provider_onecell(struct device_node *np,
2417 				  struct genpd_onecell_data *data)
2418 {
2419 	struct generic_pm_domain *genpd;
2420 	unsigned int i;
2421 	int ret = -EINVAL;
2422 
2423 	if (!np || !data)
2424 		return -EINVAL;
2425 
2426 	if (!data->xlate)
2427 		data->xlate = genpd_xlate_onecell;
2428 
2429 	for (i = 0; i < data->num_domains; i++) {
2430 		genpd = data->domains[i];
2431 
2432 		if (!genpd)
2433 			continue;
2434 		if (!genpd_present(genpd))
2435 			goto error;
2436 
2437 		genpd->dev.of_node = np;
2438 
2439 		/* Parse genpd OPP table */
2440 		if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
2441 			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
2442 			if (ret) {
2443 				dev_err_probe(&genpd->dev, ret,
2444 					      "Failed to add OPP table for index %d\n", i);
2445 				goto error;
2446 			}
2447 
2448 			/*
2449 			 * Save table for faster processing while setting
2450 			 * performance state.
2451 			 */
2452 			genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2453 			WARN_ON(IS_ERR(genpd->opp_table));
2454 		}
2455 
2456 		genpd->provider = &np->fwnode;
2457 		genpd->has_provider = true;
2458 	}
2459 
2460 	ret = genpd_add_provider(np, data->xlate, data);
2461 	if (ret < 0)
2462 		goto error;
2463 
2464 	return 0;
2465 
2466 error:
2467 	while (i--) {
2468 		genpd = data->domains[i];
2469 
2470 		if (!genpd)
2471 			continue;
2472 
2473 		genpd->provider = NULL;
2474 		genpd->has_provider = false;
2475 
2476 		if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
2477 			dev_pm_opp_put_opp_table(genpd->opp_table);
2478 			dev_pm_opp_of_remove_table(&genpd->dev);
2479 		}
2480 	}
2481 
2482 	return ret;
2483 }
2484 EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
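
/*
 * Example (illustrative sketch): registering several domains behind one
 * provider node, indexed by a single cell. The foo_* names are
 * hypothetical; every non-NULL entry in the array must already have been
 * initialized with pm_genpd_init().
 *
 *	static struct generic_pm_domain *foo_domains[FOO_NR_DOMAINS];
 *
 *	static struct genpd_onecell_data foo_genpd_data = {
 *		.domains = foo_domains,
 *		.num_domains = ARRAY_SIZE(foo_domains),
 *	};
 *
 *	ret = of_genpd_add_provider_onecell(np, &foo_genpd_data);
 *
 * A consumer referencing "power-domains = <&foo_pd 2>;" is then mapped
 * by the default genpd_xlate_onecell() to foo_domains[2].
 */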
2485 
2486 /**
2487  * of_genpd_del_provider() - Remove a previously registered PM domain provider
2488  * @np: Device node pointer associated with the PM domain provider
2489  */
2490 void of_genpd_del_provider(struct device_node *np)
2491 {
2492 	struct of_genpd_provider *cp, *tmp;
2493 	struct generic_pm_domain *gpd;
2494 
2495 	mutex_lock(&gpd_list_lock);
2496 	mutex_lock(&of_genpd_mutex);
2497 	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
2498 		if (cp->node == np) {
2499 			/*
2500 			 * For each PM domain associated with the
			 * provider, set 'has_provider' to false
2502 			 * so that the PM domain can be safely removed.
2503 			 */
2504 			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2505 				if (gpd->provider == &np->fwnode) {
2506 					gpd->has_provider = false;
2507 
2508 					if (genpd_is_opp_table_fw(gpd) || !gpd->set_performance_state)
2509 						continue;
2510 
2511 					dev_pm_opp_put_opp_table(gpd->opp_table);
2512 					dev_pm_opp_of_remove_table(&gpd->dev);
2513 				}
2514 			}
2515 
2516 			fwnode_dev_initialized(&cp->node->fwnode, false);
2517 			list_del(&cp->link);
2518 			of_node_put(cp->node);
2519 			kfree(cp);
2520 			break;
2521 		}
2522 	}
2523 	mutex_unlock(&of_genpd_mutex);
2524 	mutex_unlock(&gpd_list_lock);
2525 }
2526 EXPORT_SYMBOL_GPL(of_genpd_del_provider);
2527 
2528 /**
2529  * genpd_get_from_provider() - Look-up PM domain
2530  * @genpdspec: OF phandle args to use for look-up
2531  *
 * Looks for a PM domain provider under the node specified by @genpdspec and,
 * if found, uses the xlate function of the provider to map phandle args to a
 * PM domain.
2535  *
2536  * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2537  * on failure.
2538  */
2539 static struct generic_pm_domain *genpd_get_from_provider(
2540 					const struct of_phandle_args *genpdspec)
2541 {
2542 	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2543 	struct of_genpd_provider *provider;
2544 
2545 	if (!genpdspec)
2546 		return ERR_PTR(-EINVAL);
2547 
2548 	mutex_lock(&of_genpd_mutex);
2549 
2550 	/* Check if we have such a provider in our array */
2551 	list_for_each_entry(provider, &of_genpd_providers, link) {
2552 		if (provider->node == genpdspec->np)
2553 			genpd = provider->xlate(genpdspec, provider->data);
2554 		if (!IS_ERR(genpd))
2555 			break;
2556 	}
2557 
2558 	mutex_unlock(&of_genpd_mutex);
2559 
2560 	return genpd;
2561 }
2562 
2563 /**
2564  * of_genpd_add_device() - Add a device to an I/O PM domain
 * @genpdspec: OF phandle args to use for PM domain look-up
 * @dev: Device to be added.
 *
 * Looks up an I/O PM domain based upon the phandle args provided and adds
 * the device to the PM domain. Returns a negative error code on failure.
2570  */
2571 int of_genpd_add_device(const struct of_phandle_args *genpdspec, struct device *dev)
2572 {
2573 	struct generic_pm_domain *genpd;
2574 	int ret;
2575 
2576 	if (!dev)
2577 		return -EINVAL;
2578 
2579 	mutex_lock(&gpd_list_lock);
2580 
2581 	genpd = genpd_get_from_provider(genpdspec);
2582 	if (IS_ERR(genpd)) {
2583 		ret = PTR_ERR(genpd);
2584 		goto out;
2585 	}
2586 
2587 	ret = genpd_add_device(genpd, dev, dev);
2588 
2589 out:
2590 	mutex_unlock(&gpd_list_lock);
2591 
2592 	return ret;
2593 }
2594 EXPORT_SYMBOL_GPL(of_genpd_add_device);
2595 
2596 /**
2597  * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2598  * @parent_spec: OF phandle args to use for parent PM domain look-up
2599  * @subdomain_spec: OF phandle args to use for subdomain look-up
2600  *
 * Looks up a parent PM domain and a subdomain based upon the phandle args
 * provided and adds the subdomain to the parent PM domain. Returns a
2603  * negative error code on failure.
2604  */
2605 int of_genpd_add_subdomain(const struct of_phandle_args *parent_spec,
2606 			   const struct of_phandle_args *subdomain_spec)
2607 {
2608 	struct generic_pm_domain *parent, *subdomain;
2609 	int ret;
2610 
2611 	mutex_lock(&gpd_list_lock);
2612 
2613 	parent = genpd_get_from_provider(parent_spec);
2614 	if (IS_ERR(parent)) {
2615 		ret = PTR_ERR(parent);
2616 		goto out;
2617 	}
2618 
2619 	subdomain = genpd_get_from_provider(subdomain_spec);
2620 	if (IS_ERR(subdomain)) {
2621 		ret = PTR_ERR(subdomain);
2622 		goto out;
2623 	}
2624 
2625 	ret = genpd_add_subdomain(parent, subdomain);
2626 
2627 out:
2628 	mutex_unlock(&gpd_list_lock);
2629 
2630 	return ret == -ENOENT ? -EPROBE_DEFER : ret;
2631 }
2632 EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
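
/*
 * Example (illustrative sketch): assuming parent_spec and child_spec
 * have already been parsed, e.g. with of_parse_phandle_with_args() on
 * "power-domains" properties, linking the two domains reduces to:
 *
 *	ret = of_genpd_add_subdomain(&parent_spec, &child_spec);
 *	if (ret)
 *		return ret;
 *
 * The -ENOENT to -EPROBE_DEFER translation above lets callers retry once
 * a provider that has not been registered yet shows up.
 */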
2633 
2634 /**
2635  * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
2636  * @parent_spec: OF phandle args to use for parent PM domain look-up
2637  * @subdomain_spec: OF phandle args to use for subdomain look-up
2638  *
 * Looks up a parent PM domain and a subdomain based upon the phandle args
 * provided and removes the subdomain from the parent PM domain. Returns a
2641  * negative error code on failure.
2642  */
2643 int of_genpd_remove_subdomain(const struct of_phandle_args *parent_spec,
2644 			      const struct of_phandle_args *subdomain_spec)
2645 {
2646 	struct generic_pm_domain *parent, *subdomain;
2647 	int ret;
2648 
2649 	mutex_lock(&gpd_list_lock);
2650 
2651 	parent = genpd_get_from_provider(parent_spec);
2652 	if (IS_ERR(parent)) {
2653 		ret = PTR_ERR(parent);
2654 		goto out;
2655 	}
2656 
2657 	subdomain = genpd_get_from_provider(subdomain_spec);
2658 	if (IS_ERR(subdomain)) {
2659 		ret = PTR_ERR(subdomain);
2660 		goto out;
2661 	}
2662 
2663 	ret = pm_genpd_remove_subdomain(parent, subdomain);
2664 
2665 out:
2666 	mutex_unlock(&gpd_list_lock);
2667 
2668 	return ret;
2669 }
2670 EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
2671 
2672 /**
2673  * of_genpd_remove_last - Remove the last PM domain registered for a provider
2674  * @np: Pointer to device node associated with provider
2675  *
2676  * Find the last PM domain that was added by a particular provider and
 * remove this PM domain from the list of PM domains. The provider is
 * identified by the device node that is passed. The PM domain will only
 * be removed if the provider associated with the domain has been removed.
2681  *
2682  * Returns a valid pointer to struct generic_pm_domain on success or
2683  * ERR_PTR() on failure.
2684  */
2685 struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
2686 {
2687 	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
2688 	int ret;
2689 
2690 	if (IS_ERR_OR_NULL(np))
2691 		return ERR_PTR(-EINVAL);
2692 
2693 	mutex_lock(&gpd_list_lock);
2694 	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
2695 		if (gpd->provider == &np->fwnode) {
2696 			ret = genpd_remove(gpd);
2697 			genpd = ret ? ERR_PTR(ret) : gpd;
2698 			break;
2699 		}
2700 	}
2701 	mutex_unlock(&gpd_list_lock);
2702 
2703 	return genpd;
2704 }
2705 EXPORT_SYMBOL_GPL(of_genpd_remove_last);
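
/*
 * Example (illustrative sketch): a provider's remove path, undoing
 * of_genpd_add_provider_*() and then tearing down the domains it
 * registered, newest first:
 *
 *	struct generic_pm_domain *pd;
 *
 *	of_genpd_del_provider(np);
 *
 *	do {
 *		pd = of_genpd_remove_last(np);
 *	} while (!IS_ERR(pd));
 *
 * of_genpd_del_provider() must come first: genpd_remove() refuses to
 * remove a domain while has_provider is still set.
 */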
2706 
2707 static void genpd_release_dev(struct device *dev)
2708 {
2709 	of_node_put(dev->of_node);
2710 	kfree(dev);
2711 }
2712 
2713 static const struct bus_type genpd_bus_type = {
2714 	.name		= "genpd",
2715 };
2716 
2717 /**
2718  * genpd_dev_pm_detach - Detach a device from its PM domain.
2719  * @dev: Device to detach.
2720  * @power_off: Currently not used
2721  *
 * Try to locate the generic PM domain that the device was previously
 * attached to. If one is found, the device is detached from it.
2724  */
2725 static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2726 {
2727 	struct generic_pm_domain *pd;
2728 	unsigned int i;
2729 	int ret = 0;
2730 
2731 	pd = dev_to_genpd(dev);
2732 	if (IS_ERR(pd))
2733 		return;
2734 
2735 	dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2736 
2737 	/* Drop the default performance state */
2738 	if (dev_gpd_data(dev)->default_pstate) {
2739 		dev_pm_genpd_set_performance_state(dev, 0);
2740 		dev_gpd_data(dev)->default_pstate = 0;
2741 	}
2742 
2743 	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2744 		ret = genpd_remove_device(pd, dev);
2745 		if (ret != -EAGAIN)
2746 			break;
2747 
2748 		mdelay(i);
2749 		cond_resched();
2750 	}
2751 
2752 	if (ret < 0) {
2753 		dev_err(dev, "failed to remove from PM domain %s: %d",
2754 			pd->name, ret);
2755 		return;
2756 	}
2757 
2758 	/* Check if PM domain can be powered off after removing this device. */
2759 	genpd_queue_power_off_work(pd);
2760 
2761 	/* Unregister the device if it was created by genpd. */
2762 	if (dev->bus == &genpd_bus_type)
2763 		device_unregister(dev);
2764 }
2765 
2766 static void genpd_dev_pm_sync(struct device *dev)
2767 {
2768 	struct generic_pm_domain *pd;
2769 
2770 	pd = dev_to_genpd(dev);
2771 	if (IS_ERR(pd))
2772 		return;
2773 
2774 	genpd_queue_power_off_work(pd);
2775 }
2776 
2777 static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
2778 				 unsigned int index, bool power_on)
2779 {
2780 	struct of_phandle_args pd_args;
2781 	struct generic_pm_domain *pd;
2782 	int pstate;
2783 	int ret;
2784 
2785 	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
2786 				"#power-domain-cells", index, &pd_args);
2787 	if (ret < 0)
2788 		return ret;
2789 
2790 	mutex_lock(&gpd_list_lock);
2791 	pd = genpd_get_from_provider(&pd_args);
2792 	of_node_put(pd_args.np);
2793 	if (IS_ERR(pd)) {
2794 		mutex_unlock(&gpd_list_lock);
2795 		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2796 			__func__, PTR_ERR(pd));
2797 		return driver_deferred_probe_check_state(base_dev);
2798 	}
2799 
2800 	dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2801 
2802 	ret = genpd_add_device(pd, dev, base_dev);
2803 	mutex_unlock(&gpd_list_lock);
2804 
2805 	if (ret < 0)
2806 		return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name);
2807 
2808 	dev->pm_domain->detach = genpd_dev_pm_detach;
2809 	dev->pm_domain->sync = genpd_dev_pm_sync;
2810 
2811 	/* Set the default performance state */
2812 	pstate = of_get_required_opp_performance_state(dev->of_node, index);
2813 	if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) {
2814 		ret = pstate;
2815 		goto err;
2816 	} else if (pstate > 0) {
2817 		ret = dev_pm_genpd_set_performance_state(dev, pstate);
2818 		if (ret)
2819 			goto err;
2820 		dev_gpd_data(dev)->default_pstate = pstate;
2821 	}
2822 
2823 	if (power_on) {
2824 		genpd_lock(pd);
2825 		ret = genpd_power_on(pd, 0);
2826 		genpd_unlock(pd);
2827 	}
2828 
2829 	if (ret) {
2830 		/* Drop the default performance state */
2831 		if (dev_gpd_data(dev)->default_pstate) {
2832 			dev_pm_genpd_set_performance_state(dev, 0);
2833 			dev_gpd_data(dev)->default_pstate = 0;
2834 		}
2835 
2836 		genpd_remove_device(pd, dev);
2837 		return -EPROBE_DEFER;
2838 	}
2839 
2840 	return 1;
2841 
2842 err:
2843 	dev_err(dev, "failed to set required performance state for power-domain %s: %d\n",
2844 		pd->name, ret);
2845 	genpd_remove_device(pd, dev);
2846 	return ret;
2847 }
2848 
2849 /**
2850  * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
2851  * @dev: Device to attach.
2852  *
 * Parses the device's OF node to find a PM domain specifier. If one is found,
 * attaches the device to the retrieved pm_domain ops.
 *
 * Returns 1 on a successfully attached PM domain, 0 when the device doesn't
 * need a PM domain or when multiple power-domains exist for it, else a
 * negative error code. Note that if a power-domain exists for the device, but
 * it cannot be found or turned on, then -EPROBE_DEFER is returned to ensure
 * that the device is not probed and can be retried later.
2861  */
2862 int genpd_dev_pm_attach(struct device *dev)
2863 {
2864 	if (!dev->of_node)
2865 		return 0;
2866 
2867 	/*
2868 	 * Devices with multiple PM domains must be attached separately, as we
2869 	 * can only attach one PM domain per device.
2870 	 */
2871 	if (of_count_phandle_with_args(dev->of_node, "power-domains",
2872 				       "#power-domain-cells") != 1)
2873 		return 0;
2874 
2875 	return __genpd_dev_pm_attach(dev, dev, 0, true);
2876 }
2877 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
2878 
2879 /**
2880  * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
2881  * @dev: The device used to lookup the PM domain.
2882  * @index: The index of the PM domain.
2883  *
 * Parses the device's OF node to find a PM domain specifier at the provided
 * @index. If one is found, creates a virtual device and attaches it to the
 * retrieved pm_domain ops. To deal with detaching of the virtual device, the
 * ->detach() callback in the struct dev_pm_domain is assigned to
 * genpd_dev_pm_detach().
 *
 * Returns the created virtual device if a PM domain is successfully attached,
 * NULL when the device doesn't need a PM domain, else an ERR_PTR() in case of
 * failures. If a power-domain exists for the device, but cannot be found or
 * turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the
 * device is not probed and can be retried later.
2894  */
2895 struct device *genpd_dev_pm_attach_by_id(struct device *dev,
2896 					 unsigned int index)
2897 {
2898 	struct device *virt_dev;
2899 	int num_domains;
2900 	int ret;
2901 
2902 	if (!dev->of_node)
2903 		return NULL;
2904 
2905 	/* Verify that the index is within a valid range. */
2906 	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
2907 						 "#power-domain-cells");
2908 	if (index >= num_domains)
2909 		return NULL;
2910 
2911 	/* Allocate and register device on the genpd bus. */
2912 	virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
2913 	if (!virt_dev)
2914 		return ERR_PTR(-ENOMEM);
2915 
2916 	dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
2917 	virt_dev->bus = &genpd_bus_type;
2918 	virt_dev->release = genpd_release_dev;
2919 	virt_dev->of_node = of_node_get(dev->of_node);
2920 
2921 	ret = device_register(virt_dev);
2922 	if (ret) {
2923 		put_device(virt_dev);
2924 		return ERR_PTR(ret);
2925 	}
2926 
2927 	/* Try to attach the device to the PM domain at the specified index. */
2928 	ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
2929 	if (ret < 1) {
2930 		device_unregister(virt_dev);
2931 		return ret ? ERR_PTR(ret) : NULL;
2932 	}
2933 
2934 	pm_runtime_enable(virt_dev);
2935 	genpd_queue_power_off_work(dev_to_genpd(virt_dev));
2936 
2937 	return virt_dev;
2938 }
2939 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
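
/*
 * Example (illustrative sketch): a consumer with multiple power domains
 * cannot use the single-domain attach performed by the driver core, so
 * it attaches each domain through a virtual device, usually via the
 * dev_pm_domain_attach_by_id/name() wrappers, and ties that device's
 * runtime PM state to its own with a device link. "mydomain" is a
 * hypothetical entry in the consumer's power-domain-names property.
 *
 *	struct device *pd_dev;
 *	struct device_link *link;
 *
 *	pd_dev = dev_pm_domain_attach_by_name(dev, "mydomain");
 *	if (IS_ERR_OR_NULL(pd_dev))
 *		return pd_dev ? PTR_ERR(pd_dev) : -ENODEV;
 *
 *	link = device_link_add(dev, pd_dev,
 *			       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
 *
 * With the link in place, runtime resuming the consumer powers on this
 * domain as well.
 */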
2940 
2941 /**
2942  * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
2943  * @dev: The device used to lookup the PM domain.
2944  * @name: The name of the PM domain.
2945  *
 * Parses the device's OF node to find a PM domain specifier using the
2947  * power-domain-names DT property. For further description see
2948  * genpd_dev_pm_attach_by_id().
2949  */
2950 struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
2951 {
2952 	int index;
2953 
2954 	if (!dev->of_node)
2955 		return NULL;
2956 
2957 	index = of_property_match_string(dev->of_node, "power-domain-names",
2958 					 name);
2959 	if (index < 0)
2960 		return NULL;
2961 
2962 	return genpd_dev_pm_attach_by_id(dev, index);
2963 }
2964 
2965 static const struct of_device_id idle_state_match[] = {
2966 	{ .compatible = "domain-idle-state", },
2967 	{ }
2968 };
2969 
2970 static int genpd_parse_state(struct genpd_power_state *genpd_state,
2971 				    struct device_node *state_node)
2972 {
2973 	int err;
2974 	u32 residency;
2975 	u32 entry_latency, exit_latency;
2976 
2977 	err = of_property_read_u32(state_node, "entry-latency-us",
2978 						&entry_latency);
2979 	if (err) {
2980 		pr_debug(" * %pOF missing entry-latency-us property\n",
2981 			 state_node);
2982 		return -EINVAL;
2983 	}
2984 
2985 	err = of_property_read_u32(state_node, "exit-latency-us",
2986 						&exit_latency);
2987 	if (err) {
2988 		pr_debug(" * %pOF missing exit-latency-us property\n",
2989 			 state_node);
2990 		return -EINVAL;
2991 	}
2992 
2993 	err = of_property_read_u32(state_node, "min-residency-us", &residency);
2994 	if (!err)
2995 		genpd_state->residency_ns = 1000LL * residency;
2996 
2997 	genpd_state->power_on_latency_ns = 1000LL * exit_latency;
2998 	genpd_state->power_off_latency_ns = 1000LL * entry_latency;
2999 	genpd_state->fwnode = &state_node->fwnode;
3000 
3001 	return 0;
3002 }
3003 
3004 static int genpd_iterate_idle_states(struct device_node *dn,
3005 				     struct genpd_power_state *states)
3006 {
3007 	int ret;
3008 	struct of_phandle_iterator it;
3009 	struct device_node *np;
3010 	int i = 0;
3011 
3012 	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
3013 	if (ret <= 0)
3014 		return ret == -ENOENT ? 0 : ret;
3015 
	/* Loop over the phandles until all the requested entries are found */
3017 	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
3018 		np = it.node;
3019 		if (!of_match_node(idle_state_match, np))
3020 			continue;
3021 
3022 		if (!of_device_is_available(np))
3023 			continue;
3024 
3025 		if (states) {
3026 			ret = genpd_parse_state(&states[i], np);
3027 			if (ret) {
3028 				pr_err("Parsing idle state node %pOF failed with err %d\n",
3029 				       np, ret);
3030 				of_node_put(np);
3031 				return ret;
3032 			}
3033 		}
3034 		i++;
3035 	}
3036 
3037 	return i;
3038 }
3039 
3040 /**
 * of_genpd_parse_idle_states - Return an array of idle states for the genpd.
3042  *
3043  * @dn: The genpd device node
3044  * @states: The pointer to which the state array will be saved.
3045  * @n: The count of elements in the array returned from this function.
3046  *
 * Returns the device states parsed from the OF node. The memory for the
 * states is allocated by this function, and it is the caller's responsibility
 * to free it after use. If any number of compatible domain idle states
 * (including zero) is found, 0 is returned; in case of errors, a negative
 * error code is returned.
3051  */
3052 int of_genpd_parse_idle_states(struct device_node *dn,
3053 			struct genpd_power_state **states, int *n)
3054 {
3055 	struct genpd_power_state *st;
3056 	int ret;
3057 
3058 	ret = genpd_iterate_idle_states(dn, NULL);
3059 	if (ret < 0)
3060 		return ret;
3061 
3062 	if (!ret) {
3063 		*states = NULL;
3064 		*n = 0;
3065 		return 0;
3066 	}
3067 
3068 	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
3069 	if (!st)
3070 		return -ENOMEM;
3071 
3072 	ret = genpd_iterate_idle_states(dn, st);
3073 	if (ret <= 0) {
3074 		kfree(st);
3075 		return ret < 0 ? ret : -EINVAL;
3076 	}
3077 
3078 	*states = st;
3079 	*n = ret;
3080 
3081 	return 0;
3082 }
3083 EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
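
/*
 * Example (illustrative sketch): a provider picking up domain idle
 * states from DT before registering its domain. foo_pd is hypothetical.
 *
 *	struct genpd_power_state *states;
 *	int nr_states, ret;
 *
 *	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
 *	if (ret)
 *		return ret;
 *
 *	if (nr_states) {
 *		foo_pd.states = states;
 *		foo_pd.state_count = nr_states;
 *	}
 *
 *	ret = pm_genpd_init(&foo_pd, &simple_qos_governor, true);
 *
 * If no states are supplied, genpd_alloc_data() falls back to a single
 * default "off" state.
 */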
3084 
3085 static int __init genpd_bus_init(void)
3086 {
3087 	return bus_register(&genpd_bus_type);
3088 }
3089 core_initcall(genpd_bus_init);
3090 
3091 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
3092 
3093 
3094 /***        debugfs support        ***/
3095 
3096 #ifdef CONFIG_DEBUG_FS
3097 /*
3098  * TODO: This function is a slightly modified version of rtpm_status_show
3099  * from sysfs.c, so generalize it.
3100  */
3101 static void rtpm_status_str(struct seq_file *s, struct device *dev)
3102 {
3103 	static const char * const status_lookup[] = {
3104 		[RPM_ACTIVE] = "active",
3105 		[RPM_RESUMING] = "resuming",
3106 		[RPM_SUSPENDED] = "suspended",
3107 		[RPM_SUSPENDING] = "suspending"
3108 	};
3109 	const char *p = "";
3110 
3111 	if (dev->power.runtime_error)
3112 		p = "error";
3113 	else if (dev->power.disable_depth)
3114 		p = "unsupported";
3115 	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
3116 		p = status_lookup[dev->power.runtime_status];
3117 	else
3118 		WARN_ON(1);
3119 
3120 	seq_printf(s, "%-25s  ", p);
3121 }
3122 
3123 static void perf_status_str(struct seq_file *s, struct device *dev)
3124 {
3125 	struct generic_pm_domain_data *gpd_data;
3126 
3127 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
3128 	seq_put_decimal_ull(s, "", gpd_data->performance_state);
3129 }
3130 
3131 static int genpd_summary_one(struct seq_file *s,
3132 			struct generic_pm_domain *genpd)
3133 {
3134 	static const char * const status_lookup[] = {
3135 		[GENPD_STATE_ON] = "on",
3136 		[GENPD_STATE_OFF] = "off"
3137 	};
3138 	struct pm_domain_data *pm_data;
3139 	const char *kobj_path;
3140 	struct gpd_link *link;
3141 	char state[16];
3142 	int ret;
3143 
3144 	ret = genpd_lock_interruptible(genpd);
3145 	if (ret)
3146 		return -ERESTARTSYS;
3147 
3148 	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
3149 		goto exit;
3150 	if (!genpd_status_on(genpd))
3151 		snprintf(state, sizeof(state), "%s-%u",
3152 			 status_lookup[genpd->status], genpd->state_idx);
3153 	else
3154 		snprintf(state, sizeof(state), "%s",
3155 			 status_lookup[genpd->status]);
3156 	seq_printf(s, "%-30s  %-50s %u", genpd->name, state, genpd->performance_state);
3157 
3158 	/*
3159 	 * Modifications on the list require holding locks on both
3160 	 * parent and child, so we are safe.
3161 	 * Also genpd->name is immutable.
3162 	 */
3163 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
3164 		if (list_is_first(&link->parent_node, &genpd->parent_links))
3165 			seq_printf(s, "\n%48s", " ");
3166 		seq_printf(s, "%s", link->child->name);
3167 		if (!list_is_last(&link->parent_node, &genpd->parent_links))
3168 			seq_puts(s, ", ");
3169 	}
3170 
3171 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3172 		kobj_path = kobject_get_path(&pm_data->dev->kobj,
3173 				genpd_is_irq_safe(genpd) ?
3174 				GFP_ATOMIC : GFP_KERNEL);
3175 		if (kobj_path == NULL)
3176 			continue;
3177 
3178 		seq_printf(s, "\n    %-50s  ", kobj_path);
3179 		rtpm_status_str(s, pm_data->dev);
3180 		perf_status_str(s, pm_data->dev);
3181 		kfree(kobj_path);
3182 	}
3183 
3184 	seq_puts(s, "\n");
3185 exit:
3186 	genpd_unlock(genpd);
3187 
3188 	return 0;
3189 }
3190 
3191 static int summary_show(struct seq_file *s, void *data)
3192 {
3193 	struct generic_pm_domain *genpd;
3194 	int ret = 0;
3195 
3196 	seq_puts(s, "domain                          status          children                           performance\n");
3197 	seq_puts(s, "    /device                                             runtime status\n");
3198 	seq_puts(s, "----------------------------------------------------------------------------------------------\n");
3199 
3200 	ret = mutex_lock_interruptible(&gpd_list_lock);
3201 	if (ret)
3202 		return -ERESTARTSYS;
3203 
3204 	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
3205 		ret = genpd_summary_one(s, genpd);
3206 		if (ret)
3207 			break;
3208 	}
3209 	mutex_unlock(&gpd_list_lock);
3210 
3211 	return ret;
3212 }
3213 
3214 static int status_show(struct seq_file *s, void *data)
3215 {
3216 	static const char * const status_lookup[] = {
3217 		[GENPD_STATE_ON] = "on",
3218 		[GENPD_STATE_OFF] = "off"
3219 	};
3220 
3221 	struct generic_pm_domain *genpd = s->private;
3222 	int ret = 0;
3223 
3224 	ret = genpd_lock_interruptible(genpd);
3225 	if (ret)
3226 		return -ERESTARTSYS;
3227 
3228 	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
3229 		goto exit;
3230 
3231 	if (genpd->status == GENPD_STATE_OFF)
3232 		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
3233 			genpd->state_idx);
3234 	else
3235 		seq_printf(s, "%s\n", status_lookup[genpd->status]);
3236 exit:
3237 	genpd_unlock(genpd);
3238 	return ret;
3239 }
3240 
3241 static int sub_domains_show(struct seq_file *s, void *data)
3242 {
3243 	struct generic_pm_domain *genpd = s->private;
3244 	struct gpd_link *link;
3245 	int ret = 0;
3246 
3247 	ret = genpd_lock_interruptible(genpd);
3248 	if (ret)
3249 		return -ERESTARTSYS;
3250 
3251 	list_for_each_entry(link, &genpd->parent_links, parent_node)
3252 		seq_printf(s, "%s\n", link->child->name);
3253 
3254 	genpd_unlock(genpd);
3255 	return ret;
3256 }
3257 
3258 static int idle_states_show(struct seq_file *s, void *data)
3259 {
3260 	struct generic_pm_domain *genpd = s->private;
3261 	u64 now, delta, idle_time = 0;
3262 	unsigned int i;
3263 	int ret = 0;
3264 
3265 	ret = genpd_lock_interruptible(genpd);
3266 	if (ret)
3267 		return -ERESTARTSYS;
3268 
3269 	seq_puts(s, "State          Time Spent(ms) Usage          Rejected\n");
3270 
3271 	for (i = 0; i < genpd->state_count; i++) {
3272 		idle_time += genpd->states[i].idle_time;
3273 
3274 		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3275 			now = ktime_get_mono_fast_ns();
3276 			if (now > genpd->accounting_time) {
3277 				delta = now - genpd->accounting_time;
3278 				idle_time += delta;
3279 			}
3280 		}
3281 
3282 		do_div(idle_time, NSEC_PER_MSEC);
3283 		seq_printf(s, "S%-13i %-14llu %-14llu %llu\n", i, idle_time,
3284 			   genpd->states[i].usage, genpd->states[i].rejected);
3285 	}
3286 
3287 	genpd_unlock(genpd);
3288 	return ret;
3289 }
3290 
3291 static int active_time_show(struct seq_file *s, void *data)
3292 {
3293 	struct generic_pm_domain *genpd = s->private;
3294 	u64 now, on_time, delta = 0;
3295 	int ret = 0;
3296 
3297 	ret = genpd_lock_interruptible(genpd);
3298 	if (ret)
3299 		return -ERESTARTSYS;
3300 
3301 	if (genpd->status == GENPD_STATE_ON) {
3302 		now = ktime_get_mono_fast_ns();
3303 		if (now > genpd->accounting_time)
3304 			delta = now - genpd->accounting_time;
3305 	}
3306 
3307 	on_time = genpd->on_time + delta;
3308 	do_div(on_time, NSEC_PER_MSEC);
3309 	seq_printf(s, "%llu ms\n", on_time);
3310 
3311 	genpd_unlock(genpd);
3312 	return ret;
3313 }
3314 
3315 static int total_idle_time_show(struct seq_file *s, void *data)
3316 {
3317 	struct generic_pm_domain *genpd = s->private;
3318 	u64 now, delta, total = 0;
3319 	unsigned int i;
3320 	int ret = 0;
3321 
3322 	ret = genpd_lock_interruptible(genpd);
3323 	if (ret)
3324 		return -ERESTARTSYS;
3325 
3326 	for (i = 0; i < genpd->state_count; i++) {
3327 		total += genpd->states[i].idle_time;
3328 
3329 		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3330 			now = ktime_get_mono_fast_ns();
3331 			if (now > genpd->accounting_time) {
3332 				delta = now - genpd->accounting_time;
3333 				total += delta;
3334 			}
3335 		}
3336 	}
3337 
3338 	do_div(total, NSEC_PER_MSEC);
3339 	seq_printf(s, "%llu ms\n", total);
3340 
3341 	genpd_unlock(genpd);
3342 	return ret;
3343 }
3344 
3345 
3346 static int devices_show(struct seq_file *s, void *data)
3347 {
3348 	struct generic_pm_domain *genpd = s->private;
3349 	struct pm_domain_data *pm_data;
3350 	const char *kobj_path;
3351 	int ret = 0;
3352 
3353 	ret = genpd_lock_interruptible(genpd);
3354 	if (ret)
3355 		return -ERESTARTSYS;
3356 
3357 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3358 		kobj_path = kobject_get_path(&pm_data->dev->kobj,
3359 				genpd_is_irq_safe(genpd) ?
3360 				GFP_ATOMIC : GFP_KERNEL);
3361 		if (kobj_path == NULL)
3362 			continue;
3363 
3364 		seq_printf(s, "%s\n", kobj_path);
3365 		kfree(kobj_path);
3366 	}
3367 
3368 	genpd_unlock(genpd);
3369 	return ret;
3370 }
3371 
3372 static int perf_state_show(struct seq_file *s, void *data)
3373 {
3374 	struct generic_pm_domain *genpd = s->private;
3375 
3376 	if (genpd_lock_interruptible(genpd))
3377 		return -ERESTARTSYS;
3378 
3379 	seq_printf(s, "%u\n", genpd->performance_state);
3380 
3381 	genpd_unlock(genpd);
3382 	return 0;
3383 }
3384 
3385 DEFINE_SHOW_ATTRIBUTE(summary);
3386 DEFINE_SHOW_ATTRIBUTE(status);
3387 DEFINE_SHOW_ATTRIBUTE(sub_domains);
3388 DEFINE_SHOW_ATTRIBUTE(idle_states);
3389 DEFINE_SHOW_ATTRIBUTE(active_time);
3390 DEFINE_SHOW_ATTRIBUTE(total_idle_time);
3391 DEFINE_SHOW_ATTRIBUTE(devices);
3392 DEFINE_SHOW_ATTRIBUTE(perf_state);
3393 
3394 static void genpd_debug_add(struct generic_pm_domain *genpd)
3395 {
3396 	struct dentry *d;
3397 
3398 	if (!genpd_debugfs_dir)
3399 		return;
3400 
3401 	d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
3402 
3403 	debugfs_create_file("current_state", 0444,
3404 			    d, genpd, &status_fops);
3405 	debugfs_create_file("sub_domains", 0444,
3406 			    d, genpd, &sub_domains_fops);
3407 	debugfs_create_file("idle_states", 0444,
3408 			    d, genpd, &idle_states_fops);
3409 	debugfs_create_file("active_time", 0444,
3410 			    d, genpd, &active_time_fops);
3411 	debugfs_create_file("total_idle_time", 0444,
3412 			    d, genpd, &total_idle_time_fops);
3413 	debugfs_create_file("devices", 0444,
3414 			    d, genpd, &devices_fops);
3415 	if (genpd->set_performance_state)
3416 		debugfs_create_file("perf_state", 0444,
3417 				    d, genpd, &perf_state_fops);
3418 }
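
/*
 * The resulting layout under /sys/kernel/debug/pm_genpd/ is one
 * directory per registered domain, plus the global pm_genpd_summary
 * file created in genpd_debug_init() below:
 *
 *	pm_genpd_summary		- table of all domains and devices
 *	<domain>/current_state
 *	<domain>/sub_domains
 *	<domain>/idle_states
 *	<domain>/active_time
 *	<domain>/total_idle_time
 *	<domain>/devices
 *	<domain>/perf_state		- only if set_performance_state is set
 */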
3419 
3420 static int __init genpd_debug_init(void)
3421 {
3422 	struct generic_pm_domain *genpd;
3423 
3424 	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
3425 
3426 	debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
3427 			    NULL, &summary_fops);
3428 
3429 	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
3430 		genpd_debug_add(genpd);
3431 
3432 	return 0;
3433 }
3434 late_initcall(genpd_debug_init);
3435 
3436 static void __exit genpd_debug_exit(void)
3437 {
3438 	debugfs_remove_recursive(genpd_debugfs_dir);
3439 }
3440 __exitcall(genpd_debug_exit);
3441 #endif /* CONFIG_DEBUG_FS */
3442