xref: /linux/drivers/pmdomain/core.c (revision daa121128a2d2ac6006159e2c47676e4fcd21eab)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/pmdomain/core.c - Common code related to device power domains.
4  *
5  * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
6  */
7 #define pr_fmt(fmt) "PM: " fmt
8 
9 #include <linux/delay.h>
10 #include <linux/kernel.h>
11 #include <linux/io.h>
12 #include <linux/platform_device.h>
13 #include <linux/pm_opp.h>
14 #include <linux/pm_runtime.h>
15 #include <linux/pm_domain.h>
16 #include <linux/pm_qos.h>
17 #include <linux/pm_clock.h>
18 #include <linux/slab.h>
19 #include <linux/err.h>
20 #include <linux/sched.h>
21 #include <linux/suspend.h>
22 #include <linux/export.h>
23 #include <linux/cpu.h>
24 #include <linux/debugfs.h>
25 
26 #define GENPD_RETRY_MAX_MS	250		/* Approximate */
27 
28 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
29 ({								\
30 	type (*__routine)(struct device *__d); 			\
31 	type __ret = (type)0;					\
32 								\
33 	__routine = genpd->dev_ops.callback; 			\
34 	if (__routine) {					\
35 		__ret = __routine(dev); 			\
36 	}							\
37 	__ret;							\
38 })
39 
40 static LIST_HEAD(gpd_list);
41 static DEFINE_MUTEX(gpd_list_lock);
42 
43 struct genpd_lock_ops {
44 	void (*lock)(struct generic_pm_domain *genpd);
45 	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
46 	int (*lock_interruptible)(struct generic_pm_domain *genpd);
47 	void (*unlock)(struct generic_pm_domain *genpd);
48 };
49 
50 static void genpd_lock_mtx(struct generic_pm_domain *genpd)
51 {
52 	mutex_lock(&genpd->mlock);
53 }
54 
55 static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
56 					int depth)
57 {
58 	mutex_lock_nested(&genpd->mlock, depth);
59 }
60 
61 static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
62 {
63 	return mutex_lock_interruptible(&genpd->mlock);
64 }
65 
66 static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
67 {
68 	return mutex_unlock(&genpd->mlock);
69 }
70 
71 static const struct genpd_lock_ops genpd_mtx_ops = {
72 	.lock = genpd_lock_mtx,
73 	.lock_nested = genpd_lock_nested_mtx,
74 	.lock_interruptible = genpd_lock_interruptible_mtx,
75 	.unlock = genpd_unlock_mtx,
76 };
77 
78 static void genpd_lock_spin(struct generic_pm_domain *genpd)
79 	__acquires(&genpd->slock)
80 {
81 	unsigned long flags;
82 
83 	spin_lock_irqsave(&genpd->slock, flags);
84 	genpd->lock_flags = flags;
85 }
86 
87 static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
88 					int depth)
89 	__acquires(&genpd->slock)
90 {
91 	unsigned long flags;
92 
93 	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
94 	genpd->lock_flags = flags;
95 }
96 
97 static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
98 	__acquires(&genpd->slock)
99 {
100 	unsigned long flags;
101 
102 	spin_lock_irqsave(&genpd->slock, flags);
103 	genpd->lock_flags = flags;
104 	return 0;
105 }
106 
107 static void genpd_unlock_spin(struct generic_pm_domain *genpd)
108 	__releases(&genpd->slock)
109 {
110 	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
111 }
112 
113 static const struct genpd_lock_ops genpd_spin_ops = {
114 	.lock = genpd_lock_spin,
115 	.lock_nested = genpd_lock_nested_spin,
116 	.lock_interruptible = genpd_lock_interruptible_spin,
117 	.unlock = genpd_unlock_spin,
118 };
119 
120 #define genpd_lock(p)			p->lock_ops->lock(p)
121 #define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
122 #define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
123 #define genpd_unlock(p)			p->lock_ops->unlock(p)
124 
125 #define genpd_status_on(genpd)		(genpd->status == GENPD_STATE_ON)
126 #define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
127 #define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
128 #define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
129 #define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
130 #define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
131 #define genpd_is_opp_table_fw(genpd)	(genpd->flags & GENPD_FLAG_OPP_TABLE_FW)
132 
133 static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
134 		const struct generic_pm_domain *genpd)
135 {
136 	bool ret;
137 
138 	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
139 
140 	/*
141 	 * Warn once if an IRQ safe device is attached to a domain whose
142 	 * callbacks are allowed to sleep. This indicates a suboptimal
143 	 * configuration for PM, but it doesn't matter for an always on domain.
144 	 */
145 	if (genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd))
146 		return ret;
147 
148 	if (ret)
149 		dev_warn_once(dev, "PM domain %s will not be powered off\n",
150 				genpd->name);
151 
152 	return ret;
153 }
154 
155 static int genpd_runtime_suspend(struct device *dev);
156 
157 /*
158  * Get the generic PM domain for a particular struct device.
159  * This validates the struct device pointer, the PM domain pointer,
160  * and checks that the PM domain pointer is a real generic PM domain.
161  * Any failure results in NULL being returned.
162  */
163 static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
164 {
165 	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
166 		return NULL;
167 
168 	/* A genpd always has its ->runtime_suspend() callback assigned. */
169 	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
170 		return pd_to_genpd(dev->pm_domain);
171 
172 	return NULL;
173 }
174 
175 /*
176  * This should only be used where we are certain that the pm_domain
177  * attached to the device is a genpd domain.
178  */
179 static struct generic_pm_domain *dev_to_genpd(struct device *dev)
180 {
181 	if (IS_ERR_OR_NULL(dev->pm_domain))
182 		return ERR_PTR(-EINVAL);
183 
184 	return pd_to_genpd(dev->pm_domain);
185 }
186 
187 static int genpd_stop_dev(const struct generic_pm_domain *genpd,
188 			  struct device *dev)
189 {
190 	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
191 }
192 
193 static int genpd_start_dev(const struct generic_pm_domain *genpd,
194 			   struct device *dev)
195 {
196 	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
197 }
198 
199 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
200 {
201 	bool ret = false;
202 
203 	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
204 		ret = !!atomic_dec_and_test(&genpd->sd_count);
205 
206 	return ret;
207 }
208 
209 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
210 {
211 	atomic_inc(&genpd->sd_count);
212 	smp_mb__after_atomic();
213 }
214 
215 #ifdef CONFIG_DEBUG_FS
216 static struct dentry *genpd_debugfs_dir;
217 
218 static void genpd_debug_add(struct generic_pm_domain *genpd);
219 
220 static void genpd_debug_remove(struct generic_pm_domain *genpd)
221 {
222 	if (!genpd_debugfs_dir)
223 		return;
224 
225 	debugfs_lookup_and_remove(genpd->name, genpd_debugfs_dir);
226 }
227 
228 static void genpd_update_accounting(struct generic_pm_domain *genpd)
229 {
230 	u64 delta, now;
231 
232 	now = ktime_get_mono_fast_ns();
233 	if (now <= genpd->accounting_time)
234 		return;
235 
236 	delta = now - genpd->accounting_time;
237 
238 	/*
239 	 * If genpd->status is GENPD_STATE_ON, the domain has just been
240 	 * powered on, so the time elapsed since the last update was spent
241 	 * in an idle state; otherwise it was spent powered on.
242 	 */
243 	if (genpd->status == GENPD_STATE_ON)
244 		genpd->states[genpd->state_idx].idle_time += delta;
245 	else
246 		genpd->on_time += delta;
247 
248 	genpd->accounting_time = now;
249 }
250 #else
251 static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
252 static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
253 static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
254 #endif
255 
256 static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
257 					   unsigned int state)
258 {
259 	struct generic_pm_domain_data *pd_data;
260 	struct pm_domain_data *pdd;
261 	struct gpd_link *link;
262 
263 	/* New requested state is same as Max requested state */
264 	if (state == genpd->performance_state)
265 		return state;
266 
267 	/* New requested state is higher than Max requested state */
268 	if (state > genpd->performance_state)
269 		return state;
270 
271 	/* Traverse all devices within the domain */
272 	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
273 		pd_data = to_gpd_data(pdd);
274 
275 		if (pd_data->performance_state > state)
276 			state = pd_data->performance_state;
277 	}
278 
279 	/*
280 	 * Traverse all sub-domains within the domain. This can be
281 	 * done without any additional locking as the link->performance_state
282 	 * field is protected by the parent genpd->lock, which is already taken.
283 	 *
284 	 * Also note that link->performance_state (subdomain's performance state
285 	 * requirement to parent domain) is different from
286 	 * link->child->performance_state (current performance state requirement
287 	 * of the devices/sub-domains of the subdomain) and so can have a
288 	 * different value.
289 	 *
290 	 * Note that we also take vote from powered-off sub-domains into account
291 	 * as the same is done for devices right now.
292 	 */
293 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
294 		if (link->performance_state > state)
295 			state = link->performance_state;
296 	}
297 
298 	return state;
299 }
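
/*
 * Illustrative example (editorial addition, not part of the original file):
 * suppose one device in the domain lowers its request from 6 to 2 while
 * another device still requests 5 and a subdomain's link->performance_state
 * votes for 3. The re-evaluation above walks the device list and the parent
 * links and returns max(2, 5, 3) = 5 as the domain's new aggregated state.
 */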
300 
301 static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
302 					 struct generic_pm_domain *parent,
303 					 unsigned int pstate)
304 {
305 	if (!parent->set_performance_state)
306 		return pstate;
307 
308 	return dev_pm_opp_xlate_performance_state(genpd->opp_table,
309 						  parent->opp_table,
310 						  pstate);
311 }
312 
313 static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
314 					unsigned int state, int depth);
315 
316 static void _genpd_rollback_parent_state(struct gpd_link *link, int depth)
317 {
318 	struct generic_pm_domain *parent = link->parent;
319 	int parent_state;
320 
321 	genpd_lock_nested(parent, depth + 1);
322 
323 	parent_state = link->prev_performance_state;
324 	link->performance_state = parent_state;
325 
326 	parent_state = _genpd_reeval_performance_state(parent, parent_state);
327 	if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
328 		pr_err("%s: Failed to roll back to %d performance state\n",
329 		       parent->name, parent_state);
330 	}
331 
332 	genpd_unlock(parent);
333 }
334 
335 static int _genpd_set_parent_state(struct generic_pm_domain *genpd,
336 				   struct gpd_link *link,
337 				   unsigned int state, int depth)
338 {
339 	struct generic_pm_domain *parent = link->parent;
340 	int parent_state, ret;
341 
342 	/* Find parent's performance state */
343 	ret = genpd_xlate_performance_state(genpd, parent, state);
344 	if (unlikely(ret < 0))
345 		return ret;
346 
347 	parent_state = ret;
348 
349 	genpd_lock_nested(parent, depth + 1);
350 
351 	link->prev_performance_state = link->performance_state;
352 	link->performance_state = parent_state;
353 
354 	parent_state = _genpd_reeval_performance_state(parent, parent_state);
355 	ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
356 	if (ret)
357 		link->performance_state = link->prev_performance_state;
358 
359 	genpd_unlock(parent);
360 
361 	return ret;
362 }
363 
364 static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
365 					unsigned int state, int depth)
366 {
367 	struct gpd_link *link = NULL;
368 	int ret;
369 
370 	if (state == genpd->performance_state)
371 		return 0;
372 
373 	/* When scaling up, propagate to parents first in normal order */
374 	if (state > genpd->performance_state) {
375 		list_for_each_entry(link, &genpd->child_links, child_node) {
376 			ret = _genpd_set_parent_state(genpd, link, state, depth);
377 			if (ret)
378 				goto rollback_parents_up;
379 		}
380 	}
381 
382 	if (genpd->set_performance_state) {
383 		ret = genpd->set_performance_state(genpd, state);
384 		if (ret) {
385 			if (link)
386 				goto rollback_parents_up;
387 			return ret;
388 		}
389 	}
390 
391 	/* When scaling down, propagate to parents last in reverse order */
392 	if (state < genpd->performance_state) {
393 		list_for_each_entry_reverse(link, &genpd->child_links, child_node) {
394 			ret = _genpd_set_parent_state(genpd, link, state, depth);
395 			if (ret)
396 				goto rollback_parents_down;
397 		}
398 	}
399 
400 	genpd->performance_state = state;
401 	return 0;
402 
403 rollback_parents_up:
404 	list_for_each_entry_continue_reverse(link, &genpd->child_links, child_node)
405 		_genpd_rollback_parent_state(link, depth);
406 	return ret;
407 rollback_parents_down:
408 	list_for_each_entry_continue(link, &genpd->child_links, child_node)
409 		_genpd_rollback_parent_state(link, depth);
410 	return ret;
411 }
412 
413 static int genpd_set_performance_state(struct device *dev, unsigned int state)
414 {
415 	struct generic_pm_domain *genpd = dev_to_genpd(dev);
416 	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
417 	unsigned int prev_state;
418 	int ret;
419 
420 	prev_state = gpd_data->performance_state;
421 	if (prev_state == state)
422 		return 0;
423 
424 	gpd_data->performance_state = state;
425 	state = _genpd_reeval_performance_state(genpd, state);
426 
427 	ret = _genpd_set_performance_state(genpd, state, 0);
428 	if (ret)
429 		gpd_data->performance_state = prev_state;
430 
431 	return ret;
432 }
433 
434 static int genpd_drop_performance_state(struct device *dev)
435 {
436 	unsigned int prev_state = dev_gpd_data(dev)->performance_state;
437 
438 	if (!genpd_set_performance_state(dev, 0))
439 		return prev_state;
440 
441 	return 0;
442 }
443 
444 static void genpd_restore_performance_state(struct device *dev,
445 					    unsigned int state)
446 {
447 	if (state)
448 		genpd_set_performance_state(dev, state);
449 }
450 
451 static int genpd_dev_pm_set_performance_state(struct device *dev,
452 					      unsigned int state)
453 {
454 	struct generic_pm_domain *genpd = dev_to_genpd(dev);
455 	int ret = 0;
456 
457 	genpd_lock(genpd);
458 	if (pm_runtime_suspended(dev)) {
459 		dev_gpd_data(dev)->rpm_pstate = state;
460 	} else {
461 		ret = genpd_set_performance_state(dev, state);
462 		if (!ret)
463 			dev_gpd_data(dev)->rpm_pstate = 0;
464 	}
465 	genpd_unlock(genpd);
466 
467 	return ret;
468 }
469 
470 /**
471  * dev_pm_genpd_set_performance_state - Set performance state of device's power
472  * domain.
473  *
474  * @dev: Device for which the performance-state needs to be set.
475  * @state: Target performance state of the device. This can be set to 0 when
476  *	   the device no longer has any performance state constraints (in which
477  *	   case the device no longer takes part in determining the target
478  *	   performance state of the genpd).
479  *
480  * It is assumed that the caller guarantees that the genpd will not be detached
481  * while this routine is running.
482  *
483  * Returns 0 on success and negative error values on failures.
484  */
485 int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
486 {
487 	struct generic_pm_domain *genpd;
488 
489 	genpd = dev_to_genpd_safe(dev);
490 	if (!genpd)
491 		return -ENODEV;
492 
493 	if (WARN_ON(!dev->power.subsys_data ||
494 		     !dev->power.subsys_data->domain_data))
495 		return -EINVAL;
496 
497 	return genpd_dev_pm_set_performance_state(dev, state);
498 }
499 EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
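
/*
 * Editorial usage sketch (not part of the original file): a hypothetical
 * consumer driver, already attached to a single genpd, might vote for a
 * performance state from its runtime PM callbacks roughly as follows. The
 * "foo" names and the state value are made up for illustration; real drivers
 * often derive the state from an OPP table rather than hard-coding it.
 *
 *	static int foo_runtime_resume(struct device *dev)
 *	{
 *		// Request performance state 3 from the device's PM domain.
 *		return dev_pm_genpd_set_performance_state(dev, 3);
 *	}
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		// Drop the vote so the domain may settle at a lower state.
 *		return dev_pm_genpd_set_performance_state(dev, 0);
 *	}
 */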
500 
501 /**
502  * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
503  *
504  * @dev: Device to handle
505  * @next: impending interrupt/wakeup for the device
506  *
507  *
508  * Allow a device to inform the PM framework of its next wakeup. It is
509  * assumed that the caller guarantees that the genpd will not be detached
510  * while this routine is running, and that @dev is not runtime suspended
511  * (RPM_SUSPENDED).
512  * Although devices are expected to update @next again when their use case
513  * ends, they may not always know when that happens, so a stale @next is
514  * ignored when the domain is powered off.
515  */
516 void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
517 {
518 	struct generic_pm_domain *genpd;
519 	struct gpd_timing_data *td;
520 
521 	genpd = dev_to_genpd_safe(dev);
522 	if (!genpd)
523 		return;
524 
525 	td = to_gpd_data(dev->power.subsys_data->domain_data)->td;
526 	if (td)
527 		td->next_wakeup = next;
528 }
529 EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
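
/*
 * Editorial usage sketch (not part of the original file): a device that
 * knows its next interrupt is due in, say, 10 ms could hint its domain like
 * this (the 10 ms figure is made up for illustration):
 *
 *	dev_pm_genpd_set_next_wakeup(dev, ktime_add_ms(ktime_get(), 10));
 *
 * A genpd governor may then weigh the domain's aggregated next wakeup
 * against the power-off/on latencies when deciding whether powering off the
 * domain is worthwhile.
 */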
530 
531 /**
532  * dev_pm_genpd_get_next_hrtimer - Return the next_hrtimer for the genpd
533  * @dev: A device that is attached to the genpd.
534  *
535  * This routine should typically be called for a device at the point when a
536  * GENPD_NOTIFY_PRE_OFF notification has been sent for it.
537  *
538  * Returns the aggregated value of the genpd's next hrtimer or KTIME_MAX if no
539  * valid value have been set.
540  */
541 ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
542 {
543 	struct generic_pm_domain *genpd;
544 
545 	genpd = dev_to_genpd_safe(dev);
546 	if (!genpd)
547 		return KTIME_MAX;
548 
549 	if (genpd->gd)
550 		return genpd->gd->next_hrtimer;
551 
552 	return KTIME_MAX;
553 }
554 EXPORT_SYMBOL_GPL(dev_pm_genpd_get_next_hrtimer);
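
/*
 * Editorial usage sketch (not part of the original file): a genpd provider,
 * or a power notifier subscriber handling GENPD_NOTIFY_PRE_OFF, could
 * consult the aggregated next hrtimer to decide how deep a state is worth
 * entering. MY_BREAK_EVEN_US and MY_SHALLOW_STATE below are hypothetical:
 *
 *	ktime_t next = dev_pm_genpd_get_next_hrtimer(dev);
 *	s64 sleep_us = ktime_us_delta(next, ktime_get());
 *
 *	// Not enough residency before the next hrtimer: stay shallow.
 *	if (sleep_us < MY_BREAK_EVEN_US)
 *		return MY_SHALLOW_STATE;
 */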
555 
556 /**
557  * dev_pm_genpd_synced_poweroff - Next power off should be synchronous
558  *
559  * @dev: A device that is attached to the genpd.
560  *
561  * Allows a consumer of the genpd to notify the provider that the next power off
562  * should be synchronous.
563  *
564  * It is assumed that the caller guarantees that the genpd will not be detached
565  * while this routine is running.
566  */
567 void dev_pm_genpd_synced_poweroff(struct device *dev)
568 {
569 	struct generic_pm_domain *genpd;
570 
571 	genpd = dev_to_genpd_safe(dev);
572 	if (!genpd)
573 		return;
574 
575 	genpd_lock(genpd);
576 	genpd->synced_poweroff = true;
577 	genpd_unlock(genpd);
578 }
579 EXPORT_SYMBOL_GPL(dev_pm_genpd_synced_poweroff);
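
/*
 * Editorial usage sketch (not part of the original file): a consumer that is
 * about to hand a resource over to firmware, and needs the following power
 * off of its domain to have completed before it proceeds, could do something
 * like the below (foo_trigger_firmware_handover() is a hypothetical helper):
 *
 *	dev_pm_genpd_synced_poweroff(dev);
 *	pm_runtime_put_sync(dev);
 *	foo_trigger_firmware_handover();
 *
 * The provider's ->power_off() callback can inspect genpd->synced_poweroff
 * to distinguish this request from an ordinary, possibly lazy, power off.
 */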
580 
581 static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
582 {
583 	unsigned int state_idx = genpd->state_idx;
584 	ktime_t time_start;
585 	s64 elapsed_ns;
586 	int ret;
587 
588 	/* Notify consumers that we are about to power on. */
589 	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
590 					     GENPD_NOTIFY_PRE_ON,
591 					     GENPD_NOTIFY_OFF, NULL);
592 	ret = notifier_to_errno(ret);
593 	if (ret)
594 		return ret;
595 
596 	if (!genpd->power_on)
597 		goto out;
598 
599 	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
600 	if (!timed) {
601 		ret = genpd->power_on(genpd);
602 		if (ret)
603 			goto err;
604 
605 		goto out;
606 	}
607 
608 	time_start = ktime_get();
609 	ret = genpd->power_on(genpd);
610 	if (ret)
611 		goto err;
612 
613 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
614 	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
615 		goto out;
616 
617 	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
618 	genpd->gd->max_off_time_changed = true;
619 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
620 		 genpd->name, "on", elapsed_ns);
621 
622 out:
623 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
624 	genpd->synced_poweroff = false;
625 	return 0;
626 err:
627 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
628 				NULL);
629 	return ret;
630 }
631 
632 static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
633 {
634 	unsigned int state_idx = genpd->state_idx;
635 	ktime_t time_start;
636 	s64 elapsed_ns;
637 	int ret;
638 
639 	/* Notify consumers that we are about to power off. */
640 	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
641 					     GENPD_NOTIFY_PRE_OFF,
642 					     GENPD_NOTIFY_ON, NULL);
643 	ret = notifier_to_errno(ret);
644 	if (ret)
645 		return ret;
646 
647 	if (!genpd->power_off)
648 		goto out;
649 
650 	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
651 	if (!timed) {
652 		ret = genpd->power_off(genpd);
653 		if (ret)
654 			goto busy;
655 
656 		goto out;
657 	}
658 
659 	time_start = ktime_get();
660 	ret = genpd->power_off(genpd);
661 	if (ret)
662 		goto busy;
663 
664 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
665 	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
666 		goto out;
667 
668 	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
669 	genpd->gd->max_off_time_changed = true;
670 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
671 		 genpd->name, "off", elapsed_ns);
672 
673 out:
674 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
675 				NULL);
676 	return 0;
677 busy:
678 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
679 	return ret;
680 }
681 
682 /**
683  * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
684  * @genpd: PM domain to power off.
685  *
686  * Queue up the execution of genpd_power_off() unless it's already been done
687  * before.
688  */
689 static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
690 {
691 	queue_work(pm_wq, &genpd->power_off_work);
692 }
693 
694 /**
695  * genpd_power_off - Remove power from a given PM domain.
696  * @genpd: PM domain to power down.
697  * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
698  * RPM status of the related device is in an intermediate state, not yet turned
699  * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
700  * be RPM_SUSPENDED, while it tries to power off the PM domain.
701  * @depth: nesting count for lockdep.
702  *
703  * If all of the @genpd's devices have been suspended and all of its subdomains
704  * have been powered down, remove power from @genpd.
705  */
706 static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
707 			   unsigned int depth)
708 {
709 	struct pm_domain_data *pdd;
710 	struct gpd_link *link;
711 	unsigned int not_suspended = 0;
712 	int ret;
713 
714 	/*
715 	 * Do not try to power off the domain in the following situations:
716 	 * (1) The domain is already in the "power off" state.
717 	 * (2) System suspend is in progress.
718 	 */
719 	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
720 		return 0;
721 
722 	/*
723 	 * Abort power off for the PM domain in the following situations:
724 	 * (1) The domain is configured as always on.
725 	 * (2) When the domain has a subdomain being powered on.
726 	 */
727 	if (genpd_is_always_on(genpd) ||
728 			genpd_is_rpm_always_on(genpd) ||
729 			atomic_read(&genpd->sd_count) > 0)
730 		return -EBUSY;
731 
732 	/*
733 	 * The children must be in their deepest (powered-off) states to allow
734 	 * the parent to be powered off. Note that there's no need for
735 	 * additional locking, as powering on a child requires the parent's
736 	 * lock to be acquired first.
737 	 */
738 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
739 		struct generic_pm_domain *child = link->child;
740 		if (child->state_idx < child->state_count - 1)
741 			return -EBUSY;
742 	}
743 
744 	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
745 		/*
746 		 * Do not allow PM domain to be powered off, when an IRQ safe
747 		 * device is part of a non-IRQ safe domain.
748 		 */
749 		if (!pm_runtime_suspended(pdd->dev) ||
750 			irq_safe_dev_in_sleep_domain(pdd->dev, genpd))
751 			not_suspended++;
752 	}
753 
754 	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
755 		return -EBUSY;
756 
757 	if (genpd->gov && genpd->gov->power_down_ok) {
758 		if (!genpd->gov->power_down_ok(&genpd->domain))
759 			return -EAGAIN;
760 	}
761 
762 	/* Default to shallowest state. */
763 	if (!genpd->gov)
764 		genpd->state_idx = 0;
765 
766 	/* Don't power off, if a child domain is waiting to power on. */
767 	if (atomic_read(&genpd->sd_count) > 0)
768 		return -EBUSY;
769 
770 	ret = _genpd_power_off(genpd, true);
771 	if (ret) {
772 		genpd->states[genpd->state_idx].rejected++;
773 		return ret;
774 	}
775 
776 	genpd->status = GENPD_STATE_OFF;
777 	genpd_update_accounting(genpd);
778 	genpd->states[genpd->state_idx].usage++;
779 
780 	list_for_each_entry(link, &genpd->child_links, child_node) {
781 		genpd_sd_counter_dec(link->parent);
782 		genpd_lock_nested(link->parent, depth + 1);
783 		genpd_power_off(link->parent, false, depth + 1);
784 		genpd_unlock(link->parent);
785 	}
786 
787 	return 0;
788 }
789 
790 /**
791  * genpd_power_on - Restore power to a given PM domain and its parents.
792  * @genpd: PM domain to power up.
793  * @depth: nesting count for lockdep.
794  *
795  * Restore power to @genpd and all of its parents so that it is possible to
796  * resume a device belonging to it.
797  */
798 static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
799 {
800 	struct gpd_link *link;
801 	int ret = 0;
802 
803 	if (genpd_status_on(genpd))
804 		return 0;
805 
806 	/*
807 	 * The list is guaranteed not to change while the loop below is being
808 	 * executed, unless one of the parents' .power_on() callbacks fiddles
809 	 * with it.
810 	 */
811 	list_for_each_entry(link, &genpd->child_links, child_node) {
812 		struct generic_pm_domain *parent = link->parent;
813 
814 		genpd_sd_counter_inc(parent);
815 
816 		genpd_lock_nested(parent, depth + 1);
817 		ret = genpd_power_on(parent, depth + 1);
818 		genpd_unlock(parent);
819 
820 		if (ret) {
821 			genpd_sd_counter_dec(parent);
822 			goto err;
823 		}
824 	}
825 
826 	ret = _genpd_power_on(genpd, true);
827 	if (ret)
828 		goto err;
829 
830 	genpd->status = GENPD_STATE_ON;
831 	genpd_update_accounting(genpd);
832 
833 	return 0;
834 
835  err:
836 	list_for_each_entry_continue_reverse(link,
837 					&genpd->child_links,
838 					child_node) {
839 		genpd_sd_counter_dec(link->parent);
840 		genpd_lock_nested(link->parent, depth + 1);
841 		genpd_power_off(link->parent, false, depth + 1);
842 		genpd_unlock(link->parent);
843 	}
844 
845 	return ret;
846 }
847 
848 static int genpd_dev_pm_start(struct device *dev)
849 {
850 	struct generic_pm_domain *genpd = dev_to_genpd(dev);
851 
852 	return genpd_start_dev(genpd, dev);
853 }
854 
855 static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
856 				     unsigned long val, void *ptr)
857 {
858 	struct generic_pm_domain_data *gpd_data;
859 	struct device *dev;
860 
861 	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
862 	dev = gpd_data->base.dev;
863 
864 	for (;;) {
865 		struct generic_pm_domain *genpd = ERR_PTR(-ENODATA);
866 		struct pm_domain_data *pdd;
867 		struct gpd_timing_data *td;
868 
869 		spin_lock_irq(&dev->power.lock);
870 
871 		pdd = dev->power.subsys_data ?
872 				dev->power.subsys_data->domain_data : NULL;
873 		if (pdd) {
874 			td = to_gpd_data(pdd)->td;
875 			if (td) {
876 				td->constraint_changed = true;
877 				genpd = dev_to_genpd(dev);
878 			}
879 		}
880 
881 		spin_unlock_irq(&dev->power.lock);
882 
883 		if (!IS_ERR(genpd)) {
884 			genpd_lock(genpd);
885 			genpd->gd->max_off_time_changed = true;
886 			genpd_unlock(genpd);
887 		}
888 
889 		dev = dev->parent;
890 		if (!dev || dev->power.ignore_children)
891 			break;
892 	}
893 
894 	return NOTIFY_DONE;
895 }
896 
897 /**
898  * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
899  * @work: Work structure used for scheduling the execution of this function.
900  */
901 static void genpd_power_off_work_fn(struct work_struct *work)
902 {
903 	struct generic_pm_domain *genpd;
904 
905 	genpd = container_of(work, struct generic_pm_domain, power_off_work);
906 
907 	genpd_lock(genpd);
908 	genpd_power_off(genpd, false, 0);
909 	genpd_unlock(genpd);
910 }
911 
912 /**
913  * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
914  * @dev: Device to handle.
915  */
916 static int __genpd_runtime_suspend(struct device *dev)
917 {
918 	int (*cb)(struct device *__dev);
919 
920 	if (dev->type && dev->type->pm)
921 		cb = dev->type->pm->runtime_suspend;
922 	else if (dev->class && dev->class->pm)
923 		cb = dev->class->pm->runtime_suspend;
924 	else if (dev->bus && dev->bus->pm)
925 		cb = dev->bus->pm->runtime_suspend;
926 	else
927 		cb = NULL;
928 
929 	if (!cb && dev->driver && dev->driver->pm)
930 		cb = dev->driver->pm->runtime_suspend;
931 
932 	return cb ? cb(dev) : 0;
933 }
934 
935 /**
936  * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
937  * @dev: Device to handle.
938  */
939 static int __genpd_runtime_resume(struct device *dev)
940 {
941 	int (*cb)(struct device *__dev);
942 
943 	if (dev->type && dev->type->pm)
944 		cb = dev->type->pm->runtime_resume;
945 	else if (dev->class && dev->class->pm)
946 		cb = dev->class->pm->runtime_resume;
947 	else if (dev->bus && dev->bus->pm)
948 		cb = dev->bus->pm->runtime_resume;
949 	else
950 		cb = NULL;
951 
952 	if (!cb && dev->driver && dev->driver->pm)
953 		cb = dev->driver->pm->runtime_resume;
954 
955 	return cb ? cb(dev) : 0;
956 }
957 
958 /**
959  * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
960  * @dev: Device to suspend.
961  *
962  * Carry out a runtime suspend of a device under the assumption that its
963  * pm_domain field points to the domain member of an object of type
964  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
965  */
966 static int genpd_runtime_suspend(struct device *dev)
967 {
968 	struct generic_pm_domain *genpd;
969 	bool (*suspend_ok)(struct device *__dev);
970 	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
971 	struct gpd_timing_data *td = gpd_data->td;
972 	bool runtime_pm = pm_runtime_enabled(dev);
973 	ktime_t time_start = 0;
974 	s64 elapsed_ns;
975 	int ret;
976 
977 	dev_dbg(dev, "%s()\n", __func__);
978 
979 	genpd = dev_to_genpd(dev);
980 	if (IS_ERR(genpd))
981 		return -EINVAL;
982 
983 	/*
984 	 * A runtime PM centric subsystem/driver may re-use the runtime PM
985 	 * callbacks for purposes other than runtime PM. In those scenarios
986 	 * runtime PM is disabled. Under these circumstances, we shall skip
987 	 * validating/measuring the PM QoS latency.
988 	 */
989 	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
990 	if (runtime_pm && suspend_ok && !suspend_ok(dev))
991 		return -EBUSY;
992 
993 	/* Measure suspend latency. */
994 	if (td && runtime_pm)
995 		time_start = ktime_get();
996 
997 	ret = __genpd_runtime_suspend(dev);
998 	if (ret)
999 		return ret;
1000 
1001 	ret = genpd_stop_dev(genpd, dev);
1002 	if (ret) {
1003 		__genpd_runtime_resume(dev);
1004 		return ret;
1005 	}
1006 
1007 	/* Update suspend latency value if the measured time exceeds it. */
1008 	if (td && runtime_pm) {
1009 		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
1010 		if (elapsed_ns > td->suspend_latency_ns) {
1011 			td->suspend_latency_ns = elapsed_ns;
1012 			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
1013 				elapsed_ns);
1014 			genpd->gd->max_off_time_changed = true;
1015 			td->constraint_changed = true;
1016 		}
1017 	}
1018 
1019 	/*
1020 	 * If power.irq_safe is set, this routine may be run with
1021 	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
1022 	 */
1023 	if (irq_safe_dev_in_sleep_domain(dev, genpd))
1024 		return 0;
1025 
1026 	genpd_lock(genpd);
1027 	genpd_power_off(genpd, true, 0);
1028 	gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
1029 	genpd_unlock(genpd);
1030 
1031 	return 0;
1032 }
1033 
1034 /**
1035  * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
1036  * @dev: Device to resume.
1037  *
1038  * Carry out a runtime resume of a device under the assumption that its
1039  * pm_domain field points to the domain member of an object of type
1040  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
1041  */
1042 static int genpd_runtime_resume(struct device *dev)
1043 {
1044 	struct generic_pm_domain *genpd;
1045 	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
1046 	struct gpd_timing_data *td = gpd_data->td;
1047 	bool timed = td && pm_runtime_enabled(dev);
1048 	ktime_t time_start = 0;
1049 	s64 elapsed_ns;
1050 	int ret;
1051 
1052 	dev_dbg(dev, "%s()\n", __func__);
1053 
1054 	genpd = dev_to_genpd(dev);
1055 	if (IS_ERR(genpd))
1056 		return -EINVAL;
1057 
1058 	/*
1059 	 * As we don't power off a non-IRQ-safe domain that holds
1060 	 * an IRQ safe device, we don't need to restore power to it.
1061 	 */
1062 	if (irq_safe_dev_in_sleep_domain(dev, genpd))
1063 		goto out;
1064 
1065 	genpd_lock(genpd);
1066 	genpd_restore_performance_state(dev, gpd_data->rpm_pstate);
1067 	ret = genpd_power_on(genpd, 0);
1068 	genpd_unlock(genpd);
1069 
1070 	if (ret)
1071 		return ret;
1072 
1073  out:
1074 	/* Measure resume latency. */
1075 	if (timed)
1076 		time_start = ktime_get();
1077 
1078 	ret = genpd_start_dev(genpd, dev);
1079 	if (ret)
1080 		goto err_poweroff;
1081 
1082 	ret = __genpd_runtime_resume(dev);
1083 	if (ret)
1084 		goto err_stop;
1085 
1086 	/* Update resume latency value if the measured time exceeds it. */
1087 	if (timed) {
1088 		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
1089 		if (elapsed_ns > td->resume_latency_ns) {
1090 			td->resume_latency_ns = elapsed_ns;
1091 			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
1092 				elapsed_ns);
1093 			genpd->gd->max_off_time_changed = true;
1094 			td->constraint_changed = true;
1095 		}
1096 	}
1097 
1098 	return 0;
1099 
1100 err_stop:
1101 	genpd_stop_dev(genpd, dev);
1102 err_poweroff:
1103 	if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
1104 		genpd_lock(genpd);
1105 		genpd_power_off(genpd, true, 0);
1106 		gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
1107 		genpd_unlock(genpd);
1108 	}
1109 
1110 	return ret;
1111 }
1112 
1113 static bool pd_ignore_unused;
1114 static int __init pd_ignore_unused_setup(char *__unused)
1115 {
1116 	pd_ignore_unused = true;
1117 	return 1;
1118 }
1119 __setup("pd_ignore_unused", pd_ignore_unused_setup);
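
/*
 * Editorial note (not part of the original file): "pd_ignore_unused" is a
 * kernel command line parameter. Booting with, for example:
 *
 *	console=ttyS0 pd_ignore_unused
 *
 * keeps otherwise-unused power domains powered on, which can help when
 * debugging a platform where powering off an idle domain breaks something.
 */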
1120 
1121 /**
1122  * genpd_power_off_unused - Power off all PM domains with no devices in use.
1123  */
1124 static int __init genpd_power_off_unused(void)
1125 {
1126 	struct generic_pm_domain *genpd;
1127 
1128 	if (pd_ignore_unused) {
1129 		pr_warn("genpd: Not disabling unused power domains\n");
1130 		return 0;
1131 	}
1132 
1133 	pr_info("genpd: Disabling unused power domains\n");
1134 	mutex_lock(&gpd_list_lock);
1135 
1136 	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
1137 		genpd_queue_power_off_work(genpd);
1138 
1139 	mutex_unlock(&gpd_list_lock);
1140 
1141 	return 0;
1142 }
1143 late_initcall_sync(genpd_power_off_unused);
1144 
1145 #ifdef CONFIG_PM_SLEEP
1146 
1147 /**
1148  * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
1149  * @genpd: PM domain to power off, if possible.
1150  * @use_lock: use the lock.
1151  * @depth: nesting count for lockdep.
1152  *
1153  * Check if the given PM domain can be powered off (during system suspend or
1154  * hibernation) and do that if so.  Also, in that case propagate to its parents.
1155  *
1156  * This function is only called in "noirq" and "syscore" stages of system power
1157  * transitions. The "noirq" callbacks may be executed asynchronously, thus in
1158  * these cases the lock must be held.
1159  */
1160 static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
1161 				 unsigned int depth)
1162 {
1163 	struct gpd_link *link;
1164 
1165 	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
1166 		return;
1167 
1168 	if (genpd->suspended_count != genpd->device_count
1169 	    || atomic_read(&genpd->sd_count) > 0)
1170 		return;
1171 
1172 	/* Check that the children are in their deepest (powered-off) state. */
1173 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
1174 		struct generic_pm_domain *child = link->child;
1175 		if (child->state_idx < child->state_count - 1)
1176 			return;
1177 	}
1178 
1179 	/* Choose the deepest state when suspending */
1180 	genpd->state_idx = genpd->state_count - 1;
1181 	if (_genpd_power_off(genpd, false)) {
1182 		genpd->states[genpd->state_idx].rejected++;
1183 		return;
1184 	}
1185
1186 	genpd->states[genpd->state_idx].usage++;
1187 
1188 	genpd->status = GENPD_STATE_OFF;
1189 
1190 	list_for_each_entry(link, &genpd->child_links, child_node) {
1191 		genpd_sd_counter_dec(link->parent);
1192 
1193 		if (use_lock)
1194 			genpd_lock_nested(link->parent, depth + 1);
1195 
1196 		genpd_sync_power_off(link->parent, use_lock, depth + 1);
1197 
1198 		if (use_lock)
1199 			genpd_unlock(link->parent);
1200 	}
1201 }
1202 
1203 /**
1204  * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
1205  * @genpd: PM domain to power on.
1206  * @use_lock: use the lock.
1207  * @depth: nesting count for lockdep.
1208  *
1209  * This function is only called in "noirq" and "syscore" stages of system power
1210  * transitions. The "noirq" callbacks may be executed asynchronously, thus in
1211  * these cases the lock must be held.
1212  */
1213 static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
1214 				unsigned int depth)
1215 {
1216 	struct gpd_link *link;
1217 
1218 	if (genpd_status_on(genpd))
1219 		return;
1220 
1221 	list_for_each_entry(link, &genpd->child_links, child_node) {
1222 		genpd_sd_counter_inc(link->parent);
1223 
1224 		if (use_lock)
1225 			genpd_lock_nested(link->parent, depth + 1);
1226 
1227 		genpd_sync_power_on(link->parent, use_lock, depth + 1);
1228 
1229 		if (use_lock)
1230 			genpd_unlock(link->parent);
1231 	}
1232 
1233 	_genpd_power_on(genpd, false);
1234 	genpd->status = GENPD_STATE_ON;
1235 }
1236 
1237 /**
1238  * genpd_prepare - Start power transition of a device in a PM domain.
1239  * @dev: Device to start the transition of.
1240  *
1241  * Start a power transition of a device (during a system-wide power transition)
1242  * under the assumption that its pm_domain field points to the domain member of
1243  * an object of type struct generic_pm_domain representing a PM domain
1244  * consisting of I/O devices.
1245  */
1246 static int genpd_prepare(struct device *dev)
1247 {
1248 	struct generic_pm_domain *genpd;
1249 	int ret;
1250 
1251 	dev_dbg(dev, "%s()\n", __func__);
1252 
1253 	genpd = dev_to_genpd(dev);
1254 	if (IS_ERR(genpd))
1255 		return -EINVAL;
1256 
1257 	genpd_lock(genpd);
1258 	genpd->prepared_count++;
1259 	genpd_unlock(genpd);
1260 
1261 	ret = pm_generic_prepare(dev);
1262 	if (ret < 0) {
1263 		genpd_lock(genpd);
1264 
1265 		genpd->prepared_count--;
1266 
1267 		genpd_unlock(genpd);
1268 	}
1269 
1270 	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
1271 	return ret >= 0 ? 0 : ret;
1272 }
1273 
1274 /**
1275  * genpd_finish_suspend - Completion of suspend or hibernation of device in an
1276  *   I/O pm domain.
1277  * @dev: Device to suspend.
1278  * @suspend_noirq: Generic suspend_noirq callback.
1279  * @resume_noirq: Generic resume_noirq callback.
1280  *
1281  * Stop the device and remove power from the domain if all devices in it have
1282  * been stopped.
1283  */
1284 static int genpd_finish_suspend(struct device *dev,
1285 				int (*suspend_noirq)(struct device *dev),
1286 				int (*resume_noirq)(struct device *dev))
1287 {
1288 	struct generic_pm_domain *genpd;
1289 	int ret = 0;
1290 
1291 	genpd = dev_to_genpd(dev);
1292 	if (IS_ERR(genpd))
1293 		return -EINVAL;
1294 
1295 	ret = suspend_noirq(dev);
1296 	if (ret)
1297 		return ret;
1298 
1299 	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
1300 		return 0;
1301 
1302 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1303 	    !pm_runtime_status_suspended(dev)) {
1304 		ret = genpd_stop_dev(genpd, dev);
1305 		if (ret) {
1306 			resume_noirq(dev);
1307 			return ret;
1308 		}
1309 	}
1310 
1311 	genpd_lock(genpd);
1312 	genpd->suspended_count++;
1313 	genpd_sync_power_off(genpd, true, 0);
1314 	genpd_unlock(genpd);
1315 
1316 	return 0;
1317 }
1318 
1319 /**
1320  * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
1321  * @dev: Device to suspend.
1322  *
1323  * Stop the device and remove power from the domain if all devices in it have
1324  * been stopped.
1325  */
1326 static int genpd_suspend_noirq(struct device *dev)
1327 {
1328 	dev_dbg(dev, "%s()\n", __func__);
1329 
1330 	return genpd_finish_suspend(dev,
1331 				    pm_generic_suspend_noirq,
1332 				    pm_generic_resume_noirq);
1333 }
1334 
1335 /**
1336  * genpd_finish_resume - Completion of resume of device in an I/O PM domain.
1337  * @dev: Device to resume.
1338  * @resume_noirq: Generic resume_noirq callback.
1339  *
1340  * Restore power to the device's PM domain, if necessary, and start the device.
1341  */
1342 static int genpd_finish_resume(struct device *dev,
1343 			       int (*resume_noirq)(struct device *dev))
1344 {
1345 	struct generic_pm_domain *genpd;
1346 	int ret;
1347 
1348 	dev_dbg(dev, "%s()\n", __func__);
1349 
1350 	genpd = dev_to_genpd(dev);
1351 	if (IS_ERR(genpd))
1352 		return -EINVAL;
1353 
1354 	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
1355 		return resume_noirq(dev);
1356 
1357 	genpd_lock(genpd);
1358 	genpd_sync_power_on(genpd, true, 0);
1359 	genpd->suspended_count--;
1360 	genpd_unlock(genpd);
1361 
1362 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1363 	    !pm_runtime_status_suspended(dev)) {
1364 		ret = genpd_start_dev(genpd, dev);
1365 		if (ret)
1366 			return ret;
1367 	}
1368 
1369 	return resume_noirq(dev);
1370 }
1371 
1372 /**
1373  * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
1374  * @dev: Device to resume.
1375  *
1376  * Restore power to the device's PM domain, if necessary, and start the device.
1377  */
1378 static int genpd_resume_noirq(struct device *dev)
1379 {
1380 	dev_dbg(dev, "%s()\n", __func__);
1381 
1382 	return genpd_finish_resume(dev, pm_generic_resume_noirq);
1383 }
1384 
1385 /**
1386  * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
1387  * @dev: Device to freeze.
1388  *
1389  * Carry out a late freeze of a device under the assumption that its
1390  * pm_domain field points to the domain member of an object of type
1391  * struct generic_pm_domain representing a power domain consisting of I/O
1392  * devices.
1393  */
1394 static int genpd_freeze_noirq(struct device *dev)
1395 {
1396 	dev_dbg(dev, "%s()\n", __func__);
1397 
1398 	return genpd_finish_suspend(dev,
1399 				    pm_generic_freeze_noirq,
1400 				    pm_generic_thaw_noirq);
1401 }
1402 
1403 /**
1404  * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
1405  * @dev: Device to thaw.
1406  *
1407  * Start the device, unless power has been removed from the domain already
1408  * before the system transition.
1409  */
1410 static int genpd_thaw_noirq(struct device *dev)
1411 {
1412 	dev_dbg(dev, "%s()\n", __func__);
1413 
1414 	return genpd_finish_resume(dev, pm_generic_thaw_noirq);
1415 }
1416 
1417 /**
1418  * genpd_poweroff_noirq - Completion of hibernation of device in an
1419  *   I/O PM domain.
1420  * @dev: Device to poweroff.
1421  *
1422  * Stop the device and remove power from the domain if all devices in it have
1423  * been stopped.
1424  */
1425 static int genpd_poweroff_noirq(struct device *dev)
1426 {
1427 	dev_dbg(dev, "%s()\n", __func__);
1428 
1429 	return genpd_finish_suspend(dev,
1430 				    pm_generic_poweroff_noirq,
1431 				    pm_generic_restore_noirq);
1432 }
1433 
1434 /**
1435  * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
1436  * @dev: Device to resume.
1437  *
1438  * Make sure the domain will be in the same power state as before the
1439  * hibernation the system is resuming from and start the device if necessary.
1440  */
1441 static int genpd_restore_noirq(struct device *dev)
1442 {
1443 	dev_dbg(dev, "%s()\n", __func__);
1444 
1445 	return genpd_finish_resume(dev, pm_generic_restore_noirq);
1446 }
1447 
1448 /**
1449  * genpd_complete - Complete power transition of a device in a power domain.
1450  * @dev: Device to complete the transition of.
1451  *
1452  * Complete a power transition of a device (during a system-wide power
1453  * transition) under the assumption that its pm_domain field points to the
1454  * domain member of an object of type struct generic_pm_domain representing
1455  * a power domain consisting of I/O devices.
1456  */
1457 static void genpd_complete(struct device *dev)
1458 {
1459 	struct generic_pm_domain *genpd;
1460 
1461 	dev_dbg(dev, "%s()\n", __func__);
1462 
1463 	genpd = dev_to_genpd(dev);
1464 	if (IS_ERR(genpd))
1465 		return;
1466 
1467 	pm_generic_complete(dev);
1468 
1469 	genpd_lock(genpd);
1470 
1471 	genpd->prepared_count--;
1472 	if (!genpd->prepared_count)
1473 		genpd_queue_power_off_work(genpd);
1474 
1475 	genpd_unlock(genpd);
1476 }
1477 
1478 static void genpd_switch_state(struct device *dev, bool suspend)
1479 {
1480 	struct generic_pm_domain *genpd;
1481 	bool use_lock;
1482 
1483 	genpd = dev_to_genpd_safe(dev);
1484 	if (!genpd)
1485 		return;
1486 
1487 	use_lock = genpd_is_irq_safe(genpd);
1488 
1489 	if (use_lock)
1490 		genpd_lock(genpd);
1491 
1492 	if (suspend) {
1493 		genpd->suspended_count++;
1494 		genpd_sync_power_off(genpd, use_lock, 0);
1495 	} else {
1496 		genpd_sync_power_on(genpd, use_lock, 0);
1497 		genpd->suspended_count--;
1498 	}
1499 
1500 	if (use_lock)
1501 		genpd_unlock(genpd);
1502 }
1503 
1504 /**
1505  * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
1506  * @dev: The device that is attached to the genpd, that can be suspended.
1507  *
1508  * This routine should typically be called for a device that needs to be
1509  * suspended during the syscore suspend phase. It may also be called during
1510  * suspend-to-idle to suspend a corresponding CPU device that is attached to a
1511  * genpd.
1512  */
1513 void dev_pm_genpd_suspend(struct device *dev)
1514 {
1515 	genpd_switch_state(dev, true);
1516 }
1517 EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);
1518 
1519 /**
1520  * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
1521  * @dev: The device that is attached to the genpd, which needs to be resumed.
1522  *
1523  * This routine should typically be called for a device that needs to be resumed
1524  * during the syscore resume phase. It may also be called during suspend-to-idle
1525  * to resume a corresponding CPU device that is attached to a genpd.
1526  */
1527 void dev_pm_genpd_resume(struct device *dev)
1528 {
1529 	genpd_switch_state(dev, false);
1530 }
1531 EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);
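
/*
 * Editorial usage sketch (not part of the original file): a hypothetical
 * syscore-level user of a device attached to a genpd could wire these
 * helpers up as follows (foo_dev is assumed to be saved at probe time, and
 * <linux/syscore_ops.h> provides struct syscore_ops):
 *
 *	static int foo_syscore_suspend(void)
 *	{
 *		dev_pm_genpd_suspend(foo_dev);
 *		return 0;
 *	}
 *
 *	static void foo_syscore_resume(void)
 *	{
 *		dev_pm_genpd_resume(foo_dev);
 *	}
 *
 *	static struct syscore_ops foo_syscore_ops = {
 *		.suspend = foo_syscore_suspend,
 *		.resume = foo_syscore_resume,
 *	};
 *
 * registered with register_syscore_ops(&foo_syscore_ops).
 */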
1532 
1533 #else /* !CONFIG_PM_SLEEP */
1534 
1535 #define genpd_prepare		NULL
1536 #define genpd_suspend_noirq	NULL
1537 #define genpd_resume_noirq	NULL
1538 #define genpd_freeze_noirq	NULL
1539 #define genpd_thaw_noirq	NULL
1540 #define genpd_poweroff_noirq	NULL
1541 #define genpd_restore_noirq	NULL
1542 #define genpd_complete		NULL
1543 
1544 #endif /* CONFIG_PM_SLEEP */
1545 
1546 static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
1547 							   bool has_governor)
1548 {
1549 	struct generic_pm_domain_data *gpd_data;
1550 	struct gpd_timing_data *td;
1551 	int ret;
1552 
1553 	ret = dev_pm_get_subsys_data(dev);
1554 	if (ret)
1555 		return ERR_PTR(ret);
1556 
1557 	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1558 	if (!gpd_data) {
1559 		ret = -ENOMEM;
1560 		goto err_put;
1561 	}
1562 
1563 	gpd_data->base.dev = dev;
1564 	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1565 
1566 	/* Allocate data used by a governor. */
1567 	if (has_governor) {
1568 		td = kzalloc(sizeof(*td), GFP_KERNEL);
1569 		if (!td) {
1570 			ret = -ENOMEM;
1571 			goto err_free;
1572 		}
1573 
1574 		td->constraint_changed = true;
1575 		td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
1576 		td->next_wakeup = KTIME_MAX;
1577 		gpd_data->td = td;
1578 	}
1579 
1580 	spin_lock_irq(&dev->power.lock);
1581 
1582 	if (dev->power.subsys_data->domain_data)
1583 		ret = -EINVAL;
1584 	else
1585 		dev->power.subsys_data->domain_data = &gpd_data->base;
1586 
1587 	spin_unlock_irq(&dev->power.lock);
1588 
1589 	if (ret)
1590 		goto err_free;
1591 
1592 	return gpd_data;
1593 
1594  err_free:
1595 	kfree(gpd_data->td);
1596 	kfree(gpd_data);
1597  err_put:
1598 	dev_pm_put_subsys_data(dev);
1599 	return ERR_PTR(ret);
1600 }
1601 
1602 static void genpd_free_dev_data(struct device *dev,
1603 				struct generic_pm_domain_data *gpd_data)
1604 {
1605 	spin_lock_irq(&dev->power.lock);
1606 
1607 	dev->power.subsys_data->domain_data = NULL;
1608 
1609 	spin_unlock_irq(&dev->power.lock);
1610 
1611 	kfree(gpd_data->td);
1612 	kfree(gpd_data);
1613 	dev_pm_put_subsys_data(dev);
1614 }
1615 
1616 static void genpd_update_cpumask(struct generic_pm_domain *genpd,
1617 				 int cpu, bool set, unsigned int depth)
1618 {
1619 	struct gpd_link *link;
1620 
1621 	if (!genpd_is_cpu_domain(genpd))
1622 		return;
1623 
1624 	list_for_each_entry(link, &genpd->child_links, child_node) {
1625 		struct generic_pm_domain *parent = link->parent;
1626 
1627 		genpd_lock_nested(parent, depth + 1);
1628 		genpd_update_cpumask(parent, cpu, set, depth + 1);
1629 		genpd_unlock(parent);
1630 	}
1631 
1632 	if (set)
1633 		cpumask_set_cpu(cpu, genpd->cpus);
1634 	else
1635 		cpumask_clear_cpu(cpu, genpd->cpus);
1636 }
1637 
1638 static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
1639 {
1640 	if (cpu >= 0)
1641 		genpd_update_cpumask(genpd, cpu, true, 0);
1642 }
1643 
1644 static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
1645 {
1646 	if (cpu >= 0)
1647 		genpd_update_cpumask(genpd, cpu, false, 0);
1648 }
1649 
1650 static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
1651 {
1652 	int cpu;
1653 
1654 	if (!genpd_is_cpu_domain(genpd))
1655 		return -1;
1656 
1657 	for_each_possible_cpu(cpu) {
1658 		if (get_cpu_device(cpu) == dev)
1659 			return cpu;
1660 	}
1661 
1662 	return -1;
1663 }
1664 
1665 static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1666 			    struct device *base_dev)
1667 {
1668 	struct genpd_governor_data *gd = genpd->gd;
1669 	struct generic_pm_domain_data *gpd_data;
1670 	int ret;
1671 
1672 	dev_dbg(dev, "%s()\n", __func__);
1673 
1674 	gpd_data = genpd_alloc_dev_data(dev, gd);
1675 	if (IS_ERR(gpd_data))
1676 		return PTR_ERR(gpd_data);
1677 
1678 	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);
1679 
1680 	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1681 	if (ret)
1682 		goto out;
1683 
1684 	genpd_lock(genpd);
1685 
1686 	genpd_set_cpumask(genpd, gpd_data->cpu);
1687 	dev_pm_domain_set(dev, &genpd->domain);
1688 
1689 	genpd->device_count++;
1690 	if (gd)
1691 		gd->max_off_time_changed = true;
1692 
1693 	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1694 
1695 	genpd_unlock(genpd);
1696  out:
1697 	if (ret)
1698 		genpd_free_dev_data(dev, gpd_data);
1699 	else
1700 		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
1701 					DEV_PM_QOS_RESUME_LATENCY);
1702 
1703 	return ret;
1704 }
1705 
1706 /**
1707  * pm_genpd_add_device - Add a device to an I/O PM domain.
1708  * @genpd: PM domain to add the device to.
1709  * @dev: Device to be added.
1710  */
1711 int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
1712 {
1713 	int ret;
1714 
1715 	if (!genpd || !dev)
1716 		return -EINVAL;
1717 
1718 	mutex_lock(&gpd_list_lock);
1719 	ret = genpd_add_device(genpd, dev, dev);
1720 	mutex_unlock(&gpd_list_lock);
1721 
1722 	return ret;
1723 }
1724 EXPORT_SYMBOL_GPL(pm_genpd_add_device);
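
/*
 * Editorial usage sketch (not part of the original file): platform code that
 * creates a domain and attaches a device to it by hand (rather than via DT)
 * might look roughly like this; all names prefixed "my_" are hypothetical:
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name = "my_pd",
 *		.power_on = my_pd_power_on,
 *		.power_off = my_pd_power_off,
 *	};
 *
 *	ret = pm_genpd_init(&my_pd, NULL, true);
 *	if (!ret)
 *		ret = pm_genpd_add_device(&my_pd, &my_platform_device->dev);
 *
 * The device is detached again with pm_genpd_remove_device() on teardown.
 */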
1725 
1726 static int genpd_remove_device(struct generic_pm_domain *genpd,
1727 			       struct device *dev)
1728 {
1729 	struct generic_pm_domain_data *gpd_data;
1730 	struct pm_domain_data *pdd;
1731 	int ret = 0;
1732 
1733 	dev_dbg(dev, "%s()\n", __func__);
1734 
1735 	pdd = dev->power.subsys_data->domain_data;
1736 	gpd_data = to_gpd_data(pdd);
1737 	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
1738 				   DEV_PM_QOS_RESUME_LATENCY);
1739 
1740 	genpd_lock(genpd);
1741 
1742 	if (genpd->prepared_count > 0) {
1743 		ret = -EAGAIN;
1744 		goto out;
1745 	}
1746 
1747 	genpd->device_count--;
1748 	if (genpd->gd)
1749 		genpd->gd->max_off_time_changed = true;
1750 
1751 	genpd_clear_cpumask(genpd, gpd_data->cpu);
1752 	dev_pm_domain_set(dev, NULL);
1753 
1754 	list_del_init(&pdd->list_node);
1755 
1756 	genpd_unlock(genpd);
1757 
1758 	if (genpd->detach_dev)
1759 		genpd->detach_dev(genpd, dev);
1760 
1761 	genpd_free_dev_data(dev, gpd_data);
1762 
1763 	return 0;
1764 
1765  out:
1766 	genpd_unlock(genpd);
1767 	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);
1768 
1769 	return ret;
1770 }
1771 
1772 /**
1773  * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1774  * @dev: Device to be removed.
1775  */
1776 int pm_genpd_remove_device(struct device *dev)
1777 {
1778 	struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);
1779 
1780 	if (!genpd)
1781 		return -EINVAL;
1782 
1783 	return genpd_remove_device(genpd, dev);
1784 }
1785 EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
1786 
1787 /**
1788  * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
1789  *
1790  * @dev: Device that should be associated with the notifier
1791  * @nb: The notifier block to register
1792  *
1793  * Users may call this function to add a genpd power on/off notifier for an
1794  * attached @dev. Only one notifier per device is allowed. The notifier is
1795  * sent when genpd is powering on/off the PM domain.
1796  *
1797  * It is assumed that the caller guarantees that the genpd will not be detached
1798  * while this routine is running.
1799  *
1800  * Returns 0 on success and negative error values on failures.
1801  */
1802 int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
1803 {
1804 	struct generic_pm_domain *genpd;
1805 	struct generic_pm_domain_data *gpd_data;
1806 	int ret;
1807 
1808 	genpd = dev_to_genpd_safe(dev);
1809 	if (!genpd)
1810 		return -ENODEV;
1811 
1812 	if (WARN_ON(!dev->power.subsys_data ||
1813 		     !dev->power.subsys_data->domain_data))
1814 		return -EINVAL;
1815 
1816 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1817 	if (gpd_data->power_nb)
1818 		return -EEXIST;
1819 
1820 	genpd_lock(genpd);
1821 	ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
1822 	genpd_unlock(genpd);
1823 
1824 	if (ret) {
1825 		dev_warn(dev, "failed to add notifier for PM domain %s\n",
1826 			 genpd->name);
1827 		return ret;
1828 	}
1829 
1830 	gpd_data->power_nb = nb;
1831 	return 0;
1832 }
1833 EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
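
/*
 * Editorial usage sketch (not part of the original file): a hypothetical
 * consumer that must save context before its domain goes down and restore it
 * afterwards could register a power notifier like this (the foo_* helpers
 * are made up for illustration):
 *
 *	static int foo_pd_notify(struct notifier_block *nb,
 *				 unsigned long action, void *data)
 *	{
 *		switch (action) {
 *		case GENPD_NOTIFY_PRE_OFF:
 *			foo_save_context();
 *			break;
 *		case GENPD_NOTIFY_ON:
 *			foo_restore_context();
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_pd_nb = { .notifier_call = foo_pd_notify };
 *
 *	ret = dev_pm_genpd_add_notifier(dev, &foo_pd_nb);
 */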
1834 
1835 /**
1836  * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
1837  *
1838  * @dev: Device that is associated with the notifier
1839  *
1840  * Users may call this function to remove a genpd power on/off notifier for an
1841  * attached @dev.
1842  *
1843  * It is assumed that the caller guarantees that the genpd will not be detached
1844  * while this routine is running.
1845  *
1846  * Returns 0 on success and negative error values on failures.
1847  */
1848 int dev_pm_genpd_remove_notifier(struct device *dev)
1849 {
1850 	struct generic_pm_domain *genpd;
1851 	struct generic_pm_domain_data *gpd_data;
1852 	int ret;
1853 
1854 	genpd = dev_to_genpd_safe(dev);
1855 	if (!genpd)
1856 		return -ENODEV;
1857 
1858 	if (WARN_ON(!dev->power.subsys_data ||
1859 		     !dev->power.subsys_data->domain_data))
1860 		return -EINVAL;
1861 
1862 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1863 	if (!gpd_data->power_nb)
1864 		return -ENODEV;
1865 
1866 	genpd_lock(genpd);
1867 	ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
1868 					    gpd_data->power_nb);
1869 	genpd_unlock(genpd);
1870 
1871 	if (ret) {
1872 		dev_warn(dev, "failed to remove notifier for PM domain %s\n",
1873 			 genpd->name);
1874 		return ret;
1875 	}
1876 
1877 	gpd_data->power_nb = NULL;
1878 	return 0;
1879 }
1880 EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);
1881 
1882 static int genpd_add_subdomain(struct generic_pm_domain *genpd,
1883 			       struct generic_pm_domain *subdomain)
1884 {
1885 	struct gpd_link *link, *itr;
1886 	int ret = 0;
1887 
1888 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1889 	    || genpd == subdomain)
1890 		return -EINVAL;
1891 
1892 	/*
1893 	 * If the subdomain can be powered on/off in an IRQ safe
1894 	 * context, the parent must be IRQ safe as well, since it
1895 	 * may need to be powered on/off from that same context.
1896 	 */
1897 	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
1898 		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
1899 				genpd->name, subdomain->name);
1900 		return -EINVAL;
1901 	}
1902 
1903 	link = kzalloc(sizeof(*link), GFP_KERNEL);
1904 	if (!link)
1905 		return -ENOMEM;
1906 
1907 	genpd_lock(subdomain);
1908 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1909 
1910 	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
1911 		ret = -EINVAL;
1912 		goto out;
1913 	}
1914 
1915 	list_for_each_entry(itr, &genpd->parent_links, parent_node) {
1916 		if (itr->child == subdomain && itr->parent == genpd) {
1917 			ret = -EINVAL;
1918 			goto out;
1919 		}
1920 	}
1921 
1922 	link->parent = genpd;
1923 	list_add_tail(&link->parent_node, &genpd->parent_links);
1924 	link->child = subdomain;
1925 	list_add_tail(&link->child_node, &subdomain->child_links);
1926 	if (genpd_status_on(subdomain))
1927 		genpd_sd_counter_inc(genpd);
1928 
1929  out:
1930 	genpd_unlock(genpd);
1931 	genpd_unlock(subdomain);
1932 	if (ret)
1933 		kfree(link);
1934 	return ret;
1935 }
1936 
1937 /**
1938  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1939  * @genpd: Leader PM domain to add the subdomain to.
1940  * @subdomain: Subdomain to be added.
1941  */
1942 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1943 			   struct generic_pm_domain *subdomain)
1944 {
1945 	int ret;
1946 
1947 	mutex_lock(&gpd_list_lock);
1948 	ret = genpd_add_subdomain(genpd, subdomain);
1949 	mutex_unlock(&gpd_list_lock);
1950 
1951 	return ret;
1952 }
1953 EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
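
/*
 * Example (illustrative sketch only): a provider making "foo_core_pd" a
 * subdomain of "foo_top_pd". Both domains are hypothetical and assumed to
 * have been initialized with pm_genpd_init() already.
 *
 *	ret = pm_genpd_add_subdomain(&foo_top_pd, &foo_core_pd);
 *	if (ret)
 *		pr_err("failed to add subdomain: %d\n", ret);
 */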
1954 
1955 /**
1956  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1957  * @genpd: Leader PM domain to remove the subdomain from.
1958  * @subdomain: Subdomain to be removed.
1959  */
1960 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1961 			      struct generic_pm_domain *subdomain)
1962 {
1963 	struct gpd_link *l, *link;
1964 	int ret = -EINVAL;
1965 
1966 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1967 		return -EINVAL;
1968 
1969 	genpd_lock(subdomain);
1970 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1971 
1972 	if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
1973 		pr_warn("%s: unable to remove subdomain %s\n",
1974 			genpd->name, subdomain->name);
1975 		ret = -EBUSY;
1976 		goto out;
1977 	}
1978 
1979 	list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
1980 		if (link->child != subdomain)
1981 			continue;
1982 
1983 		list_del(&link->parent_node);
1984 		list_del(&link->child_node);
1985 		kfree(link);
1986 		if (genpd_status_on(subdomain))
1987 			genpd_sd_counter_dec(genpd);
1988 
1989 		ret = 0;
1990 		break;
1991 	}
1992 
1993 out:
1994 	genpd_unlock(genpd);
1995 	genpd_unlock(subdomain);
1996 
1997 	return ret;
1998 }
1999 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
2000 
2001 static void genpd_free_default_power_state(struct genpd_power_state *states,
2002 					   unsigned int state_count)
2003 {
2004 	kfree(states);
2005 }
2006 
2007 static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
2008 {
2009 	struct genpd_power_state *state;
2010 
2011 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2012 	if (!state)
2013 		return -ENOMEM;
2014 
2015 	genpd->states = state;
2016 	genpd->state_count = 1;
2017 	genpd->free_states = genpd_free_default_power_state;
2018 
2019 	return 0;
2020 }
2021 
2022 static int genpd_alloc_data(struct generic_pm_domain *genpd)
2023 {
2024 	struct genpd_governor_data *gd = NULL;
2025 	int ret;
2026 
2027 	if (genpd_is_cpu_domain(genpd) &&
2028 	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
2029 		return -ENOMEM;
2030 
2031 	if (genpd->gov) {
2032 		gd = kzalloc(sizeof(*gd), GFP_KERNEL);
2033 		if (!gd) {
2034 			ret = -ENOMEM;
2035 			goto free;
2036 		}
2037 
2038 		gd->max_off_time_ns = -1;
2039 		gd->max_off_time_changed = true;
2040 		gd->next_wakeup = KTIME_MAX;
2041 		gd->next_hrtimer = KTIME_MAX;
2042 	}
2043 
2044 	/* Use only one "off" state if there were no states declared */
2045 	if (genpd->state_count == 0) {
2046 		ret = genpd_set_default_power_state(genpd);
2047 		if (ret)
2048 			goto free;
2049 	}
2050 
2051 	genpd->gd = gd;
2052 	return 0;
2053 
2054 free:
2055 	if (genpd_is_cpu_domain(genpd))
2056 		free_cpumask_var(genpd->cpus);
2057 	kfree(gd);
2058 	return ret;
2059 }
2060 
2061 static void genpd_free_data(struct generic_pm_domain *genpd)
2062 {
2063 	if (genpd_is_cpu_domain(genpd))
2064 		free_cpumask_var(genpd->cpus);
2065 	if (genpd->free_states)
2066 		genpd->free_states(genpd->states, genpd->state_count);
2067 	kfree(genpd->gd);
2068 }
2069 
2070 static void genpd_lock_init(struct generic_pm_domain *genpd)
2071 {
2072 	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
2073 		spin_lock_init(&genpd->slock);
2074 		genpd->lock_ops = &genpd_spin_ops;
2075 	} else {
2076 		mutex_init(&genpd->mlock);
2077 		genpd->lock_ops = &genpd_mtx_ops;
2078 	}
2079 }
2080 
2081 /**
2082  * pm_genpd_init - Initialize a generic I/O PM domain object.
2083  * @genpd: PM domain object to initialize.
2084  * @gov: PM domain governor to associate with the domain (may be NULL).
2085  * @is_off: Initial value of the domain's power_is_off field.
2086  *
2087  * Returns 0 on successful initialization, else a negative error code.
2088  */
2089 int pm_genpd_init(struct generic_pm_domain *genpd,
2090 		  struct dev_power_governor *gov, bool is_off)
2091 {
2092 	int ret;
2093 
2094 	if (IS_ERR_OR_NULL(genpd))
2095 		return -EINVAL;
2096 
2097 	INIT_LIST_HEAD(&genpd->parent_links);
2098 	INIT_LIST_HEAD(&genpd->child_links);
2099 	INIT_LIST_HEAD(&genpd->dev_list);
2100 	RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
2101 	genpd_lock_init(genpd);
2102 	genpd->gov = gov;
2103 	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
2104 	atomic_set(&genpd->sd_count, 0);
2105 	genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
2106 	genpd->device_count = 0;
2107 	genpd->provider = NULL;
2108 	genpd->has_provider = false;
2109 	genpd->accounting_time = ktime_get_mono_fast_ns();
2110 	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
2111 	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
2112 	genpd->domain.ops.prepare = genpd_prepare;
2113 	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
2114 	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
2115 	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
2116 	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
2117 	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
2118 	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
2119 	genpd->domain.ops.complete = genpd_complete;
2120 	genpd->domain.start = genpd_dev_pm_start;
2121 	genpd->domain.set_performance_state = genpd_dev_pm_set_performance_state;
2122 
2123 	if (genpd->flags & GENPD_FLAG_PM_CLK) {
2124 		genpd->dev_ops.stop = pm_clk_suspend;
2125 		genpd->dev_ops.start = pm_clk_resume;
2126 	}
2127 
2128 	/* The always-on governor works better with the corresponding flag. */
2129 	if (gov == &pm_domain_always_on_gov)
2130 		genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
2131 
2132 	/* Always-on domains must be powered on at initialization. */
2133 	if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
2134 			!genpd_status_on(genpd)) {
2135 		pr_err("always-on PM domain %s is not on\n", genpd->name);
2136 		return -EINVAL;
2137 	}
2138 
2139 	/* Multiple states but no governor doesn't make sense. */
2140 	if (!gov && genpd->state_count > 1)
2141 		pr_warn("%s: no governor for states\n", genpd->name);
2142 
2143 	ret = genpd_alloc_data(genpd);
2144 	if (ret)
2145 		return ret;
2146 
2147 	device_initialize(&genpd->dev);
2148 	dev_set_name(&genpd->dev, "%s", genpd->name);
2149 
2150 	mutex_lock(&gpd_list_lock);
2151 	list_add(&genpd->gpd_list_node, &gpd_list);
2152 	mutex_unlock(&gpd_list_lock);
2153 	genpd_debug_add(genpd);
2154 
2155 	return 0;
2156 }
2157 EXPORT_SYMBOL_GPL(pm_genpd_init);
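
/*
 * Example (illustrative sketch only): a minimal provider initializing one
 * domain. The foo_pd object and its foo_pd_power_on()/foo_pd_power_off()
 * callbacks are assumptions for this sketch; real providers typically also
 * set flags, idle states and a governor as needed.
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "foo_pd",
 *		.power_on = foo_pd_power_on,
 *		.power_off = foo_pd_power_off,
 *	};
 *
 *	ret = pm_genpd_init(&foo_pd, NULL, true);
 *	if (ret)
 *		return ret;
 */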
2158 
2159 static int genpd_remove(struct generic_pm_domain *genpd)
2160 {
2161 	struct gpd_link *l, *link;
2162 
2163 	if (IS_ERR_OR_NULL(genpd))
2164 		return -EINVAL;
2165 
2166 	genpd_lock(genpd);
2167 
2168 	if (genpd->has_provider) {
2169 		genpd_unlock(genpd);
2170 		pr_err("Provider present, unable to remove %s\n", genpd->name);
2171 		return -EBUSY;
2172 	}
2173 
2174 	if (!list_empty(&genpd->parent_links) || genpd->device_count) {
2175 		genpd_unlock(genpd);
2176 		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
2177 		return -EBUSY;
2178 	}
2179 
2180 	list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
2181 		list_del(&link->parent_node);
2182 		list_del(&link->child_node);
2183 		kfree(link);
2184 	}
2185 
2186 	list_del(&genpd->gpd_list_node);
2187 	genpd_unlock(genpd);
2188 	genpd_debug_remove(genpd);
2189 	cancel_work_sync(&genpd->power_off_work);
2190 	genpd_free_data(genpd);
2191 
2192 	pr_debug("%s: removed %s\n", __func__, genpd->name);
2193 
2194 	return 0;
2195 }
2196 
2197 /**
2198  * pm_genpd_remove - Remove a generic I/O PM domain
2199  * @genpd: Pointer to PM domain that is to be removed.
2200  *
2201  * To remove the PM domain, this function:
2202  *  - Removes the PM domain as a subdomain to any parent domains,
2203  *    if it was added.
2204  *  - Removes the PM domain from the list of registered PM domains.
2205  *
2206  * The PM domain will only be removed if the associated provider has
2207  * been removed, it is not a parent to any other PM domain, and it has
2208  * no devices associated with it.
2209  */
2210 int pm_genpd_remove(struct generic_pm_domain *genpd)
2211 {
2212 	int ret;
2213 
2214 	mutex_lock(&gpd_list_lock);
2215 	ret = genpd_remove(genpd);
2216 	mutex_unlock(&gpd_list_lock);
2217 
2218 	return ret;
2219 }
2220 EXPORT_SYMBOL_GPL(pm_genpd_remove);
2221 
2222 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
2223 
2224 /*
2225  * Device Tree based PM domain providers.
2226  *
2227  * The code below implements generic device tree based PM domain providers that
2228  * bind device tree nodes with generic PM domains registered in the system.
2229  *
2230  * Any driver that registers generic PM domains and needs to support binding of
2231  * devices to these domains is supposed to register a PM domain provider, which
2232  * maps a PM domain specifier retrieved from the device tree to a PM domain.
2233  *
2234  * Two simple mapping functions have been provided for convenience:
2235  *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
2236  *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
2237  *    index.
2238  */
2239 
2240 /**
2241  * struct of_genpd_provider - PM domain provider registration structure
2242  * @link: Entry in global list of PM domain providers
2243  * @node: Pointer to device tree node of PM domain provider
2244  * @xlate: Provider-specific xlate callback mapping a set of specifier cells
2245  *         into a PM domain.
2246  * @data: context pointer to be passed into @xlate callback
2247  */
2248 struct of_genpd_provider {
2249 	struct list_head link;
2250 	struct device_node *node;
2251 	genpd_xlate_t xlate;
2252 	void *data;
2253 };
2254 
2255 /* List of registered PM domain providers. */
2256 static LIST_HEAD(of_genpd_providers);
2257 /* Mutex to protect the list above. */
2258 static DEFINE_MUTEX(of_genpd_mutex);
2259 
2260 /**
2261  * genpd_xlate_simple() - Xlate function for direct node-domain mapping
2262  * @genpdspec: OF phandle args to map into a PM domain
2263  * @data: xlate function private data - pointer to struct generic_pm_domain
2264  *
2265  * This is a generic xlate function that can be used to model PM domains that
2266  * have their own device tree nodes. The private data of xlate function needs
2267  * to be a valid pointer to struct generic_pm_domain.
2268  */
2269 static struct generic_pm_domain *genpd_xlate_simple(
2270 					const struct of_phandle_args *genpdspec,
2271 					void *data)
2272 {
2273 	return data;
2274 }
2275 
2276 /**
2277  * genpd_xlate_onecell() - Xlate function using a single index.
2278  * @genpdspec: OF phandle args to map into a PM domain
2279  * @data: xlate function private data - pointer to struct genpd_onecell_data
2280  *
2281  * This is a generic xlate function that can be used to model simple PM domain
2282  * controllers that have one device tree node and provide multiple PM domains.
2283  * A single cell is used as an index into an array of PM domains specified in
2284  * the genpd_onecell_data struct when registering the provider.
2285  */
2286 static struct generic_pm_domain *genpd_xlate_onecell(
2287 					const struct of_phandle_args *genpdspec,
2288 					void *data)
2289 {
2290 	struct genpd_onecell_data *genpd_data = data;
2291 	unsigned int idx = genpdspec->args[0];
2292 
2293 	if (genpdspec->args_count != 1)
2294 		return ERR_PTR(-EINVAL);
2295 
2296 	if (idx >= genpd_data->num_domains) {
2297 		pr_err("%s: invalid domain index %u\n", __func__, idx);
2298 		return ERR_PTR(-EINVAL);
2299 	}
2300 
2301 	if (!genpd_data->domains[idx])
2302 		return ERR_PTR(-ENOENT);
2303 
2304 	return genpd_data->domains[idx];
2305 }
2306 
2307 /**
2308  * genpd_add_provider() - Register a PM domain provider for a node
2309  * @np: Device node pointer associated with the PM domain provider.
2310  * @xlate: Callback for decoding PM domain from phandle arguments.
2311  * @data: Context pointer for @xlate callback.
2312  */
2313 static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
2314 			      void *data)
2315 {
2316 	struct of_genpd_provider *cp;
2317 
2318 	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2319 	if (!cp)
2320 		return -ENOMEM;
2321 
2322 	cp->node = of_node_get(np);
2323 	cp->data = data;
2324 	cp->xlate = xlate;
2325 	fwnode_dev_initialized(&np->fwnode, true);
2326 
2327 	mutex_lock(&of_genpd_mutex);
2328 	list_add(&cp->link, &of_genpd_providers);
2329 	mutex_unlock(&of_genpd_mutex);
2330 	pr_debug("Added domain provider from %pOF\n", np);
2331 
2332 	return 0;
2333 }
2334 
2335 static bool genpd_present(const struct generic_pm_domain *genpd)
2336 {
2337 	bool ret = false;
2338 	const struct generic_pm_domain *gpd;
2339 
2340 	mutex_lock(&gpd_list_lock);
2341 	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2342 		if (gpd == genpd) {
2343 			ret = true;
2344 			break;
2345 		}
2346 	}
2347 	mutex_unlock(&gpd_list_lock);
2348 
2349 	return ret;
2350 }
2351 
2352 /**
2353  * of_genpd_add_provider_simple() - Register a simple PM domain provider
2354  * @np: Device node pointer associated with the PM domain provider.
2355  * @genpd: Pointer to PM domain associated with the PM domain provider.
2356  */
2357 int of_genpd_add_provider_simple(struct device_node *np,
2358 				 struct generic_pm_domain *genpd)
2359 {
2360 	int ret;
2361 
2362 	if (!np || !genpd)
2363 		return -EINVAL;
2364 
2365 	if (!genpd_present(genpd))
2366 		return -EINVAL;
2367 
2368 	genpd->dev.of_node = np;
2369 
2370 	/* Parse genpd OPP table */
2371 	if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
2372 		ret = dev_pm_opp_of_add_table(&genpd->dev);
2373 		if (ret)
2374 			return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n");
2375 
2376 		/*
2377 		 * Save table for faster processing while setting performance
2378 		 * state.
2379 		 */
2380 		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2381 		WARN_ON(IS_ERR(genpd->opp_table));
2382 	}
2383 
2384 	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
2385 	if (ret) {
2386 		if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
2387 			dev_pm_opp_put_opp_table(genpd->opp_table);
2388 			dev_pm_opp_of_remove_table(&genpd->dev);
2389 		}
2390 
2391 		return ret;
2392 	}
2393 
2394 	genpd->provider = &np->fwnode;
2395 	genpd->has_provider = true;
2396 
2397 	return 0;
2398 }
2399 EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
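
/*
 * Example (illustrative sketch only): registering a domain that was set up
 * with pm_genpd_init() as a simple 1:1 provider for the driver's DT node.
 * pdev and foo_pd are assumptions for this sketch.
 *
 *	ret = of_genpd_add_provider_simple(pdev->dev.of_node, &foo_pd);
 *	if (ret)
 *		return ret;
 */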
2400 
2401 /**
2402  * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
2403  * @np: Device node pointer associated with the PM domain provider.
2404  * @data: Pointer to the data associated with the PM domain provider.
2405  */
2406 int of_genpd_add_provider_onecell(struct device_node *np,
2407 				  struct genpd_onecell_data *data)
2408 {
2409 	struct generic_pm_domain *genpd;
2410 	unsigned int i;
2411 	int ret = -EINVAL;
2412 
2413 	if (!np || !data)
2414 		return -EINVAL;
2415 
2416 	if (!data->xlate)
2417 		data->xlate = genpd_xlate_onecell;
2418 
2419 	for (i = 0; i < data->num_domains; i++) {
2420 		genpd = data->domains[i];
2421 
2422 		if (!genpd)
2423 			continue;
2424 		if (!genpd_present(genpd))
2425 			goto error;
2426 
2427 		genpd->dev.of_node = np;
2428 
2429 		/* Parse genpd OPP table */
2430 		if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
2431 			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
2432 			if (ret) {
2433 				dev_err_probe(&genpd->dev, ret,
2434 					      "Failed to add OPP table for index %d\n", i);
2435 				goto error;
2436 			}
2437 
2438 			/*
2439 			 * Save table for faster processing while setting
2440 			 * performance state.
2441 			 */
2442 			genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2443 			WARN_ON(IS_ERR(genpd->opp_table));
2444 		}
2445 
2446 		genpd->provider = &np->fwnode;
2447 		genpd->has_provider = true;
2448 	}
2449 
2450 	ret = genpd_add_provider(np, data->xlate, data);
2451 	if (ret < 0)
2452 		goto error;
2453 
2454 	return 0;
2455 
2456 error:
2457 	while (i--) {
2458 		genpd = data->domains[i];
2459 
2460 		if (!genpd)
2461 			continue;
2462 
2463 		genpd->provider = NULL;
2464 		genpd->has_provider = false;
2465 
2466 		if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
2467 			dev_pm_opp_put_opp_table(genpd->opp_table);
2468 			dev_pm_opp_of_remove_table(&genpd->dev);
2469 		}
2470 	}
2471 
2472 	return ret;
2473 }
2474 EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
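
/*
 * Example (illustrative sketch only): a provider exposing several domains
 * behind one DT node, selected by a single index cell. The foo_domains[]
 * array and FOO_NR_PDS are assumptions for this sketch; each non-NULL entry
 * must have been initialized with pm_genpd_init() beforehand.
 *
 *	static struct generic_pm_domain *foo_domains[FOO_NR_PDS];
 *
 *	static struct genpd_onecell_data foo_pd_data = {
 *		.domains = foo_domains,
 *		.num_domains = ARRAY_SIZE(foo_domains),
 *	};
 *
 *	ret = of_genpd_add_provider_onecell(pdev->dev.of_node, &foo_pd_data);
 */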
2475 
2476 /**
2477  * of_genpd_del_provider() - Remove a previously registered PM domain provider
2478  * @np: Device node pointer associated with the PM domain provider
2479  */
2480 void of_genpd_del_provider(struct device_node *np)
2481 {
2482 	struct of_genpd_provider *cp, *tmp;
2483 	struct generic_pm_domain *gpd;
2484 
2485 	mutex_lock(&gpd_list_lock);
2486 	mutex_lock(&of_genpd_mutex);
2487 	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
2488 		if (cp->node == np) {
2489 			/*
2490 			 * For each PM domain associated with the
2491 			 * provider, set the 'has_provider' to false
2492 			 * so that the PM domain can be safely removed.
2493 			 */
2494 			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2495 				if (gpd->provider == &np->fwnode) {
2496 					gpd->has_provider = false;
2497 
2498 					if (genpd_is_opp_table_fw(gpd) || !gpd->set_performance_state)
2499 						continue;
2500 
2501 					dev_pm_opp_put_opp_table(gpd->opp_table);
2502 					dev_pm_opp_of_remove_table(&gpd->dev);
2503 				}
2504 			}
2505 
2506 			fwnode_dev_initialized(&cp->node->fwnode, false);
2507 			list_del(&cp->link);
2508 			of_node_put(cp->node);
2509 			kfree(cp);
2510 			break;
2511 		}
2512 	}
2513 	mutex_unlock(&of_genpd_mutex);
2514 	mutex_unlock(&gpd_list_lock);
2515 }
2516 EXPORT_SYMBOL_GPL(of_genpd_del_provider);
2517 
2518 /**
2519  * genpd_get_from_provider() - Look-up PM domain
2520  * @genpdspec: OF phandle args to use for look-up
2521  *
2522  * Looks for a PM domain provider under the node specified by @genpdspec and if
2523  * found, uses xlate function of the provider to map phandle args to a PM
2524  * domain.
2525  *
2526  * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2527  * on failure.
2528  */
2529 static struct generic_pm_domain *genpd_get_from_provider(
2530 					const struct of_phandle_args *genpdspec)
2531 {
2532 	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2533 	struct of_genpd_provider *provider;
2534 
2535 	if (!genpdspec)
2536 		return ERR_PTR(-EINVAL);
2537 
2538 	mutex_lock(&of_genpd_mutex);
2539 
2540 	/* Check if we have such a provider in our array */
2541 	list_for_each_entry(provider, &of_genpd_providers, link) {
2542 		if (provider->node == genpdspec->np)
2543 			genpd = provider->xlate(genpdspec, provider->data);
2544 		if (!IS_ERR(genpd))
2545 			break;
2546 	}
2547 
2548 	mutex_unlock(&of_genpd_mutex);
2549 
2550 	return genpd;
2551 }
2552 
2553 /**
2554  * of_genpd_add_device() - Add a device to an I/O PM domain
2555  * @genpdspec: OF phandle args to use for look-up PM domain
2556  * @dev: Device to be added.
2557  *
2558  * Looks up an I/O PM domain based upon the phandle args provided and adds
2559  * the device to the PM domain. Returns a negative error code on failure.
2560  */
2561 int of_genpd_add_device(const struct of_phandle_args *genpdspec, struct device *dev)
2562 {
2563 	struct generic_pm_domain *genpd;
2564 	int ret;
2565 
2566 	if (!dev)
2567 		return -EINVAL;
2568 
2569 	mutex_lock(&gpd_list_lock);
2570 
2571 	genpd = genpd_get_from_provider(genpdspec);
2572 	if (IS_ERR(genpd)) {
2573 		ret = PTR_ERR(genpd);
2574 		goto out;
2575 	}
2576 
2577 	ret = genpd_add_device(genpd, dev, dev);
2578 
2579 out:
2580 	mutex_unlock(&gpd_list_lock);
2581 
2582 	return ret;
2583 }
2584 EXPORT_SYMBOL_GPL(of_genpd_add_device);
2585 
2586 /**
2587  * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2588  * @parent_spec: OF phandle args to use for parent PM domain look-up
2589  * @subdomain_spec: OF phandle args to use for subdomain look-up
2590  *
2591  * Looks up a parent PM domain and subdomain based upon the phandle args
2592  * provided and adds the subdomain to the parent PM domain. Returns a
2593  * negative error code on failure.
2594  */
2595 int of_genpd_add_subdomain(const struct of_phandle_args *parent_spec,
2596 			   const struct of_phandle_args *subdomain_spec)
2597 {
2598 	struct generic_pm_domain *parent, *subdomain;
2599 	int ret;
2600 
2601 	mutex_lock(&gpd_list_lock);
2602 
2603 	parent = genpd_get_from_provider(parent_spec);
2604 	if (IS_ERR(parent)) {
2605 		ret = PTR_ERR(parent);
2606 		goto out;
2607 	}
2608 
2609 	subdomain = genpd_get_from_provider(subdomain_spec);
2610 	if (IS_ERR(subdomain)) {
2611 		ret = PTR_ERR(subdomain);
2612 		goto out;
2613 	}
2614 
2615 	ret = genpd_add_subdomain(parent, subdomain);
2616 
2617 out:
2618 	mutex_unlock(&gpd_list_lock);
2619 
2620 	return ret == -ENOENT ? -EPROBE_DEFER : ret;
2621 }
2622 EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
2623 
2624 /**
2625  * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
2626  * @parent_spec: OF phandle args to use for parent PM domain look-up
2627  * @subdomain_spec: OF phandle args to use for subdomain look-up
2628  *
2629  * Looks up a parent PM domain and subdomain based upon the phandle args
2630  * provided and removes the subdomain from the parent PM domain. Returns a
2631  * negative error code on failure.
2632  */
2633 int of_genpd_remove_subdomain(const struct of_phandle_args *parent_spec,
2634 			      const struct of_phandle_args *subdomain_spec)
2635 {
2636 	struct generic_pm_domain *parent, *subdomain;
2637 	int ret;
2638 
2639 	mutex_lock(&gpd_list_lock);
2640 
2641 	parent = genpd_get_from_provider(parent_spec);
2642 	if (IS_ERR(parent)) {
2643 		ret = PTR_ERR(parent);
2644 		goto out;
2645 	}
2646 
2647 	subdomain = genpd_get_from_provider(subdomain_spec);
2648 	if (IS_ERR(subdomain)) {
2649 		ret = PTR_ERR(subdomain);
2650 		goto out;
2651 	}
2652 
2653 	ret = pm_genpd_remove_subdomain(parent, subdomain);
2654 
2655 out:
2656 	mutex_unlock(&gpd_list_lock);
2657 
2658 	return ret;
2659 }
2660 EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
2661 
2662 /**
2663  * of_genpd_remove_last - Remove the last PM domain registered for a provider
2664  * @np: Pointer to device node associated with provider
2665  *
2666  * Find the last PM domain that was added by a particular provider and
2667  * remove this PM domain from the list of PM domains. The provider is
2668  * identified by the device node that is passed. The PM domain will only
2669  * be removed if the provider associated with the domain has been
2670  * removed.
2671  *
2672  * Returns a valid pointer to struct generic_pm_domain on success or
2673  * ERR_PTR() on failure.
2674  */
2675 struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
2676 {
2677 	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
2678 	int ret;
2679 
2680 	if (IS_ERR_OR_NULL(np))
2681 		return ERR_PTR(-EINVAL);
2682 
2683 	mutex_lock(&gpd_list_lock);
2684 	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
2685 		if (gpd->provider == &np->fwnode) {
2686 			ret = genpd_remove(gpd);
2687 			genpd = ret ? ERR_PTR(ret) : gpd;
2688 			break;
2689 		}
2690 	}
2691 	mutex_unlock(&gpd_list_lock);
2692 
2693 	return genpd;
2694 }
2695 EXPORT_SYMBOL_GPL(of_genpd_remove_last);
2696 
2697 static void genpd_release_dev(struct device *dev)
2698 {
2699 	of_node_put(dev->of_node);
2700 	kfree(dev);
2701 }
2702 
2703 static const struct bus_type genpd_bus_type = {
2704 	.name		= "genpd",
2705 };
2706 
2707 /**
2708  * genpd_dev_pm_detach - Detach a device from its PM domain.
2709  * @dev: Device to detach.
2710  * @power_off: Currently not used
2711  *
2712  * Try to locate a corresponding generic PM domain, which the device was
2713  * attached to previously. If such is found, the device is detached from it.
2714  */
2715 static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2716 {
2717 	struct generic_pm_domain *pd;
2718 	unsigned int i;
2719 	int ret = 0;
2720 
2721 	pd = dev_to_genpd(dev);
2722 	if (IS_ERR(pd))
2723 		return;
2724 
2725 	dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2726 
2727 	/* Drop the default performance state */
2728 	if (dev_gpd_data(dev)->default_pstate) {
2729 		dev_pm_genpd_set_performance_state(dev, 0);
2730 		dev_gpd_data(dev)->default_pstate = 0;
2731 	}
2732 
2733 	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2734 		ret = genpd_remove_device(pd, dev);
2735 		if (ret != -EAGAIN)
2736 			break;
2737 
2738 		mdelay(i);
2739 		cond_resched();
2740 	}
2741 
2742 	if (ret < 0) {
2743 		dev_err(dev, "failed to remove from PM domain %s: %d",
2744 			pd->name, ret);
2745 		return;
2746 	}
2747 
2748 	/* Check if PM domain can be powered off after removing this device. */
2749 	genpd_queue_power_off_work(pd);
2750 
2751 	/* Unregister the device if it was created by genpd. */
2752 	if (dev->bus == &genpd_bus_type)
2753 		device_unregister(dev);
2754 }
2755 
2756 static void genpd_dev_pm_sync(struct device *dev)
2757 {
2758 	struct generic_pm_domain *pd;
2759 
2760 	pd = dev_to_genpd(dev);
2761 	if (IS_ERR(pd))
2762 		return;
2763 
2764 	genpd_queue_power_off_work(pd);
2765 }
2766 
2767 static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
2768 				 unsigned int index, bool power_on)
2769 {
2770 	struct of_phandle_args pd_args;
2771 	struct generic_pm_domain *pd;
2772 	int pstate;
2773 	int ret;
2774 
2775 	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
2776 				"#power-domain-cells", index, &pd_args);
2777 	if (ret < 0)
2778 		return ret;
2779 
2780 	mutex_lock(&gpd_list_lock);
2781 	pd = genpd_get_from_provider(&pd_args);
2782 	of_node_put(pd_args.np);
2783 	if (IS_ERR(pd)) {
2784 		mutex_unlock(&gpd_list_lock);
2785 		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2786 			__func__, PTR_ERR(pd));
2787 		return driver_deferred_probe_check_state(base_dev);
2788 	}
2789 
2790 	dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2791 
2792 	ret = genpd_add_device(pd, dev, base_dev);
2793 	mutex_unlock(&gpd_list_lock);
2794 
2795 	if (ret < 0)
2796 		return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name);
2797 
2798 	dev->pm_domain->detach = genpd_dev_pm_detach;
2799 	dev->pm_domain->sync = genpd_dev_pm_sync;
2800 
2801 	/* Set the default performance state */
2802 	pstate = of_get_required_opp_performance_state(dev->of_node, index);
2803 	if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) {
2804 		ret = pstate;
2805 		goto err;
2806 	} else if (pstate > 0) {
2807 		ret = dev_pm_genpd_set_performance_state(dev, pstate);
2808 		if (ret)
2809 			goto err;
2810 		dev_gpd_data(dev)->default_pstate = pstate;
2811 	}
2812 
2813 	if (power_on) {
2814 		genpd_lock(pd);
2815 		ret = genpd_power_on(pd, 0);
2816 		genpd_unlock(pd);
2817 	}
2818 
2819 	if (ret) {
2820 		/* Drop the default performance state */
2821 		if (dev_gpd_data(dev)->default_pstate) {
2822 			dev_pm_genpd_set_performance_state(dev, 0);
2823 			dev_gpd_data(dev)->default_pstate = 0;
2824 		}
2825 
2826 		genpd_remove_device(pd, dev);
2827 		return -EPROBE_DEFER;
2828 	}
2829 
2830 	return 1;
2831 
2832 err:
2833 	dev_err(dev, "failed to set required performance state for power-domain %s: %d\n",
2834 		pd->name, ret);
2835 	genpd_remove_device(pd, dev);
2836 	return ret;
2837 }
2838 
2839 /**
2840  * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
2841  * @dev: Device to attach.
2842  *
2843  * Parse device's OF node to find a PM domain specifier. If such is found,
2844  * attaches the device to the retrieved pm_domain ops.
2845  *
2846  * Returns 1 on a successfully attached PM domain, 0 when the device does not
2847  * need a PM domain or when multiple power-domains exist for it, else a
2848  * negative error code. Note that if a power-domain exists for the device, but
2849  * it cannot be found or turned on, then -EPROBE_DEFER is returned to ensure
2850  * that the device is not probed and is retried later.
2851  */
2852 int genpd_dev_pm_attach(struct device *dev)
2853 {
2854 	if (!dev->of_node)
2855 		return 0;
2856 
2857 	/*
2858 	 * Devices with multiple PM domains must be attached separately, as we
2859 	 * can only attach one PM domain per device.
2860 	 */
2861 	if (of_count_phandle_with_args(dev->of_node, "power-domains",
2862 				       "#power-domain-cells") != 1)
2863 		return 0;
2864 
2865 	return __genpd_dev_pm_attach(dev, dev, 0, true);
2866 }
2867 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
2868 
2869 /**
2870  * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
2871  * @dev: The device used to lookup the PM domain.
2872  * @index: The index of the PM domain.
2873  *
2874  * Parse device's OF node to find a PM domain specifier at the provided @index.
2875  * If such is found, creates a virtual device and attaches it to the retrieved
2876  * pm_domain ops. To deal with detaching of the virtual device, the ->detach()
2877  * callback in the struct dev_pm_domain is assigned to genpd_dev_pm_detach().
2878  *
2879  * Returns the created virtual device on a successfully attached PM domain,
2880  * NULL when the device does not need a PM domain, else an ERR_PTR() in case
2881  * of failures. If a power-domain exists for the device, but cannot be found
2882  * or turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the
2883  * device is not probed and is retried later.
2884  */
2885 struct device *genpd_dev_pm_attach_by_id(struct device *dev,
2886 					 unsigned int index)
2887 {
2888 	struct device *virt_dev;
2889 	int num_domains;
2890 	int ret;
2891 
2892 	if (!dev->of_node)
2893 		return NULL;
2894 
2895 	/* Verify that the index is within a valid range. */
2896 	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
2897 						 "#power-domain-cells");
2898 	if (index >= num_domains)
2899 		return NULL;
2900 
2901 	/* Allocate and register device on the genpd bus. */
2902 	virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
2903 	if (!virt_dev)
2904 		return ERR_PTR(-ENOMEM);
2905 
2906 	dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
2907 	virt_dev->bus = &genpd_bus_type;
2908 	virt_dev->release = genpd_release_dev;
2909 	virt_dev->of_node = of_node_get(dev->of_node);
2910 
2911 	ret = device_register(virt_dev);
2912 	if (ret) {
2913 		put_device(virt_dev);
2914 		return ERR_PTR(ret);
2915 	}
2916 
2917 	/* Try to attach the device to the PM domain at the specified index. */
2918 	ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
2919 	if (ret < 1) {
2920 		device_unregister(virt_dev);
2921 		return ret ? ERR_PTR(ret) : NULL;
2922 	}
2923 
2924 	pm_runtime_enable(virt_dev);
2925 	genpd_queue_power_off_work(dev_to_genpd(virt_dev));
2926 
2927 	return virt_dev;
2928 }
2929 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
2930 
2931 /**
2932  * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
2933  * @dev: The device used to lookup the PM domain.
2934  * @name: The name of the PM domain.
2935  *
2936  * Parse device's OF node to find a PM domain specifier using the
2937  * power-domain-names DT property. For further description see
2938  * genpd_dev_pm_attach_by_id().
2939  */
2940 struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
2941 {
2942 	int index;
2943 
2944 	if (!dev->of_node)
2945 		return NULL;
2946 
2947 	index = of_property_match_string(dev->of_node, "power-domain-names",
2948 					 name);
2949 	if (index < 0)
2950 		return NULL;
2951 
2952 	return genpd_dev_pm_attach_by_id(dev, index);
2953 }
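
/*
 * Example (illustrative sketch only): a consumer device with multiple power
 * domains attaching one of them by name. Consumers normally reach this helper
 * through dev_pm_domain_attach_by_name(); the "perf" domain name is an
 * assumption for this sketch.
 *
 *	struct device *perf_dev;
 *
 *	perf_dev = dev_pm_domain_attach_by_name(dev, "perf");
 *	if (IS_ERR(perf_dev))
 *		return PTR_ERR(perf_dev);
 *	if (perf_dev)
 *		device_link_add(dev, perf_dev,
 *				DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
 */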
2954 
2955 static const struct of_device_id idle_state_match[] = {
2956 	{ .compatible = "domain-idle-state", },
2957 	{ }
2958 };
2959 
2960 static int genpd_parse_state(struct genpd_power_state *genpd_state,
2961 				    struct device_node *state_node)
2962 {
2963 	int err;
2964 	u32 residency;
2965 	u32 entry_latency, exit_latency;
2966 
2967 	err = of_property_read_u32(state_node, "entry-latency-us",
2968 						&entry_latency);
2969 	if (err) {
2970 		pr_debug(" * %pOF missing entry-latency-us property\n",
2971 			 state_node);
2972 		return -EINVAL;
2973 	}
2974 
2975 	err = of_property_read_u32(state_node, "exit-latency-us",
2976 						&exit_latency);
2977 	if (err) {
2978 		pr_debug(" * %pOF missing exit-latency-us property\n",
2979 			 state_node);
2980 		return -EINVAL;
2981 	}
2982 
2983 	err = of_property_read_u32(state_node, "min-residency-us", &residency);
2984 	if (!err)
2985 		genpd_state->residency_ns = 1000LL * residency;
2986 
2987 	genpd_state->power_on_latency_ns = 1000LL * exit_latency;
2988 	genpd_state->power_off_latency_ns = 1000LL * entry_latency;
2989 	genpd_state->fwnode = &state_node->fwnode;
2990 
2991 	return 0;
2992 }
2993 
2994 static int genpd_iterate_idle_states(struct device_node *dn,
2995 				     struct genpd_power_state *states)
2996 {
2997 	int ret;
2998 	struct of_phandle_iterator it;
2999 	struct device_node *np;
3000 	int i = 0;
3001 
3002 	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
3003 	if (ret <= 0)
3004 		return ret == -ENOENT ? 0 : ret;
3005 
3006 	/* Loop over the phandles until all the requested entries are found */
3007 	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
3008 		np = it.node;
3009 		if (!of_match_node(idle_state_match, np))
3010 			continue;
3011 
3012 		if (!of_device_is_available(np))
3013 			continue;
3014 
3015 		if (states) {
3016 			ret = genpd_parse_state(&states[i], np);
3017 			if (ret) {
3018 				pr_err("Parsing idle state node %pOF failed with err %d\n",
3019 				       np, ret);
3020 				of_node_put(np);
3021 				return ret;
3022 			}
3023 		}
3024 		i++;
3025 	}
3026 
3027 	return i;
3028 }
3029 
3030 /**
3031  * of_genpd_parse_idle_states: Return array of idle states for the genpd.
3032  *
3033  * @dn: The genpd device node
3034  * @states: The pointer to which the state array will be saved.
3035  * @n: The count of elements in the array returned from this function.
3036  *
3037  * Returns the device states parsed from the OF node. The memory for the states
3038  * is allocated by this function and it is the responsibility of the caller to
3039  * free it after use. Returns 0 on success, including when zero compatible
3040  * domain idle states are found; on errors, a negative error code is returned.
3041  */
3042 int of_genpd_parse_idle_states(struct device_node *dn,
3043 			struct genpd_power_state **states, int *n)
3044 {
3045 	struct genpd_power_state *st;
3046 	int ret;
3047 
3048 	ret = genpd_iterate_idle_states(dn, NULL);
3049 	if (ret < 0)
3050 		return ret;
3051 
3052 	if (!ret) {
3053 		*states = NULL;
3054 		*n = 0;
3055 		return 0;
3056 	}
3057 
3058 	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
3059 	if (!st)
3060 		return -ENOMEM;
3061 
3062 	ret = genpd_iterate_idle_states(dn, st);
3063 	if (ret <= 0) {
3064 		kfree(st);
3065 		return ret < 0 ? ret : -EINVAL;
3066 	}
3067 
3068 	*states = st;
3069 	*n = ret;
3070 
3071 	return 0;
3072 }
3073 EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
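
/*
 * Example (illustrative sketch only): a provider filling in its domain's idle
 * states from DT before calling pm_genpd_init(). foo_pd and np are
 * assumptions for this sketch.
 *
 *	struct genpd_power_state *states;
 *	int nr_states, ret;
 *
 *	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
 *	if (ret)
 *		return ret;
 *
 *	foo_pd.states = states;
 *	foo_pd.state_count = nr_states;
 */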
3074 
3075 static int __init genpd_bus_init(void)
3076 {
3077 	return bus_register(&genpd_bus_type);
3078 }
3079 core_initcall(genpd_bus_init);
3080 
3081 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
3082 
3083 
3084 /***        debugfs support        ***/
3085 
3086 #ifdef CONFIG_DEBUG_FS
3087 /*
3088  * TODO: This function is a slightly modified version of rtpm_status_show
3089  * from sysfs.c, so generalize it.
3090  */
3091 static void rtpm_status_str(struct seq_file *s, struct device *dev)
3092 {
3093 	static const char * const status_lookup[] = {
3094 		[RPM_ACTIVE] = "active",
3095 		[RPM_RESUMING] = "resuming",
3096 		[RPM_SUSPENDED] = "suspended",
3097 		[RPM_SUSPENDING] = "suspending"
3098 	};
3099 	const char *p = "";
3100 
3101 	if (dev->power.runtime_error)
3102 		p = "error";
3103 	else if (dev->power.disable_depth)
3104 		p = "unsupported";
3105 	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
3106 		p = status_lookup[dev->power.runtime_status];
3107 	else
3108 		WARN_ON(1);
3109 
3110 	seq_printf(s, "%-25s  ", p);
3111 }
3112 
3113 static void perf_status_str(struct seq_file *s, struct device *dev)
3114 {
3115 	struct generic_pm_domain_data *gpd_data;
3116 
3117 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
3118 	seq_put_decimal_ull(s, "", gpd_data->performance_state);
3119 }
3120 
3121 static int genpd_summary_one(struct seq_file *s,
3122 			struct generic_pm_domain *genpd)
3123 {
3124 	static const char * const status_lookup[] = {
3125 		[GENPD_STATE_ON] = "on",
3126 		[GENPD_STATE_OFF] = "off"
3127 	};
3128 	struct pm_domain_data *pm_data;
3129 	const char *kobj_path;
3130 	struct gpd_link *link;
3131 	char state[16];
3132 	int ret;
3133 
3134 	ret = genpd_lock_interruptible(genpd);
3135 	if (ret)
3136 		return -ERESTARTSYS;
3137 
3138 	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
3139 		goto exit;
3140 	if (!genpd_status_on(genpd))
3141 		snprintf(state, sizeof(state), "%s-%u",
3142 			 status_lookup[genpd->status], genpd->state_idx);
3143 	else
3144 		snprintf(state, sizeof(state), "%s",
3145 			 status_lookup[genpd->status]);
3146 	seq_printf(s, "%-30s  %-50s %u", genpd->name, state, genpd->performance_state);
3147 
3148 	/*
3149 	 * Modifications on the list require holding locks on both
3150 	 * parent and child, so we are safe.
3151 	 * Also genpd->name is immutable.
3152 	 */
3153 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
3154 		if (list_is_first(&link->parent_node, &genpd->parent_links))
3155 			seq_printf(s, "\n%48s", " ");
3156 		seq_printf(s, "%s", link->child->name);
3157 		if (!list_is_last(&link->parent_node, &genpd->parent_links))
3158 			seq_puts(s, ", ");
3159 	}
3160 
3161 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3162 		kobj_path = kobject_get_path(&pm_data->dev->kobj,
3163 				genpd_is_irq_safe(genpd) ?
3164 				GFP_ATOMIC : GFP_KERNEL);
3165 		if (kobj_path == NULL)
3166 			continue;
3167 
3168 		seq_printf(s, "\n    %-50s  ", kobj_path);
3169 		rtpm_status_str(s, pm_data->dev);
3170 		perf_status_str(s, pm_data->dev);
3171 		kfree(kobj_path);
3172 	}
3173 
3174 	seq_puts(s, "\n");
3175 exit:
3176 	genpd_unlock(genpd);
3177 
3178 	return 0;
3179 }
3180 
3181 static int summary_show(struct seq_file *s, void *data)
3182 {
3183 	struct generic_pm_domain *genpd;
3184 	int ret = 0;
3185 
3186 	seq_puts(s, "domain                          status          children                           performance\n");
3187 	seq_puts(s, "    /device                                             runtime status\n");
3188 	seq_puts(s, "----------------------------------------------------------------------------------------------\n");
3189 
3190 	ret = mutex_lock_interruptible(&gpd_list_lock);
3191 	if (ret)
3192 		return -ERESTARTSYS;
3193 
3194 	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
3195 		ret = genpd_summary_one(s, genpd);
3196 		if (ret)
3197 			break;
3198 	}
3199 	mutex_unlock(&gpd_list_lock);
3200 
3201 	return ret;
3202 }
3203 
3204 static int status_show(struct seq_file *s, void *data)
3205 {
3206 	static const char * const status_lookup[] = {
3207 		[GENPD_STATE_ON] = "on",
3208 		[GENPD_STATE_OFF] = "off"
3209 	};
3210 
3211 	struct generic_pm_domain *genpd = s->private;
3212 	int ret = 0;
3213 
3214 	ret = genpd_lock_interruptible(genpd);
3215 	if (ret)
3216 		return -ERESTARTSYS;
3217 
3218 	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
3219 		goto exit;
3220 
3221 	if (genpd->status == GENPD_STATE_OFF)
3222 		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
3223 			genpd->state_idx);
3224 	else
3225 		seq_printf(s, "%s\n", status_lookup[genpd->status]);
3226 exit:
3227 	genpd_unlock(genpd);
3228 	return ret;
3229 }
3230 
3231 static int sub_domains_show(struct seq_file *s, void *data)
3232 {
3233 	struct generic_pm_domain *genpd = s->private;
3234 	struct gpd_link *link;
3235 	int ret = 0;
3236 
3237 	ret = genpd_lock_interruptible(genpd);
3238 	if (ret)
3239 		return -ERESTARTSYS;
3240 
3241 	list_for_each_entry(link, &genpd->parent_links, parent_node)
3242 		seq_printf(s, "%s\n", link->child->name);
3243 
3244 	genpd_unlock(genpd);
3245 	return ret;
3246 }
3247 
3248 static int idle_states_show(struct seq_file *s, void *data)
3249 {
3250 	struct generic_pm_domain *genpd = s->private;
3251 	u64 now, delta, idle_time = 0;
3252 	unsigned int i;
3253 	int ret = 0;
3254 
3255 	ret = genpd_lock_interruptible(genpd);
3256 	if (ret)
3257 		return -ERESTARTSYS;
3258 
3259 	seq_puts(s, "State          Time Spent(ms) Usage          Rejected\n");
3260 
3261 	for (i = 0; i < genpd->state_count; i++) {
3262 		idle_time += genpd->states[i].idle_time;
3263 
3264 		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3265 			now = ktime_get_mono_fast_ns();
3266 			if (now > genpd->accounting_time) {
3267 				delta = now - genpd->accounting_time;
3268 				idle_time += delta;
3269 			}
3270 		}
3271 
3272 		do_div(idle_time, NSEC_PER_MSEC);
3273 		seq_printf(s, "S%-13i %-14llu %-14llu %llu\n", i, idle_time,
3274 			   genpd->states[i].usage, genpd->states[i].rejected);
3275 	}
3276 
3277 	genpd_unlock(genpd);
3278 	return ret;
3279 }
3280 
3281 static int active_time_show(struct seq_file *s, void *data)
3282 {
3283 	struct generic_pm_domain *genpd = s->private;
3284 	u64 now, on_time, delta = 0;
3285 	int ret = 0;
3286 
3287 	ret = genpd_lock_interruptible(genpd);
3288 	if (ret)
3289 		return -ERESTARTSYS;
3290 
3291 	if (genpd->status == GENPD_STATE_ON) {
3292 		now = ktime_get_mono_fast_ns();
3293 		if (now > genpd->accounting_time)
3294 			delta = now - genpd->accounting_time;
3295 	}
3296 
3297 	on_time = genpd->on_time + delta;
3298 	do_div(on_time, NSEC_PER_MSEC);
3299 	seq_printf(s, "%llu ms\n", on_time);
3300 
3301 	genpd_unlock(genpd);
3302 	return ret;
3303 }
3304 
3305 static int total_idle_time_show(struct seq_file *s, void *data)
3306 {
3307 	struct generic_pm_domain *genpd = s->private;
3308 	u64 now, delta, total = 0;
3309 	unsigned int i;
3310 	int ret = 0;
3311 
3312 	ret = genpd_lock_interruptible(genpd);
3313 	if (ret)
3314 		return -ERESTARTSYS;
3315 
3316 	for (i = 0; i < genpd->state_count; i++) {
3317 		total += genpd->states[i].idle_time;
3318 
3319 		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3320 			now = ktime_get_mono_fast_ns();
3321 			if (now > genpd->accounting_time) {
3322 				delta = now - genpd->accounting_time;
3323 				total += delta;
3324 			}
3325 		}
3326 	}
3327 
3328 	do_div(total, NSEC_PER_MSEC);
3329 	seq_printf(s, "%llu ms\n", total);
3330 
3331 	genpd_unlock(genpd);
3332 	return ret;
3333 }
3334 
3335 
3336 static int devices_show(struct seq_file *s, void *data)
3337 {
3338 	struct generic_pm_domain *genpd = s->private;
3339 	struct pm_domain_data *pm_data;
3340 	const char *kobj_path;
3341 	int ret = 0;
3342 
3343 	ret = genpd_lock_interruptible(genpd);
3344 	if (ret)
3345 		return -ERESTARTSYS;
3346 
3347 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3348 		kobj_path = kobject_get_path(&pm_data->dev->kobj,
3349 				genpd_is_irq_safe(genpd) ?
3350 				GFP_ATOMIC : GFP_KERNEL);
3351 		if (kobj_path == NULL)
3352 			continue;
3353 
3354 		seq_printf(s, "%s\n", kobj_path);
3355 		kfree(kobj_path);
3356 	}
3357 
3358 	genpd_unlock(genpd);
3359 	return ret;
3360 }
3361 
3362 static int perf_state_show(struct seq_file *s, void *data)
3363 {
3364 	struct generic_pm_domain *genpd = s->private;
3365 
3366 	if (genpd_lock_interruptible(genpd))
3367 		return -ERESTARTSYS;
3368 
3369 	seq_printf(s, "%u\n", genpd->performance_state);
3370 
3371 	genpd_unlock(genpd);
3372 	return 0;
3373 }
3374 
3375 DEFINE_SHOW_ATTRIBUTE(summary);
3376 DEFINE_SHOW_ATTRIBUTE(status);
3377 DEFINE_SHOW_ATTRIBUTE(sub_domains);
3378 DEFINE_SHOW_ATTRIBUTE(idle_states);
3379 DEFINE_SHOW_ATTRIBUTE(active_time);
3380 DEFINE_SHOW_ATTRIBUTE(total_idle_time);
3381 DEFINE_SHOW_ATTRIBUTE(devices);
3382 DEFINE_SHOW_ATTRIBUTE(perf_state);
3383 
3384 static void genpd_debug_add(struct generic_pm_domain *genpd)
3385 {
3386 	struct dentry *d;
3387 
3388 	if (!genpd_debugfs_dir)
3389 		return;
3390 
3391 	d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
3392 
3393 	debugfs_create_file("current_state", 0444,
3394 			    d, genpd, &status_fops);
3395 	debugfs_create_file("sub_domains", 0444,
3396 			    d, genpd, &sub_domains_fops);
3397 	debugfs_create_file("idle_states", 0444,
3398 			    d, genpd, &idle_states_fops);
3399 	debugfs_create_file("active_time", 0444,
3400 			    d, genpd, &active_time_fops);
3401 	debugfs_create_file("total_idle_time", 0444,
3402 			    d, genpd, &total_idle_time_fops);
3403 	debugfs_create_file("devices", 0444,
3404 			    d, genpd, &devices_fops);
3405 	if (genpd->set_performance_state)
3406 		debugfs_create_file("perf_state", 0444,
3407 				    d, genpd, &perf_state_fops);
3408 }
3409 
3410 static int __init genpd_debug_init(void)
3411 {
3412 	struct generic_pm_domain *genpd;
3413 
3414 	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
3415 
3416 	debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
3417 			    NULL, &summary_fops);
3418 
3419 	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
3420 		genpd_debug_add(genpd);
3421 
3422 	return 0;
3423 }
3424 late_initcall(genpd_debug_init);
3425 
3426 static void __exit genpd_debug_exit(void)
3427 {
3428 	debugfs_remove_recursive(genpd_debugfs_dir);
3429 }
3430 __exitcall(genpd_debug_exit);
3431 #endif /* CONFIG_DEBUG_FS */
3432