1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/pmdomain/core.c - Common code related to device power domains.
4  *
5  * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
6  */
7 #define pr_fmt(fmt) "PM: " fmt
8 
9 #include <linux/delay.h>
10 #include <linux/idr.h>
11 #include <linux/kernel.h>
12 #include <linux/io.h>
13 #include <linux/platform_device.h>
14 #include <linux/pm_opp.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/pm_domain.h>
17 #include <linux/pm_qos.h>
18 #include <linux/pm_clock.h>
19 #include <linux/slab.h>
20 #include <linux/err.h>
21 #include <linux/sched.h>
22 #include <linux/suspend.h>
23 #include <linux/export.h>
24 #include <linux/cpu.h>
25 #include <linux/debugfs.h>
26 
27 /* Provides a unique ID for each genpd device */
28 static DEFINE_IDA(genpd_ida);
29 
30 /* The bus for genpd_providers. */
31 static const struct bus_type genpd_provider_bus_type = {
32 	.name		= "genpd_provider",
33 };
34 
35 /* The parent for genpd_provider devices. */
36 static struct device genpd_provider_bus = {
37 	.init_name = "genpd_provider",
38 };
39 
40 #define GENPD_RETRY_MAX_MS	250		/* Approximate */
41 
42 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
43 ({								\
44 	type (*__routine)(struct device *__d); 			\
45 	type __ret = (type)0;					\
46 								\
47 	__routine = genpd->dev_ops.callback; 			\
48 	if (__routine) {					\
49 		__ret = __routine(dev); 			\
50 	}							\
51 	__ret;							\
52 })
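/*
 * Editorial sketch: the statement-expression above dispatches an optional
 * per-device callback. For example, GENPD_DEV_CALLBACK(genpd, int, stop, dev)
 * expands roughly to:
 *
 *	({
 *		int (*__routine)(struct device *__d) = genpd->dev_ops.stop;
 *		int __ret = 0;
 *
 *		if (__routine)
 *			__ret = __routine(dev);
 *		__ret;
 *	})
 */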
53 
54 static LIST_HEAD(gpd_list);
55 static DEFINE_MUTEX(gpd_list_lock);
56 
57 struct genpd_lock_ops {
58 	void (*lock)(struct generic_pm_domain *genpd);
59 	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
60 	int (*lock_interruptible)(struct generic_pm_domain *genpd);
61 	void (*unlock)(struct generic_pm_domain *genpd);
62 };
63 
64 static void genpd_lock_mtx(struct generic_pm_domain *genpd)
65 {
66 	mutex_lock(&genpd->mlock);
67 }
68 
69 static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
70 					int depth)
71 {
72 	mutex_lock_nested(&genpd->mlock, depth);
73 }
74 
75 static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
76 {
77 	return mutex_lock_interruptible(&genpd->mlock);
78 }
79 
80 static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
81 {
82 	mutex_unlock(&genpd->mlock);
83 }
84 
85 static const struct genpd_lock_ops genpd_mtx_ops = {
86 	.lock = genpd_lock_mtx,
87 	.lock_nested = genpd_lock_nested_mtx,
88 	.lock_interruptible = genpd_lock_interruptible_mtx,
89 	.unlock = genpd_unlock_mtx,
90 };
91 
92 static void genpd_lock_spin(struct generic_pm_domain *genpd)
93 	__acquires(&genpd->slock)
94 {
95 	unsigned long flags;
96 
97 	spin_lock_irqsave(&genpd->slock, flags);
98 	genpd->lock_flags = flags;
99 }
100 
101 static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
102 					int depth)
103 	__acquires(&genpd->slock)
104 {
105 	unsigned long flags;
106 
107 	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
108 	genpd->lock_flags = flags;
109 }
110 
111 static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
112 	__acquires(&genpd->slock)
113 {
114 	unsigned long flags;
115 
116 	spin_lock_irqsave(&genpd->slock, flags);
117 	genpd->lock_flags = flags;
118 	return 0;
119 }
120 
121 static void genpd_unlock_spin(struct generic_pm_domain *genpd)
122 	__releases(&genpd->slock)
123 {
124 	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
125 }
126 
127 static const struct genpd_lock_ops genpd_spin_ops = {
128 	.lock = genpd_lock_spin,
129 	.lock_nested = genpd_lock_nested_spin,
130 	.lock_interruptible = genpd_lock_interruptible_spin,
131 	.unlock = genpd_unlock_spin,
132 };
133 
134 static void genpd_lock_raw_spin(struct generic_pm_domain *genpd)
135 	__acquires(&genpd->raw_slock)
136 {
137 	unsigned long flags;
138 
139 	raw_spin_lock_irqsave(&genpd->raw_slock, flags);
140 	genpd->raw_lock_flags = flags;
141 }
142 
143 static void genpd_lock_nested_raw_spin(struct generic_pm_domain *genpd,
144 					int depth)
145 	__acquires(&genpd->raw_slock)
146 {
147 	unsigned long flags;
148 
149 	raw_spin_lock_irqsave_nested(&genpd->raw_slock, flags, depth);
150 	genpd->raw_lock_flags = flags;
151 }
152 
153 static int genpd_lock_interruptible_raw_spin(struct generic_pm_domain *genpd)
154 	__acquires(&genpd->raw_slock)
155 {
156 	unsigned long flags;
157 
158 	raw_spin_lock_irqsave(&genpd->raw_slock, flags);
159 	genpd->raw_lock_flags = flags;
160 	return 0;
161 }
162 
163 static void genpd_unlock_raw_spin(struct generic_pm_domain *genpd)
164 	__releases(&genpd->raw_slock)
165 {
166 	raw_spin_unlock_irqrestore(&genpd->raw_slock, genpd->raw_lock_flags);
167 }
168 
169 static const struct genpd_lock_ops genpd_raw_spin_ops = {
170 	.lock = genpd_lock_raw_spin,
171 	.lock_nested = genpd_lock_nested_raw_spin,
172 	.lock_interruptible = genpd_lock_interruptible_raw_spin,
173 	.unlock = genpd_unlock_raw_spin,
174 };
175 
176 #define genpd_lock(p)			p->lock_ops->lock(p)
177 #define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
178 #define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
179 #define genpd_unlock(p)			p->lock_ops->unlock(p)
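/*
 * Editorial sketch (not from this file): which of the lock_ops above a domain
 * uses is selected from its flags when the domain is initialized elsewhere in
 * this file: raw spinlocks for GENPD_FLAG_CPU_DOMAIN, spinlocks for
 * GENPD_FLAG_IRQ_SAFE, and a mutex otherwise. A provider wanting IRQ-safe
 * callbacks would therefore declare something like ("my_pd" is hypothetical):
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name = "my_pd",
 *		.flags = GENPD_FLAG_IRQ_SAFE,
 *	};
 *
 *	pm_genpd_init(&my_pd, NULL, true);
 */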
180 
181 #define genpd_status_on(genpd)		(genpd->status == GENPD_STATE_ON)
182 #define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
183 #define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
184 #define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
185 #define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
186 #define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
187 #define genpd_is_opp_table_fw(genpd)	(genpd->flags & GENPD_FLAG_OPP_TABLE_FW)
188 #define genpd_is_dev_name_fw(genpd)	(genpd->flags & GENPD_FLAG_DEV_NAME_FW)
189 #define genpd_is_no_sync_state(genpd)	(genpd->flags & GENPD_FLAG_NO_SYNC_STATE)
190 
191 static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
192 		const struct generic_pm_domain *genpd)
193 {
194 	bool ret;
195 
196 	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
197 
198 	/*
199 	 * Warn once if an IRQ safe device is attached to a domain whose
200 	 * callbacks are allowed to sleep. This indicates a suboptimal
201 	 * configuration for PM, but it doesn't matter for an always-on domain.
202 	 */
203 	if (genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd))
204 		return ret;
205 
206 	if (ret)
207 		dev_warn_once(dev, "PM domain %s will not be powered off\n",
208 			      dev_name(&genpd->dev));
209 
210 	return ret;
211 }
212 
213 static int genpd_runtime_suspend(struct device *dev);
214 
215 /*
216  * Get the generic PM domain for a particular struct device.
217  * This validates the struct device pointer, the PM domain pointer,
218  * and checks that the PM domain pointer is a real generic PM domain.
219  * Any failure results in NULL being returned.
220  */
221 static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
222 {
223 	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
224 		return NULL;
225 
226 	/* A genpd always has its ->runtime_suspend() callback assigned. */
227 	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
228 		return pd_to_genpd(dev->pm_domain);
229 
230 	return NULL;
231 }
232 
233 /*
234  * This should only be used where we are certain that the pm_domain
235  * attached to the device is a genpd domain.
236  */
237 static struct generic_pm_domain *dev_to_genpd(struct device *dev)
238 {
239 	if (IS_ERR_OR_NULL(dev->pm_domain))
240 		return ERR_PTR(-EINVAL);
241 
242 	return pd_to_genpd(dev->pm_domain);
243 }
244 
245 struct device *dev_to_genpd_dev(struct device *dev)
246 {
247 	struct generic_pm_domain *genpd = dev_to_genpd(dev);
248 
249 	if (IS_ERR(genpd))
250 		return ERR_CAST(genpd);
251 
252 	return &genpd->dev;
253 }
254 
255 static int genpd_stop_dev(const struct generic_pm_domain *genpd,
256 			  struct device *dev)
257 {
258 	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
259 }
260 
261 static int genpd_start_dev(const struct generic_pm_domain *genpd,
262 			   struct device *dev)
263 {
264 	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
265 }
266 
267 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
268 {
269 	bool ret = false;
270 
271 	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
272 		ret = !!atomic_dec_and_test(&genpd->sd_count);
273 
274 	return ret;
275 }
276 
277 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
278 {
279 	atomic_inc(&genpd->sd_count);
280 	smp_mb__after_atomic();
281 }
282 
283 #ifdef CONFIG_DEBUG_FS
284 static struct dentry *genpd_debugfs_dir;
285 
286 static void genpd_debug_add(struct generic_pm_domain *genpd);
287 
288 static void genpd_debug_remove(struct generic_pm_domain *genpd)
289 {
290 	if (!genpd_debugfs_dir)
291 		return;
292 
293 	debugfs_lookup_and_remove(dev_name(&genpd->dev), genpd_debugfs_dir);
294 }
295 
296 static void genpd_update_accounting(struct generic_pm_domain *genpd)
297 {
298 	u64 delta, now;
299 
300 	now = ktime_get_mono_fast_ns();
301 	if (now <= genpd->accounting_time)
302 		return;
303 
304 	delta = now - genpd->accounting_time;
305 
306 	/*
307 	 * If genpd->status is on, the domain has just left the off
308 	 * state, so account the elapsed time as idle time; otherwise
309 	 * account it as on time.
310 	 */
311 	if (genpd->status == GENPD_STATE_ON)
312 		genpd->states[genpd->state_idx].idle_time += delta;
313 	else
314 		genpd->on_time += delta;
315 
316 	genpd->accounting_time = now;
317 }
318 
319 static void genpd_reflect_residency(struct generic_pm_domain *genpd)
320 {
321 	struct genpd_governor_data *gd = genpd->gd;
322 	struct genpd_power_state *state, *next_state;
323 	unsigned int state_idx;
324 	s64 sleep_ns, target_ns;
325 
326 	if (!gd || !gd->reflect_residency)
327 		return;
328 
329 	sleep_ns = ktime_to_ns(ktime_sub(ktime_get(), gd->last_enter));
330 	state_idx = genpd->state_idx;
331 	state = &genpd->states[state_idx];
332 	target_ns = state->power_off_latency_ns + state->residency_ns;
333 
334 	if (sleep_ns < target_ns) {
335 		state->above++;
336 	} else if (state_idx < (genpd->state_count - 1)) {
337 		next_state = &genpd->states[state_idx + 1];
338 		target_ns = next_state->power_off_latency_ns +
339 			next_state->residency_ns;
340 
341 		if (sleep_ns >= target_ns)
342 			state->below++;
343 	}
344 
345 	gd->reflect_residency = false;
346 }
347 #else
348 static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
349 static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
350 static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
351 static inline void genpd_reflect_residency(struct generic_pm_domain *genpd) {}
352 #endif
353 
354 static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
355 					   unsigned int state)
356 {
357 	struct generic_pm_domain_data *pd_data;
358 	struct pm_domain_data *pdd;
359 	struct gpd_link *link;
360 
361 	/* New requested state is same as Max requested state */
362 	/* New requested state is the same as the max requested state */
363 		return state;
364 
365 	/* New requested state is higher than Max requested state */
366 	/* New requested state is higher than the max requested state */
367 		return state;
368 
369 	/* Traverse all devices within the domain */
370 	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
371 		pd_data = to_gpd_data(pdd);
372 
373 		if (pd_data->performance_state > state)
374 			state = pd_data->performance_state;
375 	}
376 
377 	/*
378 	 * Traverse all sub-domains within the domain. This can be
379 	 * done without any additional locking as the link->performance_state
380 	 * field is protected by the parent genpd->lock, which is already taken.
381 	 *
382 	 * Also note that link->performance_state (subdomain's performance state
383 	 * requirement to parent domain) is different from
384 	 * link->child->performance_state (current performance state requirement
385 	 * of the devices/sub-domains of the subdomain) and so can have a
386 	 * different value.
387 	 *
388 	 * Note that we also take votes from powered-off sub-domains into account
389 	 * as the same is done for devices right now.
390 	 */
391 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
392 		if (link->performance_state > state)
393 			state = link->performance_state;
394 	}
395 
396 	return state;
397 }
398 
399 static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
400 					 struct generic_pm_domain *parent,
401 					 unsigned int pstate)
402 {
403 	if (!parent->set_performance_state)
404 		return pstate;
405 
406 	return dev_pm_opp_xlate_performance_state(genpd->opp_table,
407 						  parent->opp_table,
408 						  pstate);
409 }
410 
411 static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
412 					unsigned int state, int depth);
413 
414 static void _genpd_rollback_parent_state(struct gpd_link *link, int depth)
415 {
416 	struct generic_pm_domain *parent = link->parent;
417 	int parent_state;
418 
419 	genpd_lock_nested(parent, depth + 1);
420 
421 	parent_state = link->prev_performance_state;
422 	link->performance_state = parent_state;
423 
424 	parent_state = _genpd_reeval_performance_state(parent, parent_state);
425 	if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
426 		pr_err("%s: Failed to roll back to %d performance state\n",
427 		       parent->name, parent_state);
428 	}
429 
430 	genpd_unlock(parent);
431 }
432 
433 static int _genpd_set_parent_state(struct generic_pm_domain *genpd,
434 				   struct gpd_link *link,
435 				   unsigned int state, int depth)
436 {
437 	struct generic_pm_domain *parent = link->parent;
438 	int parent_state, ret;
439 
440 	/* Find parent's performance state */
441 	ret = genpd_xlate_performance_state(genpd, parent, state);
442 	if (unlikely(ret < 0))
443 		return ret;
444 
445 	parent_state = ret;
446 
447 	genpd_lock_nested(parent, depth + 1);
448 
449 	link->prev_performance_state = link->performance_state;
450 	link->performance_state = parent_state;
451 
452 	parent_state = _genpd_reeval_performance_state(parent, parent_state);
453 	ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
454 	if (ret)
455 		link->performance_state = link->prev_performance_state;
456 
457 	genpd_unlock(parent);
458 
459 	return ret;
460 }
461 
462 static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
463 					unsigned int state, int depth)
464 {
465 	struct gpd_link *link = NULL;
466 	int ret;
467 
468 	if (state == genpd->performance_state)
469 		return 0;
470 
471 	/* When scaling up, propagate to parents first in normal order */
472 	if (state > genpd->performance_state) {
473 		list_for_each_entry(link, &genpd->child_links, child_node) {
474 			ret = _genpd_set_parent_state(genpd, link, state, depth);
475 			if (ret)
476 				goto rollback_parents_up;
477 		}
478 	}
479 
480 	if (genpd->set_performance_state) {
481 		ret = genpd->set_performance_state(genpd, state);
482 		if (ret) {
483 			if (link)
484 				goto rollback_parents_up;
485 			return ret;
486 		}
487 	}
488 
489 	/* When scaling down, propagate to parents last in reverse order */
490 	if (state < genpd->performance_state) {
491 		list_for_each_entry_reverse(link, &genpd->child_links, child_node) {
492 			ret = _genpd_set_parent_state(genpd, link, state, depth);
493 			if (ret)
494 				goto rollback_parents_down;
495 		}
496 	}
497 
498 	genpd->performance_state = state;
499 	return 0;
500 
501 rollback_parents_up:
502 	list_for_each_entry_continue_reverse(link, &genpd->child_links, child_node)
503 		_genpd_rollback_parent_state(link, depth);
504 	return ret;
505 rollback_parents_down:
506 	list_for_each_entry_continue(link, &genpd->child_links, child_node)
507 		_genpd_rollback_parent_state(link, depth);
508 	return ret;
509 }
510 
511 static int genpd_set_performance_state(struct device *dev, unsigned int state)
512 {
513 	struct generic_pm_domain *genpd = dev_to_genpd(dev);
514 	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
515 	unsigned int prev_state;
516 	int ret;
517 
518 	prev_state = gpd_data->performance_state;
519 	if (prev_state == state)
520 		return 0;
521 
522 	gpd_data->performance_state = state;
523 	state = _genpd_reeval_performance_state(genpd, state);
524 
525 	ret = _genpd_set_performance_state(genpd, state, 0);
526 	if (ret)
527 		gpd_data->performance_state = prev_state;
528 
529 	return ret;
530 }
531 
532 static int genpd_drop_performance_state(struct device *dev)
533 {
534 	unsigned int prev_state = dev_gpd_data(dev)->performance_state;
535 
536 	if (!genpd_set_performance_state(dev, 0))
537 		return prev_state;
538 
539 	return 0;
540 }
541 
542 static void genpd_restore_performance_state(struct device *dev,
543 					    unsigned int state)
544 {
545 	if (state)
546 		genpd_set_performance_state(dev, state);
547 }
548 
549 static int genpd_dev_pm_set_performance_state(struct device *dev,
550 					      unsigned int state)
551 {
552 	struct generic_pm_domain *genpd = dev_to_genpd(dev);
553 	int ret = 0;
554 
555 	genpd_lock(genpd);
556 	if (pm_runtime_suspended(dev)) {
557 		dev_gpd_data(dev)->rpm_pstate = state;
558 	} else {
559 		ret = genpd_set_performance_state(dev, state);
560 		if (!ret)
561 			dev_gpd_data(dev)->rpm_pstate = 0;
562 	}
563 	genpd_unlock(genpd);
564 
565 	return ret;
566 }
567 
568 /**
569  * dev_pm_genpd_set_performance_state - Set performance state of device's power
570  * domain.
571  *
572  * @dev: Device for which the performance-state needs to be set.
573  * @state: Target performance state of the device. This can be set to 0 when
574  *	   the device doesn't have any performance state constraints left (and
575  *	   so the device no longer participates in determining the target
576  *	   performance state of the genpd).
577  *
578  * It is assumed that the users guarantee that the genpd wouldn't be detached
579  * while this routine is getting called.
580  *
581  * Returns 0 on success and negative error values on failures.
582  */
583 int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
584 {
585 	struct generic_pm_domain *genpd;
586 
587 	genpd = dev_to_genpd_safe(dev);
588 	if (!genpd)
589 		return -ENODEV;
590 
591 	if (WARN_ON(!dev->power.subsys_data ||
592 		     !dev->power.subsys_data->domain_data))
593 		return -EINVAL;
594 
595 	return genpd_dev_pm_set_performance_state(dev, state);
596 }
597 EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
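/*
 * Example (editorial sketch): a consumer attached to a genpd with performance
 * states could vote, and later drop its vote, like this ("my_dev" is
 * hypothetical):
 *
 *	ret = dev_pm_genpd_set_performance_state(my_dev, 2);
 *	if (ret)
 *		return ret;
 *	...
 *	dev_pm_genpd_set_performance_state(my_dev, 0);
 */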
598 
599 /**
600  * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
601  *
602  * @dev: Device to handle
603  * @next: impending interrupt/wakeup for the device
604  *
605  * Allow devices to inform of the next wakeup. It's assumed that the users
606  * guarantee that the genpd wouldn't be detached while this routine is getting
607  * called. Additionally, it's assumed that @dev isn't runtime suspended
608  * (RPM_SUSPENDED).
609  *
610  * Although devices are expected to update the next_wakeup after the end of
611  * their usecase as well, it is possible the devices themselves may not know
612  * about that, so stale @next will be ignored when powering off the domain.
613  */
614 void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
615 {
616 	struct generic_pm_domain *genpd;
617 	struct gpd_timing_data *td;
618 
619 	genpd = dev_to_genpd_safe(dev);
620 	if (!genpd)
621 		return;
622 
623 	td = to_gpd_data(dev->power.subsys_data->domain_data)->td;
624 	if (td)
625 		td->next_wakeup = next;
626 }
627 EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
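/*
 * Example (editorial sketch): a driver that knows its next interrupt fires in
 * about 2 ms could hint the governor ("my_dev" is hypothetical):
 *
 *	dev_pm_genpd_set_next_wakeup(my_dev, ktime_add_ms(ktime_get(), 2));
 */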
628 
629 /**
630  * dev_pm_genpd_get_next_hrtimer - Return the next_hrtimer for the genpd
631  * @dev: A device that is attached to the genpd.
632  *
633  * This routine should typically be called for a device, at the point of when a
634  * GENPD_NOTIFY_PRE_OFF notification has been sent for it.
635  *
636  * Returns the aggregated value of the genpd's next hrtimer or KTIME_MAX if no
637  * valid value has been set.
638  */
639 ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
640 {
641 	struct generic_pm_domain *genpd;
642 
643 	genpd = dev_to_genpd_safe(dev);
644 	if (!genpd)
645 		return KTIME_MAX;
646 
647 	if (genpd->gd)
648 		return genpd->gd->next_hrtimer;
649 
650 	return KTIME_MAX;
651 }
652 EXPORT_SYMBOL_GPL(dev_pm_genpd_get_next_hrtimer);
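/*
 * Example (editorial sketch): a GENPD_NOTIFY_PRE_OFF notifier could use this
 * to judge whether a deep idle-state is worth entering ("cpu_dev" and
 * "break_even_ns" are hypothetical):
 *
 *	ktime_t next = dev_pm_genpd_get_next_hrtimer(cpu_dev);
 *
 *	if (ktime_to_ns(ktime_sub(next, ktime_get())) < break_even_ns)
 *		return NOTIFY_BAD;
 */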
653 
654 /**
655  * dev_pm_genpd_synced_poweroff - Next power off should be synchronous
656  *
657  * @dev: A device that is attached to the genpd.
658  *
659  * Allows a consumer of the genpd to notify the provider that the next power off
660  * should be synchronous.
661  *
662  * It is assumed that the users guarantee that the genpd wouldn't be detached
663  * while this routine is getting called.
664  */
665 void dev_pm_genpd_synced_poweroff(struct device *dev)
666 {
667 	struct generic_pm_domain *genpd;
668 
669 	genpd = dev_to_genpd_safe(dev);
670 	if (!genpd)
671 		return;
672 
673 	genpd_lock(genpd);
674 	genpd->synced_poweroff = true;
675 	genpd_unlock(genpd);
676 }
677 EXPORT_SYMBOL_GPL(dev_pm_genpd_synced_poweroff);
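/*
 * Example (editorial sketch): a consumer that needs the upcoming power-off to
 * have completed before it proceeds could combine this with a synchronous
 * runtime-PM put ("my_dev" is hypothetical):
 *
 *	dev_pm_genpd_synced_poweroff(my_dev);
 *	pm_runtime_put_sync(my_dev);
 */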
678 
679 /**
680  * dev_pm_genpd_set_hwmode() - Set the HW mode for the device and its PM domain.
681  *
682  * @dev: Device for which the HW-mode should be changed.
683  * @enable: Value to set or unset the HW-mode.
684  *
685  * Some PM domains can rely on HW signals to control the power for a device. To
686  * allow a consumer driver to switch the behaviour for its device at runtime,
687  * which may be beneficial from a latency or energy point of view, this function
688  * may be called.
689  *
690  * It is assumed that the users guarantee that the genpd wouldn't be detached
691  * while this routine is getting called.
692  *
693  * Return: Returns 0 on success and negative error values on failures.
694  */
695 int dev_pm_genpd_set_hwmode(struct device *dev, bool enable)
696 {
697 	struct generic_pm_domain *genpd;
698 	int ret = 0;
699 
700 	genpd = dev_to_genpd_safe(dev);
701 	if (!genpd)
702 		return -ENODEV;
703 
704 	if (!genpd->set_hwmode_dev)
705 		return -EOPNOTSUPP;
706 
707 	genpd_lock(genpd);
708 
709 	if (dev_gpd_data(dev)->hw_mode == enable)
710 		goto out;
711 
712 	ret = genpd->set_hwmode_dev(genpd, dev, enable);
713 	if (!ret)
714 		dev_gpd_data(dev)->hw_mode = enable;
715 
716 out:
717 	genpd_unlock(genpd);
718 	return ret;
719 }
720 EXPORT_SYMBOL_GPL(dev_pm_genpd_set_hwmode);
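/*
 * Example (editorial sketch): a consumer handing power control over to HW
 * ("my_dev" is hypothetical):
 *
 *	ret = dev_pm_genpd_set_hwmode(my_dev, true);
 *	if (ret)
 *		dev_warn(my_dev, "failed to enable HW mode: %d\n", ret);
 */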
721 
722 /**
723  * dev_pm_genpd_get_hwmode() - Get the HW mode setting for the device.
724  *
725  * @dev: Device for which the current HW-mode setting should be fetched.
726  *
727  * This helper function allows consumer drivers to fetch the current HW mode
728  * setting of their device.
729  *
730  * It is assumed that the users guarantee that the genpd wouldn't be detached
731  * while this routine is getting called.
732  *
733  * Return: The HW mode setting of the device, from the SW-cached hw_mode.
734  */
735 bool dev_pm_genpd_get_hwmode(struct device *dev)
736 {
737 	return dev_gpd_data(dev)->hw_mode;
738 }
739 EXPORT_SYMBOL_GPL(dev_pm_genpd_get_hwmode);
740 
741 /**
742  * dev_pm_genpd_rpm_always_on() - Control if the PM domain can be powered off.
743  *
744  * @dev: Device for which the PM domain may need to stay on.
745  * @on: Value to set or unset for the condition.
746  *
747  * For some usecases a consumer driver requires its device to remain powered on
748  * from the PM domain perspective during runtime. This function allows the
749  * behaviour to be dynamically controlled for a device attached to a genpd.
750  *
751  * It is assumed that the users guarantee that the genpd wouldn't be detached
752  * while this routine is getting called.
753  *
754  * Return: Returns 0 on success and negative error values on failures.
755  */
756 int dev_pm_genpd_rpm_always_on(struct device *dev, bool on)
757 {
758 	struct generic_pm_domain *genpd;
759 
760 	genpd = dev_to_genpd_safe(dev);
761 	if (!genpd)
762 		return -ENODEV;
763 
764 	genpd_lock(genpd);
765 	dev_gpd_data(dev)->rpm_always_on = on;
766 	genpd_unlock(genpd);
767 
768 	return 0;
769 }
770 EXPORT_SYMBOL_GPL(dev_pm_genpd_rpm_always_on);
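/*
 * Example (editorial sketch): a driver bracketing a phase during which its
 * domain must not be powered off ("my_dev" is hypothetical):
 *
 *	ret = dev_pm_genpd_rpm_always_on(my_dev, true);
 *	if (ret)
 *		return ret;
 *	...
 *	dev_pm_genpd_rpm_always_on(my_dev, false);
 */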
771 
772 /**
773  * pm_genpd_inc_rejected() - Adjust the rejected/usage counts for an idle-state.
774  *
775  * @genpd: The PM domain the idle-state belongs to.
776  * @state_idx: The index of the idle-state that failed.
777  *
778  * In some special cases the ->power_off() callback is asynchronously powering
779  * off the PM domain, which means it may return zero to indicate success,
780  * even though the actual power-off could fail. To account for this correctly in
781  * the rejected/usage counts for the idle-state statistics, users can call this
782  * function to adjust the values.
783  *
784  * It is assumed that the users guarantee that the genpd doesn't get removed
785  * while this routine is getting called.
786  */
787 void pm_genpd_inc_rejected(struct generic_pm_domain *genpd,
788 			   unsigned int state_idx)
789 {
790 	genpd_lock(genpd);
791 	genpd->states[state_idx].rejected++;
792 	genpd->states[state_idx].usage--;
793 	genpd_unlock(genpd);
794 }
795 EXPORT_SYMBOL_GPL(pm_genpd_inc_rejected);
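/*
 * Example (editorial sketch): a provider that powers off asynchronously and
 * later learns the operation failed could correct the statistics
 * ("my_genpd" and "state_idx" are hypothetical):
 *
 *	if (power_off_failed)
 *		pm_genpd_inc_rejected(my_genpd, state_idx);
 */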
796 
797 static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
798 {
799 	unsigned int state_idx = genpd->state_idx;
800 	ktime_t time_start;
801 	s64 elapsed_ns;
802 	int ret;
803 
804 	/* Notify consumers that we are about to power on. */
805 	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
806 					     GENPD_NOTIFY_PRE_ON,
807 					     GENPD_NOTIFY_OFF, NULL);
808 	ret = notifier_to_errno(ret);
809 	if (ret)
810 		return ret;
811 
812 	if (!genpd->power_on)
813 		goto out;
814 
815 	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
816 	if (!timed) {
817 		ret = genpd->power_on(genpd);
818 		if (ret)
819 			goto err;
820 
821 		goto out;
822 	}
823 
824 	time_start = ktime_get();
825 	ret = genpd->power_on(genpd);
826 	if (ret)
827 		goto err;
828 
829 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
830 	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
831 		goto out;
832 
833 	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
834 	genpd->gd->max_off_time_changed = true;
835 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
836 		 dev_name(&genpd->dev), "on", elapsed_ns);
837 
838 out:
839 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
840 	genpd->synced_poweroff = false;
841 	return 0;
842 err:
843 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
844 				NULL);
845 	return ret;
846 }
847 
848 static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
849 {
850 	unsigned int state_idx = genpd->state_idx;
851 	ktime_t time_start;
852 	s64 elapsed_ns;
853 	int ret;
854 
855 	/* Notify consumers that we are about to power off. */
856 	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
857 					     GENPD_NOTIFY_PRE_OFF,
858 					     GENPD_NOTIFY_ON, NULL);
859 	ret = notifier_to_errno(ret);
860 	if (ret)
861 		return ret;
862 
863 	if (!genpd->power_off)
864 		goto out;
865 
866 	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
867 	if (!timed) {
868 		ret = genpd->power_off(genpd);
869 		if (ret)
870 			goto busy;
871 
872 		goto out;
873 	}
874 
875 	time_start = ktime_get();
876 	ret = genpd->power_off(genpd);
877 	if (ret)
878 		goto busy;
879 
880 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
881 	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
882 		goto out;
883 
884 	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
885 	genpd->gd->max_off_time_changed = true;
886 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
887 		 dev_name(&genpd->dev), "off", elapsed_ns);
888 
889 out:
890 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
891 				NULL);
892 	return 0;
893 busy:
894 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
895 	return ret;
896 }
897 
898 /**
899  * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
900  * @genpd: PM domain to power off.
901  *
902  * Queue up the execution of genpd_power_off() unless it's already been done
903  * before.
904  */
905 static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
906 {
907 	queue_work(pm_wq, &genpd->power_off_work);
908 }
909 
910 /**
911  * genpd_power_off - Remove power from a given PM domain.
912  * @genpd: PM domain to power down.
913  * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
914  * RPM status of the related device is in an intermediate state, not yet turned
915  * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
916  * be RPM_SUSPENDED, while it tries to power off the PM domain.
917  * @depth: nesting count for lockdep.
918  *
919  * If all of the @genpd's devices have been suspended and all of its subdomains
920  * have been powered down, remove power from @genpd.
921  */
922 static void genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
923 			    unsigned int depth)
924 {
925 	struct pm_domain_data *pdd;
926 	struct gpd_link *link;
927 	unsigned int not_suspended = 0;
928 
929 	/*
930 	 * Do not try to power off the domain in the following situations:
931 	 * The domain is already in the "power off" state.
932 	 * System suspend is in progress.
933 	 * The domain is configured as always on.
934 	 * The domain was on at boot and still needs to stay on.
935 	 * The domain has a subdomain being powered on.
936 	 */
937 	if (!genpd_status_on(genpd) || genpd->prepared_count > 0 ||
938 	    genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd) ||
939 	    genpd->stay_on || atomic_read(&genpd->sd_count) > 0)
940 		return;
941 
942 	/*
943 	 * The children must be in their deepest (powered-off) states to allow
944 	 * the parent to be powered off. Note that there's no need for
945 	 * additional locking, as powering on a child requires the parent's
946 	 * lock to be acquired first.
947 	 */
948 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
949 		struct generic_pm_domain *child = link->child;
950 		if (child->state_idx < child->state_count - 1)
951 			return;
952 	}
953 
954 	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
955 		/*
956 		 * Do not allow PM domain to be powered off, when an IRQ safe
957 		 * device is part of a non-IRQ safe domain.
958 		 */
959 		if (!pm_runtime_suspended(pdd->dev) ||
960 			irq_safe_dev_in_sleep_domain(pdd->dev, genpd))
961 			not_suspended++;
962 
963 		/* The device may need its PM domain to stay powered on. */
964 		if (to_gpd_data(pdd)->rpm_always_on)
965 			return;
966 	}
967 
968 	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
969 		return;
970 
971 	if (genpd->gov && genpd->gov->power_down_ok) {
972 		if (!genpd->gov->power_down_ok(&genpd->domain))
973 			return;
974 	}
975 
976 	/* Default to shallowest state. */
977 	if (!genpd->gov)
978 		genpd->state_idx = 0;
979 
980 	/* Don't power off, if a child domain is waiting to power on. */
981 	if (atomic_read(&genpd->sd_count) > 0)
982 		return;
983 
984 	if (_genpd_power_off(genpd, true)) {
985 		genpd->states[genpd->state_idx].rejected++;
986 		return;
987 	}
988 
989 	genpd->status = GENPD_STATE_OFF;
990 	genpd_update_accounting(genpd);
991 	genpd->states[genpd->state_idx].usage++;
992 
993 	list_for_each_entry(link, &genpd->child_links, child_node) {
994 		genpd_sd_counter_dec(link->parent);
995 		genpd_lock_nested(link->parent, depth + 1);
996 		genpd_power_off(link->parent, false, depth + 1);
997 		genpd_unlock(link->parent);
998 	}
999 }
1000 
1001 /**
1002  * genpd_power_on - Restore power to a given PM domain and its parents.
1003  * @genpd: PM domain to power up.
1004  * @depth: nesting count for lockdep.
1005  *
1006  * Restore power to @genpd and all of its parents so that it is possible to
1007  * resume a device belonging to it.
1008  */
1009 static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
1010 {
1011 	struct gpd_link *link;
1012 	int ret = 0;
1013 
1014 	if (genpd_status_on(genpd))
1015 		return 0;
1016 
1017 	/* Reflect over the entered idle-states residency for debugfs. */
1018 	genpd_reflect_residency(genpd);
1019 
1020 	/*
1021 	 * The list is guaranteed not to change while the loop below is being
1022 	 * executed, unless one of the parents' .power_on() callbacks fiddles
1023 	 * with it.
1024 	 */
1025 	list_for_each_entry(link, &genpd->child_links, child_node) {
1026 		struct generic_pm_domain *parent = link->parent;
1027 
1028 		genpd_sd_counter_inc(parent);
1029 
1030 		genpd_lock_nested(parent, depth + 1);
1031 		ret = genpd_power_on(parent, depth + 1);
1032 		genpd_unlock(parent);
1033 
1034 		if (ret) {
1035 			genpd_sd_counter_dec(parent);
1036 			goto err;
1037 		}
1038 	}
1039 
1040 	ret = _genpd_power_on(genpd, true);
1041 	if (ret)
1042 		goto err;
1043 
1044 	genpd->status = GENPD_STATE_ON;
1045 	genpd_update_accounting(genpd);
1046 
1047 	return 0;
1048 
1049  err:
1050 	list_for_each_entry_continue_reverse(link,
1051 					&genpd->child_links,
1052 					child_node) {
1053 		genpd_sd_counter_dec(link->parent);
1054 		genpd_lock_nested(link->parent, depth + 1);
1055 		genpd_power_off(link->parent, false, depth + 1);
1056 		genpd_unlock(link->parent);
1057 	}
1058 
1059 	return ret;
1060 }
1061 
1062 static int genpd_dev_pm_start(struct device *dev)
1063 {
1064 	struct generic_pm_domain *genpd = dev_to_genpd(dev);
1065 
1066 	return genpd_start_dev(genpd, dev);
1067 }
1068 
1069 static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
1070 				     unsigned long val, void *ptr)
1071 {
1072 	struct generic_pm_domain_data *gpd_data;
1073 	struct device *dev;
1074 
1075 	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
1076 	dev = gpd_data->base.dev;
1077 
1078 	for (;;) {
1079 		struct generic_pm_domain *genpd = ERR_PTR(-ENODATA);
1080 		struct pm_domain_data *pdd;
1081 		struct gpd_timing_data *td;
1082 
1083 		spin_lock_irq(&dev->power.lock);
1084 
1085 		pdd = dev->power.subsys_data ?
1086 				dev->power.subsys_data->domain_data : NULL;
1087 		if (pdd) {
1088 			td = to_gpd_data(pdd)->td;
1089 			if (td) {
1090 				td->constraint_changed = true;
1091 				genpd = dev_to_genpd(dev);
1092 			}
1093 		}
1094 
1095 		spin_unlock_irq(&dev->power.lock);
1096 
1097 		if (!IS_ERR(genpd)) {
1098 			genpd_lock(genpd);
1099 			genpd->gd->max_off_time_changed = true;
1100 			genpd_unlock(genpd);
1101 		}
1102 
1103 		dev = dev->parent;
1104 		if (!dev || dev->power.ignore_children)
1105 			break;
1106 	}
1107 
1108 	return NOTIFY_DONE;
1109 }
1110 
1111 /**
1112  * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
1113  * @work: Work structure used for scheduling the execution of this function.
1114  */
1115 static void genpd_power_off_work_fn(struct work_struct *work)
1116 {
1117 	struct generic_pm_domain *genpd;
1118 
1119 	genpd = container_of(work, struct generic_pm_domain, power_off_work);
1120 
1121 	genpd_lock(genpd);
1122 	genpd_power_off(genpd, false, 0);
1123 	genpd_unlock(genpd);
1124 }
1125 
1126 /**
1127  * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
1128  * @dev: Device to handle.
1129  */
1130 static int __genpd_runtime_suspend(struct device *dev)
1131 {
1132 	int (*cb)(struct device *__dev);
1133 
1134 	if (dev->type && dev->type->pm)
1135 		cb = dev->type->pm->runtime_suspend;
1136 	else if (dev->class && dev->class->pm)
1137 		cb = dev->class->pm->runtime_suspend;
1138 	else if (dev->bus && dev->bus->pm)
1139 		cb = dev->bus->pm->runtime_suspend;
1140 	else
1141 		cb = NULL;
1142 
1143 	if (!cb && dev->driver && dev->driver->pm)
1144 		cb = dev->driver->pm->runtime_suspend;
1145 
1146 	return cb ? cb(dev) : 0;
1147 }
1148 
1149 /**
1150  * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
1151  * @dev: Device to handle.
1152  */
1153 static int __genpd_runtime_resume(struct device *dev)
1154 {
1155 	int (*cb)(struct device *__dev);
1156 
1157 	if (dev->type && dev->type->pm)
1158 		cb = dev->type->pm->runtime_resume;
1159 	else if (dev->class && dev->class->pm)
1160 		cb = dev->class->pm->runtime_resume;
1161 	else if (dev->bus && dev->bus->pm)
1162 		cb = dev->bus->pm->runtime_resume;
1163 	else
1164 		cb = NULL;
1165 
1166 	if (!cb && dev->driver && dev->driver->pm)
1167 		cb = dev->driver->pm->runtime_resume;
1168 
1169 	return cb ? cb(dev) : 0;
1170 }
1171 
1172 /**
1173  * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
1174  * @dev: Device to suspend.
1175  *
1176  * Carry out a runtime suspend of a device under the assumption that its
1177  * pm_domain field points to the domain member of an object of type
1178  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
1179  */
1180 static int genpd_runtime_suspend(struct device *dev)
1181 {
1182 	struct generic_pm_domain *genpd;
1183 	bool (*suspend_ok)(struct device *__dev);
1184 	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
1185 	struct gpd_timing_data *td = gpd_data->td;
1186 	bool runtime_pm = pm_runtime_enabled(dev);
1187 	ktime_t time_start = 0;
1188 	s64 elapsed_ns;
1189 	int ret;
1190 
1191 	dev_dbg(dev, "%s()\n", __func__);
1192 
1193 	genpd = dev_to_genpd(dev);
1194 	if (IS_ERR(genpd))
1195 		return -EINVAL;
1196 
1197 	/*
1198 	 * A runtime PM centric subsystem/driver may re-use the runtime PM
1199 	 * callbacks for other purposes than runtime PM. In those scenarios
1200 	 * runtime PM is disabled. Under these circumstances, we shall skip
1201 	 * validating/measuring the PM QoS latency.
1202 	 */
1203 	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
1204 	if (runtime_pm && suspend_ok && !suspend_ok(dev))
1205 		return -EBUSY;
1206 
1207 	/* Measure suspend latency. */
1208 	if (td && runtime_pm)
1209 		time_start = ktime_get();
1210 
1211 	ret = __genpd_runtime_suspend(dev);
1212 	if (ret)
1213 		return ret;
1214 
1215 	ret = genpd_stop_dev(genpd, dev);
1216 	if (ret) {
1217 		__genpd_runtime_resume(dev);
1218 		return ret;
1219 	}
1220 
1221 	/* Update suspend latency value if the measured time exceeds it. */
1222 	if (td && runtime_pm) {
1223 		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
1224 		if (elapsed_ns > td->suspend_latency_ns) {
1225 			td->suspend_latency_ns = elapsed_ns;
1226 			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
1227 				elapsed_ns);
1228 			genpd->gd->max_off_time_changed = true;
1229 			td->constraint_changed = true;
1230 		}
1231 	}
1232 
1233 	/*
1234 	 * If power.irq_safe is set, this routine may be run with
1235 	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
1236 	 */
1237 	if (irq_safe_dev_in_sleep_domain(dev, genpd))
1238 		return 0;
1239 
1240 	genpd_lock(genpd);
1241 	genpd_power_off(genpd, true, 0);
1242 	gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
1243 	genpd_unlock(genpd);
1244 
1245 	return 0;
1246 }
1247 
1248 /**
1249  * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
1250  * @dev: Device to resume.
1251  *
1252  * Carry out a runtime resume of a device under the assumption that its
1253  * pm_domain field points to the domain member of an object of type
1254  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
1255  */
1256 static int genpd_runtime_resume(struct device *dev)
1257 {
1258 	struct generic_pm_domain *genpd;
1259 	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
1260 	struct gpd_timing_data *td = gpd_data->td;
1261 	bool timed = td && pm_runtime_enabled(dev);
1262 	ktime_t time_start = 0;
1263 	s64 elapsed_ns;
1264 	int ret;
1265 
1266 	dev_dbg(dev, "%s()\n", __func__);
1267 
1268 	genpd = dev_to_genpd(dev);
1269 	if (IS_ERR(genpd))
1270 		return -EINVAL;
1271 
1272 	/*
1273 	 * As we don't power off a non-IRQ-safe domain that holds
1274 	 * an IRQ-safe device, we don't need to restore power to it.
1275 	 */
1276 	if (irq_safe_dev_in_sleep_domain(dev, genpd))
1277 		goto out;
1278 
1279 	genpd_lock(genpd);
1280 	genpd_restore_performance_state(dev, gpd_data->rpm_pstate);
1281 	ret = genpd_power_on(genpd, 0);
1282 	genpd_unlock(genpd);
1283 
1284 	if (ret)
1285 		return ret;
1286 
1287  out:
1288 	/* Measure resume latency. */
1289 	if (timed)
1290 		time_start = ktime_get();
1291 
1292 	ret = genpd_start_dev(genpd, dev);
1293 	if (ret)
1294 		goto err_poweroff;
1295 
1296 	ret = __genpd_runtime_resume(dev);
1297 	if (ret)
1298 		goto err_stop;
1299 
1300 	/* Update resume latency value if the measured time exceeds it. */
1301 	if (timed) {
1302 		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
1303 		if (elapsed_ns > td->resume_latency_ns) {
1304 			td->resume_latency_ns = elapsed_ns;
1305 			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
1306 				elapsed_ns);
1307 			genpd->gd->max_off_time_changed = true;
1308 			td->constraint_changed = true;
1309 		}
1310 	}
1311 
1312 	return 0;
1313 
1314 err_stop:
1315 	genpd_stop_dev(genpd, dev);
1316 err_poweroff:
1317 	if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
1318 		genpd_lock(genpd);
1319 		genpd_power_off(genpd, true, 0);
1320 		gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
1321 		genpd_unlock(genpd);
1322 	}
1323 
1324 	return ret;
1325 }
1326 
1327 #ifndef CONFIG_PM_GENERIC_DOMAINS_OF
1328 static bool pd_ignore_unused;
1329 static int __init pd_ignore_unused_setup(char *__unused)
1330 {
1331 	pd_ignore_unused = true;
1332 	return 1;
1333 }
1334 __setup("pd_ignore_unused", pd_ignore_unused_setup);
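/*
 * Editorial note: booting with "pd_ignore_unused" on the kernel command line
 * makes the late initcall below skip powering off unused domains, which can
 * be useful while bringing up a platform where consumers are not yet attached
 * to their domains.
 */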
1335 
1336 /**
1337  * genpd_power_off_unused - Power off all PM domains with no devices in use.
1338  */
1339 static int __init genpd_power_off_unused(void)
1340 {
1341 	struct generic_pm_domain *genpd;
1342 
1343 	if (pd_ignore_unused) {
1344 		pr_warn("genpd: Not disabling unused power domains\n");
1345 		return 0;
1346 	}
1347 
1348 	pr_info("genpd: Disabling unused power domains\n");
1349 	mutex_lock(&gpd_list_lock);
1350 
1351 	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
1352 		genpd_lock(genpd);
1353 		genpd->stay_on = false;
1354 		genpd_unlock(genpd);
1355 		genpd_queue_power_off_work(genpd);
1356 	}
1357 
1358 	mutex_unlock(&gpd_list_lock);
1359 
1360 	return 0;
1361 }
1362 late_initcall_sync(genpd_power_off_unused);
1363 #endif
1364 
1365 #ifdef CONFIG_PM_SLEEP
1366 
1367 /**
1368  * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
1369  * @genpd: PM domain to power off, if possible.
1370  * @use_lock: use the lock.
1371  * @depth: nesting count for lockdep.
1372  *
1373  * Check if the given PM domain can be powered off (during system suspend or
1374  * hibernation) and do that if so.  Also, in that case propagate to its parents.
1375  *
1376  * This function is only called in "noirq" and "syscore" stages of system power
1377  * transitions. The "noirq" callbacks may be executed asynchronously, thus in
1378  * these cases the lock must be held.
1379  */
1380 static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
1381 				 unsigned int depth)
1382 {
1383 	struct gpd_link *link;
1384 
1385 	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
1386 		return;
1387 
1388 	if (genpd->suspended_count != genpd->device_count
1389 	    || atomic_read(&genpd->sd_count) > 0)
1390 		return;
1391 
1392 	/* Check that the children are in their deepest (powered-off) state. */
1393 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
1394 		struct generic_pm_domain *child = link->child;
1395 		if (child->state_idx < child->state_count - 1)
1396 			return;
1397 	}
1398 
1399 	/* Choose the deepest state when suspending */
1400 	genpd->state_idx = genpd->state_count - 1;
1401 	if (_genpd_power_off(genpd, false)) {
1402 		genpd->states[genpd->state_idx].rejected++;
1403 		return;
1404 	}
1405 
1406 	genpd->states[genpd->state_idx].usage++;
1407 
1408 	genpd->status = GENPD_STATE_OFF;
1409 
1410 	list_for_each_entry(link, &genpd->child_links, child_node) {
1411 		genpd_sd_counter_dec(link->parent);
1412 
1413 		if (use_lock)
1414 			genpd_lock_nested(link->parent, depth + 1);
1415 
1416 		genpd_sync_power_off(link->parent, use_lock, depth + 1);
1417 
1418 		if (use_lock)
1419 			genpd_unlock(link->parent);
1420 	}
1421 }
1422 
1423 /**
1424  * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
1425  * @genpd: PM domain to power on.
1426  * @use_lock: use the lock.
1427  * @depth: nesting count for lockdep.
1428  *
1429  * This function is only called in "noirq" and "syscore" stages of system power
1430  * transitions. The "noirq" callbacks may be executed asynchronously, thus in
1431  * these cases the lock must be held.
1432  */
1433 static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
1434 				unsigned int depth)
1435 {
1436 	struct gpd_link *link;
1437 
1438 	if (genpd_status_on(genpd))
1439 		return;
1440 
1441 	list_for_each_entry(link, &genpd->child_links, child_node) {
1442 		genpd_sd_counter_inc(link->parent);
1443 
1444 		if (use_lock)
1445 			genpd_lock_nested(link->parent, depth + 1);
1446 
1447 		genpd_sync_power_on(link->parent, use_lock, depth + 1);
1448 
1449 		if (use_lock)
1450 			genpd_unlock(link->parent);
1451 	}
1452 
1453 	_genpd_power_on(genpd, false);
1454 	genpd->status = GENPD_STATE_ON;
1455 }
1456 
1457 /**
1458  * genpd_prepare - Start power transition of a device in a PM domain.
1459  * @dev: Device to start the transition of.
1460  *
1461  * Start a power transition of a device (during a system-wide power transition)
1462  * under the assumption that its pm_domain field points to the domain member of
1463  * an object of type struct generic_pm_domain representing a PM domain
1464  * consisting of I/O devices.
1465  */
1466 static int genpd_prepare(struct device *dev)
1467 {
1468 	struct generic_pm_domain *genpd;
1469 	int ret;
1470 
1471 	dev_dbg(dev, "%s()\n", __func__);
1472 
1473 	genpd = dev_to_genpd(dev);
1474 	if (IS_ERR(genpd))
1475 		return -EINVAL;
1476 
1477 	genpd_lock(genpd);
1478 	genpd->prepared_count++;
1479 	genpd_unlock(genpd);
1480 
1481 	ret = pm_generic_prepare(dev);
1482 	if (ret < 0) {
1483 		genpd_lock(genpd);
1484 
1485 		genpd->prepared_count--;
1486 
1487 		genpd_unlock(genpd);
1488 	}
1489 
1490 	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
1491 	return ret >= 0 ? 0 : ret;
1492 }
1493 
1494 /**
1495  * genpd_finish_suspend - Completion of suspend or hibernation of device in an
1496  *   I/O PM domain.
1497  * @dev: Device to suspend.
1498  * @suspend_noirq: Generic suspend_noirq callback.
1499  * @resume_noirq: Generic resume_noirq callback.
1500  *
1501  * Stop the device and remove power from the domain if all devices in it have
1502  * been stopped.
1503  */
1504 static int genpd_finish_suspend(struct device *dev,
1505 				int (*suspend_noirq)(struct device *dev),
1506 				int (*resume_noirq)(struct device *dev))
1507 {
1508 	struct generic_pm_domain *genpd;
1509 	int ret = 0;
1510 
1511 	genpd = dev_to_genpd(dev);
1512 	if (IS_ERR(genpd))
1513 		return -EINVAL;
1514 
1515 	ret = suspend_noirq(dev);
1516 	if (ret)
1517 		return ret;
1518 
1519 	if (device_awake_path(dev) && genpd_is_active_wakeup(genpd))
1520 		return 0;
1521 
1522 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1523 	    !pm_runtime_status_suspended(dev)) {
1524 		ret = genpd_stop_dev(genpd, dev);
1525 		if (ret) {
1526 			resume_noirq(dev);
1527 			return ret;
1528 		}
1529 	}
1530 
1531 	genpd_lock(genpd);
1532 	genpd->suspended_count++;
1533 	genpd_sync_power_off(genpd, true, 0);
1534 	genpd_unlock(genpd);
1535 
1536 	return 0;
1537 }
1538 
1539 /**
1540  * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
1541  * @dev: Device to suspend.
1542  *
1543  * Stop the device and remove power from the domain if all devices in it have
1544  * been stopped.
1545  */
1546 static int genpd_suspend_noirq(struct device *dev)
1547 {
1548 	dev_dbg(dev, "%s()\n", __func__);
1549 
1550 	return genpd_finish_suspend(dev,
1551 				    pm_generic_suspend_noirq,
1552 				    pm_generic_resume_noirq);
1553 }
1554 
1555 /**
1556  * genpd_finish_resume - Completion of resume of device in an I/O PM domain.
1557  * @dev: Device to resume.
1558  * @resume_noirq: Generic resume_noirq callback.
1559  *
1560  * Restore power to the device's PM domain, if necessary, and start the device.
1561  */
1562 static int genpd_finish_resume(struct device *dev,
1563 			       int (*resume_noirq)(struct device *dev))
1564 {
1565 	struct generic_pm_domain *genpd;
1566 	int ret;
1567 
1568 	dev_dbg(dev, "%s()\n", __func__);
1569 
1570 	genpd = dev_to_genpd(dev);
1571 	if (IS_ERR(genpd))
1572 		return -EINVAL;
1573 
1574 	if (device_awake_path(dev) && genpd_is_active_wakeup(genpd))
1575 		return resume_noirq(dev);
1576 
1577 	genpd_lock(genpd);
1578 	genpd_sync_power_on(genpd, true, 0);
1579 	genpd->suspended_count--;
1580 	genpd_unlock(genpd);
1581 
1582 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1583 	    !pm_runtime_status_suspended(dev)) {
1584 		ret = genpd_start_dev(genpd, dev);
1585 		if (ret)
1586 			return ret;
1587 	}
1588 
1589 	return resume_noirq(dev);
1590 }
1591 
1592 /**
1593  * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
1594  * @dev: Device to resume.
1595  *
1596  * Restore power to the device's PM domain, if necessary, and start the device.
1597  */
1598 static int genpd_resume_noirq(struct device *dev)
1599 {
1600 	dev_dbg(dev, "%s()\n", __func__);
1601 
1602 	return genpd_finish_resume(dev, pm_generic_resume_noirq);
1603 }
1604 
1605 /**
1606  * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
1607  * @dev: Device to freeze.
1608  *
1609  * Carry out a late freeze of a device under the assumption that its
1610  * pm_domain field points to the domain member of an object of type
1611  * struct generic_pm_domain representing a power domain consisting of I/O
1612  * devices.
1613  */
1614 static int genpd_freeze_noirq(struct device *dev)
1615 {
1616 	dev_dbg(dev, "%s()\n", __func__);
1617 
1618 	return genpd_finish_suspend(dev,
1619 				    pm_generic_freeze_noirq,
1620 				    pm_generic_thaw_noirq);
1621 }
1622 
1623 /**
1624  * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
1625  * @dev: Device to thaw.
1626  *
1627  * Start the device, unless power has been removed from the domain already
1628  * before the system transition.
1629  */
1630 static int genpd_thaw_noirq(struct device *dev)
1631 {
1632 	dev_dbg(dev, "%s()\n", __func__);
1633 
1634 	return genpd_finish_resume(dev, pm_generic_thaw_noirq);
1635 }
1636 
1637 /**
1638  * genpd_poweroff_noirq - Completion of hibernation of device in an
1639  *   I/O PM domain.
1640  * @dev: Device to poweroff.
1641  *
1642  * Stop the device and remove power from the domain if all devices in it have
1643  * been stopped.
1644  */
1645 static int genpd_poweroff_noirq(struct device *dev)
1646 {
1647 	dev_dbg(dev, "%s()\n", __func__);
1648 
1649 	return genpd_finish_suspend(dev,
1650 				    pm_generic_poweroff_noirq,
1651 				    pm_generic_restore_noirq);
1652 }
1653 
1654 /**
1655  * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
1656  * @dev: Device to resume.
1657  *
1658  * Make sure the domain will be in the same power state as before the
1659  * hibernation the system is resuming from and start the device if necessary.
1660  */
1661 static int genpd_restore_noirq(struct device *dev)
1662 {
1663 	dev_dbg(dev, "%s()\n", __func__);
1664 
1665 	return genpd_finish_resume(dev, pm_generic_restore_noirq);
1666 }
1667 
1668 /**
1669  * genpd_complete - Complete power transition of a device in a power domain.
1670  * @dev: Device to complete the transition of.
1671  *
1672  * Complete a power transition of a device (during a system-wide power
1673  * transition) under the assumption that its pm_domain field points to the
1674  * domain member of an object of type struct generic_pm_domain representing
1675  * a power domain consisting of I/O devices.
1676  */
1677 static void genpd_complete(struct device *dev)
1678 {
1679 	struct generic_pm_domain *genpd;
1680 
1681 	dev_dbg(dev, "%s()\n", __func__);
1682 
1683 	genpd = dev_to_genpd(dev);
1684 	if (IS_ERR(genpd))
1685 		return;
1686 
1687 	pm_generic_complete(dev);
1688 
1689 	genpd_lock(genpd);
1690 
1691 	genpd->prepared_count--;
1692 	if (!genpd->prepared_count)
1693 		genpd_queue_power_off_work(genpd);
1694 
1695 	genpd_unlock(genpd);
1696 }
1697 
1698 static void genpd_switch_state(struct device *dev, bool suspend)
1699 {
1700 	struct generic_pm_domain *genpd;
1701 	bool use_lock;
1702 
1703 	genpd = dev_to_genpd_safe(dev);
1704 	if (!genpd)
1705 		return;
1706 
1707 	use_lock = genpd_is_irq_safe(genpd);
1708 
1709 	if (use_lock)
1710 		genpd_lock(genpd);
1711 
1712 	if (suspend) {
1713 		genpd->suspended_count++;
1714 		genpd_sync_power_off(genpd, use_lock, 0);
1715 	} else {
1716 		genpd_sync_power_on(genpd, use_lock, 0);
1717 		genpd->suspended_count--;
1718 	}
1719 
1720 	if (use_lock)
1721 		genpd_unlock(genpd);
1722 }
1723 
1724 /**
1725  * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
1726  * @dev: The device that is attached to the genpd, that can be suspended.
1727  *
1728  * This routine should typically be called for a device that needs to be
1729  * suspended during the syscore suspend phase. It may also be called during
1730  * suspend-to-idle to suspend a corresponding CPU device that is attached to a
1731  * genpd.
1732  */
1733 void dev_pm_genpd_suspend(struct device *dev)
1734 {
1735 	genpd_switch_state(dev, true);
1736 }
1737 EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);
1738 
1739 /**
1740  * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
1741  * @dev: The device that is attached to the genpd, which needs to be resumed.
1742  *
1743  * This routine should typically be called for a device that needs to be resumed
1744  * during the syscore resume phase. It may also be called during suspend-to-idle
1745  * to resume a corresponding CPU device that is attached to a genpd.
1746  */
1747 void dev_pm_genpd_resume(struct device *dev)
1748 {
1749 	genpd_switch_state(dev, false);
1750 }
1751 EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);
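/*
 * Example (editorial sketch): a syscore user could pair the two helpers above
 * around a system transition ("my_dev" is hypothetical):
 *
 *	static int my_syscore_suspend(void)
 *	{
 *		dev_pm_genpd_suspend(my_dev);
 *		return 0;
 *	}
 *
 *	static void my_syscore_resume(void)
 *	{
 *		dev_pm_genpd_resume(my_dev);
 *	}
 */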
1752 
1753 #else /* !CONFIG_PM_SLEEP */
1754 
1755 #define genpd_prepare		NULL
1756 #define genpd_suspend_noirq	NULL
1757 #define genpd_resume_noirq	NULL
1758 #define genpd_freeze_noirq	NULL
1759 #define genpd_thaw_noirq	NULL
1760 #define genpd_poweroff_noirq	NULL
1761 #define genpd_restore_noirq	NULL
1762 #define genpd_complete		NULL
1763 
1764 #endif /* CONFIG_PM_SLEEP */
1765 
1766 static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
1767 							   bool has_governor)
1768 {
1769 	struct generic_pm_domain_data *gpd_data;
1770 	struct gpd_timing_data *td;
1771 	int ret;
1772 
1773 	ret = dev_pm_get_subsys_data(dev);
1774 	if (ret)
1775 		return ERR_PTR(ret);
1776 
1777 	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1778 	if (!gpd_data) {
1779 		ret = -ENOMEM;
1780 		goto err_put;
1781 	}
1782 
1783 	gpd_data->base.dev = dev;
1784 	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1785 
1786 	/* Allocate data used by a governor. */
1787 	if (has_governor) {
1788 		td = kzalloc(sizeof(*td), GFP_KERNEL);
1789 		if (!td) {
1790 			ret = -ENOMEM;
1791 			goto err_free;
1792 		}
1793 
1794 		td->constraint_changed = true;
1795 		td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
1796 		td->next_wakeup = KTIME_MAX;
1797 		gpd_data->td = td;
1798 	}
1799 
1800 	spin_lock_irq(&dev->power.lock);
1801 
1802 	if (dev->power.subsys_data->domain_data)
1803 		ret = -EINVAL;
1804 	else
1805 		dev->power.subsys_data->domain_data = &gpd_data->base;
1806 
1807 	spin_unlock_irq(&dev->power.lock);
1808 
1809 	if (ret)
1810 		goto err_free;
1811 
1812 	return gpd_data;
1813 
1814  err_free:
1815 	kfree(gpd_data->td);
1816 	kfree(gpd_data);
1817  err_put:
1818 	dev_pm_put_subsys_data(dev);
1819 	return ERR_PTR(ret);
1820 }
1821 
1822 static void genpd_free_dev_data(struct device *dev,
1823 				struct generic_pm_domain_data *gpd_data)
1824 {
1825 	spin_lock_irq(&dev->power.lock);
1826 
1827 	dev->power.subsys_data->domain_data = NULL;
1828 
1829 	spin_unlock_irq(&dev->power.lock);
1830 
1831 	dev_pm_opp_clear_config(gpd_data->opp_token);
1832 	kfree(gpd_data->td);
1833 	kfree(gpd_data);
1834 	dev_pm_put_subsys_data(dev);
1835 }
1836 
1837 static void genpd_update_cpumask(struct generic_pm_domain *genpd,
1838 				 int cpu, bool set, unsigned int depth)
1839 {
1840 	struct gpd_link *link;
1841 
1842 	if (!genpd_is_cpu_domain(genpd))
1843 		return;
1844 
1845 	list_for_each_entry(link, &genpd->child_links, child_node) {
1846 		struct generic_pm_domain *parent = link->parent;
1847 
1848 		genpd_lock_nested(parent, depth + 1);
1849 		genpd_update_cpumask(parent, cpu, set, depth + 1);
1850 		genpd_unlock(parent);
1851 	}
1852 
1853 	if (set)
1854 		cpumask_set_cpu(cpu, genpd->cpus);
1855 	else
1856 		cpumask_clear_cpu(cpu, genpd->cpus);
1857 }
1858 
1859 static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
1860 {
1861 	if (cpu >= 0)
1862 		genpd_update_cpumask(genpd, cpu, true, 0);
1863 }
1864 
1865 static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
1866 {
1867 	if (cpu >= 0)
1868 		genpd_update_cpumask(genpd, cpu, false, 0);
1869 }
1870 
1871 static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
1872 {
1873 	int cpu;
1874 
1875 	if (!genpd_is_cpu_domain(genpd))
1876 		return -1;
1877 
1878 	for_each_possible_cpu(cpu) {
1879 		if (get_cpu_device(cpu) == dev)
1880 			return cpu;
1881 	}
1882 
1883 	return -1;
1884 }
1885 
1886 static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1887 			    struct device *base_dev)
1888 {
1889 	struct genpd_governor_data *gd = genpd->gd;
1890 	struct generic_pm_domain_data *gpd_data;
1891 	int ret;
1892 
1893 	dev_dbg(dev, "%s()\n", __func__);
1894 
1895 	gpd_data = genpd_alloc_dev_data(dev, gd);
1896 	if (IS_ERR(gpd_data))
1897 		return PTR_ERR(gpd_data);
1898 
1899 	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);
1900 
1901 	gpd_data->hw_mode = genpd->get_hwmode_dev ? genpd->get_hwmode_dev(genpd, dev) : false;
1902 
1903 	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1904 	if (ret)
1905 		goto out;
1906 
1907 	genpd_lock(genpd);
1908 
1909 	genpd_set_cpumask(genpd, gpd_data->cpu);
1910 
1911 	genpd->device_count++;
1912 	if (gd)
1913 		gd->max_off_time_changed = true;
1914 
1915 	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1916 
1917 	genpd_unlock(genpd);
1918 	dev_pm_domain_set(dev, &genpd->domain);
1919  out:
1920 	if (ret)
1921 		genpd_free_dev_data(dev, gpd_data);
1922 	else
1923 		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
1924 					DEV_PM_QOS_RESUME_LATENCY);
1925 
1926 	return ret;
1927 }
1928 
1929 /**
1930  * pm_genpd_add_device - Add a device to an I/O PM domain.
1931  * @genpd: PM domain to add the device to.
1932  * @dev: Device to be added.
1933  */
1934 int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
1935 {
1936 	int ret;
1937 
1938 	if (!genpd || !dev)
1939 		return -EINVAL;
1940 
1941 	mutex_lock(&gpd_list_lock);
1942 	ret = genpd_add_device(genpd, dev, dev);
1943 	mutex_unlock(&gpd_list_lock);
1944 
1945 	return ret;
1946 }
1947 EXPORT_SYMBOL_GPL(pm_genpd_add_device);
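
/*
 * Example (illustrative sketch): once a domain has been set up with
 * pm_genpd_init(), a platform driver could attach and later detach one of
 * its devices as below. The my_pd domain and pdev are hypothetical.
 *
 *	ret = pm_genpd_add_device(&my_pd, &pdev->dev);
 *	if (ret)
 *		return ret;
 *	...
 *	ret = pm_genpd_remove_device(&pdev->dev);
 */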
1948 
1949 static int genpd_remove_device(struct generic_pm_domain *genpd,
1950 			       struct device *dev)
1951 {
1952 	struct generic_pm_domain_data *gpd_data;
1953 	struct pm_domain_data *pdd;
1954 	int ret = 0;
1955 
1956 	dev_dbg(dev, "%s()\n", __func__);
1957 
1958 	pdd = dev->power.subsys_data->domain_data;
1959 	gpd_data = to_gpd_data(pdd);
1960 	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
1961 				   DEV_PM_QOS_RESUME_LATENCY);
1962 
1963 	genpd_lock(genpd);
1964 
1965 	if (genpd->prepared_count > 0) {
1966 		ret = -EAGAIN;
1967 		goto out;
1968 	}
1969 
1970 	genpd->device_count--;
1971 	if (genpd->gd)
1972 		genpd->gd->max_off_time_changed = true;
1973 
1974 	genpd_clear_cpumask(genpd, gpd_data->cpu);
1975 
1976 	list_del_init(&pdd->list_node);
1977 
1978 	genpd_unlock(genpd);
1979 
1980 	dev_pm_domain_set(dev, NULL);
1981 
1982 	if (genpd->detach_dev)
1983 		genpd->detach_dev(genpd, dev);
1984 
1985 	genpd_free_dev_data(dev, gpd_data);
1986 
1987 	return 0;
1988 
1989  out:
1990 	genpd_unlock(genpd);
1991 	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);
1992 
1993 	return ret;
1994 }
1995 
1996 /**
1997  * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1998  * @dev: Device to be removed.
1999  */
2000 int pm_genpd_remove_device(struct device *dev)
2001 {
2002 	struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);
2003 
2004 	if (!genpd)
2005 		return -EINVAL;
2006 
2007 	return genpd_remove_device(genpd, dev);
2008 }
2009 EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
2010 
2011 /**
2012  * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
2013  *
2014  * @dev: Device that should be associated with the notifier
2015  * @nb: The notifier block to register
2016  *
2017  * Users may call this function to add a genpd power on/off notifier for an
2018  * attached @dev. Only one notifier per device is allowed. The notifier is
2019  * invoked when genpd powers the PM domain on or off.
2020  *
2021  * It is assumed that the user guarantees that the genpd won't be detached
2022  * while this routine is running.
2023  *
2024  * Returns 0 on success and negative error values on failures.
2025  */
2026 int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
2027 {
2028 	struct generic_pm_domain *genpd;
2029 	struct generic_pm_domain_data *gpd_data;
2030 	int ret;
2031 
2032 	genpd = dev_to_genpd_safe(dev);
2033 	if (!genpd)
2034 		return -ENODEV;
2035 
2036 	if (WARN_ON(!dev->power.subsys_data ||
2037 		     !dev->power.subsys_data->domain_data))
2038 		return -EINVAL;
2039 
2040 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
2041 	if (gpd_data->power_nb)
2042 		return -EEXIST;
2043 
2044 	genpd_lock(genpd);
2045 	ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
2046 	genpd_unlock(genpd);
2047 
2048 	if (ret) {
2049 		dev_warn(dev, "failed to add notifier for PM domain %s\n",
2050 			 dev_name(&genpd->dev));
2051 		return ret;
2052 	}
2053 
2054 	gpd_data->power_nb = nb;
2055 	return 0;
2056 }
2057 EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
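
/*
 * Example (illustrative sketch): a consumer driver could react to the
 * power on/off transitions of its domain as below. The GENPD_NOTIFY_*
 * actions are passed as the 'action' argument; my_nb_cb and my_dev are
 * hypothetical.
 *
 *	static int my_nb_cb(struct notifier_block *nb, unsigned long action,
 *			    void *data)
 *	{
 *		switch (action) {
 *		case GENPD_NOTIFY_PRE_OFF:
 *			// Save context before the domain goes off.
 *			break;
 *		case GENPD_NOTIFY_ON:
 *			// Restore context after the domain came on.
 *			break;
 *		default:
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_nb_cb };
 *
 *	ret = dev_pm_genpd_add_notifier(my_dev, &my_nb);
 */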
2058 
2059 /**
2060  * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
2061  *
2062  * @dev: Device that is associated with the notifier
2063  *
2064  * Users may call this function to remove a genpd power on/off notifier for an
2065  * attached @dev.
2066  *
2067  * It is assumed that the user guarantees that the genpd won't be detached
2068  * while this routine is running.
2069  *
2070  * Returns 0 on success and negative error values on failures.
2071  */
2072 int dev_pm_genpd_remove_notifier(struct device *dev)
2073 {
2074 	struct generic_pm_domain *genpd;
2075 	struct generic_pm_domain_data *gpd_data;
2076 	int ret;
2077 
2078 	genpd = dev_to_genpd_safe(dev);
2079 	if (!genpd)
2080 		return -ENODEV;
2081 
2082 	if (WARN_ON(!dev->power.subsys_data ||
2083 		     !dev->power.subsys_data->domain_data))
2084 		return -EINVAL;
2085 
2086 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
2087 	if (!gpd_data->power_nb)
2088 		return -ENODEV;
2089 
2090 	genpd_lock(genpd);
2091 	ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
2092 					    gpd_data->power_nb);
2093 	genpd_unlock(genpd);
2094 
2095 	if (ret) {
2096 		dev_warn(dev, "failed to remove notifier for PM domain %s\n",
2097 			 dev_name(&genpd->dev));
2098 		return ret;
2099 	}
2100 
2101 	gpd_data->power_nb = NULL;
2102 	return 0;
2103 }
2104 EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);
2105 
2106 static int genpd_add_subdomain(struct generic_pm_domain *genpd,
2107 			       struct generic_pm_domain *subdomain)
2108 {
2109 	struct gpd_link *link, *itr;
2110 	int ret = 0;
2111 
2112 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain) ||
2113 	    genpd == subdomain)
2114 		return -EINVAL;
2115 
2116 	/*
2117 	 * If the subdomain can be powered on/off in an IRQ safe
2118 	 * context, the parent must be IRQ safe too, as its lock
2119 	 * may need to be acquired from that context.
2120 	 */
2121 	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
2122 		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
2123 		     dev_name(&genpd->dev), subdomain->name);
2124 		return -EINVAL;
2125 	}
2126 
2127 	link = kzalloc(sizeof(*link), GFP_KERNEL);
2128 	if (!link)
2129 		return -ENOMEM;
2130 
2131 	genpd_lock(subdomain);
2132 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
2133 
2134 	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
2135 		ret = -EINVAL;
2136 		goto out;
2137 	}
2138 
2139 	list_for_each_entry(itr, &genpd->parent_links, parent_node) {
2140 		if (itr->child == subdomain && itr->parent == genpd) {
2141 			ret = -EINVAL;
2142 			goto out;
2143 		}
2144 	}
2145 
2146 	link->parent = genpd;
2147 	list_add_tail(&link->parent_node, &genpd->parent_links);
2148 	link->child = subdomain;
2149 	list_add_tail(&link->child_node, &subdomain->child_links);
2150 	if (genpd_status_on(subdomain))
2151 		genpd_sd_counter_inc(genpd);
2152 
2153  out:
2154 	genpd_unlock(genpd);
2155 	genpd_unlock(subdomain);
2156 	if (ret)
2157 		kfree(link);
2158 	return ret;
2159 }
2160 
2161 /**
2162  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2163  * @genpd: Leader PM domain to add the subdomain to.
2164  * @subdomain: Subdomain to be added.
2165  */
2166 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
2167 			   struct generic_pm_domain *subdomain)
2168 {
2169 	int ret;
2170 
2171 	mutex_lock(&gpd_list_lock);
2172 	ret = genpd_add_subdomain(genpd, subdomain);
2173 	mutex_unlock(&gpd_list_lock);
2174 
2175 	return ret;
2176 }
2177 EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
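
/*
 * Example (illustrative sketch): nesting one initialized domain inside
 * another, where my_parent and my_child are hypothetical domains that have
 * both been through pm_genpd_init():
 *
 *	ret = pm_genpd_add_subdomain(&my_parent, &my_child);
 *	if (ret)
 *		return ret;
 *	...
 *	ret = pm_genpd_remove_subdomain(&my_parent, &my_child);
 */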
2178 
2179 /**
2180  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
2181  * @genpd: Leader PM domain to remove the subdomain from.
2182  * @subdomain: Subdomain to be removed.
2183  */
2184 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
2185 			      struct generic_pm_domain *subdomain)
2186 {
2187 	struct gpd_link *l, *link;
2188 	int ret = -EINVAL;
2189 
2190 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
2191 		return -EINVAL;
2192 
2193 	genpd_lock(subdomain);
2194 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
2195 
2196 	if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
2197 		pr_warn("%s: unable to remove subdomain %s\n",
2198 			dev_name(&genpd->dev), subdomain->name);
2199 		ret = -EBUSY;
2200 		goto out;
2201 	}
2202 
2203 	list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
2204 		if (link->child != subdomain)
2205 			continue;
2206 
2207 		list_del(&link->parent_node);
2208 		list_del(&link->child_node);
2209 		kfree(link);
2210 		if (genpd_status_on(subdomain))
2211 			genpd_sd_counter_dec(genpd);
2212 
2213 		ret = 0;
2214 		break;
2215 	}
2216 
2217 out:
2218 	genpd_unlock(genpd);
2219 	genpd_unlock(subdomain);
2220 
2221 	return ret;
2222 }
2223 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
2224 
2225 static void genpd_free_default_power_state(struct genpd_power_state *states,
2226 					   unsigned int state_count)
2227 {
2228 	kfree(states);
2229 }
2230 
2231 static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
2232 {
2233 	struct genpd_power_state *state;
2234 
2235 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2236 	if (!state)
2237 		return -ENOMEM;
2238 
2239 	genpd->states = state;
2240 	genpd->state_count = 1;
2241 	genpd->free_states = genpd_free_default_power_state;
2242 
2243 	return 0;
2244 }
2245 
2246 static void genpd_provider_release(struct device *dev)
2247 {
2248 	/* nothing to be done here */
2249 }
2250 
2251 static int genpd_alloc_data(struct generic_pm_domain *genpd)
2252 {
2253 	struct genpd_governor_data *gd = NULL;
2254 	int ret;
2255 
2256 	if (genpd_is_cpu_domain(genpd) &&
2257 	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
2258 		return -ENOMEM;
2259 
2260 	if (genpd->gov) {
2261 		gd = kzalloc(sizeof(*gd), GFP_KERNEL);
2262 		if (!gd) {
2263 			ret = -ENOMEM;
2264 			goto free;
2265 		}
2266 
2267 		gd->max_off_time_ns = -1;
2268 		gd->max_off_time_changed = true;
2269 		gd->next_wakeup = KTIME_MAX;
2270 		gd->next_hrtimer = KTIME_MAX;
2271 	}
2272 
2273 	/* Use only one "off" state if there were no states declared */
2274 	if (genpd->state_count == 0) {
2275 		ret = genpd_set_default_power_state(genpd);
2276 		if (ret)
2277 			goto free;
2278 	}
2279 
2280 	genpd->gd = gd;
2281 	device_initialize(&genpd->dev);
2282 	genpd->dev.release = genpd_provider_release;
2283 	genpd->dev.bus = &genpd_provider_bus_type;
2284 	genpd->dev.parent = &genpd_provider_bus;
2285 
2286 	if (!genpd_is_dev_name_fw(genpd)) {
2287 		dev_set_name(&genpd->dev, "%s", genpd->name);
2288 	} else {
2289 		ret = ida_alloc(&genpd_ida, GFP_KERNEL);
2290 		if (ret < 0)
2291 			goto put;
2292 
2293 		genpd->device_id = ret;
2294 		dev_set_name(&genpd->dev, "%s_%u", genpd->name, genpd->device_id);
2295 	}
2296 
2297 	return 0;
2298 put:
2299 	put_device(&genpd->dev);
2300 	if (genpd->free_states == genpd_free_default_power_state) {
2301 		kfree(genpd->states);
2302 		genpd->states = NULL;
2303 	}
2304 free:
2305 	if (genpd_is_cpu_domain(genpd))
2306 		free_cpumask_var(genpd->cpus);
2307 	kfree(gd);
2308 	return ret;
2309 }
2310 
2311 static void genpd_free_data(struct generic_pm_domain *genpd)
2312 {
2313 	put_device(&genpd->dev);
2314 	if (genpd->device_id != -ENXIO)
2315 		ida_free(&genpd_ida, genpd->device_id);
2316 	if (genpd_is_cpu_domain(genpd))
2317 		free_cpumask_var(genpd->cpus);
2318 	if (genpd->free_states)
2319 		genpd->free_states(genpd->states, genpd->state_count);
2320 	kfree(genpd->gd);
2321 }
2322 
2323 static void genpd_lock_init(struct generic_pm_domain *genpd)
2324 {
2325 	if (genpd_is_cpu_domain(genpd)) {
2326 		raw_spin_lock_init(&genpd->raw_slock);
2327 		genpd->lock_ops = &genpd_raw_spin_ops;
2328 	} else if (genpd_is_irq_safe(genpd)) {
2329 		spin_lock_init(&genpd->slock);
2330 		genpd->lock_ops = &genpd_spin_ops;
2331 	} else {
2332 		mutex_init(&genpd->mlock);
2333 		genpd->lock_ops = &genpd_mtx_ops;
2334 	}
2335 }
2336 
2337 /**
2338  * pm_genpd_init - Initialize a generic I/O PM domain object.
2339  * @genpd: PM domain object to initialize.
2340  * @gov: PM domain governor to associate with the domain (may be NULL).
2341  * @is_off: Initial value of the domain's power_is_off field.
2342  *
2343  * Returns 0 on successful initialization, else a negative error code.
2344  */
2345 int pm_genpd_init(struct generic_pm_domain *genpd,
2346 		  struct dev_power_governor *gov, bool is_off)
2347 {
2348 	int ret;
2349 
2350 	if (IS_ERR_OR_NULL(genpd))
2351 		return -EINVAL;
2352 
2353 	INIT_LIST_HEAD(&genpd->parent_links);
2354 	INIT_LIST_HEAD(&genpd->child_links);
2355 	INIT_LIST_HEAD(&genpd->dev_list);
2356 	RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
2357 	genpd_lock_init(genpd);
2358 	genpd->gov = gov;
2359 	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
2360 	atomic_set(&genpd->sd_count, 0);
2361 	genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
2362 	genpd->stay_on = !is_off;
2363 	genpd->sync_state = GENPD_SYNC_STATE_OFF;
2364 	genpd->device_count = 0;
2365 	genpd->provider = NULL;
2366 	genpd->device_id = -ENXIO;
2367 	genpd->has_provider = false;
2368 	genpd->opp_table = NULL;
2369 	genpd->accounting_time = ktime_get_mono_fast_ns();
2370 	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
2371 	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
2372 	genpd->domain.ops.prepare = genpd_prepare;
2373 	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
2374 	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
2375 	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
2376 	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
2377 	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
2378 	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
2379 	genpd->domain.ops.complete = genpd_complete;
2380 	genpd->domain.start = genpd_dev_pm_start;
2381 	genpd->domain.set_performance_state = genpd_dev_pm_set_performance_state;
2382 
2383 	if (genpd->flags & GENPD_FLAG_PM_CLK) {
2384 		genpd->dev_ops.stop = pm_clk_suspend;
2385 		genpd->dev_ops.start = pm_clk_resume;
2386 	}
2387 
2388 	/* The always-on governor works better with the corresponding flag. */
2389 	if (gov == &pm_domain_always_on_gov)
2390 		genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
2391 
2392 	/* Always-on domains must be powered on at initialization. */
2393 	if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
2394 			!genpd_status_on(genpd)) {
2395 		pr_err("always-on PM domain %s is not on\n", genpd->name);
2396 		return -EINVAL;
2397 	}
2398 
2399 	/* Multiple states but no governor doesn't make sense. */
2400 	if (!gov && genpd->state_count > 1)
2401 		pr_warn("%s: no governor for states\n", genpd->name);
2402 
2403 	ret = genpd_alloc_data(genpd);
2404 	if (ret)
2405 		return ret;
2406 
2407 	mutex_lock(&gpd_list_lock);
2408 	list_add(&genpd->gpd_list_node, &gpd_list);
2409 	mutex_unlock(&gpd_list_lock);
2410 	genpd_debug_add(genpd);
2411 
2412 	return 0;
2413 }
2414 EXPORT_SYMBOL_GPL(pm_genpd_init);
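
/*
 * Example (illustrative sketch): a minimal domain with power on/off
 * callbacks, registered as initially off. The my_* names, including the
 * my_hw_enable()/my_hw_disable() helpers, are hypothetical.
 *
 *	static int my_pd_power_on(struct generic_pm_domain *pd)
 *	{
 *		return my_hw_enable();
 *	}
 *
 *	static int my_pd_power_off(struct generic_pm_domain *pd)
 *	{
 *		return my_hw_disable();
 *	}
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name = "my_pd",
 *		.power_on = my_pd_power_on,
 *		.power_off = my_pd_power_off,
 *	};
 *
 *	ret = pm_genpd_init(&my_pd, NULL, true);
 */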
2415 
2416 static int genpd_remove(struct generic_pm_domain *genpd)
2417 {
2418 	struct gpd_link *l, *link;
2419 
2420 	if (IS_ERR_OR_NULL(genpd))
2421 		return -EINVAL;
2422 
2423 	genpd_lock(genpd);
2424 
2425 	if (genpd->has_provider) {
2426 		genpd_unlock(genpd);
2427 		pr_err("Provider present, unable to remove %s\n", dev_name(&genpd->dev));
2428 		return -EBUSY;
2429 	}
2430 
2431 	if (!list_empty(&genpd->parent_links) || genpd->device_count) {
2432 		genpd_unlock(genpd);
2433 		pr_err("%s: unable to remove %s\n", __func__, dev_name(&genpd->dev));
2434 		return -EBUSY;
2435 	}
2436 
2437 	list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
2438 		list_del(&link->parent_node);
2439 		list_del(&link->child_node);
2440 		kfree(link);
2441 	}
2442 
2443 	list_del(&genpd->gpd_list_node);
2444 	genpd_unlock(genpd);
2445 	genpd_debug_remove(genpd);
2446 	cancel_work_sync(&genpd->power_off_work);
2447 	genpd_free_data(genpd);
2448 
2449 	pr_debug("%s: removed %s\n", __func__, dev_name(&genpd->dev));
2450 
2451 	return 0;
2452 }
2453 
2454 /**
2455  * pm_genpd_remove - Remove a generic I/O PM domain
2456  * @genpd: Pointer to PM domain that is to be removed.
2457  *
2458  * To remove the PM domain, this function:
2459  *  - Removes the PM domain as a subdomain to any parent domains,
2460  *    if it was added.
2461  *  - Removes the PM domain from the list of registered PM domains.
2462  *
2463  * The PM domain will only be removed if the associated provider has
2464  * been removed, it is not a parent to any other PM domain, and it has
2465  * no devices associated with it.
2466  */
2467 int pm_genpd_remove(struct generic_pm_domain *genpd)
2468 {
2469 	int ret;
2470 
2471 	mutex_lock(&gpd_list_lock);
2472 	ret = genpd_remove(genpd);
2473 	mutex_unlock(&gpd_list_lock);
2474 
2475 	return ret;
2476 }
2477 EXPORT_SYMBOL_GPL(pm_genpd_remove);
2478 
2479 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
2480 
2481 /*
2482  * Device Tree based PM domain providers.
2483  *
2484  * The code below implements generic device tree based PM domain providers that
2485  * bind device tree nodes with generic PM domains registered in the system.
2486  *
2487  * Any driver that registers generic PM domains and needs to support binding of
2488  * devices to these domains is supposed to register a PM domain provider, which
2489  * maps a PM domain specifier retrieved from the device tree to a PM domain.
2490  *
2491  * Two simple mapping functions have been provided for convenience:
2492  *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
2493  *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
2494  *    index.
2495  */
2496 
2497 /**
2498  * struct of_genpd_provider - PM domain provider registration structure
2499  * @link: Entry in global list of PM domain providers
2500  * @node: Pointer to device tree node of PM domain provider
2501  * @xlate: Provider-specific xlate callback mapping a set of specifier cells
2502  *         into a PM domain.
2503  * @data: context pointer to be passed into @xlate callback
2504  */
2505 struct of_genpd_provider {
2506 	struct list_head link;
2507 	struct device_node *node;
2508 	genpd_xlate_t xlate;
2509 	void *data;
2510 };
2511 
2512 /* List of registered PM domain providers. */
2513 static LIST_HEAD(of_genpd_providers);
2514 /* Mutex to protect the list above. */
2515 static DEFINE_MUTEX(of_genpd_mutex);
2516 /* Used to prevent registering devices before the bus. */
2517 static bool genpd_bus_registered;
2518 
2519 /**
2520  * genpd_xlate_simple() - Xlate function for direct node-domain mapping
2521  * @genpdspec: OF phandle args to map into a PM domain
2522  * @data: xlate function private data - pointer to struct generic_pm_domain
2523  *
2524  * This is a generic xlate function that can be used to model PM domains that
2525  * have their own device tree nodes. The private data of the xlate function
2526  * needs to be a valid pointer to struct generic_pm_domain.
2527  */
2528 static struct generic_pm_domain *genpd_xlate_simple(
2529 					const struct of_phandle_args *genpdspec,
2530 					void *data)
2531 {
2532 	return data;
2533 }
2534 
2535 /**
2536  * genpd_xlate_onecell() - Xlate function using a single index.
2537  * @genpdspec: OF phandle args to map into a PM domain
2538  * @data: xlate function private data - pointer to struct genpd_onecell_data
2539  *
2540  * This is a generic xlate function that can be used to model simple PM domain
2541  * controllers that have one device tree node and provide multiple PM domains.
2542  * A single cell is used as an index into an array of PM domains specified in
2543  * the genpd_onecell_data struct when registering the provider.
2544  */
2545 static struct generic_pm_domain *genpd_xlate_onecell(
2546 					const struct of_phandle_args *genpdspec,
2547 					void *data)
2548 {
2549 	struct genpd_onecell_data *genpd_data = data;
2550 	unsigned int idx = genpdspec->args[0];
2551 
2552 	if (genpdspec->args_count != 1)
2553 		return ERR_PTR(-EINVAL);
2554 
2555 	if (idx >= genpd_data->num_domains) {
2556 		pr_err("%s: invalid domain index %u\n", __func__, idx);
2557 		return ERR_PTR(-EINVAL);
2558 	}
2559 
2560 	if (!genpd_data->domains[idx])
2561 		return ERR_PTR(-ENOENT);
2562 
2563 	return genpd_data->domains[idx];
2564 }
2565 
2566 /**
2567  * genpd_add_provider() - Register a PM domain provider for a node
2568  * @np: Device node pointer associated with the PM domain provider.
2569  * @xlate: Callback for decoding PM domain from phandle arguments.
2570  * @data: Context pointer for @xlate callback.
2571  */
2572 static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
2573 			      void *data)
2574 {
2575 	struct of_genpd_provider *cp;
2576 
2577 	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2578 	if (!cp)
2579 		return -ENOMEM;
2580 
2581 	cp->node = of_node_get(np);
2582 	cp->data = data;
2583 	cp->xlate = xlate;
2584 	fwnode_dev_initialized(of_fwnode_handle(np), true);
2585 
2586 	mutex_lock(&of_genpd_mutex);
2587 	list_add(&cp->link, &of_genpd_providers);
2588 	mutex_unlock(&of_genpd_mutex);
2589 	pr_debug("Added domain provider from %pOF\n", np);
2590 
2591 	return 0;
2592 }
2593 
2594 static bool genpd_present(const struct generic_pm_domain *genpd)
2595 {
2596 	bool ret = false;
2597 	const struct generic_pm_domain *gpd;
2598 
2599 	mutex_lock(&gpd_list_lock);
2600 	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2601 		if (gpd == genpd) {
2602 			ret = true;
2603 			break;
2604 		}
2605 	}
2606 	mutex_unlock(&gpd_list_lock);
2607 
2608 	return ret;
2609 }
2610 
2611 static void genpd_sync_state(struct device *dev)
2612 {
2613 	return of_genpd_sync_state(dev->of_node);
2614 }
2615 
2616 /**
2617  * of_genpd_add_provider_simple() - Register a simple PM domain provider
2618  * @np: Device node pointer associated with the PM domain provider.
2619  * @genpd: Pointer to PM domain associated with the PM domain provider.
2620  */
2621 int of_genpd_add_provider_simple(struct device_node *np,
2622 				 struct generic_pm_domain *genpd)
2623 {
2624 	struct fwnode_handle *fwnode;
2625 	struct device *dev;
2626 	int ret;
2627 
2628 	if (!np || !genpd)
2629 		return -EINVAL;
2630 
2631 	if (!genpd_bus_registered)
2632 		return -ENODEV;
2633 
2634 	if (!genpd_present(genpd))
2635 		return -EINVAL;
2636 
2637 	genpd->dev.of_node = np;
2638 
2639 	fwnode = of_fwnode_handle(np);
2640 	dev = get_dev_from_fwnode(fwnode);
2641 	if (!dev && !genpd_is_no_sync_state(genpd)) {
2642 		genpd->sync_state = GENPD_SYNC_STATE_SIMPLE;
2643 		device_set_node(&genpd->dev, fwnode);
2644 	} else {
2645 		dev_set_drv_sync_state(dev, genpd_sync_state);
2646 	}
2647 
2648 	put_device(dev);
2649 
2650 	ret = device_add(&genpd->dev);
2651 	if (ret)
2652 		return ret;
2653 
2654 	/* Parse genpd OPP table */
2655 	if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
2656 		ret = dev_pm_opp_of_add_table(&genpd->dev);
2657 		if (ret) {
2658 			dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n");
2659 			goto err_del;
2660 		}
2661 
2662 		/*
2663 		 * Save table for faster processing while setting performance
2664 		 * state.
2665 		 */
2666 		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2667 		WARN_ON(IS_ERR(genpd->opp_table));
2668 	}
2669 
2670 	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
2671 	if (ret)
2672 		goto err_opp;
2673 
2674 	genpd->provider = fwnode;
2675 	genpd->has_provider = true;
2676 
2677 	return 0;
2678 
2679 err_opp:
2680 	if (genpd->opp_table) {
2681 		dev_pm_opp_put_opp_table(genpd->opp_table);
2682 		dev_pm_opp_of_remove_table(&genpd->dev);
2683 	}
2684 err_del:
2685 	device_del(&genpd->dev);
2686 	return ret;
2687 }
2688 EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
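
/*
 * Example (illustrative sketch): a provider whose DT node describes a
 * single domain ("#power-domain-cells = <0>") could register itself as
 * below, using the hypothetical my_pd domain from its probe path:
 *
 *	ret = pm_genpd_init(&my_pd, NULL, false);
 *	if (ret)
 *		return ret;
 *
 *	ret = of_genpd_add_provider_simple(pdev->dev.of_node, &my_pd);
 *	if (ret)
 *		pm_genpd_remove(&my_pd);
 */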
2689 
2690 /**
2691  * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
2692  * @np: Device node pointer associated with the PM domain provider.
2693  * @data: Pointer to the data associated with the PM domain provider.
2694  */
2695 int of_genpd_add_provider_onecell(struct device_node *np,
2696 				  struct genpd_onecell_data *data)
2697 {
2698 	struct generic_pm_domain *genpd;
2699 	struct fwnode_handle *fwnode;
2700 	struct device *dev;
2701 	unsigned int i;
2702 	int ret = -EINVAL;
2703 	bool sync_state = false;
2704 
2705 	if (!np || !data)
2706 		return -EINVAL;
2707 
2708 	if (!genpd_bus_registered)
2709 		return -ENODEV;
2710 
2711 	if (!data->xlate)
2712 		data->xlate = genpd_xlate_onecell;
2713 
2714 	fwnode = of_fwnode_handle(np);
2715 	dev = get_dev_from_fwnode(fwnode);
2716 	if (!dev)
2717 		sync_state = true;
2718 	else
2719 		dev_set_drv_sync_state(dev, genpd_sync_state);
2720 
2721 	put_device(dev);
2722 
2723 	for (i = 0; i < data->num_domains; i++) {
2724 		genpd = data->domains[i];
2725 
2726 		if (!genpd)
2727 			continue;
2728 		if (!genpd_present(genpd))
2729 			goto error;
2730 
2731 		genpd->dev.of_node = np;
2732 
2733 		if (sync_state && !genpd_is_no_sync_state(genpd)) {
2734 			genpd->sync_state = GENPD_SYNC_STATE_ONECELL;
2735 			device_set_node(&genpd->dev, fwnode);
2736 			sync_state = false;
2737 		}
2738 
2739 		ret = device_add(&genpd->dev);
2740 		if (ret)
2741 			goto error;
2742 
2743 		/* Parse genpd OPP table */
2744 		if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
2745 			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
2746 			if (ret) {
2747 				dev_err_probe(&genpd->dev, ret,
2748 					      "Failed to add OPP table for index %d\n", i);
2749 				device_del(&genpd->dev);
2750 				goto error;
2751 			}
2752 
2753 			/*
2754 			 * Save table for faster processing while setting
2755 			 * performance state.
2756 			 */
2757 			genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2758 			WARN_ON(IS_ERR(genpd->opp_table));
2759 		}
2760 
2761 		genpd->provider = fwnode;
2762 		genpd->has_provider = true;
2763 	}
2764 
2765 	ret = genpd_add_provider(np, data->xlate, data);
2766 	if (ret < 0)
2767 		goto error;
2768 
2769 	return 0;
2770 
2771 error:
2772 	while (i--) {
2773 		genpd = data->domains[i];
2774 
2775 		if (!genpd)
2776 			continue;
2777 
2778 		genpd->provider = NULL;
2779 		genpd->has_provider = false;
2780 
2781 		if (genpd->opp_table) {
2782 			dev_pm_opp_put_opp_table(genpd->opp_table);
2783 			dev_pm_opp_of_remove_table(&genpd->dev);
2784 		}
2785 
2786 		device_del(&genpd->dev);
2787 	}
2788 
2789 	return ret;
2790 }
2791 EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
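
/*
 * Example (illustrative sketch): a controller providing several domains
 * indexed by a single cell ("#power-domain-cells = <1>"). The my_domains
 * array and MY_PD_COUNT are hypothetical; NULL entries are skipped, which
 * allows for sparse index spaces.
 *
 *	static struct generic_pm_domain *my_domains[MY_PD_COUNT];
 *
 *	static struct genpd_onecell_data my_onecell_data = {
 *		.domains = my_domains,
 *		.num_domains = ARRAY_SIZE(my_domains),
 *	};
 *
 *	ret = of_genpd_add_provider_onecell(pdev->dev.of_node,
 *					    &my_onecell_data);
 */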
2792 
2793 /**
2794  * of_genpd_del_provider() - Remove a previously registered PM domain provider
2795  * @np: Device node pointer associated with the PM domain provider
2796  */
2797 void of_genpd_del_provider(struct device_node *np)
2798 {
2799 	struct of_genpd_provider *cp, *tmp;
2800 	struct generic_pm_domain *gpd;
2801 
2802 	mutex_lock(&gpd_list_lock);
2803 	mutex_lock(&of_genpd_mutex);
2804 	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
2805 		if (cp->node == np) {
2806 			/*
2807 			 * For each PM domain associated with the
2808 			 * provider, set the 'has_provider' to false
2809 			 * so that the PM domain can be safely removed.
2810 			 */
2811 			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2812 				if (gpd->provider == of_fwnode_handle(np)) {
2813 					gpd->has_provider = false;
2814 
2815 					if (gpd->opp_table) {
2816 						dev_pm_opp_put_opp_table(gpd->opp_table);
2817 						dev_pm_opp_of_remove_table(&gpd->dev);
2818 					}
2819 
2820 					device_del(&gpd->dev);
2821 				}
2822 			}
2823 
2824 			fwnode_dev_initialized(of_fwnode_handle(cp->node), false);
2825 			list_del(&cp->link);
2826 			of_node_put(cp->node);
2827 			kfree(cp);
2828 			break;
2829 		}
2830 	}
2831 	mutex_unlock(&of_genpd_mutex);
2832 	mutex_unlock(&gpd_list_lock);
2833 }
2834 EXPORT_SYMBOL_GPL(of_genpd_del_provider);
2835 
2836 /**
2837  * genpd_get_from_provider() - Look-up PM domain
2838  * @genpdspec: OF phandle args to use for look-up
2839  *
2840  * Looks for a PM domain provider under the node specified by @genpdspec and if
2841  * found, uses the provider's xlate function to map the phandle args to a PM
2842  * domain.
2843  *
2844  * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2845  * on failure.
2846  */
2847 static struct generic_pm_domain *genpd_get_from_provider(
2848 					const struct of_phandle_args *genpdspec)
2849 {
2850 	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2851 	struct of_genpd_provider *provider;
2852 
2853 	if (!genpdspec)
2854 		return ERR_PTR(-EINVAL);
2855 
2856 	mutex_lock(&of_genpd_mutex);
2857 
2858 	/* Check if we have such a provider in our array */
2859 	list_for_each_entry(provider, &of_genpd_providers, link) {
2860 		if (provider->node == genpdspec->np)
2861 			genpd = provider->xlate(genpdspec, provider->data);
2862 		if (!IS_ERR(genpd))
2863 			break;
2864 	}
2865 
2866 	mutex_unlock(&of_genpd_mutex);
2867 
2868 	return genpd;
2869 }
2870 
2871 /**
2872  * of_genpd_add_device() - Add a device to an I/O PM domain
2873  * @genpdspec: OF phandle args to use for PM domain look-up
2874  * @dev: Device to be added.
2875  *
2876  * Looks up an I/O PM domain based upon the phandle args provided and adds
2877  * the device to the PM domain. Returns a negative error code on failure.
2878  */
2879 int of_genpd_add_device(const struct of_phandle_args *genpdspec, struct device *dev)
2880 {
2881 	struct generic_pm_domain *genpd;
2882 	int ret;
2883 
2884 	if (!dev)
2885 		return -EINVAL;
2886 
2887 	mutex_lock(&gpd_list_lock);
2888 
2889 	genpd = genpd_get_from_provider(genpdspec);
2890 	if (IS_ERR(genpd)) {
2891 		ret = PTR_ERR(genpd);
2892 		goto out;
2893 	}
2894 
2895 	ret = genpd_add_device(genpd, dev, dev);
2896 
2897 out:
2898 	mutex_unlock(&gpd_list_lock);
2899 
2900 	return ret;
2901 }
2902 EXPORT_SYMBOL_GPL(of_genpd_add_device);
2903 
2904 /**
2905  * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2906  * @parent_spec: OF phandle args to use for parent PM domain look-up
2907  * @subdomain_spec: OF phandle args to use for subdomain look-up
2908  *
2909  * Looks up a parent PM domain and subdomain based upon the phandle args
2910  * provided and adds the subdomain to the parent PM domain. Returns a
2911  * negative error code on failure.
2912  */
2913 int of_genpd_add_subdomain(const struct of_phandle_args *parent_spec,
2914 			   const struct of_phandle_args *subdomain_spec)
2915 {
2916 	struct generic_pm_domain *parent, *subdomain;
2917 	int ret;
2918 
2919 	mutex_lock(&gpd_list_lock);
2920 
2921 	parent = genpd_get_from_provider(parent_spec);
2922 	if (IS_ERR(parent)) {
2923 		ret = PTR_ERR(parent);
2924 		goto out;
2925 	}
2926 
2927 	subdomain = genpd_get_from_provider(subdomain_spec);
2928 	if (IS_ERR(subdomain)) {
2929 		ret = PTR_ERR(subdomain);
2930 		goto out;
2931 	}
2932 
2933 	ret = genpd_add_subdomain(parent, subdomain);
2934 
2935 out:
2936 	mutex_unlock(&gpd_list_lock);
2937 
2938 	return ret == -ENOENT ? -EPROBE_DEFER : ret;
2939 }
2940 EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
2941 
2942 /**
2943  * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
2944  * @parent_spec: OF phandle args to use for parent PM domain look-up
2945  * @subdomain_spec: OF phandle args to use for subdomain look-up
2946  *
2947  * Looks up a parent PM domain and subdomain based upon the phandle args
2948  * provided and removes the subdomain from the parent PM domain. Returns a
2949  * negative error code on failure.
2950  */
2951 int of_genpd_remove_subdomain(const struct of_phandle_args *parent_spec,
2952 			      const struct of_phandle_args *subdomain_spec)
2953 {
2954 	struct generic_pm_domain *parent, *subdomain;
2955 	int ret;
2956 
2957 	mutex_lock(&gpd_list_lock);
2958 
2959 	parent = genpd_get_from_provider(parent_spec);
2960 	if (IS_ERR(parent)) {
2961 		ret = PTR_ERR(parent);
2962 		goto out;
2963 	}
2964 
2965 	subdomain = genpd_get_from_provider(subdomain_spec);
2966 	if (IS_ERR(subdomain)) {
2967 		ret = PTR_ERR(subdomain);
2968 		goto out;
2969 	}
2970 
2971 	ret = pm_genpd_remove_subdomain(parent, subdomain);
2972 
2973 out:
2974 	mutex_unlock(&gpd_list_lock);
2975 
2976 	return ret;
2977 }
2978 EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
2979 
2980 /**
2981  * of_genpd_remove_last - Remove the last PM domain registered for a provider
2982  * @np: Pointer to device node associated with provider
2983  *
2984  * Find the last PM domain that was added by a particular provider and
2985  * remove this PM domain from the list of PM domains. The provider is
2986  * identified by the device node @np that is passed. The PM domain will
2987  * only be removed if the provider associated with the domain has been
2988  * removed.
2989  *
2990  * Returns a valid pointer to struct generic_pm_domain on success or
2991  * ERR_PTR() on failure.
2992  */
2993 struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
2994 {
2995 	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
2996 	int ret;
2997 
2998 	if (IS_ERR_OR_NULL(np))
2999 		return ERR_PTR(-EINVAL);
3000 
3001 	mutex_lock(&gpd_list_lock);
3002 	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
3003 		if (gpd->provider == of_fwnode_handle(np)) {
3004 			ret = genpd_remove(gpd);
3005 			genpd = ret ? ERR_PTR(ret) : gpd;
3006 			break;
3007 		}
3008 	}
3009 	mutex_unlock(&gpd_list_lock);
3010 
3011 	return genpd;
3012 }
3013 EXPORT_SYMBOL_GPL(of_genpd_remove_last);
3014 
3015 static void genpd_release_dev(struct device *dev)
3016 {
3017 	of_node_put(dev->of_node);
3018 	kfree(dev);
3019 }
3020 
3021 static const struct bus_type genpd_bus_type = {
3022 	.name		= "genpd",
3023 };
3024 
3025 /**
3026  * genpd_dev_pm_detach - Detach a device from its PM domain.
3027  * @dev: Device to detach.
3028  * @power_off: Currently not used
3029  *
3030  * Try to locate a corresponding generic PM domain, which the device was
3031  * attached to previously. If one is found, the device is detached from it.
3032  */
3033 static void genpd_dev_pm_detach(struct device *dev, bool power_off)
3034 {
3035 	struct generic_pm_domain *pd;
3036 	unsigned int i;
3037 	int ret = 0;
3038 
3039 	pd = dev_to_genpd(dev);
3040 	if (IS_ERR(pd))
3041 		return;
3042 
3043 	dev_dbg(dev, "removing from PM domain %s\n", pd->name);
3044 
3045 	/* Drop the default performance state */
3046 	if (dev_gpd_data(dev)->default_pstate) {
3047 		dev_pm_genpd_set_performance_state(dev, 0);
3048 		dev_gpd_data(dev)->default_pstate = 0;
3049 	}
3050 
3051 	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
3052 		ret = genpd_remove_device(pd, dev);
3053 		if (ret != -EAGAIN)
3054 			break;
3055 
3056 		mdelay(i);
3057 		cond_resched();
3058 	}
3059 
3060 	if (ret < 0) {
3061 		dev_err(dev, "failed to remove from PM domain %s: %d\n",
3062 			pd->name, ret);
3063 		return;
3064 	}
3065 
3066 	/* Check if PM domain can be powered off after removing this device. */
3067 	genpd_queue_power_off_work(pd);
3068 
3069 	/* Unregister the device if it was created by genpd. */
3070 	if (dev->bus == &genpd_bus_type)
3071 		device_unregister(dev);
3072 }
3073 
3074 static void genpd_dev_pm_sync(struct device *dev)
3075 {
3076 	struct generic_pm_domain *pd;
3077 
3078 	pd = dev_to_genpd(dev);
3079 	if (IS_ERR(pd))
3080 		return;
3081 
3082 	genpd_queue_power_off_work(pd);
3083 }
3084 
3085 static int genpd_set_required_opp_dev(struct device *dev,
3086 				      struct device *base_dev)
3087 {
3088 	struct dev_pm_opp_config config = {
3089 		.required_dev = dev,
3090 	};
3091 	int ret;
3092 
3093 	/* Limit support to non-providers for now. */
3094 	if (of_property_present(base_dev->of_node, "#power-domain-cells"))
3095 		return 0;
3096 
3097 	if (!dev_pm_opp_of_has_required_opp(base_dev))
3098 		return 0;
3099 
3100 	ret = dev_pm_opp_set_config(base_dev, &config);
3101 	if (ret < 0)
3102 		return ret;
3103 
3104 	dev_gpd_data(dev)->opp_token = ret;
3105 	return 0;
3106 }
3107 
3108 static int genpd_set_required_opp(struct device *dev, unsigned int index)
3109 {
3110 	int ret, pstate;
3111 
3112 	/* Set the default performance state */
3113 	pstate = of_get_required_opp_performance_state(dev->of_node, index);
3114 	if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) {
3115 		ret = pstate;
3116 		goto err;
3117 	} else if (pstate > 0) {
3118 		ret = dev_pm_genpd_set_performance_state(dev, pstate);
3119 		if (ret)
3120 			goto err;
3121 		dev_gpd_data(dev)->default_pstate = pstate;
3122 	}
3123 
3124 	return 0;
3125 err:
3126 	dev_err(dev, "failed to set required performance state for power-domain %s: %d\n",
3127 		dev_to_genpd(dev)->name, ret);
3128 	return ret;
3129 }
3130 
3131 static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
3132 				 unsigned int index, unsigned int num_domains,
3133 				 bool power_on)
3134 {
3135 	struct of_phandle_args pd_args;
3136 	struct generic_pm_domain *pd;
3137 	int ret;
3138 
3139 	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
3140 				"#power-domain-cells", index, &pd_args);
3141 	if (ret < 0)
3142 		return ret;
3143 
3144 	mutex_lock(&gpd_list_lock);
3145 	pd = genpd_get_from_provider(&pd_args);
3146 	of_node_put(pd_args.np);
3147 	if (IS_ERR(pd)) {
3148 		mutex_unlock(&gpd_list_lock);
3149 		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
3150 			__func__, PTR_ERR(pd));
3151 		return driver_deferred_probe_check_state(base_dev);
3152 	}
3153 
3154 	dev_dbg(dev, "adding to PM domain %s\n", pd->name);
3155 
3156 	ret = genpd_add_device(pd, dev, base_dev);
3157 	mutex_unlock(&gpd_list_lock);
3158 
3159 	if (ret < 0)
3160 		return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name);
3161 
3162 	dev->pm_domain->detach = genpd_dev_pm_detach;
3163 	dev->pm_domain->sync = genpd_dev_pm_sync;
3164 
3165 	/*
3166 	 * For a single PM domain the index of the required OPP must be zero, so
3167 	 * let's try to assign a required dev in that case. In the multiple PM
3168 	 * domains case, we need platform code to specify the index.
3169 	 */
3170 	if (num_domains == 1) {
3171 		ret = genpd_set_required_opp_dev(dev, base_dev);
3172 		if (ret)
3173 			goto err;
3174 	}
3175 
3176 	ret = genpd_set_required_opp(dev, index);
3177 	if (ret)
3178 		goto err;
3179 
3180 	if (power_on) {
3181 		genpd_lock(pd);
3182 		ret = genpd_power_on(pd, 0);
3183 		genpd_unlock(pd);
3184 	}
3185 
3186 	if (ret) {
3187 		/* Drop the default performance state */
3188 		if (dev_gpd_data(dev)->default_pstate) {
3189 			dev_pm_genpd_set_performance_state(dev, 0);
3190 			dev_gpd_data(dev)->default_pstate = 0;
3191 		}
3192 
3193 		genpd_remove_device(pd, dev);
3194 		return -EPROBE_DEFER;
3195 	}
3196 
3197 	return 1;
3198 
3199 err:
3200 	genpd_remove_device(pd, dev);
3201 	return ret;
3202 }
3203 
3204 /**
3205  * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
3206  * @dev: Device to attach.
3207  *
3208  * Parses the device's OF node to find a PM domain specifier. If one is found,
3209  * the device is attached to the retrieved pm_domain ops.
3210  *
3211  * Returns 1 on a successfully attached PM domain, 0 when the device doesn't
3212  * need a PM domain or when multiple power-domains exist for it, else a negative
3213  * error code. Note that if a power-domain exists for the device, but it cannot
3214  * be found or turned on, -EPROBE_DEFER is returned to ensure that the device is
3215  * not probed and that the attempt is retried later.
3216  */
3217 int genpd_dev_pm_attach(struct device *dev)
3218 {
3219 	if (!dev->of_node)
3220 		return 0;
3221 
3222 	/*
3223 	 * Devices with multiple PM domains must be attached separately, as we
3224 	 * can only attach one PM domain per device.
3225 	 */
3226 	if (of_count_phandle_with_args(dev->of_node, "power-domains",
3227 				       "#power-domain-cells") != 1)
3228 		return 0;
3229 
3230 	return __genpd_dev_pm_attach(dev, dev, 0, 1, true);
3231 }
3232 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
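
/*
 * Example (illustrative sketch): bus code could try to attach a device to
 * its DT-described PM domain during probe. A return value of 0 means there
 * is no genpd to attach, which is not an error:
 *
 *	ret = genpd_dev_pm_attach(dev);
 *	if (ret < 0)
 *		return ret;
 */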
3233 
3234 /**
3235  * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
3236  * @dev: The device used to lookup the PM domain.
3237  * @index: The index of the PM domain.
3238  *
3239  * Parses the device's OF node to find a PM domain specifier at the provided
3240  * @index. If one is found, a virtual device is created and attached to the
3241  * retrieved pm_domain ops. To handle detaching of the virtual device, the
3242  * ->detach() callback in struct dev_pm_domain is assigned genpd_dev_pm_detach().
3243  *
3244  * Returns the created virtual device on a successfully attached PM domain, NULL
3245  * when the device doesn't need a PM domain, else an ERR_PTR() in case of
3246  * failure. If a power-domain exists for the device, but cannot be found or
3247  * turned on, ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device is
3248  * not probed and that the attempt is retried later.
3249  */
3250 struct device *genpd_dev_pm_attach_by_id(struct device *dev,
3251 					 unsigned int index)
3252 {
3253 	struct device *virt_dev;
3254 	int num_domains;
3255 	int ret;
3256 
3257 	if (!dev->of_node)
3258 		return NULL;
3259 
3260 	/* Verify that the index is within a valid range. */
3261 	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
3262 						 "#power-domain-cells");
3263 	if (num_domains < 0 || index >= num_domains)
3264 		return NULL;
3265 
3266 	if (!genpd_bus_registered)
3267 		return ERR_PTR(-ENODEV);
3268 
3269 	/* Allocate and register device on the genpd bus. */
3270 	virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
3271 	if (!virt_dev)
3272 		return ERR_PTR(-ENOMEM);
3273 
3274 	dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
3275 	virt_dev->bus = &genpd_bus_type;
3276 	virt_dev->release = genpd_release_dev;
3277 	virt_dev->of_node = of_node_get(dev->of_node);
3278 
3279 	ret = device_register(virt_dev);
3280 	if (ret) {
3281 		put_device(virt_dev);
3282 		return ERR_PTR(ret);
3283 	}
3284 
3285 	/* Try to attach the device to the PM domain at the specified index. */
3286 	ret = __genpd_dev_pm_attach(virt_dev, dev, index, num_domains, false);
3287 	if (ret < 1) {
3288 		device_unregister(virt_dev);
3289 		return ret ? ERR_PTR(ret) : NULL;
3290 	}
3291 
3292 	pm_runtime_enable(virt_dev);
3293 	genpd_queue_power_off_work(dev_to_genpd(virt_dev));
3294 
3295 	return virt_dev;
3296 }
3297 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
3298 
3299 /**
3300  * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
3301  * @dev: The device used to lookup the PM domain.
3302  * @name: The name of the PM domain.
3303  *
3304  * Parse device's OF node to find a PM domain specifier using the
3305  * power-domain-names DT property. For further description see
3306  * genpd_dev_pm_attach_by_id().
3307  */
3308 struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
3309 {
3310 	int index;
3311 
3312 	if (!dev->of_node)
3313 		return NULL;
3314 
3315 	index = of_property_match_string(dev->of_node, "power-domain-names",
3316 					 name);
3317 	if (index < 0)
3318 		return NULL;
3319 
3320 	return genpd_dev_pm_attach_by_id(dev, index);
3321 }
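
/*
 * Example (illustrative sketch): a driver for a device with multiple
 * power-domains entries, one of them hypothetically named "perf" in
 * power-domain-names, could attach to it by name and detach again via the
 * returned virtual device:
 *
 *	virt_dev = genpd_dev_pm_attach_by_name(dev, "perf");
 *	if (IS_ERR(virt_dev))
 *		return PTR_ERR(virt_dev);
 *	...
 *	if (virt_dev)
 *		dev_pm_domain_detach(virt_dev, true);
 */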
3322 
3323 static const struct of_device_id idle_state_match[] = {
3324 	{ .compatible = "domain-idle-state", },
3325 	{ }
3326 };
3327 
3328 static int genpd_parse_state(struct genpd_power_state *genpd_state,
3329 				    struct device_node *state_node)
3330 {
3331 	int err;
3332 	u32 residency;
3333 	u32 entry_latency, exit_latency;
3334 
3335 	err = of_property_read_u32(state_node, "entry-latency-us",
3336 						&entry_latency);
3337 	if (err) {
3338 		pr_debug(" * %pOF missing entry-latency-us property\n",
3339 			 state_node);
3340 		return -EINVAL;
3341 	}
3342 
3343 	err = of_property_read_u32(state_node, "exit-latency-us",
3344 						&exit_latency);
3345 	if (err) {
3346 		pr_debug(" * %pOF missing exit-latency-us property\n",
3347 			 state_node);
3348 		return -EINVAL;
3349 	}
3350 
3351 	err = of_property_read_u32(state_node, "min-residency-us", &residency);
3352 	if (!err)
3353 		genpd_state->residency_ns = 1000LL * residency;
3354 
3355 	of_property_read_string(state_node, "idle-state-name", &genpd_state->name);
3356 
3357 	genpd_state->power_on_latency_ns = 1000LL * exit_latency;
3358 	genpd_state->power_off_latency_ns = 1000LL * entry_latency;
3359 	genpd_state->fwnode = of_fwnode_handle(state_node);
3360 
3361 	return 0;
3362 }
3363 
3364 static int genpd_iterate_idle_states(struct device_node *dn,
3365 				     struct genpd_power_state *states)
3366 {
3367 	int ret;
3368 	struct of_phandle_iterator it;
3369 	struct device_node *np;
3370 	int i = 0;
3371 
3372 	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
3373 	if (ret <= 0)
3374 		return ret == -ENOENT ? 0 : ret;
3375 
3376 	/* Loop over the phandles until all the requested entries are found */
3377 	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
3378 		np = it.node;
3379 		if (!of_match_node(idle_state_match, np))
3380 			continue;
3381 
3382 		if (!of_device_is_available(np))
3383 			continue;
3384 
3385 		if (states) {
3386 			ret = genpd_parse_state(&states[i], np);
3387 			if (ret) {
3388 				pr_err("Parsing idle state node %pOF failed with err %d\n",
3389 				       np, ret);
3390 				of_node_put(np);
3391 				return ret;
3392 			}
3393 		}
3394 		i++;
3395 	}
3396 
3397 	return i;
3398 }
3399 
3400 /**
3401  * of_genpd_parse_idle_states: Return array of idle states for the genpd.
3402  *
3403  * @dn: The genpd device node
3404  * @states: The pointer to which the state array will be saved.
3405  * @n: The count of elements in the array returned from this function.
3406  *
3407  * Returns the device states parsed from the OF node. The memory for the states
3408  * is allocated by this function and it is the caller's responsibility to free
3409  * it after use. Returns 0 if any number (including zero) of compatible domain
3410  * idle states is found; in case of errors, a negative error code is returned.
3411  */
3412 int of_genpd_parse_idle_states(struct device_node *dn,
3413 			struct genpd_power_state **states, int *n)
3414 {
3415 	struct genpd_power_state *st;
3416 	int ret;
3417 
3418 	ret = genpd_iterate_idle_states(dn, NULL);
3419 	if (ret < 0)
3420 		return ret;
3421 
3422 	if (!ret) {
3423 		*states = NULL;
3424 		*n = 0;
3425 		return 0;
3426 	}
3427 
3428 	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
3429 	if (!st)
3430 		return -ENOMEM;
3431 
3432 	ret = genpd_iterate_idle_states(dn, st);
3433 	if (ret <= 0) {
3434 		kfree(st);
3435 		return ret < 0 ? ret : -EINVAL;
3436 	}
3437 
3438 	*states = st;
3439 	*n = ret;
3440 
3441 	return 0;
3442 }
3443 EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
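
/*
 * Example (illustrative sketch): a provider could fetch its domain idle
 * states from DT and hand them to the hypothetical my_pd domain before
 * calling pm_genpd_init():
 *
 *	struct genpd_power_state *states;
 *	int nr_states;
 *
 *	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
 *	if (ret)
 *		return ret;
 *
 *	my_pd.states = states;
 *	my_pd.state_count = nr_states;
 */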
3444 
3445 /**
3446  * of_genpd_sync_state() - A common sync_state function for genpd providers
3447  * @np: The device node the genpd provider is associated with.
3448  *
3449  * The @np that corresponds to a genpd provider may provide one or multiple
3450  * genpds. This function makes use of @np to find the genpds that belong to the
3451  * provider. For each genpd, a power-off is attempted.
3452  */
3453 void of_genpd_sync_state(struct device_node *np)
3454 {
3455 	struct generic_pm_domain *genpd;
3456 
3457 	if (!np)
3458 		return;
3459 
3460 	mutex_lock(&gpd_list_lock);
3461 	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
3462 		if (genpd->provider == of_fwnode_handle(np)) {
3463 			genpd_lock(genpd);
3464 			genpd->stay_on = false;
3465 			genpd_power_off(genpd, false, 0);
3466 			genpd_unlock(genpd);
3467 		}
3468 	}
3469 	mutex_unlock(&gpd_list_lock);
3470 }
3471 EXPORT_SYMBOL_GPL(of_genpd_sync_state);
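
/*
 * Example (illustrative sketch): a provider driver that handles
 * sync_state itself could point its driver's ->sync_state() callback at a
 * trivial wrapper:
 *
 *	static void my_provider_sync_state(struct device *dev)
 *	{
 *		of_genpd_sync_state(dev->of_node);
 *	}
 */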
3472 
3473 static int genpd_provider_probe(struct device *dev)
3474 {
3475 	return 0;
3476 }
3477 
3478 static void genpd_provider_sync_state(struct device *dev)
3479 {
3480 	struct generic_pm_domain *genpd = container_of(dev, struct generic_pm_domain, dev);
3481 
3482 	switch (genpd->sync_state) {
3483 	case GENPD_SYNC_STATE_OFF:
3484 		break;
3485 
3486 	case GENPD_SYNC_STATE_ONECELL:
3487 		of_genpd_sync_state(dev->of_node);
3488 		break;
3489 
3490 	case GENPD_SYNC_STATE_SIMPLE:
3491 		genpd_lock(genpd);
3492 		genpd->stay_on = false;
3493 		genpd_power_off(genpd, false, 0);
3494 		genpd_unlock(genpd);
3495 		break;
3496 
3497 	default:
3498 		break;
3499 	}
3500 }
3501 
3502 static struct device_driver genpd_provider_drv = {
3503 	.name = "genpd_provider",
3504 	.bus = &genpd_provider_bus_type,
3505 	.probe = genpd_provider_probe,
3506 	.sync_state = genpd_provider_sync_state,
3507 	.suppress_bind_attrs = true,
3508 };
3509 
3510 static int __init genpd_bus_init(void)
3511 {
3512 	int ret;
3513 
3514 	ret = device_register(&genpd_provider_bus);
3515 	if (ret) {
3516 		put_device(&genpd_provider_bus);
3517 		return ret;
3518 	}
3519 
3520 	ret = bus_register(&genpd_provider_bus_type);
3521 	if (ret)
3522 		goto err_dev;
3523 
3524 	ret = bus_register(&genpd_bus_type);
3525 	if (ret)
3526 		goto err_prov_bus;
3527 
3528 	ret = driver_register(&genpd_provider_drv);
3529 	if (ret)
3530 		goto err_bus;
3531 
3532 	genpd_bus_registered = true;
3533 	return 0;
3534 
3535 err_bus:
3536 	bus_unregister(&genpd_bus_type);
3537 err_prov_bus:
3538 	bus_unregister(&genpd_provider_bus_type);
3539 err_dev:
3540 	device_unregister(&genpd_provider_bus);
3541 	return ret;
3542 }
3543 core_initcall(genpd_bus_init);
3544 
3545 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
3546 
3547 
3548 /***        debugfs support        ***/
3549 
3550 #ifdef CONFIG_DEBUG_FS
3551 /*
3552  * TODO: This function is a slightly modified version of rtpm_status_show
3553  * from sysfs.c, so generalize it.
3554  */
3555 static void rtpm_status_str(struct seq_file *s, struct device *dev)
3556 {
3557 	static const char * const status_lookup[] = {
3558 		[RPM_ACTIVE] = "active",
3559 		[RPM_RESUMING] = "resuming",
3560 		[RPM_SUSPENDED] = "suspended",
3561 		[RPM_SUSPENDING] = "suspending"
3562 	};
3563 	const char *p = "";
3564 
3565 	if (dev->power.runtime_error)
3566 		p = "error";
3567 	else if (dev->power.disable_depth)
3568 		p = "unsupported";
3569 	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
3570 		p = status_lookup[dev->power.runtime_status];
3571 	else
3572 		WARN_ON(1);
3573 
3574 	seq_printf(s, "%-26s  ", p);
3575 }
3576 
3577 static void perf_status_str(struct seq_file *s, struct device *dev)
3578 {
3579 	struct generic_pm_domain_data *gpd_data;
3580 
3581 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
3582 
3583 	seq_printf(s, "%-10u  ", gpd_data->performance_state);
3584 }
3585 
3586 static void mode_status_str(struct seq_file *s, struct device *dev)
3587 {
3588 	struct generic_pm_domain_data *gpd_data;
3589 
3590 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
3591 
3592 	seq_printf(s, "%2s", gpd_data->hw_mode ? "HW" : "SW");
3593 }
3594 
3595 static int genpd_summary_one(struct seq_file *s,
3596 			struct generic_pm_domain *genpd)
3597 {
3598 	static const char * const status_lookup[] = {
3599 		[GENPD_STATE_ON] = "on",
3600 		[GENPD_STATE_OFF] = "off"
3601 	};
3602 	struct pm_domain_data *pm_data;
3603 	struct gpd_link *link;
3604 	char state[16];
3605 	int ret;
3606 
3607 	ret = genpd_lock_interruptible(genpd);
3608 	if (ret)
3609 		return -ERESTARTSYS;
3610 
3611 	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
3612 		goto exit;
3613 	if (!genpd_status_on(genpd))
3614 		snprintf(state, sizeof(state), "%s-%u",
3615 			 status_lookup[genpd->status], genpd->state_idx);
3616 	else
3617 		snprintf(state, sizeof(state), "%s",
3618 			 status_lookup[genpd->status]);
3619 	seq_printf(s, "%-30s  %-30s  %u", dev_name(&genpd->dev), state, genpd->performance_state);
3620 
3621 	/*
3622 	 * Modifications on the list require holding locks on both
3623 	 * parent and child, so we are safe.
3624 	 * Also the device name is immutable.
3625 	 */
3626 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
3627 		if (list_is_first(&link->parent_node, &genpd->parent_links))
3628 			seq_printf(s, "\n%48s", " ");
3629 		seq_printf(s, "%s", link->child->name);
3630 		if (!list_is_last(&link->parent_node, &genpd->parent_links))
3631 			seq_puts(s, ", ");
3632 	}
3633 
3634 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3635 		seq_printf(s, "\n    %-30s  ", dev_name(pm_data->dev));
3636 		rtpm_status_str(s, pm_data->dev);
3637 		perf_status_str(s, pm_data->dev);
3638 		mode_status_str(s, pm_data->dev);
3639 	}
3640 
3641 	seq_puts(s, "\n");
3642 exit:
3643 	genpd_unlock(genpd);
3644 
3645 	return 0;
3646 }
3647 
3648 static int summary_show(struct seq_file *s, void *data)
3649 {
3650 	struct generic_pm_domain *genpd;
3651 	int ret = 0;
3652 
3653 	seq_puts(s, "domain                          status          children        performance\n");
3654 	seq_puts(s, "    /device                         runtime status                  managed by\n");
3655 	seq_puts(s, "------------------------------------------------------------------------------\n");
3656 
3657 	ret = mutex_lock_interruptible(&gpd_list_lock);
3658 	if (ret)
3659 		return -ERESTARTSYS;
3660 
3661 	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
3662 		ret = genpd_summary_one(s, genpd);
3663 		if (ret)
3664 			break;
3665 	}
3666 	mutex_unlock(&gpd_list_lock);
3667 
3668 	return ret;
3669 }
3670 
3671 static int status_show(struct seq_file *s, void *data)
3672 {
3673 	static const char * const status_lookup[] = {
3674 		[GENPD_STATE_ON] = "on",
3675 		[GENPD_STATE_OFF] = "off"
3676 	};
3677 
3678 	struct generic_pm_domain *genpd = s->private;
3679 	int ret = 0;
3680 
3681 	ret = genpd_lock_interruptible(genpd);
3682 	if (ret)
3683 		return -ERESTARTSYS;
3684 
3685 	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
3686 		goto exit;
3687 
3688 	if (genpd->status == GENPD_STATE_OFF)
3689 		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
3690 			genpd->state_idx);
3691 	else
3692 		seq_printf(s, "%s\n", status_lookup[genpd->status]);
3693 exit:
3694 	genpd_unlock(genpd);
3695 	return ret;
3696 }
3697 
3698 static int sub_domains_show(struct seq_file *s, void *data)
3699 {
3700 	struct generic_pm_domain *genpd = s->private;
3701 	struct gpd_link *link;
3702 	int ret = 0;
3703 
3704 	ret = genpd_lock_interruptible(genpd);
3705 	if (ret)
3706 		return -ERESTARTSYS;
3707 
3708 	list_for_each_entry(link, &genpd->parent_links, parent_node)
3709 		seq_printf(s, "%s\n", link->child->name);
3710 
3711 	genpd_unlock(genpd);
3712 	return ret;
3713 }
3714 
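/*
 * Per-domain "idle_states" file: one row per entry in genpd->states[],
 * showing that state's accumulated residency (converted from ns to ms via
 * do_div()) next to its usage/rejected/above/below counters. When the
 * domain is currently off in state i, the time elapsed since the last
 * accounting update is added, so the row includes the residency still in
 * progress. Unnamed states fall back to an "S<index>" label.
 *
 * Illustrative output (made-up values):
 *
 *	State          Time Spent(ms) Usage      Rejected   Above      Below
 *	S0             12840          42         3          1          0
 */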
3715 static int idle_states_show(struct seq_file *s, void *data)
3716 {
3717 	struct generic_pm_domain *genpd = s->private;
3718 	u64 now, delta, idle_time;
3719 	unsigned int i;
3720 	int ret = 0;
3721 
3722 	ret = genpd_lock_interruptible(genpd);
3723 	if (ret)
3724 		return -ERESTARTSYS;
3725 
3726 	seq_puts(s, "State          Time Spent(ms) Usage      Rejected   Above      Below\n");
3727 
3728 	for (i = 0; i < genpd->state_count; i++) {
3729 		struct genpd_power_state *state = &genpd->states[i];
3730 		char state_name[15];
3731 
3732 		idle_time = state->idle_time;
3733 
3734 		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3735 			now = ktime_get_mono_fast_ns();
3736 			if (now > genpd->accounting_time) {
3737 				delta = now - genpd->accounting_time;
3738 				idle_time += delta;
3739 			}
3740 		}
3741 
3742 		if (!state->name)
3743 		snprintf(state_name, ARRAY_SIZE(state_name), "S%-13u", i);
3744 
3745 		do_div(idle_time, NSEC_PER_MSEC);
3746 		seq_printf(s, "%-14s %-14llu %-10llu %-10llu %-10llu %llu\n",
3747 			   state->name ?: state_name, idle_time,
3748 			   state->usage, state->rejected, state->above,
3749 			   state->below);
3750 	}
3751 
3752 	genpd_unlock(genpd);
3753 	return ret;
3754 }
3755 
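/*
 * Per-domain "active_time" file: the total time the domain has spent
 * powered on, printed in ms (e.g. "1234 ms", value made up). An on-going
 * on-period is included via the delta since the last accounting update.
 */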
3756 static int active_time_show(struct seq_file *s, void *data)
3757 {
3758 	struct generic_pm_domain *genpd = s->private;
3759 	u64 now, on_time, delta = 0;
3760 	int ret = 0;
3761 
3762 	ret = genpd_lock_interruptible(genpd);
3763 	if (ret)
3764 		return -ERESTARTSYS;
3765 
3766 	if (genpd->status == GENPD_STATE_ON) {
3767 		now = ktime_get_mono_fast_ns();
3768 		if (now > genpd->accounting_time)
3769 			delta = now - genpd->accounting_time;
3770 	}
3771 
3772 	on_time = genpd->on_time + delta;
3773 	do_div(on_time, NSEC_PER_MSEC);
3774 	seq_printf(s, "%llu ms\n", on_time);
3775 
3776 	genpd_unlock(genpd);
3777 	return ret;
3778 }
3779 
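/*
 * Per-domain "total_idle_time" file: the residencies of all idle states
 * summed up and printed in ms, again including any residency still in
 * progress; the off-time counterpart of "active_time" above.
 */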
3780 static int total_idle_time_show(struct seq_file *s, void *data)
3781 {
3782 	struct generic_pm_domain *genpd = s->private;
3783 	u64 now, delta, total = 0;
3784 	unsigned int i;
3785 	int ret = 0;
3786 
3787 	ret = genpd_lock_interruptible(genpd);
3788 	if (ret)
3789 		return -ERESTARTSYS;
3790 
3791 	for (i = 0; i < genpd->state_count; i++) {
3792 		total += genpd->states[i].idle_time;
3793 
3794 		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3795 			now = ktime_get_mono_fast_ns();
3796 			if (now > genpd->accounting_time) {
3797 				delta = now - genpd->accounting_time;
3798 				total += delta;
3799 			}
3800 		}
3801 	}
3802 
3803 	do_div(total, NSEC_PER_MSEC);
3804 	seq_printf(s, "%llu ms\n", total);
3805 
3806 	genpd_unlock(genpd);
3807 	return ret;
3808 }
3809 
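/*
 * Per-domain "devices" file: one line per device attached to this domain,
 * printed as its dev_name().
 */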
3811 static int devices_show(struct seq_file *s, void *data)
3812 {
3813 	struct generic_pm_domain *genpd = s->private;
3814 	struct pm_domain_data *pm_data;
3815 	int ret = 0;
3816 
3817 	ret = genpd_lock_interruptible(genpd);
3818 	if (ret)
3819 		return -ERESTARTSYS;
3820 
3821 	list_for_each_entry(pm_data, &genpd->dev_list, list_node)
3822 		seq_printf(s, "%s\n", dev_name(pm_data->dev));
3823 
3824 	genpd_unlock(genpd);
3825 	return ret;
3826 }
3827 
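/*
 * Per-domain "perf_state" file: the domain's currently aggregated
 * performance state. genpd_debug_add() below only creates this file for
 * domains implementing ->set_performance_state().
 */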
3828 static int perf_state_show(struct seq_file *s, void *data)
3829 {
3830 	struct generic_pm_domain *genpd = s->private;
3831 
3832 	if (genpd_lock_interruptible(genpd))
3833 		return -ERESTARTSYS;
3834 
3835 	seq_printf(s, "%u\n", genpd->performance_state);
3836 
3837 	genpd_unlock(genpd);
3838 	return 0;
3839 }
3840 
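/*
 * DEFINE_SHOW_ATTRIBUTE(name), from <linux/seq_file.h>, generates the
 * name_open()/name_fops boilerplate around each name_show() above. The
 * standard expansion is roughly:
 *
 *	static int summary_open(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, summary_show, inode->i_private);
 *	}
 *
 *	static const struct file_operations summary_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= summary_open,
 *		.read		= seq_read,
 *		.llseek		= seq_lseek,
 *		.release	= single_release,
 *	};
 *
 * inode->i_private is the genpd passed to debugfs_create_file() below and
 * is what single_open() hands to the show() methods as s->private.
 */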
3841 DEFINE_SHOW_ATTRIBUTE(summary);
3842 DEFINE_SHOW_ATTRIBUTE(status);
3843 DEFINE_SHOW_ATTRIBUTE(sub_domains);
3844 DEFINE_SHOW_ATTRIBUTE(idle_states);
3845 DEFINE_SHOW_ATTRIBUTE(active_time);
3846 DEFINE_SHOW_ATTRIBUTE(total_idle_time);
3847 DEFINE_SHOW_ATTRIBUTE(devices);
3848 DEFINE_SHOW_ATTRIBUTE(perf_state);
3849 
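/*
 * Create the per-domain debugfs directory and files. Together with the
 * summary file created in genpd_debug_init(), the resulting layout looks
 * roughly like this ("gpu" being a hypothetical domain name):
 *
 *	/sys/kernel/debug/pm_genpd/
 *	├── pm_genpd_summary
 *	└── gpu/
 *	    ├── current_state
 *	    ├── sub_domains
 *	    ├── idle_states
 *	    ├── active_time
 *	    ├── total_idle_time
 *	    ├── devices
 *	    └── perf_state	(only with ->set_performance_state())
 */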
3850 static void genpd_debug_add(struct generic_pm_domain *genpd)
3851 {
3852 	struct dentry *d;
3853 
3854 	if (!genpd_debugfs_dir)
3855 		return;
3856 
3857 	d = debugfs_create_dir(dev_name(&genpd->dev), genpd_debugfs_dir);
3858 
3859 	debugfs_create_file("current_state", 0444,
3860 			    d, genpd, &status_fops);
3861 	debugfs_create_file("sub_domains", 0444,
3862 			    d, genpd, &sub_domains_fops);
3863 	debugfs_create_file("idle_states", 0444,
3864 			    d, genpd, &idle_states_fops);
3865 	debugfs_create_file("active_time", 0444,
3866 			    d, genpd, &active_time_fops);
3867 	debugfs_create_file("total_idle_time", 0444,
3868 			    d, genpd, &total_idle_time_fops);
3869 	debugfs_create_file("devices", 0444,
3870 			    d, genpd, &devices_fops);
3871 	if (genpd->set_performance_state)
3872 		debugfs_create_file("perf_state", 0444,
3873 				    d, genpd, &perf_state_fops);
3874 }
3875 
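/*
 * Runs as a late_initcall, so domains registered by built-in drivers are
 * already on gpd_list and get their debugfs entries here. Domains added
 * later get theirs from the genpd_debug_add() call on the domain
 * registration path.
 */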
3876 static int __init genpd_debug_init(void)
3877 {
3878 	struct generic_pm_domain *genpd;
3879 
3880 	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
3881 
3882 	debugfs_create_file("pm_genpd_summary", 0444, genpd_debugfs_dir,
3883 			    NULL, &summary_fops);
3884 
3885 	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
3886 		genpd_debug_add(genpd);
3887 
3888 	return 0;
3889 }
3890 late_initcall(genpd_debug_init);
3891 
3892 static void __exit genpd_debug_exit(void)
3893 {
3894 	debugfs_remove_recursive(genpd_debugfs_dir);
3895 }
3896 __exitcall(genpd_debug_exit);
3897 #endif /* CONFIG_DEBUG_FS */
3898