xref: /linux/drivers/pmdomain/core.c (revision fc8f5028eb0cc5aee0501a99f59a04f748fbff1c)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/domain.c - Common code related to device power domains.
4  *
5  * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
6  */
7 #define pr_fmt(fmt) "PM: " fmt
8 
9 #include <linux/delay.h>
10 #include <linux/idr.h>
11 #include <linux/kernel.h>
12 #include <linux/io.h>
13 #include <linux/platform_device.h>
14 #include <linux/pm_opp.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/pm_domain.h>
17 #include <linux/pm_qos.h>
18 #include <linux/pm_clock.h>
19 #include <linux/slab.h>
20 #include <linux/err.h>
21 #include <linux/sched.h>
22 #include <linux/suspend.h>
23 #include <linux/export.h>
24 #include <linux/cpu.h>
25 #include <linux/debugfs.h>
26 
27 /* Provides a unique ID for each genpd device */
28 static DEFINE_IDA(genpd_ida);
29 
30 /* The bus for genpd_providers. */
31 static const struct bus_type genpd_provider_bus_type = {
32 	.name		= "genpd_provider",
33 };
34 
35 /* The parent for genpd_provider devices. */
36 static struct device genpd_provider_bus = {
37 	.init_name = "genpd_provider",
38 };
39 
40 #define GENPD_RETRY_MAX_MS	250		/* Approximate */
41 
42 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
43 ({								\
44 	type (*__routine)(struct device *__d); 			\
45 	type __ret = (type)0;					\
46 								\
47 	__routine = genpd->dev_ops.callback; 			\
48 	if (__routine) {					\
49 		__ret = __routine(dev); 			\
50 	}							\
51 	__ret;							\
52 })
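
/*
 * Editor's note: a hedged illustration of what the statement expression
 * above evaluates to. For GENPD_DEV_CALLBACK(genpd, int, stop, dev) it
 * behaves roughly like (local names are part of the example only):
 *
 *	int (*__routine)(struct device *__d) = genpd->dev_ops.stop;
 *	int __ret = __routine ? __routine(dev) : 0;
 *
 * i.e. a per-device callback is optional and a missing one counts as
 * success.
 */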
53 
54 static LIST_HEAD(gpd_list);
55 static DEFINE_MUTEX(gpd_list_lock);
56 
57 struct genpd_lock_ops {
58 	void (*lock)(struct generic_pm_domain *genpd);
59 	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
60 	int (*lock_interruptible)(struct generic_pm_domain *genpd);
61 	void (*unlock)(struct generic_pm_domain *genpd);
62 };
63 
64 static void genpd_lock_mtx(struct generic_pm_domain *genpd)
65 {
66 	mutex_lock(&genpd->mlock);
67 }
68 
69 static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
70 					int depth)
71 {
72 	mutex_lock_nested(&genpd->mlock, depth);
73 }
74 
75 static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
76 {
77 	return mutex_lock_interruptible(&genpd->mlock);
78 }
79 
80 static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
81 {
82 	return mutex_unlock(&genpd->mlock);
83 }
84 
85 static const struct genpd_lock_ops genpd_mtx_ops = {
86 	.lock = genpd_lock_mtx,
87 	.lock_nested = genpd_lock_nested_mtx,
88 	.lock_interruptible = genpd_lock_interruptible_mtx,
89 	.unlock = genpd_unlock_mtx,
90 };
91 
92 static void genpd_lock_spin(struct generic_pm_domain *genpd)
93 	__acquires(&genpd->slock)
94 {
95 	unsigned long flags;
96 
97 	spin_lock_irqsave(&genpd->slock, flags);
98 	genpd->lock_flags = flags;
99 }
100 
101 static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
102 					int depth)
103 	__acquires(&genpd->slock)
104 {
105 	unsigned long flags;
106 
107 	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
108 	genpd->lock_flags = flags;
109 }
110 
111 static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
112 	__acquires(&genpd->slock)
113 {
114 	unsigned long flags;
115 
116 	spin_lock_irqsave(&genpd->slock, flags);
117 	genpd->lock_flags = flags;
118 	return 0;
119 }
120 
121 static void genpd_unlock_spin(struct generic_pm_domain *genpd)
122 	__releases(&genpd->slock)
123 {
124 	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
125 }
126 
127 static const struct genpd_lock_ops genpd_spin_ops = {
128 	.lock = genpd_lock_spin,
129 	.lock_nested = genpd_lock_nested_spin,
130 	.lock_interruptible = genpd_lock_interruptible_spin,
131 	.unlock = genpd_unlock_spin,
132 };
133 
134 static void genpd_lock_raw_spin(struct generic_pm_domain *genpd)
135 	__acquires(&genpd->raw_slock)
136 {
137 	unsigned long flags;
138 
139 	raw_spin_lock_irqsave(&genpd->raw_slock, flags);
140 	genpd->raw_lock_flags = flags;
141 }
142 
143 static void genpd_lock_nested_raw_spin(struct generic_pm_domain *genpd,
144 					int depth)
145 	__acquires(&genpd->raw_slock)
146 {
147 	unsigned long flags;
148 
149 	raw_spin_lock_irqsave_nested(&genpd->raw_slock, flags, depth);
150 	genpd->raw_lock_flags = flags;
151 }
152 
153 static int genpd_lock_interruptible_raw_spin(struct generic_pm_domain *genpd)
154 	__acquires(&genpd->raw_slock)
155 {
156 	unsigned long flags;
157 
158 	raw_spin_lock_irqsave(&genpd->raw_slock, flags);
159 	genpd->raw_lock_flags = flags;
160 	return 0;
161 }
162 
163 static void genpd_unlock_raw_spin(struct generic_pm_domain *genpd)
164 	__releases(&genpd->raw_slock)
165 {
166 	raw_spin_unlock_irqrestore(&genpd->raw_slock, genpd->raw_lock_flags);
167 }
168 
169 static const struct genpd_lock_ops genpd_raw_spin_ops = {
170 	.lock = genpd_lock_raw_spin,
171 	.lock_nested = genpd_lock_nested_raw_spin,
172 	.lock_interruptible = genpd_lock_interruptible_raw_spin,
173 	.unlock = genpd_unlock_raw_spin,
174 };
175 
176 #define genpd_lock(p)			p->lock_ops->lock(p)
177 #define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
178 #define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
179 #define genpd_unlock(p)			p->lock_ops->unlock(p)
180 
181 #define genpd_status_on(genpd)		(genpd->status == GENPD_STATE_ON)
182 #define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
183 #define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
184 #define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
185 #define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
186 #define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
187 #define genpd_is_opp_table_fw(genpd)	(genpd->flags & GENPD_FLAG_OPP_TABLE_FW)
188 #define genpd_is_dev_name_fw(genpd)	(genpd->flags & GENPD_FLAG_DEV_NAME_FW)
189 #define genpd_is_no_sync_state(genpd)	(genpd->flags & GENPD_FLAG_NO_SYNC_STATE)
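
/*
 * Editor's note: the lock_ops pointer dispatched by the genpd_lock()
 * wrappers above is picked once per domain at init time (see
 * genpd_lock_init() further down in this file, outside this excerpt).
 * A simplified sketch, under the assumption that CPU domains take the
 * raw spinlock and IRQ-safe domains the regular spinlock:
 *
 *	if (genpd_is_cpu_domain(genpd))
 *		genpd->lock_ops = &genpd_raw_spin_ops;
 *	else if (genpd_is_irq_safe(genpd))
 *		genpd->lock_ops = &genpd_spin_ops;
 *	else
 *		genpd->lock_ops = &genpd_mtx_ops;
 */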
190 
191 static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
192 		const struct generic_pm_domain *genpd)
193 {
194 	bool ret;
195 
196 	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
197 
198 	/*
199 	 * Warn once if an IRQ safe device is attached to a domain whose
200 	 * callbacks are allowed to sleep. This indicates a suboptimal
201 	 * configuration for PM, but it doesn't matter for an always on domain.
202 	 */
203 	if (genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd))
204 		return ret;
205 
206 	if (ret)
207 		dev_warn_once(dev, "PM domain %s will not be powered off\n",
208 			      dev_name(&genpd->dev));
209 
210 	return ret;
211 }
212 
213 static int genpd_runtime_suspend(struct device *dev);
214 
215 /*
216  * Get the generic PM domain for a particular struct device.
217  * This validates the struct device pointer, the PM domain pointer,
218  * and checks that the PM domain pointer is a real generic PM domain.
219  * Any failure results in NULL being returned.
220  */
221 static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
222 {
223 	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
224 		return NULL;
225 
226 	/* A genpd always has its ->runtime_suspend() callback assigned. */
227 	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
228 		return pd_to_genpd(dev->pm_domain);
229 
230 	return NULL;
231 }
232 
233 /*
234  * This should only be used where we are certain that the pm_domain
235  * attached to the device is a genpd domain.
236  */
237 static struct generic_pm_domain *dev_to_genpd(struct device *dev)
238 {
239 	if (IS_ERR_OR_NULL(dev->pm_domain))
240 		return ERR_PTR(-EINVAL);
241 
242 	return pd_to_genpd(dev->pm_domain);
243 }
244 
245 struct device *dev_to_genpd_dev(struct device *dev)
246 {
247 	struct generic_pm_domain *genpd = dev_to_genpd(dev);
248 
249 	if (IS_ERR(genpd))
250 		return ERR_CAST(genpd);
251 
252 	return &genpd->dev;
253 }
254 
255 static int genpd_stop_dev(const struct generic_pm_domain *genpd,
256 			  struct device *dev)
257 {
258 	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
259 }
260 
261 static int genpd_start_dev(const struct generic_pm_domain *genpd,
262 			   struct device *dev)
263 {
264 	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
265 }
266 
267 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
268 {
269 	bool ret = false;
270 
271 	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
272 		ret = !!atomic_dec_and_test(&genpd->sd_count);
273 
274 	return ret;
275 }
276 
277 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
278 {
279 	atomic_inc(&genpd->sd_count);
280 	smp_mb__after_atomic();
281 }
282 
283 #ifdef CONFIG_DEBUG_FS
284 static struct dentry *genpd_debugfs_dir;
285 
286 static void genpd_debug_add(struct generic_pm_domain *genpd);
287 
288 static void genpd_debug_remove(struct generic_pm_domain *genpd)
289 {
290 	if (!genpd_debugfs_dir)
291 		return;
292 
293 	debugfs_lookup_and_remove(dev_name(&genpd->dev), genpd_debugfs_dir);
294 }
295 
296 static void genpd_update_accounting(struct generic_pm_domain *genpd)
297 {
298 	u64 delta, now;
299 
300 	now = ktime_get_mono_fast_ns();
301 	if (now <= genpd->accounting_time)
302 		return;
303 
304 	delta = now - genpd->accounting_time;
305 
306 	/*
307 	 * If genpd->status is now ON, the domain has just left the off
308 	 * state, so the time that elapsed since the last update was spent
309 	 * idle; account it as idle time, and vice versa for on time.
310 	 */
311 	if (genpd->status == GENPD_STATE_ON)
312 		genpd->states[genpd->state_idx].idle_time += delta;
313 	else
314 		genpd->on_time += delta;
315 
316 	genpd->accounting_time = now;
317 }
318 
319 static void genpd_reflect_residency(struct generic_pm_domain *genpd)
320 {
321 	struct genpd_governor_data *gd = genpd->gd;
322 	struct genpd_power_state *state, *next_state;
323 	unsigned int state_idx;
324 	s64 sleep_ns, target_ns;
325 
326 	if (!gd || !gd->reflect_residency)
327 		return;
328 
329 	sleep_ns = ktime_to_ns(ktime_sub(ktime_get(), gd->last_enter));
330 	state_idx = genpd->state_idx;
331 	state = &genpd->states[state_idx];
332 	target_ns = state->power_off_latency_ns + state->residency_ns;
333 
334 	if (sleep_ns < target_ns) {
335 		state->above++;
336 	} else if (state_idx < (genpd->state_count - 1)) {
337 		next_state = &genpd->states[state_idx + 1];
338 		target_ns = next_state->power_off_latency_ns +
339 			next_state->residency_ns;
340 
341 		if (sleep_ns >= target_ns)
342 			state->below++;
343 	}
344 
345 	gd->reflect_residency = false;
346 }
347 #else
348 static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
349 static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
350 static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
351 static inline void genpd_reflect_residency(struct generic_pm_domain *genpd) {}
352 #endif
353 
354 static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
355 					   unsigned int state)
356 {
357 	struct generic_pm_domain_data *pd_data;
358 	struct pm_domain_data *pdd;
359 	struct gpd_link *link;
360 
361 	/* New requested state is the same as the max requested state */
362 	if (state == genpd->performance_state)
363 		return state;
364 
365 	/* New requested state is higher than the max requested state */
366 	if (state > genpd->performance_state)
367 		return state;
368 
369 	/* Traverse all devices within the domain */
370 	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
371 		pd_data = to_gpd_data(pdd);
372 
373 		if (pd_data->performance_state > state)
374 			state = pd_data->performance_state;
375 	}
376 
377 	/*
378 	 * Traverse all sub-domains within the domain. This can be
379 	 * done without any additional locking as the link->performance_state
380 	 * field is protected by the parent genpd->lock, which is already taken.
381 	 *
382 	 * Also note that link->performance_state (subdomain's performance state
383 	 * requirement to parent domain) is different from
384 	 * link->child->performance_state (current performance state requirement
385 	 * of the devices/sub-domains of the subdomain) and so can have a
386 	 * different value.
387 	 *
388 	 * Note that we also take vote from powered-off sub-domains into account
389 	 * as the same is done for devices right now.
390 	 */
391 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
392 		if (link->performance_state > state)
393 			state = link->performance_state;
394 	}
395 
396 	return state;
397 }
398 
399 static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
400 					 struct generic_pm_domain *parent,
401 					 unsigned int pstate)
402 {
403 	if (!parent->set_performance_state)
404 		return pstate;
405 
406 	return dev_pm_opp_xlate_performance_state(genpd->opp_table,
407 						  parent->opp_table,
408 						  pstate);
409 }
410 
411 static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
412 					unsigned int state, int depth);
413 
414 static void _genpd_rollback_parent_state(struct gpd_link *link, int depth)
415 {
416 	struct generic_pm_domain *parent = link->parent;
417 	int parent_state;
418 
419 	genpd_lock_nested(parent, depth + 1);
420 
421 	parent_state = link->prev_performance_state;
422 	link->performance_state = parent_state;
423 
424 	parent_state = _genpd_reeval_performance_state(parent, parent_state);
425 	if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
426 		pr_err("%s: Failed to roll back to %d performance state\n",
427 		       parent->name, parent_state);
428 	}
429 
430 	genpd_unlock(parent);
431 }
432 
433 static int _genpd_set_parent_state(struct generic_pm_domain *genpd,
434 				   struct gpd_link *link,
435 				   unsigned int state, int depth)
436 {
437 	struct generic_pm_domain *parent = link->parent;
438 	int parent_state, ret;
439 
440 	/* Find parent's performance state */
441 	ret = genpd_xlate_performance_state(genpd, parent, state);
442 	if (unlikely(ret < 0))
443 		return ret;
444 
445 	parent_state = ret;
446 
447 	genpd_lock_nested(parent, depth + 1);
448 
449 	link->prev_performance_state = link->performance_state;
450 	link->performance_state = parent_state;
451 
452 	parent_state = _genpd_reeval_performance_state(parent, parent_state);
453 	ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
454 	if (ret)
455 		link->performance_state = link->prev_performance_state;
456 
457 	genpd_unlock(parent);
458 
459 	return ret;
460 }
461 
462 static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
463 					unsigned int state, int depth)
464 {
465 	struct gpd_link *link = NULL;
466 	int ret;
467 
468 	if (state == genpd->performance_state)
469 		return 0;
470 
471 	/* When scaling up, propagate to parents first in normal order */
472 	if (state > genpd->performance_state) {
473 		list_for_each_entry(link, &genpd->child_links, child_node) {
474 			ret = _genpd_set_parent_state(genpd, link, state, depth);
475 			if (ret)
476 				goto rollback_parents_up;
477 		}
478 	}
479 
480 	if (genpd->set_performance_state) {
481 		ret = genpd->set_performance_state(genpd, state);
482 		if (ret) {
483 			if (link)
484 				goto rollback_parents_up;
485 			return ret;
486 		}
487 	}
488 
489 	/* When scaling down, propagate to parents last in reverse order */
490 	if (state < genpd->performance_state) {
491 		list_for_each_entry_reverse(link, &genpd->child_links, child_node) {
492 			ret = _genpd_set_parent_state(genpd, link, state, depth);
493 			if (ret)
494 				goto rollback_parents_down;
495 		}
496 	}
497 
498 	genpd->performance_state = state;
499 	return 0;
500 
501 rollback_parents_up:
502 	list_for_each_entry_continue_reverse(link, &genpd->child_links, child_node)
503 		_genpd_rollback_parent_state(link, depth);
504 	return ret;
505 rollback_parents_down:
506 	list_for_each_entry_continue(link, &genpd->child_links, child_node)
507 		_genpd_rollback_parent_state(link, depth);
508 	return ret;
509 }
510 
511 static int genpd_set_performance_state(struct device *dev, unsigned int state)
512 {
513 	struct generic_pm_domain *genpd = dev_to_genpd(dev);
514 	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
515 	unsigned int prev_state;
516 	int ret;
517 
518 	prev_state = gpd_data->performance_state;
519 	if (prev_state == state)
520 		return 0;
521 
522 	gpd_data->performance_state = state;
523 	state = _genpd_reeval_performance_state(genpd, state);
524 
525 	ret = _genpd_set_performance_state(genpd, state, 0);
526 	if (ret)
527 		gpd_data->performance_state = prev_state;
528 
529 	return ret;
530 }
531 
532 static int genpd_drop_performance_state(struct device *dev)
533 {
534 	unsigned int prev_state = dev_gpd_data(dev)->performance_state;
535 
536 	if (!genpd_set_performance_state(dev, 0))
537 		return prev_state;
538 
539 	return 0;
540 }
541 
542 static void genpd_restore_performance_state(struct device *dev,
543 					    unsigned int state)
544 {
545 	if (state)
546 		genpd_set_performance_state(dev, state);
547 }
548 
549 static int genpd_dev_pm_set_performance_state(struct device *dev,
550 					      unsigned int state)
551 {
552 	struct generic_pm_domain *genpd = dev_to_genpd(dev);
553 	int ret = 0;
554 
555 	genpd_lock(genpd);
556 	if (pm_runtime_suspended(dev)) {
557 		dev_gpd_data(dev)->rpm_pstate = state;
558 	} else {
559 		ret = genpd_set_performance_state(dev, state);
560 		if (!ret)
561 			dev_gpd_data(dev)->rpm_pstate = 0;
562 	}
563 	genpd_unlock(genpd);
564 
565 	return ret;
566 }
567 
568 /**
569  * dev_pm_genpd_set_performance_state - Set performance state of device's power
570  * domain.
571  *
572  * @dev: Device for which the performance-state needs to be set.
573  * @state: Target performance state of the device. This can be set as 0 when the
574  *	   device doesn't have any performance state constraints left (and so
575  *	   the device no longer participates in determining the target
576  *	   performance state of the genpd).
577  *
578  * It is assumed that the users guarantee that the genpd wouldn't be detached
579  * while this routine is getting called.
580  *
581  * Returns 0 on success and negative error values on failures.
582  */
583 int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
584 {
585 	struct generic_pm_domain *genpd;
586 
587 	genpd = dev_to_genpd_safe(dev);
588 	if (!genpd)
589 		return -ENODEV;
590 
591 	if (WARN_ON(!dev->power.subsys_data ||
592 		     !dev->power.subsys_data->domain_data))
593 		return -EINVAL;
594 
595 	return genpd_dev_pm_set_performance_state(dev, state);
596 }
597 EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
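
/*
 * Editor's note: a minimal, hedged consumer-side sketch of the API above.
 * The device pointer and the hard-coded state value are illustrative
 * assumptions; real consumers typically derive the state from an OPP
 * table instead of hard-coding it.
 */
static int example_request_pstate(struct device *dev)
{
	int ret;

	/* Request performance state 2; passing 0 would drop the vote. */
	ret = dev_pm_genpd_set_performance_state(dev, 2);
	if (ret)
		dev_err(dev, "failed to set performance state: %d\n", ret);

	return ret;
}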
598 
599 /**
600  * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
601  *
602  * @dev: Device to handle
603  * @next: impending interrupt/wakeup for the device
604  *
605  *
606  * Allow devices to inform of the next wakeup. It's assumed that the users
607  * guarantee that the genpd wouldn't be detached while this routine is getting
608  * called. Additionally, it's also assumed that @dev isn't runtime suspended
609  * (RPM_SUSPENDED).
610  * Although devices are expected to update the next_wakeup after the end of
611  * their use case as well, they may not always be able to do so, which is why
612  * a stale @next will be ignored when powering off the domain.
613  */
614 void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
615 {
616 	struct generic_pm_domain *genpd;
617 	struct gpd_timing_data *td;
618 
619 	genpd = dev_to_genpd_safe(dev);
620 	if (!genpd)
621 		return;
622 
623 	td = to_gpd_data(dev->power.subsys_data->domain_data)->td;
624 	if (td)
625 		td->next_wakeup = next;
626 }
627 EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
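
/*
 * Editor's note: a hedged usage sketch. A runtime-active driver that knows
 * its next interrupt is due in roughly 5 ms could hint that to the genpd
 * governor as below (the 5 ms figure is an illustrative assumption):
 */
static void example_hint_next_wakeup(struct device *dev)
{
	dev_pm_genpd_set_next_wakeup(dev, ktime_add_ms(ktime_get(), 5));
}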
628 
629 /**
630  * dev_pm_genpd_get_next_hrtimer - Return the next_hrtimer for the genpd
631  * @dev: A device that is attached to the genpd.
632  *
633  * This routine should typically be called for a device at the point when a
634  * GENPD_NOTIFY_PRE_OFF notification has been sent for it.
635  *
636  * Returns the aggregated value of the genpd's next hrtimer or KTIME_MAX if no
637  * valid value has been set.
638  */
639 ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
640 {
641 	struct generic_pm_domain *genpd;
642 
643 	genpd = dev_to_genpd_safe(dev);
644 	if (!genpd)
645 		return KTIME_MAX;
646 
647 	if (genpd->gd)
648 		return genpd->gd->next_hrtimer;
649 
650 	return KTIME_MAX;
651 }
652 EXPORT_SYMBOL_GPL(dev_pm_genpd_get_next_hrtimer);
653 
654 /**
655  * dev_pm_genpd_synced_poweroff - Next power off should be synchronous
656  *
657  * @dev: A device that is attached to the genpd.
658  *
659  * Allows a consumer of the genpd to notify the provider that the next power off
660  * should be synchronous.
661  *
662  * It is assumed that the users guarantee that the genpd wouldn't be detached
663  * while this routine is getting called.
664  */
665 void dev_pm_genpd_synced_poweroff(struct device *dev)
666 {
667 	struct generic_pm_domain *genpd;
668 
669 	genpd = dev_to_genpd_safe(dev);
670 	if (!genpd)
671 		return;
672 
673 	genpd_lock(genpd);
674 	genpd->synced_poweroff = true;
675 	genpd_unlock(genpd);
676 }
677 EXPORT_SYMBOL_GPL(dev_pm_genpd_synced_poweroff);
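
/*
 * Editor's note: a hedged sketch of a typical caller. A consumer that must
 * observe the next power-off synchronously would flag it right before
 * dropping its last runtime PM reference, e.g.:
 *
 *	dev_pm_genpd_synced_poweroff(dev);
 *	pm_runtime_put_sync(dev);
 *
 * The pairing with pm_runtime_put_sync() is an assumption about a typical
 * caller, not something this file mandates.
 */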
678 
679 /**
680  * dev_pm_genpd_set_hwmode() - Set the HW mode for the device and its PM domain.
681  *
682  * @dev: Device for which the HW-mode should be changed.
683  * @enable: Value to set or unset the HW-mode.
684  *
685  * Some PM domains can rely on HW signals to control the power for a device. To
686  * allow a consumer driver to switch the behaviour for its device at runtime,
687  * which may be beneficial from a latency or energy point of view, this function
688  * may be called.
689  *
690  * It is assumed that the users guarantee that the genpd wouldn't be detached
691  * while this routine is getting called.
692  *
693  * Return: Returns 0 on success and negative error values on failures.
694  */
695 int dev_pm_genpd_set_hwmode(struct device *dev, bool enable)
696 {
697 	struct generic_pm_domain *genpd;
698 	int ret = 0;
699 
700 	genpd = dev_to_genpd_safe(dev);
701 	if (!genpd)
702 		return -ENODEV;
703 
704 	if (!genpd->set_hwmode_dev)
705 		return -EOPNOTSUPP;
706 
707 	genpd_lock(genpd);
708 
709 	if (dev_gpd_data(dev)->hw_mode == enable)
710 		goto out;
711 
712 	ret = genpd->set_hwmode_dev(genpd, dev, enable);
713 	if (!ret)
714 		dev_gpd_data(dev)->hw_mode = enable;
715 
716 out:
717 	genpd_unlock(genpd);
718 	return ret;
719 }
720 EXPORT_SYMBOL_GPL(dev_pm_genpd_set_hwmode);
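
/*
 * Editor's note: a hedged consumer sketch covering both the setter above
 * and the getter below. The device pointer and the error policy are
 * illustrative assumptions.
 */
static int example_enable_hwmode(struct device *dev)
{
	int ret;

	/* Hand power control over to the HW signals... */
	ret = dev_pm_genpd_set_hwmode(dev, true);
	if (ret == -EOPNOTSUPP)
		return 0;	/* The domain has no HW mode; carry on. */
	if (ret)
		return ret;

	/* ...and read back the SW-cached setting for this device. */
	WARN_ON(!dev_pm_genpd_get_hwmode(dev));
	return 0;
}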
721 
722 /**
723  * dev_pm_genpd_get_hwmode() - Get the HW mode setting for the device.
724  *
725  * @dev: Device for which the current HW-mode setting should be fetched.
726  *
727  * This helper function allows consumer drivers to fetch the current HW mode
728  * setting of their device.
729  *
730  * It is assumed that the users guarantee that the genpd wouldn't be detached
731  * while this routine is getting called.
732  *
733  * Return: Returns the HW mode setting of device from SW cached hw_mode.
734  */
735 bool dev_pm_genpd_get_hwmode(struct device *dev)
736 {
737 	return dev_gpd_data(dev)->hw_mode;
738 }
739 EXPORT_SYMBOL_GPL(dev_pm_genpd_get_hwmode);
740 
741 /**
742  * dev_pm_genpd_rpm_always_on() - Control if the PM domain can be powered off.
743  *
744  * @dev: Device for which the PM domain may need to stay on.
745  * @on: Value to set or unset for the condition.
746  *
747  * For some use cases a consumer driver requires its device to remain powered on
748  * from the PM domain perspective during runtime. This function allows the
749  * behaviour to be dynamically controlled for a device attached to a genpd.
750  *
751  * It is assumed that the users guarantee that the genpd wouldn't be detached
752  * while this routine is getting called.
753  *
754  * Return: Returns 0 on success and negative error values on failures.
755  */
756 int dev_pm_genpd_rpm_always_on(struct device *dev, bool on)
757 {
758 	struct generic_pm_domain *genpd;
759 
760 	genpd = dev_to_genpd_safe(dev);
761 	if (!genpd)
762 		return -ENODEV;
763 
764 	genpd_lock(genpd);
765 	dev_gpd_data(dev)->rpm_always_on = on;
766 	genpd_unlock(genpd);
767 
768 	return 0;
769 }
770 EXPORT_SYMBOL_GPL(dev_pm_genpd_rpm_always_on);
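
/*
 * Editor's note: a hedged sketch. A driver could pin its PM domain on
 * across a critical section and drop the constraint afterwards; the
 * function name and the surrounding flow are illustrative assumptions.
 */
static int example_do_critical_work(struct device *dev)
{
	int ret;

	ret = dev_pm_genpd_rpm_always_on(dev, true);
	if (ret)
		return ret;

	/* ... work that must not lose domain power goes here ... */

	return dev_pm_genpd_rpm_always_on(dev, false);
}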
771 
772 /**
773  * dev_pm_genpd_is_on() - Get device's current power domain status
774  *
775  * @dev: Device to get the current power status
776  *
777  * This function checks whether the generic power domain associated with the
778  * given device is on or not by verifying if genpd_status_on equals
779  * given device is on or not, by checking whether the genpd's status equals
780  *
781  * Note: this function returns the power status of the genpd at the time of the
782  * call. The power status may change afterwards due to activity from other
783  * devices sharing the same genpd. Therefore, this information should not be
784  * relied on for long-term decisions about the device power state.
785  *
786  * Return: 'true' if the device's power domain is on, 'false' otherwise.
787  */
788 bool dev_pm_genpd_is_on(struct device *dev)
789 {
790 	struct generic_pm_domain *genpd;
791 	bool is_on;
792 
793 	genpd = dev_to_genpd_safe(dev);
794 	if (!genpd)
795 		return false;
796 
797 	genpd_lock(genpd);
798 	is_on = genpd_status_on(genpd);
799 	genpd_unlock(genpd);
800 
801 	return is_on;
802 }
803 EXPORT_SYMBOL_GPL(dev_pm_genpd_is_on);
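
/*
 * Editor's note: a hedged one-liner sketch; as the kernel-doc above warns,
 * the result is only a snapshot of the domain's state.
 */
static void example_log_domain_status(struct device *dev)
{
	dev_dbg(dev, "PM domain is %s\n",
		dev_pm_genpd_is_on(dev) ? "on" : "off");
}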
804 
805 /**
806  * pm_genpd_inc_rejected() - Adjust the rejected/usage counts for an idle-state.
807  *
808  * @genpd: The PM domain the idle-state belongs to.
809  * @state_idx: The index of the idle-state that failed.
810  *
811  * In some special cases the ->power_off() callback powers off the PM domain
812  * asynchronously, which means it may return zero to indicate success even
813  * though the actual power-off could fail. To account for this correctly in
814  * the rejected/usage counts for the idle-state statistics, users can call this
815  * function to adjust the values.
816  *
817  * It is assumed that the users guarantee that the genpd doesn't get removed
818  * while this routine is getting called.
819  */
820 void pm_genpd_inc_rejected(struct generic_pm_domain *genpd,
821 			   unsigned int state_idx)
822 {
823 	genpd_lock(genpd);
824 	genpd->states[state_idx].rejected++;
825 	genpd->states[state_idx].usage--;
826 	genpd_unlock(genpd);
827 }
828 EXPORT_SYMBOL_GPL(pm_genpd_inc_rejected);
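
/*
 * Editor's note: a hedged provider-side sketch. A genpd provider whose
 * ->power_off() completes asynchronously could repair the statistics from
 * its completion path once the hardware reports the outcome; the function
 * and parameter names are illustrative assumptions.
 */
static void example_async_poweroff_done(struct generic_pm_domain *pd,
					unsigned int state_idx, bool failed)
{
	if (failed)
		pm_genpd_inc_rejected(pd, state_idx);
}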
829 
830 static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
831 {
832 	unsigned int state_idx = genpd->state_idx;
833 	ktime_t time_start;
834 	s64 elapsed_ns;
835 	int ret;
836 
837 	/* Notify consumers that we are about to power on. */
838 	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
839 					     GENPD_NOTIFY_PRE_ON,
840 					     GENPD_NOTIFY_OFF, NULL);
841 	ret = notifier_to_errno(ret);
842 	if (ret)
843 		return ret;
844 
845 	if (!genpd->power_on)
846 		goto out;
847 
848 	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
849 	if (!timed) {
850 		ret = genpd->power_on(genpd);
851 		if (ret)
852 			goto err;
853 
854 		goto out;
855 	}
856 
857 	time_start = ktime_get();
858 	ret = genpd->power_on(genpd);
859 	if (ret)
860 		goto err;
861 
862 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
863 	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
864 		goto out;
865 
866 	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
867 	genpd->gd->max_off_time_changed = true;
868 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
869 		 dev_name(&genpd->dev), "on", elapsed_ns);
870 
871 out:
872 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
873 	genpd->synced_poweroff = false;
874 	return 0;
875 err:
876 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
877 				NULL);
878 	return ret;
879 }
880 
881 static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
882 {
883 	unsigned int state_idx = genpd->state_idx;
884 	ktime_t time_start;
885 	s64 elapsed_ns;
886 	int ret;
887 
888 	/* Notify consumers that we are about to power off. */
889 	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
890 					     GENPD_NOTIFY_PRE_OFF,
891 					     GENPD_NOTIFY_ON, NULL);
892 	ret = notifier_to_errno(ret);
893 	if (ret)
894 		return ret;
895 
896 	if (!genpd->power_off)
897 		goto out;
898 
899 	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
900 	if (!timed) {
901 		ret = genpd->power_off(genpd);
902 		if (ret)
903 			goto busy;
904 
905 		goto out;
906 	}
907 
908 	time_start = ktime_get();
909 	ret = genpd->power_off(genpd);
910 	if (ret)
911 		goto busy;
912 
913 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
914 	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
915 		goto out;
916 
917 	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
918 	genpd->gd->max_off_time_changed = true;
919 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
920 		 dev_name(&genpd->dev), "off", elapsed_ns);
921 
922 out:
923 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
924 				NULL);
925 	return 0;
926 busy:
927 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
928 	return ret;
929 }
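
/*
 * Editor's note: a hedged consumer sketch of the notifier flow driven by
 * _genpd_power_on()/_genpd_power_off() above. Registration goes through
 * dev_pm_genpd_add_notifier() (declared in linux/pm_domain.h, outside this
 * excerpt); the struct and callback names are illustrative assumptions.
 */
struct example_consumer {
	struct device *dev;
	struct notifier_block nb;	/* passed to dev_pm_genpd_add_notifier() */
};

static int example_pd_notifier(struct notifier_block *nb,
			       unsigned long action, void *data)
{
	struct example_consumer *ec = container_of(nb, struct example_consumer, nb);

	switch (action) {
	case GENPD_NOTIFY_PRE_OFF:
		/* The point where dev_pm_genpd_get_next_hrtimer() is meaningful. */
		dev_dbg(ec->dev, "next hrtimer: %lld ns\n",
			ktime_to_ns(dev_pm_genpd_get_next_hrtimer(ec->dev)));
		break;
	case GENPD_NOTIFY_ON:
		/* Domain power has been restored. */
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}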
930 
931 /**
932  * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
933  * @genpd: PM domain to power off.
934  *
935  * Queue up the execution of genpd_power_off() unless it's already been done
936  * before.
937  */
938 static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
939 {
940 	queue_work(pm_wq, &genpd->power_off_work);
941 }
942 
943 /**
944  * genpd_power_off - Remove power from a given PM domain.
945  * @genpd: PM domain to power down.
946  * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
947  * RPM status of the related device is in an intermediate state, not yet turned
948  * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
949  * be RPM_SUSPENDED, while it tries to power off the PM domain.
950  * @depth: nesting count for lockdep.
951  *
952  * If all of the @genpd's devices have been suspended and all of its subdomains
953  * have been powered down, remove power from @genpd.
954  */
955 static void genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
956 			    unsigned int depth)
957 {
958 	struct pm_domain_data *pdd;
959 	struct gpd_link *link;
960 	unsigned int not_suspended = 0;
961 
962 	/*
963 	 * Do not try to power off the domain in the following situations:
964 	 * The domain is already in the "power off" state.
965 	 * System suspend is in progress.
966 	 * The domain is configured as always on.
967 	 * The domain was on at boot and still needs to stay on.
968 	 * The domain has a subdomain being powered on.
969 	 */
970 	if (!genpd_status_on(genpd) || genpd->prepared_count > 0 ||
971 	    genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd) ||
972 	    genpd->stay_on || atomic_read(&genpd->sd_count) > 0)
973 		return;
974 
975 	/*
976 	 * The children must be in their deepest (powered-off) states to allow
977 	 * the parent to be powered off. Note that there's no need for
978 	 * additional locking, as powering on a child requires the parent's
979 	 * lock to be acquired first.
980 	 */
981 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
982 		struct generic_pm_domain *child = link->child;
983 		if (child->state_idx < child->state_count - 1)
984 			return;
985 	}
986 
987 	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
988 		/*
989 		 * Do not allow PM domain to be powered off, when an IRQ safe
990 		 * device is part of a non-IRQ safe domain.
991 		 */
992 		if (!pm_runtime_suspended(pdd->dev) ||
993 			irq_safe_dev_in_sleep_domain(pdd->dev, genpd))
994 			not_suspended++;
995 
996 		/* The device may need its PM domain to stay powered on. */
997 		if (to_gpd_data(pdd)->rpm_always_on)
998 			return;
999 	}
1000 
1001 	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
1002 		return;
1003 
1004 	if (genpd->gov && genpd->gov->power_down_ok) {
1005 		if (!genpd->gov->power_down_ok(&genpd->domain))
1006 			return;
1007 	}
1008 
1009 	/* Default to shallowest state. */
1010 	if (!genpd->gov)
1011 		genpd->state_idx = 0;
1012 
1013 	/* Don't power off if a child domain is waiting to power on. */
1014 	if (atomic_read(&genpd->sd_count) > 0)
1015 		return;
1016 
1017 	if (_genpd_power_off(genpd, true)) {
1018 		genpd->states[genpd->state_idx].rejected++;
1019 		return;
1020 	}
1021 
1022 	genpd->status = GENPD_STATE_OFF;
1023 	genpd_update_accounting(genpd);
1024 	genpd->states[genpd->state_idx].usage++;
1025 
1026 	list_for_each_entry(link, &genpd->child_links, child_node) {
1027 		genpd_sd_counter_dec(link->parent);
1028 		genpd_lock_nested(link->parent, depth + 1);
1029 		genpd_power_off(link->parent, false, depth + 1);
1030 		genpd_unlock(link->parent);
1031 	}
1032 }
1033 
1034 /**
1035  * genpd_power_on - Restore power to a given PM domain and its parents.
1036  * @genpd: PM domain to power up.
1037  * @depth: nesting count for lockdep.
1038  *
1039  * Restore power to @genpd and all of its parents so that it is possible to
1040  * resume a device belonging to it.
1041  */
1042 static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
1043 {
1044 	struct gpd_link *link;
1045 	int ret = 0;
1046 
1047 	if (genpd_status_on(genpd))
1048 		return 0;
1049 
1050 	/* Reflect over the entered idle-states residency for debugfs. */
1051 	genpd_reflect_residency(genpd);
1052 
1053 	/*
1054 	 * The list is guaranteed not to change while the loop below is being
1055 	 * executed, unless one of the parents' .power_on() callbacks fiddles
1056 	 * with it.
1057 	 */
1058 	list_for_each_entry(link, &genpd->child_links, child_node) {
1059 		struct generic_pm_domain *parent = link->parent;
1060 
1061 		genpd_sd_counter_inc(parent);
1062 
1063 		genpd_lock_nested(parent, depth + 1);
1064 		ret = genpd_power_on(parent, depth + 1);
1065 		genpd_unlock(parent);
1066 
1067 		if (ret) {
1068 			genpd_sd_counter_dec(parent);
1069 			goto err;
1070 		}
1071 	}
1072 
1073 	ret = _genpd_power_on(genpd, true);
1074 	if (ret)
1075 		goto err;
1076 
1077 	genpd->status = GENPD_STATE_ON;
1078 	genpd_update_accounting(genpd);
1079 
1080 	return 0;
1081 
1082  err:
1083 	list_for_each_entry_continue_reverse(link,
1084 					&genpd->child_links,
1085 					child_node) {
1086 		genpd_sd_counter_dec(link->parent);
1087 		genpd_lock_nested(link->parent, depth + 1);
1088 		genpd_power_off(link->parent, false, depth + 1);
1089 		genpd_unlock(link->parent);
1090 	}
1091 
1092 	return ret;
1093 }
1094 
1095 static int genpd_dev_pm_start(struct device *dev)
1096 {
1097 	struct generic_pm_domain *genpd = dev_to_genpd(dev);
1098 
1099 	return genpd_start_dev(genpd, dev);
1100 }
1101 
1102 static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
1103 				     unsigned long val, void *ptr)
1104 {
1105 	struct generic_pm_domain_data *gpd_data;
1106 	struct device *dev;
1107 
1108 	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
1109 	dev = gpd_data->base.dev;
1110 
1111 	for (;;) {
1112 		struct generic_pm_domain *genpd = ERR_PTR(-ENODATA);
1113 		struct pm_domain_data *pdd;
1114 		struct gpd_timing_data *td;
1115 
1116 		spin_lock_irq(&dev->power.lock);
1117 
1118 		pdd = dev->power.subsys_data ?
1119 				dev->power.subsys_data->domain_data : NULL;
1120 		if (pdd) {
1121 			td = to_gpd_data(pdd)->td;
1122 			if (td) {
1123 				td->constraint_changed = true;
1124 				genpd = dev_to_genpd(dev);
1125 			}
1126 		}
1127 
1128 		spin_unlock_irq(&dev->power.lock);
1129 
1130 		if (!IS_ERR(genpd)) {
1131 			genpd_lock(genpd);
1132 			genpd->gd->max_off_time_changed = true;
1133 			genpd_unlock(genpd);
1134 		}
1135 
1136 		dev = dev->parent;
1137 		if (!dev || dev->power.ignore_children)
1138 			break;
1139 	}
1140 
1141 	return NOTIFY_DONE;
1142 }
1143 
1144 /**
1145  * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
1146  * @work: Work structure used for scheduling the execution of this function.
1147  */
1148 static void genpd_power_off_work_fn(struct work_struct *work)
1149 {
1150 	struct generic_pm_domain *genpd;
1151 
1152 	genpd = container_of(work, struct generic_pm_domain, power_off_work);
1153 
1154 	genpd_lock(genpd);
1155 	genpd_power_off(genpd, false, 0);
1156 	genpd_unlock(genpd);
1157 }
1158 
1159 /**
1160  * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
1161  * @dev: Device to handle.
1162  */
1163 static int __genpd_runtime_suspend(struct device *dev)
1164 {
1165 	int (*cb)(struct device *__dev);
1166 
1167 	if (dev->type && dev->type->pm)
1168 		cb = dev->type->pm->runtime_suspend;
1169 	else if (dev->class && dev->class->pm)
1170 		cb = dev->class->pm->runtime_suspend;
1171 	else if (dev->bus && dev->bus->pm)
1172 		cb = dev->bus->pm->runtime_suspend;
1173 	else
1174 		cb = NULL;
1175 
1176 	if (!cb && dev->driver && dev->driver->pm)
1177 		cb = dev->driver->pm->runtime_suspend;
1178 
1179 	return cb ? cb(dev) : 0;
1180 }
1181 
1182 /**
1183  * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
1184  * @dev: Device to handle.
1185  */
1186 static int __genpd_runtime_resume(struct device *dev)
1187 {
1188 	int (*cb)(struct device *__dev);
1189 
1190 	if (dev->type && dev->type->pm)
1191 		cb = dev->type->pm->runtime_resume;
1192 	else if (dev->class && dev->class->pm)
1193 		cb = dev->class->pm->runtime_resume;
1194 	else if (dev->bus && dev->bus->pm)
1195 		cb = dev->bus->pm->runtime_resume;
1196 	else
1197 		cb = NULL;
1198 
1199 	if (!cb && dev->driver && dev->driver->pm)
1200 		cb = dev->driver->pm->runtime_resume;
1201 
1202 	return cb ? cb(dev) : 0;
1203 }
1204 
1205 /**
1206  * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
1207  * @dev: Device to suspend.
1208  *
1209  * Carry out a runtime suspend of a device under the assumption that its
1210  * pm_domain field points to the domain member of an object of type
1211  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
1212  */
1213 static int genpd_runtime_suspend(struct device *dev)
1214 {
1215 	struct generic_pm_domain *genpd;
1216 	bool (*suspend_ok)(struct device *__dev);
1217 	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
1218 	struct gpd_timing_data *td = gpd_data->td;
1219 	bool runtime_pm = pm_runtime_enabled(dev);
1220 	ktime_t time_start = 0;
1221 	s64 elapsed_ns;
1222 	int ret;
1223 
1224 	dev_dbg(dev, "%s()\n", __func__);
1225 
1226 	genpd = dev_to_genpd(dev);
1227 	if (IS_ERR(genpd))
1228 		return -EINVAL;
1229 
1230 	/*
1231 	 * A runtime PM centric subsystem/driver may re-use the runtime PM
1232 	 * callbacks for other purposes than runtime PM. In those scenarios
1233 	 * runtime PM is disabled. Under these circumstances, we shall skip
1234 	 * validating/measuring the PM QoS latency.
1235 	 */
1236 	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
1237 	if (runtime_pm && suspend_ok && !suspend_ok(dev))
1238 		return -EBUSY;
1239 
1240 	/* Measure suspend latency. */
1241 	if (td && runtime_pm)
1242 		time_start = ktime_get();
1243 
1244 	ret = __genpd_runtime_suspend(dev);
1245 	if (ret)
1246 		return ret;
1247 
1248 	ret = genpd_stop_dev(genpd, dev);
1249 	if (ret) {
1250 		__genpd_runtime_resume(dev);
1251 		return ret;
1252 	}
1253 
1254 	/* Update suspend latency value if the measured time exceeds it. */
1255 	if (td && runtime_pm) {
1256 		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
1257 		if (elapsed_ns > td->suspend_latency_ns) {
1258 			td->suspend_latency_ns = elapsed_ns;
1259 			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
1260 				elapsed_ns);
1261 			genpd->gd->max_off_time_changed = true;
1262 			td->constraint_changed = true;
1263 		}
1264 	}
1265 
1266 	/*
1267 	 * If power.irq_safe is set, this routine may be run with
1268 	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
1269 	 */
1270 	if (irq_safe_dev_in_sleep_domain(dev, genpd))
1271 		return 0;
1272 
1273 	genpd_lock(genpd);
1274 	genpd_power_off(genpd, true, 0);
1275 	gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
1276 	genpd_unlock(genpd);
1277 
1278 	return 0;
1279 }
1280 
1281 /**
1282  * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
1283  * @dev: Device to resume.
1284  *
1285  * Carry out a runtime resume of a device under the assumption that its
1286  * pm_domain field points to the domain member of an object of type
1287  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
1288  */
1289 static int genpd_runtime_resume(struct device *dev)
1290 {
1291 	struct generic_pm_domain *genpd;
1292 	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
1293 	struct gpd_timing_data *td = gpd_data->td;
1294 	bool timed = td && pm_runtime_enabled(dev);
1295 	ktime_t time_start = 0;
1296 	s64 elapsed_ns;
1297 	int ret;
1298 
1299 	dev_dbg(dev, "%s()\n", __func__);
1300 
1301 	genpd = dev_to_genpd(dev);
1302 	if (IS_ERR(genpd))
1303 		return -EINVAL;
1304 
1305 	/*
1306 	 * As we don't power off a non-IRQ-safe domain that holds
1307 	 * an IRQ-safe device, we don't need to restore power to it.
1308 	 */
1309 	if (irq_safe_dev_in_sleep_domain(dev, genpd))
1310 		goto out;
1311 
1312 	genpd_lock(genpd);
1313 	genpd_restore_performance_state(dev, gpd_data->rpm_pstate);
1314 	ret = genpd_power_on(genpd, 0);
1315 	genpd_unlock(genpd);
1316 
1317 	if (ret)
1318 		return ret;
1319 
1320  out:
1321 	/* Measure resume latency. */
1322 	if (timed)
1323 		time_start = ktime_get();
1324 
1325 	ret = genpd_start_dev(genpd, dev);
1326 	if (ret)
1327 		goto err_poweroff;
1328 
1329 	ret = __genpd_runtime_resume(dev);
1330 	if (ret)
1331 		goto err_stop;
1332 
1333 	/* Update resume latency value if the measured time exceeds it. */
1334 	if (timed) {
1335 		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
1336 		if (elapsed_ns > td->resume_latency_ns) {
1337 			td->resume_latency_ns = elapsed_ns;
1338 			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
1339 				elapsed_ns);
1340 			genpd->gd->max_off_time_changed = true;
1341 			td->constraint_changed = true;
1342 		}
1343 	}
1344 
1345 	return 0;
1346 
1347 err_stop:
1348 	genpd_stop_dev(genpd, dev);
1349 err_poweroff:
1350 	if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
1351 		genpd_lock(genpd);
1352 		genpd_power_off(genpd, true, 0);
1353 		gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
1354 		genpd_unlock(genpd);
1355 	}
1356 
1357 	return ret;
1358 }
1359 
1360 #ifndef CONFIG_PM_GENERIC_DOMAINS_OF
1361 static bool pd_ignore_unused;
1362 static int __init pd_ignore_unused_setup(char *__unused)
1363 {
1364 	pd_ignore_unused = true;
1365 	return 1;
1366 }
1367 __setup("pd_ignore_unused", pd_ignore_unused_setup);
1368 
1369 /**
1370  * genpd_power_off_unused - Power off all PM domains with no devices in use.
1371  */
1372 static int __init genpd_power_off_unused(void)
1373 {
1374 	struct generic_pm_domain *genpd;
1375 
1376 	if (pd_ignore_unused) {
1377 		pr_warn("genpd: Not disabling unused power domains\n");
1378 		return 0;
1379 	}
1380 
1381 	pr_info("genpd: Disabling unused power domains\n");
1382 	mutex_lock(&gpd_list_lock);
1383 
1384 	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
1385 		genpd_lock(genpd);
1386 		genpd->stay_on = false;
1387 		genpd_unlock(genpd);
1388 		genpd_queue_power_off_work(genpd);
1389 	}
1390 
1391 	mutex_unlock(&gpd_list_lock);
1392 
1393 	return 0;
1394 }
1395 late_initcall_sync(genpd_power_off_unused);
1396 #endif
1397 
1398 #ifdef CONFIG_PM_SLEEP
1399 
1400 /**
1401  * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
1402  * @genpd: PM domain to power off, if possible.
1403  * @use_lock: use the lock.
1404  * @depth: nesting count for lockdep.
1405  *
1406  * Check if the given PM domain can be powered off (during system suspend or
1407  * hibernation) and do that if so.  Also, in that case propagate to its parents.
1408  *
1409  * This function is only called in "noirq" and "syscore" stages of system power
1410  * transitions. The "noirq" callbacks may be executed asynchronously, thus in
1411  * these cases the lock must be held.
1412  */
1413 static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
1414 				 unsigned int depth)
1415 {
1416 	struct gpd_link *link;
1417 
1418 	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
1419 		return;
1420 
1421 	if (genpd->suspended_count != genpd->device_count
1422 	    || atomic_read(&genpd->sd_count) > 0)
1423 		return;
1424 
1425 	/* Check that the children are in their deepest (powered-off) state. */
1426 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
1427 		struct generic_pm_domain *child = link->child;
1428 		if (child->state_idx < child->state_count - 1)
1429 			return;
1430 	}
1431 
1432 	/* Choose the deepest state when suspending */
1433 	genpd->state_idx = genpd->state_count - 1;
1434 	if (_genpd_power_off(genpd, false)) {
1435 		genpd->states[genpd->state_idx].rejected++;
1436 		return;
1437 	} else {
1438 		genpd->states[genpd->state_idx].usage++;
1439 	}
1440 
1441 	genpd->status = GENPD_STATE_OFF;
1442 
1443 	list_for_each_entry(link, &genpd->child_links, child_node) {
1444 		genpd_sd_counter_dec(link->parent);
1445 
1446 		if (use_lock)
1447 			genpd_lock_nested(link->parent, depth + 1);
1448 
1449 		genpd_sync_power_off(link->parent, use_lock, depth + 1);
1450 
1451 		if (use_lock)
1452 			genpd_unlock(link->parent);
1453 	}
1454 }
1455 
1456 /**
1457  * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
1458  * @genpd: PM domain to power on.
1459  * @use_lock: use the lock.
1460  * @depth: nesting count for lockdep.
1461  *
1462  * This function is only called in "noirq" and "syscore" stages of system power
1463  * transitions. The "noirq" callbacks may be executed asynchronously, thus in
1464  * these cases the lock must be held.
1465  */
1466 static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
1467 				unsigned int depth)
1468 {
1469 	struct gpd_link *link;
1470 
1471 	if (genpd_status_on(genpd))
1472 		return;
1473 
1474 	list_for_each_entry(link, &genpd->child_links, child_node) {
1475 		genpd_sd_counter_inc(link->parent);
1476 
1477 		if (use_lock)
1478 			genpd_lock_nested(link->parent, depth + 1);
1479 
1480 		genpd_sync_power_on(link->parent, use_lock, depth + 1);
1481 
1482 		if (use_lock)
1483 			genpd_unlock(link->parent);
1484 	}
1485 
1486 	_genpd_power_on(genpd, false);
1487 	genpd->status = GENPD_STATE_ON;
1488 }
1489 
1490 /**
1491  * genpd_prepare - Start power transition of a device in a PM domain.
1492  * @dev: Device to start the transition of.
1493  *
1494  * Start a power transition of a device (during a system-wide power transition)
1495  * under the assumption that its pm_domain field points to the domain member of
1496  * an object of type struct generic_pm_domain representing a PM domain
1497  * consisting of I/O devices.
1498  */
1499 static int genpd_prepare(struct device *dev)
1500 {
1501 	struct generic_pm_domain *genpd;
1502 	int ret;
1503 
1504 	dev_dbg(dev, "%s()\n", __func__);
1505 
1506 	genpd = dev_to_genpd(dev);
1507 	if (IS_ERR(genpd))
1508 		return -EINVAL;
1509 
1510 	genpd_lock(genpd);
1511 	genpd->prepared_count++;
1512 	genpd_unlock(genpd);
1513 
1514 	ret = pm_generic_prepare(dev);
1515 	if (ret < 0) {
1516 		genpd_lock(genpd);
1517 
1518 		genpd->prepared_count--;
1519 
1520 		genpd_unlock(genpd);
1521 	}
1522 
1523 	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
1524 	return ret >= 0 ? 0 : ret;
1525 }
1526 
1527 /**
1528  * genpd_finish_suspend - Completion of suspend or hibernation of device in an
1529  *   I/O pm domain.
1530  * @dev: Device to suspend.
1531  * @suspend_noirq: Generic suspend_noirq callback.
1532  * @resume_noirq: Generic resume_noirq callback.
1533  *
1534  * Stop the device and remove power from the domain if all devices in it have
1535  * been stopped.
1536  */
1537 static int genpd_finish_suspend(struct device *dev,
1538 				int (*suspend_noirq)(struct device *dev),
1539 				int (*resume_noirq)(struct device *dev))
1540 {
1541 	struct generic_pm_domain *genpd;
1542 	int ret = 0;
1543 
1544 	genpd = dev_to_genpd(dev);
1545 	if (IS_ERR(genpd))
1546 		return -EINVAL;
1547 
1548 	ret = suspend_noirq(dev);
1549 	if (ret)
1550 		return ret;
1551 
1552 	if (device_awake_path(dev) && genpd_is_active_wakeup(genpd))
1553 		return 0;
1554 
1555 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1556 	    !pm_runtime_status_suspended(dev)) {
1557 		ret = genpd_stop_dev(genpd, dev);
1558 		if (ret) {
1559 			resume_noirq(dev);
1560 			return ret;
1561 		}
1562 	}
1563 
1564 	genpd_lock(genpd);
1565 	genpd->suspended_count++;
1566 	genpd_sync_power_off(genpd, true, 0);
1567 	genpd_unlock(genpd);
1568 
1569 	return 0;
1570 }
1571 
1572 /**
1573  * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
1574  * @dev: Device to suspend.
1575  *
1576  * Stop the device and remove power from the domain if all devices in it have
1577  * been stopped.
1578  */
1579 static int genpd_suspend_noirq(struct device *dev)
1580 {
1581 	dev_dbg(dev, "%s()\n", __func__);
1582 
1583 	return genpd_finish_suspend(dev,
1584 				    pm_generic_suspend_noirq,
1585 				    pm_generic_resume_noirq);
1586 }
1587 
1588 /**
1589  * genpd_finish_resume - Completion of resume of device in an I/O PM domain.
1590  * @dev: Device to resume.
1591  * @resume_noirq: Generic resume_noirq callback.
1592  *
1593  * Restore power to the device's PM domain, if necessary, and start the device.
1594  */
1595 static int genpd_finish_resume(struct device *dev,
1596 			       int (*resume_noirq)(struct device *dev))
1597 {
1598 	struct generic_pm_domain *genpd;
1599 	int ret;
1600 
1601 	dev_dbg(dev, "%s()\n", __func__);
1602 
1603 	genpd = dev_to_genpd(dev);
1604 	if (IS_ERR(genpd))
1605 		return -EINVAL;
1606 
1607 	if (device_awake_path(dev) && genpd_is_active_wakeup(genpd))
1608 		return resume_noirq(dev);
1609 
1610 	genpd_lock(genpd);
1611 	genpd_sync_power_on(genpd, true, 0);
1612 	genpd->suspended_count--;
1613 	genpd_unlock(genpd);
1614 
1615 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1616 	    !pm_runtime_status_suspended(dev)) {
1617 		ret = genpd_start_dev(genpd, dev);
1618 		if (ret)
1619 			return ret;
1620 	}
1621 
1622 	return resume_noirq(dev);
1623 }
1624 
1625 /**
1626  * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
1627  * @dev: Device to resume.
1628  *
1629  * Restore power to the device's PM domain, if necessary, and start the device.
1630  */
1631 static int genpd_resume_noirq(struct device *dev)
1632 {
1633 	dev_dbg(dev, "%s()\n", __func__);
1634 
1635 	return genpd_finish_resume(dev, pm_generic_resume_noirq);
1636 }
1637 
1638 /**
1639  * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
1640  * @dev: Device to freeze.
1641  *
1642  * Carry out a late freeze of a device under the assumption that its
1643  * pm_domain field points to the domain member of an object of type
1644  * struct generic_pm_domain representing a power domain consisting of I/O
1645  * devices.
1646  */
1647 static int genpd_freeze_noirq(struct device *dev)
1648 {
1649 	dev_dbg(dev, "%s()\n", __func__);
1650 
1651 	return genpd_finish_suspend(dev,
1652 				    pm_generic_freeze_noirq,
1653 				    pm_generic_thaw_noirq);
1654 }
1655 
1656 /**
1657  * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
1658  * @dev: Device to thaw.
1659  *
1660  * Start the device, unless power has been removed from the domain already
1661  * before the system transition.
1662  */
1663 static int genpd_thaw_noirq(struct device *dev)
1664 {
1665 	dev_dbg(dev, "%s()\n", __func__);
1666 
1667 	return genpd_finish_resume(dev, pm_generic_thaw_noirq);
1668 }
1669 
1670 /**
1671  * genpd_poweroff_noirq - Completion of hibernation of device in an
1672  *   I/O PM domain.
1673  * @dev: Device to poweroff.
1674  *
1675  * Stop the device and remove power from the domain if all devices in it have
1676  * been stopped.
1677  */
1678 static int genpd_poweroff_noirq(struct device *dev)
1679 {
1680 	dev_dbg(dev, "%s()\n", __func__);
1681 
1682 	return genpd_finish_suspend(dev,
1683 				    pm_generic_poweroff_noirq,
1684 				    pm_generic_restore_noirq);
1685 }
1686 
1687 /**
1688  * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
1689  * @dev: Device to resume.
1690  *
1691  * Make sure the domain will be in the same power state as before the
1692  * hibernation the system is resuming from and start the device if necessary.
1693  */
1694 static int genpd_restore_noirq(struct device *dev)
1695 {
1696 	dev_dbg(dev, "%s()\n", __func__);
1697 
1698 	return genpd_finish_resume(dev, pm_generic_restore_noirq);
1699 }
1700 
1701 /**
1702  * genpd_complete - Complete power transition of a device in a power domain.
1703  * @dev: Device to complete the transition of.
1704  *
1705  * Complete a power transition of a device (during a system-wide power
1706  * transition) under the assumption that its pm_domain field points to the
1707  * domain member of an object of type struct generic_pm_domain representing
1708  * a power domain consisting of I/O devices.
1709  */
1710 static void genpd_complete(struct device *dev)
1711 {
1712 	struct generic_pm_domain *genpd;
1713 
1714 	dev_dbg(dev, "%s()\n", __func__);
1715 
1716 	genpd = dev_to_genpd(dev);
1717 	if (IS_ERR(genpd))
1718 		return;
1719 
1720 	pm_generic_complete(dev);
1721 
1722 	genpd_lock(genpd);
1723 
1724 	genpd->prepared_count--;
1725 	if (!genpd->prepared_count)
1726 		genpd_queue_power_off_work(genpd);
1727 
1728 	genpd_unlock(genpd);
1729 }
1730 
1731 static void genpd_switch_state(struct device *dev, bool suspend)
1732 {
1733 	struct generic_pm_domain *genpd;
1734 	bool use_lock;
1735 
1736 	genpd = dev_to_genpd_safe(dev);
1737 	if (!genpd)
1738 		return;
1739 
1740 	use_lock = genpd_is_irq_safe(genpd);
1741 
1742 	if (use_lock)
1743 		genpd_lock(genpd);
1744 
1745 	if (suspend) {
1746 		genpd->suspended_count++;
1747 		genpd_sync_power_off(genpd, use_lock, 0);
1748 	} else {
1749 		genpd_sync_power_on(genpd, use_lock, 0);
1750 		genpd->suspended_count--;
1751 	}
1752 
1753 	if (use_lock)
1754 		genpd_unlock(genpd);
1755 }
1756 
1757 /**
1758  * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
1759  * @dev: The device that is attached to the genpd, that can be suspended.
1760  *
1761  * This routine should typically be called for a device that needs to be
1762  * suspended during the syscore suspend phase. It may also be called during
1763  * suspend-to-idle to suspend a corresponding CPU device that is attached to a
1764  * genpd.
1765  */
1766 void dev_pm_genpd_suspend(struct device *dev)
1767 {
1768 	genpd_switch_state(dev, true);
1769 }
1770 EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);
1771 
1772 /**
1773  * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
1774  * @dev: The device that is attached to the genpd, which needs to be resumed.
1775  *
1776  * This routine should typically be called for a device that needs to be resumed
1777  * during the syscore resume phase. It may also be called during suspend-to-idle
1778  * to resume a corresponding CPU device that is attached to a genpd.
1779  */
1780 void dev_pm_genpd_resume(struct device *dev)
1781 {
1782 	genpd_switch_state(dev, false);
1783 }
1784 EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);
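/*
 * Example (illustrative sketch only): a driver that must quiesce its device
 * during the syscore phase could pair the two helpers above from syscore
 * callbacks. The device pointer "mydev" and the syscore wiring below are
 * hypothetical.
 *
 *	static struct device *mydev;
 *
 *	static int mydev_syscore_suspend(void)
 *	{
 *		dev_pm_genpd_suspend(mydev);
 *		return 0;
 *	}
 *
 *	static void mydev_syscore_resume(void)
 *	{
 *		dev_pm_genpd_resume(mydev);
 *	}
 *
 *	static struct syscore_ops mydev_syscore_ops = {
 *		.suspend = mydev_syscore_suspend,
 *		.resume = mydev_syscore_resume,
 *	};
 *
 *	register_syscore_ops(&mydev_syscore_ops);
 */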
1785 
1786 #else /* !CONFIG_PM_SLEEP */
1787 
1788 #define genpd_prepare		NULL
1789 #define genpd_suspend_noirq	NULL
1790 #define genpd_resume_noirq	NULL
1791 #define genpd_freeze_noirq	NULL
1792 #define genpd_thaw_noirq	NULL
1793 #define genpd_poweroff_noirq	NULL
1794 #define genpd_restore_noirq	NULL
1795 #define genpd_complete		NULL
1796 
1797 #endif /* CONFIG_PM_SLEEP */
1798 
1799 static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
1800 							   bool has_governor)
1801 {
1802 	struct generic_pm_domain_data *gpd_data;
1803 	struct gpd_timing_data *td;
1804 	int ret;
1805 
1806 	ret = dev_pm_get_subsys_data(dev);
1807 	if (ret)
1808 		return ERR_PTR(ret);
1809 
1810 	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1811 	if (!gpd_data) {
1812 		ret = -ENOMEM;
1813 		goto err_put;
1814 	}
1815 
1816 	gpd_data->base.dev = dev;
1817 	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1818 
1819 	/* Allocate data used by a governor. */
1820 	if (has_governor) {
1821 		td = kzalloc(sizeof(*td), GFP_KERNEL);
1822 		if (!td) {
1823 			ret = -ENOMEM;
1824 			goto err_free;
1825 		}
1826 
1827 		td->constraint_changed = true;
1828 		td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
1829 		td->next_wakeup = KTIME_MAX;
1830 		gpd_data->td = td;
1831 	}
1832 
1833 	spin_lock_irq(&dev->power.lock);
1834 
1835 	if (dev->power.subsys_data->domain_data)
1836 		ret = -EINVAL;
1837 	else
1838 		dev->power.subsys_data->domain_data = &gpd_data->base;
1839 
1840 	spin_unlock_irq(&dev->power.lock);
1841 
1842 	if (ret)
1843 		goto err_free;
1844 
1845 	return gpd_data;
1846 
1847  err_free:
1848 	kfree(gpd_data->td);
1849 	kfree(gpd_data);
1850  err_put:
1851 	dev_pm_put_subsys_data(dev);
1852 	return ERR_PTR(ret);
1853 }
1854 
1855 static void genpd_free_dev_data(struct device *dev,
1856 				struct generic_pm_domain_data *gpd_data)
1857 {
1858 	spin_lock_irq(&dev->power.lock);
1859 
1860 	dev->power.subsys_data->domain_data = NULL;
1861 
1862 	spin_unlock_irq(&dev->power.lock);
1863 
1864 	dev_pm_opp_clear_config(gpd_data->opp_token);
1865 	kfree(gpd_data->td);
1866 	kfree(gpd_data);
1867 	dev_pm_put_subsys_data(dev);
1868 }
1869 
1870 static void genpd_update_cpumask(struct generic_pm_domain *genpd,
1871 				 int cpu, bool set, unsigned int depth)
1872 {
1873 	struct gpd_link *link;
1874 
1875 	if (!genpd_is_cpu_domain(genpd))
1876 		return;
1877 
1878 	list_for_each_entry(link, &genpd->child_links, child_node) {
1879 		struct generic_pm_domain *parent = link->parent;
1880 
1881 		genpd_lock_nested(parent, depth + 1);
1882 		genpd_update_cpumask(parent, cpu, set, depth + 1);
1883 		genpd_unlock(parent);
1884 	}
1885 
1886 	if (set)
1887 		cpumask_set_cpu(cpu, genpd->cpus);
1888 	else
1889 		cpumask_clear_cpu(cpu, genpd->cpus);
1890 }
1891 
1892 static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
1893 {
1894 	if (cpu >= 0)
1895 		genpd_update_cpumask(genpd, cpu, true, 0);
1896 }
1897 
1898 static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
1899 {
1900 	if (cpu >= 0)
1901 		genpd_update_cpumask(genpd, cpu, false, 0);
1902 }
1903 
1904 static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
1905 {
1906 	int cpu;
1907 
1908 	if (!genpd_is_cpu_domain(genpd))
1909 		return -1;
1910 
1911 	for_each_possible_cpu(cpu) {
1912 		if (get_cpu_device(cpu) == dev)
1913 			return cpu;
1914 	}
1915 
1916 	return -1;
1917 }
1918 
1919 static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1920 			    struct device *base_dev)
1921 {
1922 	struct genpd_governor_data *gd = genpd->gd;
1923 	struct generic_pm_domain_data *gpd_data;
1924 	int ret;
1925 
1926 	dev_dbg(dev, "%s()\n", __func__);
1927 
1928 	gpd_data = genpd_alloc_dev_data(dev, gd);
1929 	if (IS_ERR(gpd_data))
1930 		return PTR_ERR(gpd_data);
1931 
1932 	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);
1933 
1934 	gpd_data->hw_mode = genpd->get_hwmode_dev ? genpd->get_hwmode_dev(genpd, dev) : false;
1935 
1936 	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1937 	if (ret)
1938 		goto out;
1939 
1940 	genpd_lock(genpd);
1941 
1942 	genpd_set_cpumask(genpd, gpd_data->cpu);
1943 
1944 	genpd->device_count++;
1945 	if (gd)
1946 		gd->max_off_time_changed = true;
1947 
1948 	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1949 
1950 	genpd_unlock(genpd);
1951 	dev_pm_domain_set(dev, &genpd->domain);
1952  out:
1953 	if (ret)
1954 		genpd_free_dev_data(dev, gpd_data);
1955 	else
1956 		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
1957 					DEV_PM_QOS_RESUME_LATENCY);
1958 
1959 	return ret;
1960 }
1961 
1962 /**
1963  * pm_genpd_add_device - Add a device to an I/O PM domain.
1964  * @genpd: PM domain to add the device to.
1965  * @dev: Device to be added.
1966  */
1967 int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
1968 {
1969 	int ret;
1970 
1971 	if (!genpd || !dev)
1972 		return -EINVAL;
1973 
1974 	mutex_lock(&gpd_list_lock);
1975 	ret = genpd_add_device(genpd, dev, dev);
1976 	mutex_unlock(&gpd_list_lock);
1977 
1978 	return ret;
1979 }
1980 EXPORT_SYMBOL_GPL(pm_genpd_add_device);
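/*
 * Example (hypothetical names): platform code that has initialized "my_pd"
 * with pm_genpd_init() can attach a device to it directly; the caller owns
 * the error handling:
 *
 *	ret = pm_genpd_add_device(&my_pd, &pdev->dev);
 *	if (ret)
 *		return ret;
 *
 * The device is detached again with pm_genpd_remove_device(&pdev->dev).
 */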
1981 
1982 static int genpd_remove_device(struct generic_pm_domain *genpd,
1983 			       struct device *dev)
1984 {
1985 	struct generic_pm_domain_data *gpd_data;
1986 	struct pm_domain_data *pdd;
1987 	int ret = 0;
1988 
1989 	dev_dbg(dev, "%s()\n", __func__);
1990 
1991 	pdd = dev->power.subsys_data->domain_data;
1992 	gpd_data = to_gpd_data(pdd);
1993 	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
1994 				   DEV_PM_QOS_RESUME_LATENCY);
1995 
1996 	genpd_lock(genpd);
1997 
1998 	if (genpd->prepared_count > 0) {
1999 		ret = -EAGAIN;
2000 		goto out;
2001 	}
2002 
2003 	genpd->device_count--;
2004 	if (genpd->gd)
2005 		genpd->gd->max_off_time_changed = true;
2006 
2007 	genpd_clear_cpumask(genpd, gpd_data->cpu);
2008 
2009 	list_del_init(&pdd->list_node);
2010 
2011 	genpd_unlock(genpd);
2012 
2013 	dev_pm_domain_set(dev, NULL);
2014 
2015 	if (genpd->detach_dev)
2016 		genpd->detach_dev(genpd, dev);
2017 
2018 	genpd_free_dev_data(dev, gpd_data);
2019 
2020 	return 0;
2021 
2022  out:
2023 	genpd_unlock(genpd);
2024 	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);
2025 
2026 	return ret;
2027 }
2028 
2029 /**
2030  * pm_genpd_remove_device - Remove a device from an I/O PM domain.
2031  * @dev: Device to be removed.
2032  */
2033 int pm_genpd_remove_device(struct device *dev)
2034 {
2035 	struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);
2036 
2037 	if (!genpd)
2038 		return -EINVAL;
2039 
2040 	return genpd_remove_device(genpd, dev);
2041 }
2042 EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
2043 
2044 /**
2045  * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
2046  *
2047  * @dev: Device that should be associated with the notifier
2048  * @nb: The notifier block to register
2049  *
2050  * Users may call this function to add a genpd power on/off notifier for an
2051  * attached @dev. Only one notifier per device is allowed. The notifier is
2052  * invoked when genpd powers the PM domain on or off.
2053  *
2054  * It is assumed that the user guarantees that the genpd won't be detached
2055  * while this routine is being called.
2056  *
2057  * Returns 0 on success and negative error values on failures.
2058  */
2059 int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
2060 {
2061 	struct generic_pm_domain *genpd;
2062 	struct generic_pm_domain_data *gpd_data;
2063 	int ret;
2064 
2065 	genpd = dev_to_genpd_safe(dev);
2066 	if (!genpd)
2067 		return -ENODEV;
2068 
2069 	if (WARN_ON(!dev->power.subsys_data ||
2070 		     !dev->power.subsys_data->domain_data))
2071 		return -EINVAL;
2072 
2073 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
2074 	if (gpd_data->power_nb)
2075 		return -EEXIST;
2076 
2077 	genpd_lock(genpd);
2078 	ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
2079 	genpd_unlock(genpd);
2080 
2081 	if (ret) {
2082 		dev_warn(dev, "failed to add notifier for PM domain %s\n",
2083 			 dev_name(&genpd->dev));
2084 		return ret;
2085 	}
2086 
2087 	gpd_data->power_nb = nb;
2088 	return 0;
2089 }
2090 EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
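/*
 * Example (sketch; "my_genpd_cb" and "my_nb" are hypothetical): a driver can
 * react to its domain's power transitions by registering a notifier for an
 * attached device. The actions delivered are GENPD_NOTIFY_PRE_OFF,
 * GENPD_NOTIFY_OFF, GENPD_NOTIFY_PRE_ON and GENPD_NOTIFY_ON.
 *
 *	static int my_genpd_cb(struct notifier_block *nb,
 *			       unsigned long action, void *data)
 *	{
 *		switch (action) {
 *		case GENPD_NOTIFY_PRE_OFF:
 *			// save context before the domain goes off
 *			break;
 *		case GENPD_NOTIFY_ON:
 *			// restore context after the domain is on again
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_genpd_cb };
 *
 *	ret = dev_pm_genpd_add_notifier(dev, &my_nb);
 */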
2091 
2092 /**
2093  * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
2094  *
2095  * @dev: Device that is associated with the notifier
2096  *
2097  * Users may call this function to remove a genpd power on/off notifier for an
2098  * attached @dev.
2099  *
2100  * It is assumed that the user guarantees that the genpd won't be detached
2101  * while this routine is being called.
2102  *
2103  * Returns 0 on success and negative error values on failures.
2104  */
2105 int dev_pm_genpd_remove_notifier(struct device *dev)
2106 {
2107 	struct generic_pm_domain *genpd;
2108 	struct generic_pm_domain_data *gpd_data;
2109 	int ret;
2110 
2111 	genpd = dev_to_genpd_safe(dev);
2112 	if (!genpd)
2113 		return -ENODEV;
2114 
2115 	if (WARN_ON(!dev->power.subsys_data ||
2116 		     !dev->power.subsys_data->domain_data))
2117 		return -EINVAL;
2118 
2119 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
2120 	if (!gpd_data->power_nb)
2121 		return -ENODEV;
2122 
2123 	genpd_lock(genpd);
2124 	ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
2125 					    gpd_data->power_nb);
2126 	genpd_unlock(genpd);
2127 
2128 	if (ret) {
2129 		dev_warn(dev, "failed to remove notifier for PM domain %s\n",
2130 			 dev_name(&genpd->dev));
2131 		return ret;
2132 	}
2133 
2134 	gpd_data->power_nb = NULL;
2135 	return 0;
2136 }
2137 EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);
2138 
2139 static int genpd_add_subdomain(struct generic_pm_domain *genpd,
2140 			       struct generic_pm_domain *subdomain)
2141 {
2142 	struct gpd_link *link, *itr;
2143 	int ret = 0;
2144 
2145 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
2146 	    || genpd == subdomain)
2147 		return -EINVAL;
2148 
2149 	/*
2150 	 * If the domain can be powered on/off in an IRQ safe
2151 	 * context, ensure that the subdomain can also be
2152 	 * powered on/off in that context.
2153 	 */
2154 	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
2155 		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
2156 		     dev_name(&genpd->dev), subdomain->name);
2157 		return -EINVAL;
2158 	}
2159 
2160 	link = kzalloc(sizeof(*link), GFP_KERNEL);
2161 	if (!link)
2162 		return -ENOMEM;
2163 
2164 	genpd_lock(subdomain);
2165 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
2166 
2167 	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
2168 		ret = -EINVAL;
2169 		goto out;
2170 	}
2171 
2172 	list_for_each_entry(itr, &genpd->parent_links, parent_node) {
2173 		if (itr->child == subdomain && itr->parent == genpd) {
2174 			ret = -EINVAL;
2175 			goto out;
2176 		}
2177 	}
2178 
2179 	link->parent = genpd;
2180 	list_add_tail(&link->parent_node, &genpd->parent_links);
2181 	link->child = subdomain;
2182 	list_add_tail(&link->child_node, &subdomain->child_links);
2183 	if (genpd_status_on(subdomain))
2184 		genpd_sd_counter_inc(genpd);
2185 
2186  out:
2187 	genpd_unlock(genpd);
2188 	genpd_unlock(subdomain);
2189 	if (ret)
2190 		kfree(link);
2191 	return ret;
2192 }
2193 
2194 /**
2195  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2196  * @genpd: Leader PM domain to add the subdomain to.
2197  * @subdomain: Subdomain to be added.
2198  */
2199 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
2200 			   struct generic_pm_domain *subdomain)
2201 {
2202 	int ret;
2203 
2204 	mutex_lock(&gpd_list_lock);
2205 	ret = genpd_add_subdomain(genpd, subdomain);
2206 	mutex_unlock(&gpd_list_lock);
2207 
2208 	return ret;
2209 }
2210 EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
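/*
 * Example (hypothetical domains): to express that "child_pd" is nested in
 * "parent_pd", so that the parent is kept powered whenever the child is on:
 *
 *	ret = pm_genpd_add_subdomain(&parent_pd, &child_pd);
 *	if (ret)
 *		return ret;
 */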
2211 
2212 /**
2213  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
2214  * @genpd: Leader PM domain to remove the subdomain from.
2215  * @subdomain: Subdomain to be removed.
2216  */
2217 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
2218 			      struct generic_pm_domain *subdomain)
2219 {
2220 	struct gpd_link *l, *link;
2221 	int ret = -EINVAL;
2222 
2223 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
2224 		return -EINVAL;
2225 
2226 	genpd_lock(subdomain);
2227 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
2228 
2229 	if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
2230 		pr_warn("%s: unable to remove subdomain %s\n",
2231 			dev_name(&genpd->dev), subdomain->name);
2232 		ret = -EBUSY;
2233 		goto out;
2234 	}
2235 
2236 	list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
2237 		if (link->child != subdomain)
2238 			continue;
2239 
2240 		list_del(&link->parent_node);
2241 		list_del(&link->child_node);
2242 		kfree(link);
2243 		if (genpd_status_on(subdomain))
2244 			genpd_sd_counter_dec(genpd);
2245 
2246 		ret = 0;
2247 		break;
2248 	}
2249 
2250 out:
2251 	genpd_unlock(genpd);
2252 	genpd_unlock(subdomain);
2253 
2254 	return ret;
2255 }
2256 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
2257 
2258 static void genpd_free_default_power_state(struct genpd_power_state *states,
2259 					   unsigned int state_count)
2260 {
2261 	kfree(states);
2262 }
2263 
2264 static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
2265 {
2266 	struct genpd_power_state *state;
2267 
2268 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2269 	if (!state)
2270 		return -ENOMEM;
2271 
2272 	genpd->states = state;
2273 	genpd->state_count = 1;
2274 	genpd->free_states = genpd_free_default_power_state;
2275 
2276 	return 0;
2277 }
2278 
2279 static void genpd_provider_release(struct device *dev)
2280 {
2281 	/* nothing to be done here */
2282 }
2283 
2284 static int genpd_alloc_data(struct generic_pm_domain *genpd)
2285 {
2286 	struct genpd_governor_data *gd = NULL;
2287 	int ret;
2288 
2289 	if (genpd_is_cpu_domain(genpd) &&
2290 	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
2291 		return -ENOMEM;
2292 
2293 	if (genpd->gov) {
2294 		gd = kzalloc(sizeof(*gd), GFP_KERNEL);
2295 		if (!gd) {
2296 			ret = -ENOMEM;
2297 			goto free;
2298 		}
2299 
2300 		gd->max_off_time_ns = -1;
2301 		gd->max_off_time_changed = true;
2302 		gd->next_wakeup = KTIME_MAX;
2303 		gd->next_hrtimer = KTIME_MAX;
2304 	}
2305 
2306 	/* Use only one "off" state if no states were declared */
2307 	if (genpd->state_count == 0) {
2308 		ret = genpd_set_default_power_state(genpd);
2309 		if (ret)
2310 			goto free;
2311 	}
2312 
2313 	genpd->gd = gd;
2314 	device_initialize(&genpd->dev);
2315 	genpd->dev.release = genpd_provider_release;
2316 	genpd->dev.bus = &genpd_provider_bus_type;
2317 	genpd->dev.parent = &genpd_provider_bus;
2318 
2319 	if (!genpd_is_dev_name_fw(genpd)) {
2320 		dev_set_name(&genpd->dev, "%s", genpd->name);
2321 	} else {
2322 		ret = ida_alloc(&genpd_ida, GFP_KERNEL);
2323 		if (ret < 0)
2324 			goto put;
2325 
2326 		genpd->device_id = ret;
2327 		dev_set_name(&genpd->dev, "%s_%u", genpd->name, genpd->device_id);
2328 	}
2329 
2330 	return 0;
2331 put:
2332 	put_device(&genpd->dev);
2333 	if (genpd->free_states == genpd_free_default_power_state) {
2334 		kfree(genpd->states);
2335 		genpd->states = NULL;
2336 	}
2337 free:
2338 	if (genpd_is_cpu_domain(genpd))
2339 		free_cpumask_var(genpd->cpus);
2340 	kfree(gd);
2341 	return ret;
2342 }
2343 
2344 static void genpd_free_data(struct generic_pm_domain *genpd)
2345 {
2346 	put_device(&genpd->dev);
2347 	if (genpd->device_id != -ENXIO)
2348 		ida_free(&genpd_ida, genpd->device_id);
2349 	if (genpd_is_cpu_domain(genpd))
2350 		free_cpumask_var(genpd->cpus);
2351 	if (genpd->free_states)
2352 		genpd->free_states(genpd->states, genpd->state_count);
2353 	kfree(genpd->gd);
2354 }
2355 
2356 static void genpd_lock_init(struct generic_pm_domain *genpd)
2357 {
2358 	if (genpd_is_cpu_domain(genpd)) {
2359 		raw_spin_lock_init(&genpd->raw_slock);
2360 		genpd->lock_ops = &genpd_raw_spin_ops;
2361 	} else if (genpd_is_irq_safe(genpd)) {
2362 		spin_lock_init(&genpd->slock);
2363 		genpd->lock_ops = &genpd_spin_ops;
2364 	} else {
2365 		mutex_init(&genpd->mlock);
2366 		genpd->lock_ops = &genpd_mtx_ops;
2367 	}
2368 }
2369 
2370 /**
2371  * pm_genpd_init - Initialize a generic I/O PM domain object.
2372  * @genpd: PM domain object to initialize.
2373  * @gov: PM domain governor to associate with the domain (may be NULL).
2374  * @is_off: Initial value of the domain's power_is_off field.
2375  *
2376  * Returns 0 on successful initialization, else a negative error code.
2377  */
2378 int pm_genpd_init(struct generic_pm_domain *genpd,
2379 		  struct dev_power_governor *gov, bool is_off)
2380 {
2381 	int ret;
2382 
2383 	if (IS_ERR_OR_NULL(genpd))
2384 		return -EINVAL;
2385 
2386 	INIT_LIST_HEAD(&genpd->parent_links);
2387 	INIT_LIST_HEAD(&genpd->child_links);
2388 	INIT_LIST_HEAD(&genpd->dev_list);
2389 	RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
2390 	genpd_lock_init(genpd);
2391 	genpd->gov = gov;
2392 	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
2393 	atomic_set(&genpd->sd_count, 0);
2394 	genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
2395 	genpd->stay_on = !is_off;
2396 	genpd->sync_state = GENPD_SYNC_STATE_OFF;
2397 	genpd->device_count = 0;
2398 	genpd->provider = NULL;
2399 	genpd->device_id = -ENXIO;
2400 	genpd->has_provider = false;
2401 	genpd->opp_table = NULL;
2402 	genpd->accounting_time = ktime_get_mono_fast_ns();
2403 	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
2404 	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
2405 	genpd->domain.ops.prepare = genpd_prepare;
2406 	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
2407 	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
2408 	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
2409 	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
2410 	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
2411 	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
2412 	genpd->domain.ops.complete = genpd_complete;
2413 	genpd->domain.start = genpd_dev_pm_start;
2414 	genpd->domain.set_performance_state = genpd_dev_pm_set_performance_state;
2415 
2416 	if (genpd->flags & GENPD_FLAG_PM_CLK) {
2417 		genpd->dev_ops.stop = pm_clk_suspend;
2418 		genpd->dev_ops.start = pm_clk_resume;
2419 	}
2420 
2421 	/* The always-on governor works better with the corresponding flag. */
2422 	if (gov == &pm_domain_always_on_gov)
2423 		genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
2424 
2425 	/* Always-on domains must be powered on at initialization. */
2426 	if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
2427 			!genpd_status_on(genpd)) {
2428 		pr_err("always-on PM domain %s is not on\n", genpd->name);
2429 		return -EINVAL;
2430 	}
2431 
2432 	/* Multiple states but no governor doesn't make sense. */
2433 	if (!gov && genpd->state_count > 1)
2434 		pr_warn("%s: no governor for states\n", genpd->name);
2435 
2436 	ret = genpd_alloc_data(genpd);
2437 	if (ret)
2438 		return ret;
2439 
2440 	mutex_lock(&gpd_list_lock);
2441 	list_add(&genpd->gpd_list_node, &gpd_list);
2442 	mutex_unlock(&gpd_list_lock);
2443 	genpd_debug_add(genpd);
2444 
2445 	return 0;
2446 }
2447 EXPORT_SYMBOL_GPL(pm_genpd_init);
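/*
 * Example (minimal sketch, hypothetical callbacks): a typical provider fills
 * in the name and the power_on/power_off callbacks, then registers the domain
 * as initially off before adding devices or an OF provider:
 *
 *	static int my_pd_power_on(struct generic_pm_domain *pd)
 *	{
 *		// enable the power rail, deassert resets, etc.
 *		return 0;
 *	}
 *
 *	static int my_pd_power_off(struct generic_pm_domain *pd)
 *	{
 *		// remove power from the island
 *		return 0;
 *	}
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name = "my_pd",
 *		.power_on = my_pd_power_on,
 *		.power_off = my_pd_power_off,
 *	};
 *
 *	ret = pm_genpd_init(&my_pd, NULL, true);
 */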
2448 
2449 static int genpd_remove(struct generic_pm_domain *genpd)
2450 {
2451 	struct gpd_link *l, *link;
2452 
2453 	if (IS_ERR_OR_NULL(genpd))
2454 		return -EINVAL;
2455 
2456 	genpd_lock(genpd);
2457 
2458 	if (genpd->has_provider) {
2459 		genpd_unlock(genpd);
2460 		pr_err("Provider present, unable to remove %s\n", dev_name(&genpd->dev));
2461 		return -EBUSY;
2462 	}
2463 
2464 	if (!list_empty(&genpd->parent_links) || genpd->device_count) {
2465 		genpd_unlock(genpd);
2466 		pr_err("%s: unable to remove %s\n", __func__, dev_name(&genpd->dev));
2467 		return -EBUSY;
2468 	}
2469 
2470 	list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
2471 		list_del(&link->parent_node);
2472 		list_del(&link->child_node);
2473 		kfree(link);
2474 	}
2475 
2476 	list_del(&genpd->gpd_list_node);
2477 	genpd_unlock(genpd);
2478 	genpd_debug_remove(genpd);
2479 	cancel_work_sync(&genpd->power_off_work);
2480 	genpd_free_data(genpd);
2481 
2482 	pr_debug("%s: removed %s\n", __func__, dev_name(&genpd->dev));
2483 
2484 	return 0;
2485 }
2486 
2487 /**
2488  * pm_genpd_remove - Remove a generic I/O PM domain
2489  * @genpd: Pointer to PM domain that is to be removed.
2490  *
2491  * To remove the PM domain, this function:
2492  *  - Removes the PM domain as a subdomain to any parent domains,
2493  *    if it was added.
2494  *  - Removes the PM domain from the list of registered PM domains.
2495  *
2496  * The PM domain will only be removed if the associated provider has
2497  * been removed, it is not a parent of any other PM domain, and it has no
2498  * devices associated with it.
2499  */
2500 int pm_genpd_remove(struct generic_pm_domain *genpd)
2501 {
2502 	int ret;
2503 
2504 	mutex_lock(&gpd_list_lock);
2505 	ret = genpd_remove(genpd);
2506 	mutex_unlock(&gpd_list_lock);
2507 
2508 	return ret;
2509 }
2510 EXPORT_SYMBOL_GPL(pm_genpd_remove);
2511 
2512 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
2513 
2514 /*
2515  * Device Tree based PM domain providers.
2516  *
2517  * The code below implements generic device tree based PM domain providers that
2518  * bind device tree nodes with generic PM domains registered in the system.
2519  *
2520  * Any driver that registers generic PM domains and needs to support binding of
2521  * devices to these domains is supposed to register a PM domain provider, which
2522  * maps a PM domain specifier retrieved from the device tree to a PM domain.
2523  *
2524  * Two simple mapping functions have been provided for convenience:
2525  *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
2526  *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
2527  *    index.
2528  */
2529 
2530 /**
2531  * struct of_genpd_provider - PM domain provider registration structure
2532  * @link: Entry in global list of PM domain providers
2533  * @node: Pointer to device tree node of PM domain provider
2534  * @xlate: Provider-specific xlate callback mapping a set of specifier cells
2535  *         into a PM domain.
2536  * @data: context pointer to be passed into @xlate callback
2537  */
2538 struct of_genpd_provider {
2539 	struct list_head link;
2540 	struct device_node *node;
2541 	genpd_xlate_t xlate;
2542 	void *data;
2543 };
2544 
2545 /* List of registered PM domain providers. */
2546 static LIST_HEAD(of_genpd_providers);
2547 /* Mutex to protect the list above. */
2548 static DEFINE_MUTEX(of_genpd_mutex);
2549 /* Used to prevent registering devices before the bus. */
2550 static bool genpd_bus_registered;
2551 
2552 /**
2553  * genpd_xlate_simple() - Xlate function for direct node-domain mapping
2554  * @genpdspec: OF phandle args to map into a PM domain
2555  * @data: xlate function private data - pointer to struct generic_pm_domain
2556  *
2557  * This is a generic xlate function that can be used to model PM domains that
2558  * have their own device tree nodes. The private data of the xlate function needs
2559  * to be a valid pointer to struct generic_pm_domain.
2560  */
2561 static struct generic_pm_domain *genpd_xlate_simple(
2562 					const struct of_phandle_args *genpdspec,
2563 					void *data)
2564 {
2565 	return data;
2566 }
2567 
2568 /**
2569  * genpd_xlate_onecell() - Xlate function using a single index.
2570  * @genpdspec: OF phandle args to map into a PM domain
2571  * @data: xlate function private data - pointer to struct genpd_onecell_data
2572  *
2573  * This is a generic xlate function that can be used to model simple PM domain
2574  * controllers that have one device tree node and provide multiple PM domains.
2575  * A single cell is used as an index into an array of PM domains specified in
2576  * the genpd_onecell_data struct when registering the provider.
2577  */
2578 static struct generic_pm_domain *genpd_xlate_onecell(
2579 					const struct of_phandle_args *genpdspec,
2580 					void *data)
2581 {
2582 	struct genpd_onecell_data *genpd_data = data;
2583 	unsigned int idx = genpdspec->args[0];
2584 
2585 	if (genpdspec->args_count != 1)
2586 		return ERR_PTR(-EINVAL);
2587 
2588 	if (idx >= genpd_data->num_domains) {
2589 		pr_err("%s: invalid domain index %u\n", __func__, idx);
2590 		return ERR_PTR(-EINVAL);
2591 	}
2592 
2593 	if (!genpd_data->domains[idx])
2594 		return ERR_PTR(-ENOENT);
2595 
2596 	return genpd_data->domains[idx];
2597 }
2598 
2599 /**
2600  * genpd_add_provider() - Register a PM domain provider for a node
2601  * @np: Device node pointer associated with the PM domain provider.
2602  * @xlate: Callback for decoding PM domain from phandle arguments.
2603  * @data: Context pointer for @xlate callback.
2604  */
2605 static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
2606 			      void *data)
2607 {
2608 	struct of_genpd_provider *cp;
2609 
2610 	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2611 	if (!cp)
2612 		return -ENOMEM;
2613 
2614 	cp->node = of_node_get(np);
2615 	cp->data = data;
2616 	cp->xlate = xlate;
2617 	fwnode_dev_initialized(of_fwnode_handle(np), true);
2618 
2619 	mutex_lock(&of_genpd_mutex);
2620 	list_add(&cp->link, &of_genpd_providers);
2621 	mutex_unlock(&of_genpd_mutex);
2622 	pr_debug("Added domain provider from %pOF\n", np);
2623 
2624 	return 0;
2625 }
2626 
2627 static bool genpd_present(const struct generic_pm_domain *genpd)
2628 {
2629 	bool ret = false;
2630 	const struct generic_pm_domain *gpd;
2631 
2632 	mutex_lock(&gpd_list_lock);
2633 	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2634 		if (gpd == genpd) {
2635 			ret = true;
2636 			break;
2637 		}
2638 	}
2639 	mutex_unlock(&gpd_list_lock);
2640 
2641 	return ret;
2642 }
2643 
2644 static void genpd_sync_state(struct device *dev)
2645 {
2646 	return of_genpd_sync_state(dev->of_node);
2647 }
2648 
2649 /**
2650  * of_genpd_add_provider_simple() - Register a simple PM domain provider
2651  * @np: Device node pointer associated with the PM domain provider.
2652  * @genpd: Pointer to PM domain associated with the PM domain provider.
2653  */
2654 int of_genpd_add_provider_simple(struct device_node *np,
2655 				 struct generic_pm_domain *genpd)
2656 {
2657 	struct fwnode_handle *fwnode;
2658 	struct device *dev;
2659 	int ret;
2660 
2661 	if (!np || !genpd)
2662 		return -EINVAL;
2663 
2664 	if (!genpd_bus_registered)
2665 		return -ENODEV;
2666 
2667 	if (!genpd_present(genpd))
2668 		return -EINVAL;
2669 
2670 	genpd->dev.of_node = np;
2671 
2672 	fwnode = of_fwnode_handle(np);
2673 	dev = get_dev_from_fwnode(fwnode);
2674 	if (!dev && !genpd_is_no_sync_state(genpd)) {
2675 		genpd->sync_state = GENPD_SYNC_STATE_SIMPLE;
2676 		device_set_node(&genpd->dev, fwnode);
2677 	} else {
2678 		dev_set_drv_sync_state(dev, genpd_sync_state);
2679 	}
2680 
2681 	put_device(dev);
2682 
2683 	ret = device_add(&genpd->dev);
2684 	if (ret)
2685 		return ret;
2686 
2687 	/* Parse genpd OPP table */
2688 	if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
2689 		ret = dev_pm_opp_of_add_table(&genpd->dev);
2690 		if (ret) {
2691 			dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n");
2692 			goto err_del;
2693 		}
2694 
2695 		/*
2696 		 * Save table for faster processing while setting performance
2697 		 * state.
2698 		 */
2699 		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2700 		WARN_ON(IS_ERR(genpd->opp_table));
2701 	}
2702 
2703 	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
2704 	if (ret)
2705 		goto err_opp;
2706 
2707 	genpd->provider = fwnode;
2708 	genpd->has_provider = true;
2709 
2710 	return 0;
2711 
2712 err_opp:
2713 	if (genpd->opp_table) {
2714 		dev_pm_opp_put_opp_table(genpd->opp_table);
2715 		dev_pm_opp_of_remove_table(&genpd->dev);
2716 	}
2717 err_del:
2718 	device_del(&genpd->dev);
2719 	return ret;
2720 }
2721 EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
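/*
 * Example (sketch): a provider driver for a node with
 * "#power-domain-cells = <0>" registers its single domain against that node;
 * "my_pd" is assumed to have been set up with pm_genpd_init() already.
 *
 *	ret = of_genpd_add_provider_simple(pdev->dev.of_node, &my_pd);
 *	if (ret) {
 *		pm_genpd_remove(&my_pd);
 *		return ret;
 *	}
 */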
2722 
2723 /**
2724  * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
2725  * @np: Device node pointer associated with the PM domain provider.
2726  * @data: Pointer to the data associated with the PM domain provider.
2727  */
2728 int of_genpd_add_provider_onecell(struct device_node *np,
2729 				  struct genpd_onecell_data *data)
2730 {
2731 	struct generic_pm_domain *genpd;
2732 	struct fwnode_handle *fwnode;
2733 	struct device *dev;
2734 	unsigned int i;
2735 	int ret = -EINVAL;
2736 	bool sync_state = false;
2737 
2738 	if (!np || !data)
2739 		return -EINVAL;
2740 
2741 	if (!genpd_bus_registered)
2742 		return -ENODEV;
2743 
2744 	if (!data->xlate)
2745 		data->xlate = genpd_xlate_onecell;
2746 
2747 	fwnode = of_fwnode_handle(np);
2748 	dev = get_dev_from_fwnode(fwnode);
2749 	if (!dev)
2750 		sync_state = true;
2751 	else
2752 		dev_set_drv_sync_state(dev, genpd_sync_state);
2753 
2754 	put_device(dev);
2755 
2756 	for (i = 0; i < data->num_domains; i++) {
2757 		genpd = data->domains[i];
2758 
2759 		if (!genpd)
2760 			continue;
2761 		if (!genpd_present(genpd))
2762 			goto error;
2763 
2764 		genpd->dev.of_node = np;
2765 
2766 		if (sync_state && !genpd_is_no_sync_state(genpd)) {
2767 			genpd->sync_state = GENPD_SYNC_STATE_ONECELL;
2768 			device_set_node(&genpd->dev, fwnode);
2769 			sync_state = false;
2770 		}
2771 
2772 		ret = device_add(&genpd->dev);
2773 		if (ret)
2774 			goto error;
2775 
2776 		/* Parse genpd OPP table */
2777 		if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
2778 			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
2779 			if (ret) {
2780 				dev_err_probe(&genpd->dev, ret,
2781 					      "Failed to add OPP table for index %d\n", i);
2782 				device_del(&genpd->dev);
2783 				goto error;
2784 			}
2785 
2786 			/*
2787 			 * Save table for faster processing while setting
2788 			 * performance state.
2789 			 */
2790 			genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2791 			WARN_ON(IS_ERR(genpd->opp_table));
2792 		}
2793 
2794 		genpd->provider = fwnode;
2795 		genpd->has_provider = true;
2796 	}
2797 
2798 	ret = genpd_add_provider(np, data->xlate, data);
2799 	if (ret < 0)
2800 		goto error;
2801 
2802 	return 0;
2803 
2804 error:
2805 	while (i--) {
2806 		genpd = data->domains[i];
2807 
2808 		if (!genpd)
2809 			continue;
2810 
2811 		genpd->provider = NULL;
2812 		genpd->has_provider = false;
2813 
2814 		if (genpd->opp_table) {
2815 			dev_pm_opp_put_opp_table(genpd->opp_table);
2816 			dev_pm_opp_of_remove_table(&genpd->dev);
2817 		}
2818 
2819 		device_del(&genpd->dev);
2820 	}
2821 
2822 	return ret;
2823 }
2824 EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
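/*
 * Example (sketch, hypothetical domains): a node with
 * "#power-domain-cells = <1>" can expose several domains by index through
 * struct genpd_onecell_data; leaving data->xlate NULL selects
 * genpd_xlate_onecell() above.
 *
 *	static struct generic_pm_domain *my_domains[] = { &pd_a, &pd_b };
 *
 *	static struct genpd_onecell_data my_genpd_data = {
 *		.domains = my_domains,
 *		.num_domains = ARRAY_SIZE(my_domains),
 *	};
 *
 *	ret = of_genpd_add_provider_onecell(np, &my_genpd_data);
 */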
2825 
2826 /**
2827  * of_genpd_del_provider() - Remove a previously registered PM domain provider
2828  * @np: Device node pointer associated with the PM domain provider
2829  */
2830 void of_genpd_del_provider(struct device_node *np)
2831 {
2832 	struct of_genpd_provider *cp, *tmp;
2833 	struct generic_pm_domain *gpd;
2834 
2835 	mutex_lock(&gpd_list_lock);
2836 	mutex_lock(&of_genpd_mutex);
2837 	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
2838 		if (cp->node == np) {
2839 			/*
2840 			 * For each PM domain associated with the
2841 			 * provider, set the 'has_provider' to false
2842 			 * so that the PM domain can be safely removed.
2843 			 */
2844 			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2845 				if (gpd->provider == of_fwnode_handle(np)) {
2846 					gpd->has_provider = false;
2847 
2848 					if (gpd->opp_table) {
2849 						dev_pm_opp_put_opp_table(gpd->opp_table);
2850 						dev_pm_opp_of_remove_table(&gpd->dev);
2851 					}
2852 
2853 					device_del(&gpd->dev);
2854 				}
2855 			}
2856 
2857 			fwnode_dev_initialized(of_fwnode_handle(cp->node), false);
2858 			list_del(&cp->link);
2859 			of_node_put(cp->node);
2860 			kfree(cp);
2861 			break;
2862 		}
2863 	}
2864 	mutex_unlock(&of_genpd_mutex);
2865 	mutex_unlock(&gpd_list_lock);
2866 }
2867 EXPORT_SYMBOL_GPL(of_genpd_del_provider);
2868 
2869 /**
2870  * genpd_get_from_provider() - Look-up PM domain
2871  * @genpdspec: OF phandle args to use for look-up
2872  *
2873  * Looks for a PM domain provider under the node specified by @genpdspec and if
2874  * found, uses the xlate function of the provider to map phandle args to a PM
2875  * domain.
2876  *
2877  * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2878  * on failure.
2879  */
2880 static struct generic_pm_domain *genpd_get_from_provider(
2881 					const struct of_phandle_args *genpdspec)
2882 {
2883 	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2884 	struct of_genpd_provider *provider;
2885 
2886 	if (!genpdspec)
2887 		return ERR_PTR(-EINVAL);
2888 
2889 	mutex_lock(&of_genpd_mutex);
2890 
2891 	/* Check if we have such a provider in our list */
2892 	list_for_each_entry(provider, &of_genpd_providers, link) {
2893 		if (provider->node == genpdspec->np)
2894 			genpd = provider->xlate(genpdspec, provider->data);
2895 		if (!IS_ERR(genpd))
2896 			break;
2897 	}
2898 
2899 	mutex_unlock(&of_genpd_mutex);
2900 
2901 	return genpd;
2902 }
2903 
2904 /**
2905  * of_genpd_add_device() - Add a device to an I/O PM domain
2906  * @genpdspec: OF phandle args to use for look-up PM domain
2907  * @dev: Device to be added.
2908  *
2909  * Looks up an I/O PM domain based upon the phandle args provided and adds
2910  * the device to the PM domain. Returns a negative error code on failure.
2911  */
2912 int of_genpd_add_device(const struct of_phandle_args *genpdspec, struct device *dev)
2913 {
2914 	struct generic_pm_domain *genpd;
2915 	int ret;
2916 
2917 	if (!dev)
2918 		return -EINVAL;
2919 
2920 	mutex_lock(&gpd_list_lock);
2921 
2922 	genpd = genpd_get_from_provider(genpdspec);
2923 	if (IS_ERR(genpd)) {
2924 		ret = PTR_ERR(genpd);
2925 		goto out;
2926 	}
2927 
2928 	ret = genpd_add_device(genpd, dev, dev);
2929 
2930 out:
2931 	mutex_unlock(&gpd_list_lock);
2932 
2933 	return ret;
2934 }
2935 EXPORT_SYMBOL_GPL(of_genpd_add_device);
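/*
 * Example (sketch): the phandle args are typically obtained by parsing a
 * consumer's "power-domains" property before adding the device:
 *
 *	struct of_phandle_args pd_args;
 *
 *	ret = of_parse_phandle_with_args(np, "power-domains",
 *					 "#power-domain-cells", 0, &pd_args);
 *	if (ret)
 *		return ret;
 *
 *	ret = of_genpd_add_device(&pd_args, dev);
 *	of_node_put(pd_args.np);
 */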
2936 
2937 /**
2938  * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2939  * @parent_spec: OF phandle args to use for parent PM domain look-up
2940  * @subdomain_spec: OF phandle args to use for subdomain look-up
2941  *
2942  * Looks up a parent PM domain and subdomain based upon the phandle args
2943  * provided and adds the subdomain to the parent PM domain. Returns a
2944  * negative error code on failure.
2945  */
2946 int of_genpd_add_subdomain(const struct of_phandle_args *parent_spec,
2947 			   const struct of_phandle_args *subdomain_spec)
2948 {
2949 	struct generic_pm_domain *parent, *subdomain;
2950 	int ret;
2951 
2952 	mutex_lock(&gpd_list_lock);
2953 
2954 	parent = genpd_get_from_provider(parent_spec);
2955 	if (IS_ERR(parent)) {
2956 		ret = PTR_ERR(parent);
2957 		goto out;
2958 	}
2959 
2960 	subdomain = genpd_get_from_provider(subdomain_spec);
2961 	if (IS_ERR(subdomain)) {
2962 		ret = PTR_ERR(subdomain);
2963 		goto out;
2964 	}
2965 
2966 	ret = genpd_add_subdomain(parent, subdomain);
2967 
2968 out:
2969 	mutex_unlock(&gpd_list_lock);
2970 
2971 	return ret == -ENOENT ? -EPROBE_DEFER : ret;
2972 }
2973 EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
2974 
2975 /**
2976  * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
2977  * @parent_spec: OF phandle args to use for parent PM domain look-up
2978  * @subdomain_spec: OF phandle args to use for subdomain look-up
2979  *
2980  * Looks up a parent PM domain and subdomain based upon the phandle args
2981  * provided and removes the subdomain from the parent PM domain. Returns a
2982  * negative error code on failure.
2983  */
2984 int of_genpd_remove_subdomain(const struct of_phandle_args *parent_spec,
2985 			      const struct of_phandle_args *subdomain_spec)
2986 {
2987 	struct generic_pm_domain *parent, *subdomain;
2988 	int ret;
2989 
2990 	mutex_lock(&gpd_list_lock);
2991 
2992 	parent = genpd_get_from_provider(parent_spec);
2993 	if (IS_ERR(parent)) {
2994 		ret = PTR_ERR(parent);
2995 		goto out;
2996 	}
2997 
2998 	subdomain = genpd_get_from_provider(subdomain_spec);
2999 	if (IS_ERR(subdomain)) {
3000 		ret = PTR_ERR(subdomain);
3001 		goto out;
3002 	}
3003 
3004 	ret = pm_genpd_remove_subdomain(parent, subdomain);
3005 
3006 out:
3007 	mutex_unlock(&gpd_list_lock);
3008 
3009 	return ret;
3010 }
3011 EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
3012 
3013 /**
3014  * of_genpd_remove_last - Remove the last PM domain registered for a provider
3015  * @np: Pointer to device node associated with provider
3016  *
3017  * Find the last PM domain that was added by a particular provider and
3018  * remove this PM domain from the list of PM domains. The provider is
3019  * identified by the device node @np that is passed in. The PM domain
3020  * will only be removed if the provider associated with the domain has
3021  * been removed.
3022  *
3023  * Returns a valid pointer to struct generic_pm_domain on success or
3024  * ERR_PTR() on failure.
3025  */
3026 struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
3027 {
3028 	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
3029 	int ret;
3030 
3031 	if (IS_ERR_OR_NULL(np))
3032 		return ERR_PTR(-EINVAL);
3033 
3034 	mutex_lock(&gpd_list_lock);
3035 	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
3036 		if (gpd->provider == of_fwnode_handle(np)) {
3037 			ret = genpd_remove(gpd);
3038 			genpd = ret ? ERR_PTR(ret) : gpd;
3039 			break;
3040 		}
3041 	}
3042 	mutex_unlock(&gpd_list_lock);
3043 
3044 	return genpd;
3045 }
3046 EXPORT_SYMBOL_GPL(of_genpd_remove_last);
3047 
3048 static void genpd_release_dev(struct device *dev)
3049 {
3050 	of_node_put(dev->of_node);
3051 	kfree(dev);
3052 }
3053 
3054 static const struct bus_type genpd_bus_type = {
3055 	.name		= "genpd",
3056 };
3057 
3058 /**
3059  * genpd_dev_pm_detach - Detach a device from its PM domain.
3060  * @dev: Device to detach.
3061  * @power_off: Currently not used
3062  *
3063  * Try to locate the generic PM domain that the device was previously
3064  * attached to. If one is found, the device is detached from it.
3065  */
3066 static void genpd_dev_pm_detach(struct device *dev, bool power_off)
3067 {
3068 	struct generic_pm_domain *pd;
3069 	unsigned int i;
3070 	int ret = 0;
3071 
3072 	pd = dev_to_genpd(dev);
3073 	if (IS_ERR(pd))
3074 		return;
3075 
3076 	dev_dbg(dev, "removing from PM domain %s\n", pd->name);
3077 
3078 	/* Drop the default performance state */
3079 	if (dev_gpd_data(dev)->default_pstate) {
3080 		dev_pm_genpd_set_performance_state(dev, 0);
3081 		dev_gpd_data(dev)->default_pstate = 0;
3082 	}
3083 
3084 	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
3085 		ret = genpd_remove_device(pd, dev);
3086 		if (ret != -EAGAIN)
3087 			break;
3088 
3089 		mdelay(i);
3090 		cond_resched();
3091 	}
3092 
3093 	if (ret < 0) {
3094 		dev_err(dev, "failed to remove from PM domain %s: %d\n",
3095 			pd->name, ret);
3096 		return;
3097 	}
3098 
3099 	/* Check if PM domain can be powered off after removing this device. */
3100 	genpd_queue_power_off_work(pd);
3101 
3102 	/* Unregister the device if it was created by genpd. */
3103 	if (dev->bus == &genpd_bus_type)
3104 		device_unregister(dev);
3105 }
3106 
3107 static void genpd_dev_pm_sync(struct device *dev)
3108 {
3109 	struct generic_pm_domain *pd;
3110 
3111 	pd = dev_to_genpd(dev);
3112 	if (IS_ERR(pd))
3113 		return;
3114 
3115 	genpd_queue_power_off_work(pd);
3116 }
3117 
3118 static int genpd_set_required_opp_dev(struct device *dev,
3119 				      struct device *base_dev)
3120 {
3121 	struct dev_pm_opp_config config = {
3122 		.required_dev = dev,
3123 	};
3124 	int ret;
3125 
3126 	/* Limit support to non-providers for now. */
3127 	if (of_property_present(base_dev->of_node, "#power-domain-cells"))
3128 		return 0;
3129 
3130 	if (!dev_pm_opp_of_has_required_opp(base_dev))
3131 		return 0;
3132 
3133 	ret = dev_pm_opp_set_config(base_dev, &config);
3134 	if (ret < 0)
3135 		return ret;
3136 
3137 	dev_gpd_data(dev)->opp_token = ret;
3138 	return 0;
3139 }
3140 
3141 static int genpd_set_required_opp(struct device *dev, unsigned int index)
3142 {
3143 	int ret, pstate;
3144 
3145 	/* Set the default performance state */
3146 	pstate = of_get_required_opp_performance_state(dev->of_node, index);
3147 	if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) {
3148 		ret = pstate;
3149 		goto err;
3150 	} else if (pstate > 0) {
3151 		ret = dev_pm_genpd_set_performance_state(dev, pstate);
3152 		if (ret)
3153 			goto err;
3154 		dev_gpd_data(dev)->default_pstate = pstate;
3155 	}
3156 
3157 	return 0;
3158 err:
3159 	dev_err(dev, "failed to set required performance state for power-domain %s: %d\n",
3160 		dev_to_genpd(dev)->name, ret);
3161 	return ret;
3162 }
3163 
3164 static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
3165 				 unsigned int index, unsigned int num_domains,
3166 				 bool power_on)
3167 {
3168 	struct of_phandle_args pd_args;
3169 	struct generic_pm_domain *pd;
3170 	int ret;
3171 
3172 	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
3173 				"#power-domain-cells", index, &pd_args);
3174 	if (ret < 0)
3175 		return ret;
3176 
3177 	mutex_lock(&gpd_list_lock);
3178 	pd = genpd_get_from_provider(&pd_args);
3179 	of_node_put(pd_args.np);
3180 	if (IS_ERR(pd)) {
3181 		mutex_unlock(&gpd_list_lock);
3182 		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
3183 			__func__, PTR_ERR(pd));
3184 		return driver_deferred_probe_check_state(base_dev);
3185 	}
3186 
3187 	dev_dbg(dev, "adding to PM domain %s\n", pd->name);
3188 
3189 	ret = genpd_add_device(pd, dev, base_dev);
3190 	mutex_unlock(&gpd_list_lock);
3191 
3192 	if (ret < 0)
3193 		return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name);
3194 
3195 	dev->pm_domain->detach = genpd_dev_pm_detach;
3196 	dev->pm_domain->sync = genpd_dev_pm_sync;
3197 
3198 	/*
3199 	 * For a single PM domain the index of the required OPP must be zero, so
3200 	 * let's try to assign a required dev in that case. In the multiple PM
3201 	 * domains case, we need platform code to specify the index.
3202 	 */
3203 	if (num_domains == 1) {
3204 		ret = genpd_set_required_opp_dev(dev, base_dev);
3205 		if (ret)
3206 			goto err;
3207 	}
3208 
3209 	ret = genpd_set_required_opp(dev, index);
3210 	if (ret)
3211 		goto err;
3212 
3213 	if (power_on) {
3214 		genpd_lock(pd);
3215 		ret = genpd_power_on(pd, 0);
3216 		genpd_unlock(pd);
3217 	}
3218 
3219 	if (ret) {
3220 		/* Drop the default performance state */
3221 		if (dev_gpd_data(dev)->default_pstate) {
3222 			dev_pm_genpd_set_performance_state(dev, 0);
3223 			dev_gpd_data(dev)->default_pstate = 0;
3224 		}
3225 
3226 		genpd_remove_device(pd, dev);
3227 		return -EPROBE_DEFER;
3228 	}
3229 
3230 	return 1;
3231 
3232 err:
3233 	genpd_remove_device(pd, dev);
3234 	return ret;
3235 }
3236 
3237 /**
3238  * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
3239  * @dev: Device to attach.
3240  *
3241  * Parse the device's OF node to find a PM domain specifier. If one is found,
3242  * attach the device to the retrieved pm_domain ops.
3243  *
3244  * Returns 1 on a successfully attached PM domain, 0 when the device doesn't
3245  * need a PM domain or when multiple power-domains exist for it, else a
3246  * negative error code. Note that if a power-domain exists for the device but
3247  * cannot be found or turned on, -EPROBE_DEFER is returned to ensure that the
3248  * device is not probed and to retry later.
3249  */
3250 int genpd_dev_pm_attach(struct device *dev)
3251 {
3252 	if (!dev->of_node)
3253 		return 0;
3254 
3255 	/*
3256 	 * Devices with multiple PM domains must be attached separately, as we
3257 	 * can only attach one PM domain per device.
3258 	 */
3259 	if (of_count_phandle_with_args(dev->of_node, "power-domains",
3260 				       "#power-domain-cells") != 1)
3261 		return 0;
3262 
3263 	return __genpd_dev_pm_attach(dev, dev, 0, 1, true);
3264 }
3265 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
3266 
3267 /**
3268  * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
3269  * @dev: The device used to lookup the PM domain.
3270  * @index: The index of the PM domain.
3271  *
3272  * Parse the device's OF node to find a PM domain specifier at the provided
3273  * @index. If one is found, create a virtual device and attach it to the
3274  * retrieved pm_domain ops. To deal with detaching of the virtual device, the
3275  * ->detach() callback in struct dev_pm_domain is assigned genpd_dev_pm_detach().
3276  *
3277  * Returns the created virtual device on a successfully attached PM domain,
3278  * NULL when the device doesn't need a PM domain, else an ERR_PTR() in case
3279  * of failure. If a power-domain exists for the device but cannot be found or
3280  * turned on, ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device is
3281  * not probed and to retry later.
3282  */
3283 struct device *genpd_dev_pm_attach_by_id(struct device *dev,
3284 					 unsigned int index)
3285 {
3286 	struct device *virt_dev;
3287 	int num_domains;
3288 	int ret;
3289 
3290 	if (!dev->of_node)
3291 		return NULL;
3292 
3293 	/* Verify that the index is within a valid range. */
3294 	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
3295 						 "#power-domain-cells");
3296 	if (num_domains < 0 || index >= num_domains)
3297 		return NULL;
3298 
3299 	if (!genpd_bus_registered)
3300 		return ERR_PTR(-ENODEV);
3301 
3302 	/* Allocate and register device on the genpd bus. */
3303 	virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
3304 	if (!virt_dev)
3305 		return ERR_PTR(-ENOMEM);
3306 
3307 	dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
3308 	virt_dev->bus = &genpd_bus_type;
3309 	virt_dev->release = genpd_release_dev;
3310 	virt_dev->of_node = of_node_get(dev->of_node);
3311 
3312 	ret = device_register(virt_dev);
3313 	if (ret) {
3314 		put_device(virt_dev);
3315 		return ERR_PTR(ret);
3316 	}
3317 
3318 	/* Try to attach the device to the PM domain at the specified index. */
3319 	ret = __genpd_dev_pm_attach(virt_dev, dev, index, num_domains, false);
3320 	if (ret < 1) {
3321 		device_unregister(virt_dev);
3322 		return ret ? ERR_PTR(ret) : NULL;
3323 	}
3324 
3325 	pm_runtime_enable(virt_dev);
3326 	genpd_queue_power_off_work(dev_to_genpd(virt_dev));
3327 
3328 	return virt_dev;
3329 }
3330 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
3331 
3332 /**
3333  * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
3334  * @dev: The device used to lookup the PM domain.
3335  * @name: The name of the PM domain.
3336  *
3337  * Parse device's OF node to find a PM domain specifier using the
3338  * power-domain-names DT property. For further description see
3339  * genpd_dev_pm_attach_by_id().
3340  */
3341 struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
3342 {
3343 	int index;
3344 
3345 	if (!dev->of_node)
3346 		return NULL;
3347 
3348 	index = of_property_match_string(dev->of_node, "power-domain-names",
3349 					 name);
3350 	if (index < 0)
3351 		return NULL;
3352 
3353 	return genpd_dev_pm_attach_by_id(dev, index);
3354 }
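/*
 * Example (sketch): consumer drivers normally reach this helper through
 * dev_pm_domain_attach_by_name(). For a device node such as
 *
 *	power-domains = <&pd_a>, <&pd_b>;
 *	power-domain-names = "a", "b";
 *
 * one of the domains can be attached via a virtual device:
 *
 *	struct device *pd_dev;
 *
 *	pd_dev = dev_pm_domain_attach_by_name(dev, "a");
 *	if (IS_ERR(pd_dev))
 *		return PTR_ERR(pd_dev);
 */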
3355 
3356 static const struct of_device_id idle_state_match[] = {
3357 	{ .compatible = "domain-idle-state", },
3358 	{ }
3359 };
3360 
3361 static int genpd_parse_state(struct genpd_power_state *genpd_state,
3362 				    struct device_node *state_node)
3363 {
3364 	int err;
3365 	u32 residency;
3366 	u32 entry_latency, exit_latency;
3367 
3368 	err = of_property_read_u32(state_node, "entry-latency-us",
3369 						&entry_latency);
3370 	if (err) {
3371 		pr_debug(" * %pOF missing entry-latency-us property\n",
3372 			 state_node);
3373 		return -EINVAL;
3374 	}
3375 
3376 	err = of_property_read_u32(state_node, "exit-latency-us",
3377 						&exit_latency);
3378 	if (err) {
3379 		pr_debug(" * %pOF missing exit-latency-us property\n",
3380 			 state_node);
3381 		return -EINVAL;
3382 	}
3383 
3384 	err = of_property_read_u32(state_node, "min-residency-us", &residency);
3385 	if (!err)
3386 		genpd_state->residency_ns = 1000LL * residency;
3387 
3388 	of_property_read_string(state_node, "idle-state-name", &genpd_state->name);
3389 
3390 	genpd_state->power_on_latency_ns = 1000LL * exit_latency;
3391 	genpd_state->power_off_latency_ns = 1000LL * entry_latency;
3392 	genpd_state->fwnode = of_fwnode_handle(state_node);
3393 
3394 	return 0;
3395 }
3396 
3397 static int genpd_iterate_idle_states(struct device_node *dn,
3398 				     struct genpd_power_state *states)
3399 {
3400 	int ret;
3401 	struct of_phandle_iterator it;
3402 	struct device_node *np;
3403 	int i = 0;
3404 
3405 	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
3406 	if (ret <= 0)
3407 		return ret == -ENOENT ? 0 : ret;
3408 
3409 	/* Loop over the phandles until all the requested entries are found */
3410 	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
3411 		np = it.node;
3412 		if (!of_match_node(idle_state_match, np))
3413 			continue;
3414 
3415 		if (!of_device_is_available(np))
3416 			continue;
3417 
3418 		if (states) {
3419 			ret = genpd_parse_state(&states[i], np);
3420 			if (ret) {
3421 				pr_err("Parsing idle state node %pOF failed with err %d\n",
3422 				       np, ret);
3423 				of_node_put(np);
3424 				return ret;
3425 			}
3426 		}
3427 		i++;
3428 	}
3429 
3430 	return i;
3431 }
3432 
3433 /**
3434  * of_genpd_parse_idle_states: Return array of idle states for the genpd.
3435  *
3436  * @dn: The genpd device node
3437  * @states: The pointer to which the state array will be saved.
3438  * @n: The count of elements in the array returned from this function.
3439  *
3440  * Returns the device states parsed from the OF node. The memory for the states
3441  * is allocated by this function, and it is the responsibility of the caller to
3442  * free it after use. Returns 0 when any number of compatible domain idle states
3443  * is found, including none; in case of errors, a negative error code is returned.
3444  */
3445 int of_genpd_parse_idle_states(struct device_node *dn,
3446 			struct genpd_power_state **states, int *n)
3447 {
3448 	struct genpd_power_state *st;
3449 	int ret;
3450 
3451 	ret = genpd_iterate_idle_states(dn, NULL);
3452 	if (ret < 0)
3453 		return ret;
3454 
3455 	if (!ret) {
3456 		*states = NULL;
3457 		*n = 0;
3458 		return 0;
3459 	}
3460 
3461 	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
3462 	if (!st)
3463 		return -ENOMEM;
3464 
3465 	ret = genpd_iterate_idle_states(dn, st);
3466 	if (ret <= 0) {
3467 		kfree(st);
3468 		return ret < 0 ? ret : -EINVAL;
3469 	}
3470 
3471 	*states = st;
3472 	*n = ret;
3473 
3474 	return 0;
3475 }
3476 EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
3477 
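/*
 * Illustrative provider-side sketch (hypothetical driver): parse the idle
 * states from the provider's node and hand them to the genpd before
 * initializing it. The caller owns the returned array:
 *
 *	struct genpd_power_state *states;
 *	int nr_states, ret;
 *
 *	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
 *	if (ret < 0)
 *		return ret;
 *
 *	genpd->states = states;
 *	genpd->state_count = nr_states;
 *
 *	ret = pm_genpd_init(genpd, NULL, false);
 */
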
3478 /**
3479  * of_genpd_sync_state() - A common sync_state function for genpd providers
3480  * @np: The device node the genpd provider is associated with.
3481  *
3482  * The @np that corresponds to a genpd provider may provide one or multiple
3483  * genpds. This function makes use of @np to find the genpds that belong to
3484  * the provider. For each of those genpds, a power-off is attempted.
3485  */
3486 void of_genpd_sync_state(struct device_node *np)
3487 {
3488 	struct generic_pm_domain *genpd;
3489 
3490 	if (!np)
3491 		return;
3492 
3493 	mutex_lock(&gpd_list_lock);
3494 	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
3495 		if (genpd->provider == of_fwnode_handle(np)) {
3496 			genpd_lock(genpd);
3497 			genpd->stay_on = false;
3498 			genpd_power_off(genpd, false, 0);
3499 			genpd_unlock(genpd);
3500 		}
3501 	}
3502 	mutex_unlock(&gpd_list_lock);
3503 }
3504 EXPORT_SYMBOL_GPL(of_genpd_sync_state);
3505 
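/*
 * Illustrative wiring (hypothetical driver) of of_genpd_sync_state() as a
 * provider driver's sync_state callback, so that unused genpds get powered
 * off once all their consumers have probed:
 *
 *	static void foo_pd_sync_state(struct device *dev)
 *	{
 *		of_genpd_sync_state(dev->of_node);
 *	}
 *
 *	static struct platform_driver foo_pd_driver = {
 *		.driver = {
 *			.name = "foo-pd",
 *			.sync_state = foo_pd_sync_state,
 *		},
 *	};
 */
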
3506 static int genpd_provider_probe(struct device *dev)
3507 {
3508 	return 0;
3509 }
3510 
3511 static void genpd_provider_sync_state(struct device *dev)
3512 {
3513 	struct generic_pm_domain *genpd = container_of(dev, struct generic_pm_domain, dev);
3514 
3515 	switch (genpd->sync_state) {
3516 	case GENPD_SYNC_STATE_OFF:
3517 		break;
3518 
3519 	case GENPD_SYNC_STATE_ONECELL:
3520 		of_genpd_sync_state(dev->of_node);
3521 		break;
3522 
3523 	case GENPD_SYNC_STATE_SIMPLE:
3524 		genpd_lock(genpd);
3525 		genpd->stay_on = false;
3526 		genpd_power_off(genpd, false, 0);
3527 		genpd_unlock(genpd);
3528 		break;
3529 
3530 	default:
3531 		break;
3532 	}
3533 }
3534 
3535 static struct device_driver genpd_provider_drv = {
3536 	.name = "genpd_provider",
3537 	.bus = &genpd_provider_bus_type,
3538 	.probe = genpd_provider_probe,
3539 	.sync_state = genpd_provider_sync_state,
3540 	.suppress_bind_attrs = true,
3541 };
3542 
3543 static int __init genpd_bus_init(void)
3544 {
3545 	int ret;
3546 
3547 	ret = device_register(&genpd_provider_bus);
3548 	if (ret) {
3549 		put_device(&genpd_provider_bus);
3550 		return ret;
3551 	}
3552 
3553 	ret = bus_register(&genpd_provider_bus_type);
3554 	if (ret)
3555 		goto err_dev;
3556 
3557 	ret = bus_register(&genpd_bus_type);
3558 	if (ret)
3559 		goto err_prov_bus;
3560 
3561 	ret = driver_register(&genpd_provider_drv);
3562 	if (ret)
3563 		goto err_bus;
3564 
3565 	genpd_bus_registered = true;
3566 	return 0;
3567 
3568 err_bus:
3569 	bus_unregister(&genpd_bus_type);
3570 err_prov_bus:
3571 	bus_unregister(&genpd_provider_bus_type);
3572 err_dev:
3573 	device_unregister(&genpd_provider_bus);
3574 	return ret;
3575 }
3576 core_initcall(genpd_bus_init);
3577 
3578 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
3579 
3580 
3581 /***        debugfs support        ***/
3582 
3583 #ifdef CONFIG_DEBUG_FS
3584 /*
3585  * TODO: This function is a slightly modified version of rtpm_status_show
3586  * from sysfs.c, so generalize it.
3587  */
3588 static void rtpm_status_str(struct seq_file *s, struct device *dev)
3589 {
3590 	static const char * const status_lookup[] = {
3591 		[RPM_ACTIVE] = "active",
3592 		[RPM_RESUMING] = "resuming",
3593 		[RPM_SUSPENDED] = "suspended",
3594 		[RPM_SUSPENDING] = "suspending"
3595 	};
3596 	const char *p = "";
3597 
3598 	if (dev->power.runtime_error)
3599 		p = "error";
3600 	else if (dev->power.disable_depth)
3601 		p = "unsupported";
3602 	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
3603 		p = status_lookup[dev->power.runtime_status];
3604 	else
3605 		WARN_ON(1);
3606 
3607 	seq_printf(s, "%-26s  ", p);
3608 }
3609 
3610 static void perf_status_str(struct seq_file *s, struct device *dev)
3611 {
3612 	struct generic_pm_domain_data *gpd_data;
3613 
3614 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
3615 
3616 	seq_printf(s, "%-10u  ", gpd_data->performance_state);
3617 }
3618 
3619 static void mode_status_str(struct seq_file *s, struct device *dev)
3620 {
3621 	struct generic_pm_domain_data *gpd_data;
3622 
3623 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
3624 
3625 	seq_printf(s, "%2s", gpd_data->hw_mode ? "HW" : "SW");
3626 }
3627 
3628 static int genpd_summary_one(struct seq_file *s,
3629 			struct generic_pm_domain *genpd)
3630 {
3631 	static const char * const status_lookup[] = {
3632 		[GENPD_STATE_ON] = "on",
3633 		[GENPD_STATE_OFF] = "off"
3634 	};
3635 	struct pm_domain_data *pm_data;
3636 	struct gpd_link *link;
3637 	char state[16];
3638 	int ret;
3639 
3640 	ret = genpd_lock_interruptible(genpd);
3641 	if (ret)
3642 		return -ERESTARTSYS;
3643 
3644 	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
3645 		goto exit;
3646 	if (!genpd_status_on(genpd))
3647 		snprintf(state, sizeof(state), "%s-%u",
3648 			 status_lookup[genpd->status], genpd->state_idx);
3649 	else
3650 		snprintf(state, sizeof(state), "%s",
3651 			 status_lookup[genpd->status]);
3652 	seq_printf(s, "%-30s  %-30s  %u", dev_name(&genpd->dev), state, genpd->performance_state);
3653 
3654 	/*
3655 	 * Modifications on the list require holding locks on both
3656 	 * parent and child, so we are safe.
3657 	 * Also the device name is immutable.
3658 	 */
3659 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
3660 		if (list_is_first(&link->parent_node, &genpd->parent_links))
3661 			seq_printf(s, "\n%48s", " ");
3662 		seq_printf(s, "%s", link->child->name);
3663 		if (!list_is_last(&link->parent_node, &genpd->parent_links))
3664 			seq_puts(s, ", ");
3665 	}
3666 
3667 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3668 		seq_printf(s, "\n    %-30s  ", dev_name(pm_data->dev));
3669 		rtpm_status_str(s, pm_data->dev);
3670 		perf_status_str(s, pm_data->dev);
3671 		mode_status_str(s, pm_data->dev);
3672 	}
3673 
3674 	seq_puts(s, "\n");
3675 exit:
3676 	genpd_unlock(genpd);
3677 
3678 	return 0;
3679 }
3680 
3681 static int summary_show(struct seq_file *s, void *data)
3682 {
3683 	struct generic_pm_domain *genpd;
3684 	int ret = 0;
3685 
3686 	seq_puts(s, "domain                          status          children        performance\n");
3687 	seq_puts(s, "    /device                         runtime status                  managed by\n");
3688 	seq_puts(s, "------------------------------------------------------------------------------\n");
3689 
3690 	ret = mutex_lock_interruptible(&gpd_list_lock);
3691 	if (ret)
3692 		return -ERESTARTSYS;
3693 
3694 	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
3695 		ret = genpd_summary_one(s, genpd);
3696 		if (ret)
3697 			break;
3698 	}
3699 	mutex_unlock(&gpd_list_lock);
3700 
3701 	return ret;
3702 }
3703 
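/*
 * Example pm_genpd_summary output (domain and device names, and all values,
 * are illustrative; column alignment is approximate):
 *
 *	domain                          status          children        performance
 *	    /device                         runtime status                  managed by
 *	------------------------------------------------------------------------------
 *	pd-mem                          on                              0
 *	    /devices/platform/foo.0         active                      0           SW
 */
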
3704 static int status_show(struct seq_file *s, void *data)
3705 {
3706 	static const char * const status_lookup[] = {
3707 		[GENPD_STATE_ON] = "on",
3708 		[GENPD_STATE_OFF] = "off"
3709 	};
3710 
3711 	struct generic_pm_domain *genpd = s->private;
3712 	int ret = 0;
3713 
3714 	ret = genpd_lock_interruptible(genpd);
3715 	if (ret)
3716 		return -ERESTARTSYS;
3717 
3718 	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
3719 		goto exit;
3720 
3721 	if (genpd->status == GENPD_STATE_OFF)
3722 		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
3723 			genpd->state_idx);
3724 	else
3725 		seq_printf(s, "%s\n", status_lookup[genpd->status]);
3726 exit:
3727 	genpd_unlock(genpd);
3728 	return ret;
3729 }
3730 
3731 static int sub_domains_show(struct seq_file *s, void *data)
3732 {
3733 	struct generic_pm_domain *genpd = s->private;
3734 	struct gpd_link *link;
3735 	int ret = 0;
3736 
3737 	ret = genpd_lock_interruptible(genpd);
3738 	if (ret)
3739 		return -ERESTARTSYS;
3740 
3741 	list_for_each_entry(link, &genpd->parent_links, parent_node)
3742 		seq_printf(s, "%s\n", link->child->name);
3743 
3744 	genpd_unlock(genpd);
3745 	return ret;
3746 }
3747 
3748 static int idle_states_show(struct seq_file *s, void *data)
3749 {
3750 	struct generic_pm_domain *genpd = s->private;
3751 	u64 now, delta, idle_time = 0;
3752 	unsigned int i;
3753 	int ret = 0;
3754 
3755 	ret = genpd_lock_interruptible(genpd);
3756 	if (ret)
3757 		return -ERESTARTSYS;
3758 
3759 	seq_puts(s, "State          Time Spent(ms) Usage      Rejected   Above      Below\n");
3760 
3761 	for (i = 0; i < genpd->state_count; i++) {
3762 		struct genpd_power_state *state = &genpd->states[i];
3763 		char state_name[15];
3764 
3765 		idle_time = state->idle_time;	/* this state's time only; a running total would mix ms and ns across iterations */
3766 
3767 		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3768 			now = ktime_get_mono_fast_ns();
3769 			if (now > genpd->accounting_time) {
3770 				delta = now - genpd->accounting_time;
3771 				idle_time += delta;
3772 			}
3773 		}
3774 
3775 		if (!state->name)
3776 			snprintf(state_name, ARRAY_SIZE(state_name), "S%-13d", i);
3777 
3778 		do_div(idle_time, NSEC_PER_MSEC);
3779 		seq_printf(s, "%-14s %-14llu %-10llu %-10llu %-10llu %llu\n",
3780 			   state->name ?: state_name, idle_time,
3781 			   state->usage, state->rejected, state->above,
3782 			   state->below);
3783 	}
3784 
3785 	genpd_unlock(genpd);
3786 	return ret;
3787 }
3788 
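/*
 * Example idle_states output (state names and values are illustrative);
 * states without a name are printed as "S<index>":
 *
 *	State          Time Spent(ms) Usage      Rejected   Above      Below
 *	retention      12034          420        3          12         7
 *	S1             98             7          0          1          0
 */
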
3789 static int active_time_show(struct seq_file *s, void *data)
3790 {
3791 	struct generic_pm_domain *genpd = s->private;
3792 	u64 now, on_time, delta = 0;
3793 	int ret = 0;
3794 
3795 	ret = genpd_lock_interruptible(genpd);
3796 	if (ret)
3797 		return -ERESTARTSYS;
3798 
3799 	if (genpd->status == GENPD_STATE_ON) {
3800 		now = ktime_get_mono_fast_ns();
3801 		if (now > genpd->accounting_time)
3802 			delta = now - genpd->accounting_time;
3803 	}
3804 
3805 	on_time = genpd->on_time + delta;
3806 	do_div(on_time, NSEC_PER_MSEC);
3807 	seq_printf(s, "%llu ms\n", on_time);
3808 
3809 	genpd_unlock(genpd);
3810 	return ret;
3811 }
3812 
3813 static int total_idle_time_show(struct seq_file *s, void *data)
3814 {
3815 	struct generic_pm_domain *genpd = s->private;
3816 	u64 now, delta, total = 0;
3817 	unsigned int i;
3818 	int ret = 0;
3819 
3820 	ret = genpd_lock_interruptible(genpd);
3821 	if (ret)
3822 		return -ERESTARTSYS;
3823 
3824 	for (i = 0; i < genpd->state_count; i++) {
3825 		total += genpd->states[i].idle_time;
3826 
3827 		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3828 			now = ktime_get_mono_fast_ns();
3829 			if (now > genpd->accounting_time) {
3830 				delta = now - genpd->accounting_time;
3831 				total += delta;
3832 			}
3833 		}
3834 	}
3835 
3836 	do_div(total, NSEC_PER_MSEC);
3837 	seq_printf(s, "%llu ms\n", total);
3838 
3839 	genpd_unlock(genpd);
3840 	return ret;
3841 }
3842 
3843 
3844 static int devices_show(struct seq_file *s, void *data)
3845 {
3846 	struct generic_pm_domain *genpd = s->private;
3847 	struct pm_domain_data *pm_data;
3848 	int ret = 0;
3849 
3850 	ret = genpd_lock_interruptible(genpd);
3851 	if (ret)
3852 		return -ERESTARTSYS;
3853 
3854 	list_for_each_entry(pm_data, &genpd->dev_list, list_node)
3855 		seq_printf(s, "%s\n", dev_name(pm_data->dev));
3856 
3857 	genpd_unlock(genpd);
3858 	return ret;
3859 }
3860 
3861 static int perf_state_show(struct seq_file *s, void *data)
3862 {
3863 	struct generic_pm_domain *genpd = s->private;
3864 
3865 	if (genpd_lock_interruptible(genpd))
3866 		return -ERESTARTSYS;
3867 
3868 	seq_printf(s, "%u\n", genpd->performance_state);
3869 
3870 	genpd_unlock(genpd);
3871 	return 0;
3872 }
3873 
3874 DEFINE_SHOW_ATTRIBUTE(summary);
3875 DEFINE_SHOW_ATTRIBUTE(status);
3876 DEFINE_SHOW_ATTRIBUTE(sub_domains);
3877 DEFINE_SHOW_ATTRIBUTE(idle_states);
3878 DEFINE_SHOW_ATTRIBUTE(active_time);
3879 DEFINE_SHOW_ATTRIBUTE(total_idle_time);
3880 DEFINE_SHOW_ATTRIBUTE(devices);
3881 DEFINE_SHOW_ATTRIBUTE(perf_state);
3882 
3883 static void genpd_debug_add(struct generic_pm_domain *genpd)
3884 {
3885 	struct dentry *d;
3886 
3887 	if (!genpd_debugfs_dir)
3888 		return;
3889 
3890 	d = debugfs_create_dir(dev_name(&genpd->dev), genpd_debugfs_dir);
3891 
3892 	debugfs_create_file("current_state", 0444,
3893 			    d, genpd, &status_fops);
3894 	debugfs_create_file("sub_domains", 0444,
3895 			    d, genpd, &sub_domains_fops);
3896 	debugfs_create_file("idle_states", 0444,
3897 			    d, genpd, &idle_states_fops);
3898 	debugfs_create_file("active_time", 0444,
3899 			    d, genpd, &active_time_fops);
3900 	debugfs_create_file("total_idle_time", 0444,
3901 			    d, genpd, &total_idle_time_fops);
3902 	debugfs_create_file("devices", 0444,
3903 			    d, genpd, &devices_fops);
3904 	if (genpd->set_performance_state)
3905 		debugfs_create_file("perf_state", 0444,
3906 				    d, genpd, &perf_state_fops);
3907 }
3908 
3909 static int __init genpd_debug_init(void)
3910 {
3911 	struct generic_pm_domain *genpd;
3912 
3913 	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
3914 
3915 	debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
3916 			    NULL, &summary_fops);
3917 
3918 	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
3919 		genpd_debug_add(genpd);
3920 
3921 	return 0;
3922 }
3923 late_initcall(genpd_debug_init);
3924 
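/*
 * Resulting layout, assuming debugfs is mounted at the conventional
 * /sys/kernel/debug (the domain directory name is illustrative):
 *
 *	/sys/kernel/debug/pm_genpd/
 *	    pm_genpd_summary
 *	    pd-mem/
 *	        current_state
 *	        sub_domains
 *	        idle_states
 *	        active_time
 *	        total_idle_time
 *	        devices
 *	        perf_state	(only if ->set_performance_state is set)
 */
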
3925 static void __exit genpd_debug_exit(void)
3926 {
3927 	debugfs_remove_recursive(genpd_debugfs_dir);
3928 }
3929 __exitcall(genpd_debug_exit);
3930 #endif /* CONFIG_DEBUG_FS */
3931