xref: /linux/drivers/pmdomain/core.c (revision e6b7c8c5a173c7c99ae2222f640e02b5fa822691)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/domain.c - Common code related to device power domains.
4  *
5  * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
6  */
7 #define pr_fmt(fmt) "PM: " fmt
8 
9 #include <linux/delay.h>
10 #include <linux/idr.h>
11 #include <linux/kernel.h>
12 #include <linux/io.h>
13 #include <linux/platform_device.h>
14 #include <linux/pm_opp.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/pm_domain.h>
17 #include <linux/pm_qos.h>
18 #include <linux/pm_clock.h>
19 #include <linux/slab.h>
20 #include <linux/err.h>
21 #include <linux/sched.h>
22 #include <linux/suspend.h>
23 #include <linux/export.h>
24 #include <linux/cpu.h>
25 #include <linux/debugfs.h>
26 
27 /* Provides a unique ID for each genpd device */
28 static DEFINE_IDA(genpd_ida);
29 
30 #define GENPD_RETRY_MAX_MS	250		/* Approximate */
31 
32 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
33 ({								\
34 	type (*__routine)(struct device *__d); 			\
35 	type __ret = (type)0;					\
36 								\
37 	__routine = genpd->dev_ops.callback; 			\
38 	if (__routine) {					\
39 		__ret = __routine(dev); 			\
40 	}							\
41 	__ret;							\
42 })
43 
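/*
 * Usage sketch: GENPD_DEV_CALLBACK() dispatches an optional per-device
 * callback from genpd->dev_ops. For instance, genpd_stop_dev() below does:
 *
 *	ret = GENPD_DEV_CALLBACK(genpd, int, stop, dev);
 *
 * which is roughly equivalent to:
 *
 *	int (*__routine)(struct device *__d) = genpd->dev_ops.stop;
 *	int __ret = 0;
 *
 *	if (__routine)
 *		__ret = __routine(dev);
 *	ret = __ret;
 */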
44 static LIST_HEAD(gpd_list);
45 static DEFINE_MUTEX(gpd_list_lock);
46 
47 struct genpd_lock_ops {
48 	void (*lock)(struct generic_pm_domain *genpd);
49 	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
50 	int (*lock_interruptible)(struct generic_pm_domain *genpd);
51 	void (*unlock)(struct generic_pm_domain *genpd);
52 };
53 
54 static void genpd_lock_mtx(struct generic_pm_domain *genpd)
55 {
56 	mutex_lock(&genpd->mlock);
57 }
58 
59 static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
60 					int depth)
61 {
62 	mutex_lock_nested(&genpd->mlock, depth);
63 }
64 
65 static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
66 {
67 	return mutex_lock_interruptible(&genpd->mlock);
68 }
69 
70 static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
71 {
72 	return mutex_unlock(&genpd->mlock);
73 }
74 
75 static const struct genpd_lock_ops genpd_mtx_ops = {
76 	.lock = genpd_lock_mtx,
77 	.lock_nested = genpd_lock_nested_mtx,
78 	.lock_interruptible = genpd_lock_interruptible_mtx,
79 	.unlock = genpd_unlock_mtx,
80 };
81 
82 static void genpd_lock_spin(struct generic_pm_domain *genpd)
83 	__acquires(&genpd->slock)
84 {
85 	unsigned long flags;
86 
87 	spin_lock_irqsave(&genpd->slock, flags);
88 	genpd->lock_flags = flags;
89 }
90 
91 static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
92 					int depth)
93 	__acquires(&genpd->slock)
94 {
95 	unsigned long flags;
96 
97 	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
98 	genpd->lock_flags = flags;
99 }
100 
101 static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
102 	__acquires(&genpd->slock)
103 {
104 	unsigned long flags;
105 
106 	spin_lock_irqsave(&genpd->slock, flags);
107 	genpd->lock_flags = flags;
108 	return 0;
109 }
110 
111 static void genpd_unlock_spin(struct generic_pm_domain *genpd)
112 	__releases(&genpd->slock)
113 {
114 	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
115 }
116 
117 static const struct genpd_lock_ops genpd_spin_ops = {
118 	.lock = genpd_lock_spin,
119 	.lock_nested = genpd_lock_nested_spin,
120 	.lock_interruptible = genpd_lock_interruptible_spin,
121 	.unlock = genpd_unlock_spin,
122 };
123 
124 static void genpd_lock_raw_spin(struct generic_pm_domain *genpd)
125 	__acquires(&genpd->raw_slock)
126 {
127 	unsigned long flags;
128 
129 	raw_spin_lock_irqsave(&genpd->raw_slock, flags);
130 	genpd->raw_lock_flags = flags;
131 }
132 
133 static void genpd_lock_nested_raw_spin(struct generic_pm_domain *genpd,
134 					int depth)
135 	__acquires(&genpd->raw_slock)
136 {
137 	unsigned long flags;
138 
139 	raw_spin_lock_irqsave_nested(&genpd->raw_slock, flags, depth);
140 	genpd->raw_lock_flags = flags;
141 }
142 
143 static int genpd_lock_interruptible_raw_spin(struct generic_pm_domain *genpd)
144 	__acquires(&genpd->raw_slock)
145 {
146 	unsigned long flags;
147 
148 	raw_spin_lock_irqsave(&genpd->raw_slock, flags);
149 	genpd->raw_lock_flags = flags;
150 	return 0;
151 }
152 
153 static void genpd_unlock_raw_spin(struct generic_pm_domain *genpd)
154 	__releases(&genpd->raw_slock)
155 {
156 	raw_spin_unlock_irqrestore(&genpd->raw_slock, genpd->raw_lock_flags);
157 }
158 
159 static const struct genpd_lock_ops genpd_raw_spin_ops = {
160 	.lock = genpd_lock_raw_spin,
161 	.lock_nested = genpd_lock_nested_raw_spin,
162 	.lock_interruptible = genpd_lock_interruptible_raw_spin,
163 	.unlock = genpd_unlock_raw_spin,
164 };
165 
166 #define genpd_lock(p)			p->lock_ops->lock(p)
167 #define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
168 #define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
169 #define genpd_unlock(p)			p->lock_ops->unlock(p)
170 
171 #define genpd_status_on(genpd)		(genpd->status == GENPD_STATE_ON)
172 #define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
173 #define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
174 #define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
175 #define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
176 #define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
177 #define genpd_is_opp_table_fw(genpd)	(genpd->flags & GENPD_FLAG_OPP_TABLE_FW)
178 #define genpd_is_dev_name_fw(genpd)	(genpd->flags & GENPD_FLAG_DEV_NAME_FW)
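/*
 * Provider-side sketch (hypothetical "foo" domain): the flags above are set
 * by the provider before registration and, among other things, decide which
 * lock_ops variant the domain uses. GENPD_FLAG_IRQ_SAFE selects the
 * spinlock-based ops, so the domain may be powered on/off with IRQs disabled:
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name		= "foo",
 *		.flags		= GENPD_FLAG_IRQ_SAFE,
 *		.power_on	= foo_pd_power_on,
 *		.power_off	= foo_pd_power_off,
 *	};
 *
 *	ret = pm_genpd_init(&foo_pd, NULL, true);
 */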
179 
180 static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
181 		const struct generic_pm_domain *genpd)
182 {
183 	bool ret;
184 
185 	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
186 
187 	/*
188 	 * Warn once if an IRQ safe device is attached to a domain whose
189 	 * callbacks are allowed to sleep. This indicates a suboptimal
190 	 * configuration for PM, but it doesn't matter for an always on domain.
191 	 */
192 	if (genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd))
193 		return ret;
194 
195 	if (ret)
196 		dev_warn_once(dev, "PM domain %s will not be powered off\n",
197 			      dev_name(&genpd->dev));
198 
199 	return ret;
200 }
201 
202 static int genpd_runtime_suspend(struct device *dev);
203 
204 /*
205  * Get the generic PM domain for a particular struct device.
206  * This validates the struct device pointer, the PM domain pointer,
207  * and checks that the PM domain pointer is a real generic PM domain.
208  * Any failure results in NULL being returned.
209  */
210 static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
211 {
212 	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
213 		return NULL;
214 
215 	/* A genpd always has its ->runtime_suspend() callback assigned. */
216 	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
217 		return pd_to_genpd(dev->pm_domain);
218 
219 	return NULL;
220 }
221 
222 /*
223  * This should only be used where we are certain that the pm_domain
224  * attached to the device is a genpd domain.
225  */
226 static struct generic_pm_domain *dev_to_genpd(struct device *dev)
227 {
228 	if (IS_ERR_OR_NULL(dev->pm_domain))
229 		return ERR_PTR(-EINVAL);
230 
231 	return pd_to_genpd(dev->pm_domain);
232 }
233 
234 struct device *dev_to_genpd_dev(struct device *dev)
235 {
236 	struct generic_pm_domain *genpd = dev_to_genpd(dev);
237 
238 	if (IS_ERR(genpd))
239 		return ERR_CAST(genpd);
240 
241 	return &genpd->dev;
242 }
243 
244 static int genpd_stop_dev(const struct generic_pm_domain *genpd,
245 			  struct device *dev)
246 {
247 	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
248 }
249 
250 static int genpd_start_dev(const struct generic_pm_domain *genpd,
251 			   struct device *dev)
252 {
253 	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
254 }
255 
256 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
257 {
258 	bool ret = false;
259 
260 	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
261 		ret = !!atomic_dec_and_test(&genpd->sd_count);
262 
263 	return ret;
264 }
265 
266 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
267 {
268 	atomic_inc(&genpd->sd_count);
269 	smp_mb__after_atomic();
270 }
271 
272 #ifdef CONFIG_DEBUG_FS
273 static struct dentry *genpd_debugfs_dir;
274 
275 static void genpd_debug_add(struct generic_pm_domain *genpd);
276 
277 static void genpd_debug_remove(struct generic_pm_domain *genpd)
278 {
279 	if (!genpd_debugfs_dir)
280 		return;
281 
282 	debugfs_lookup_and_remove(dev_name(&genpd->dev), genpd_debugfs_dir);
283 }
284 
285 static void genpd_update_accounting(struct generic_pm_domain *genpd)
286 {
287 	u64 delta, now;
288 
289 	now = ktime_get_mono_fast_ns();
290 	if (now <= genpd->accounting_time)
291 		return;
292 
293 	delta = now - genpd->accounting_time;
294 
295 	/*
296 	 * If genpd->status is GENPD_STATE_ON, the domain has just come out
297 	 * of the off state, so the time elapsed since the last update was
298 	 * spent idle; otherwise it was spent on.
299 	 */
300 	if (genpd->status == GENPD_STATE_ON)
301 		genpd->states[genpd->state_idx].idle_time += delta;
302 	else
303 		genpd->on_time += delta;
304 
305 	genpd->accounting_time = now;
306 }
307 #else
308 static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
309 static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
310 static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
311 #endif
312 
313 static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
314 					   unsigned int state)
315 {
316 	struct generic_pm_domain_data *pd_data;
317 	struct pm_domain_data *pdd;
318 	struct gpd_link *link;
319 
320 	/* New requested state is same as Max requested state */
321 	if (state == genpd->performance_state)
322 		return state;
323 
324 	/* New requested state is higher than Max requested state */
325 	if (state > genpd->performance_state)
326 		return state;
327 
328 	/* Traverse all devices within the domain */
329 	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
330 		pd_data = to_gpd_data(pdd);
331 
332 		if (pd_data->performance_state > state)
333 			state = pd_data->performance_state;
334 	}
335 
336 	/*
337 	 * Traverse all sub-domains within the domain. This can be
338 	 * done without any additional locking as the link->performance_state
339 	 * field is protected by the parent genpd->lock, which is already taken.
340 	 *
341 	 * Also note that link->performance_state (subdomain's performance state
342 	 * requirement to parent domain) is different from
343 	 * link->child->performance_state (current performance state requirement
344 	 * of the devices/sub-domains of the subdomain) and so can have a
345 	 * different value.
346 	 *
347 	 * Note that we also take vote from powered-off sub-domains into account
348 	 * as the same is done for devices right now.
349 	 */
350 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
351 		if (link->performance_state > state)
352 			state = link->performance_state;
353 	}
354 
355 	return state;
356 }
357 
358 static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
359 					 struct generic_pm_domain *parent,
360 					 unsigned int pstate)
361 {
362 	if (!parent->set_performance_state)
363 		return pstate;
364 
365 	return dev_pm_opp_xlate_performance_state(genpd->opp_table,
366 						  parent->opp_table,
367 						  pstate);
368 }
369 
370 static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
371 					unsigned int state, int depth);
372 
373 static void _genpd_rollback_parent_state(struct gpd_link *link, int depth)
374 {
375 	struct generic_pm_domain *parent = link->parent;
376 	int parent_state;
377 
378 	genpd_lock_nested(parent, depth + 1);
379 
380 	parent_state = link->prev_performance_state;
381 	link->performance_state = parent_state;
382 
383 	parent_state = _genpd_reeval_performance_state(parent, parent_state);
384 	if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
385 		pr_err("%s: Failed to roll back to %d performance state\n",
386 		       parent->name, parent_state);
387 	}
388 
389 	genpd_unlock(parent);
390 }
391 
392 static int _genpd_set_parent_state(struct generic_pm_domain *genpd,
393 				   struct gpd_link *link,
394 				   unsigned int state, int depth)
395 {
396 	struct generic_pm_domain *parent = link->parent;
397 	int parent_state, ret;
398 
399 	/* Find parent's performance state */
400 	ret = genpd_xlate_performance_state(genpd, parent, state);
401 	if (unlikely(ret < 0))
402 		return ret;
403 
404 	parent_state = ret;
405 
406 	genpd_lock_nested(parent, depth + 1);
407 
408 	link->prev_performance_state = link->performance_state;
409 	link->performance_state = parent_state;
410 
411 	parent_state = _genpd_reeval_performance_state(parent, parent_state);
412 	ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
413 	if (ret)
414 		link->performance_state = link->prev_performance_state;
415 
416 	genpd_unlock(parent);
417 
418 	return ret;
419 }
420 
421 static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
422 					unsigned int state, int depth)
423 {
424 	struct gpd_link *link = NULL;
425 	int ret;
426 
427 	if (state == genpd->performance_state)
428 		return 0;
429 
430 	/* When scaling up, propagate to parents first in normal order */
431 	if (state > genpd->performance_state) {
432 		list_for_each_entry(link, &genpd->child_links, child_node) {
433 			ret = _genpd_set_parent_state(genpd, link, state, depth);
434 			if (ret)
435 				goto rollback_parents_up;
436 		}
437 	}
438 
439 	if (genpd->set_performance_state) {
440 		ret = genpd->set_performance_state(genpd, state);
441 		if (ret) {
442 			if (link)
443 				goto rollback_parents_up;
444 			return ret;
445 		}
446 	}
447 
448 	/* When scaling down, propagate to parents last in reverse order */
449 	if (state < genpd->performance_state) {
450 		list_for_each_entry_reverse(link, &genpd->child_links, child_node) {
451 			ret = _genpd_set_parent_state(genpd, link, state, depth);
452 			if (ret)
453 				goto rollback_parents_down;
454 		}
455 	}
456 
457 	genpd->performance_state = state;
458 	return 0;
459 
460 rollback_parents_up:
461 	list_for_each_entry_continue_reverse(link, &genpd->child_links, child_node)
462 		_genpd_rollback_parent_state(link, depth);
463 	return ret;
464 rollback_parents_down:
465 	list_for_each_entry_continue(link, &genpd->child_links, child_node)
466 		_genpd_rollback_parent_state(link, depth);
467 	return ret;
468 }
469 
470 static int genpd_set_performance_state(struct device *dev, unsigned int state)
471 {
472 	struct generic_pm_domain *genpd = dev_to_genpd(dev);
473 	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
474 	unsigned int prev_state;
475 	int ret;
476 
477 	prev_state = gpd_data->performance_state;
478 	if (prev_state == state)
479 		return 0;
480 
481 	gpd_data->performance_state = state;
482 	state = _genpd_reeval_performance_state(genpd, state);
483 
484 	ret = _genpd_set_performance_state(genpd, state, 0);
485 	if (ret)
486 		gpd_data->performance_state = prev_state;
487 
488 	return ret;
489 }
490 
491 static int genpd_drop_performance_state(struct device *dev)
492 {
493 	unsigned int prev_state = dev_gpd_data(dev)->performance_state;
494 
495 	if (!genpd_set_performance_state(dev, 0))
496 		return prev_state;
497 
498 	return 0;
499 }
500 
501 static void genpd_restore_performance_state(struct device *dev,
502 					    unsigned int state)
503 {
504 	if (state)
505 		genpd_set_performance_state(dev, state);
506 }
507 
508 static int genpd_dev_pm_set_performance_state(struct device *dev,
509 					      unsigned int state)
510 {
511 	struct generic_pm_domain *genpd = dev_to_genpd(dev);
512 	int ret = 0;
513 
514 	genpd_lock(genpd);
515 	if (pm_runtime_suspended(dev)) {
516 		dev_gpd_data(dev)->rpm_pstate = state;
517 	} else {
518 		ret = genpd_set_performance_state(dev, state);
519 		if (!ret)
520 			dev_gpd_data(dev)->rpm_pstate = 0;
521 	}
522 	genpd_unlock(genpd);
523 
524 	return ret;
525 }
526 
527 /**
528  * dev_pm_genpd_set_performance_state() - Set performance state of device's
529  * power domain.
530  *
531  * @dev: Device for which the performance state needs to be set.
532  * @state: Target performance state of the device. This can be set to 0 when
533  *	   the device doesn't have any performance state constraints left
534  *	   (and then it no longer takes part in determining the target
535  *	   performance state of the genpd).
536  *
537  * It is assumed that the users guarantee that the genpd wouldn't be detached
538  * while this routine is getting called.
539  *
540  * Returns 0 on success and negative error values on failures.
541  */
542 int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
543 {
544 	struct generic_pm_domain *genpd;
545 
546 	genpd = dev_to_genpd_safe(dev);
547 	if (!genpd)
548 		return -ENODEV;
549 
550 	if (WARN_ON(!dev->power.subsys_data ||
551 		     !dev->power.subsys_data->domain_data))
552 		return -EINVAL;
553 
554 	return genpd_dev_pm_set_performance_state(dev, state);
555 }
556 EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
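/*
 * Consumer-side sketch: @pstate is hypothetical and would typically be
 * derived from an OPP table. The driver requests a performance state and
 * drops its vote again by passing 0 when the constraint no longer applies:
 *
 *	ret = dev_pm_genpd_set_performance_state(dev, pstate);
 *	...
 *	dev_pm_genpd_set_performance_state(dev, 0);
 */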
557 
558 /**
559  * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
560  *
561  * @dev: Device to handle
562  * @next: impending interrupt/wakeup for the device
563  *
564  *
565  * Allow devices to inform of the next wakeup. It's assumed that the users
566  * guarantee that the genpd wouldn't be detached while this routine is getting
567  * called. Additionally, it's also assumed that @dev isn't runtime suspended
568  * (RPM_SUSPENDED)."
569  * (RPM_SUSPENDED).
570  * Although devices are expected to update the next_wakeup after the end of
571  * their use case as well, it is possible the devices themselves may not know
572  */
573 void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
574 {
575 	struct generic_pm_domain *genpd;
576 	struct gpd_timing_data *td;
577 
578 	genpd = dev_to_genpd_safe(dev);
579 	if (!genpd)
580 		return;
581 
582 	td = to_gpd_data(dev->power.subsys_data->domain_data)->td;
583 	if (td)
584 		td->next_wakeup = next;
585 }
586 EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
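/*
 * Consumer-side sketch: a driver expecting to be woken by an interrupt in
 * roughly 10 ms (hypothetical figure) can pass that hint to the governor:
 *
 *	dev_pm_genpd_set_next_wakeup(dev, ktime_add_ms(ktime_get(), 10));
 */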
587 
588 /**
589  * dev_pm_genpd_get_next_hrtimer - Return the next_hrtimer for the genpd
590  * @dev: A device that is attached to the genpd.
591  *
592  * This routine should typically be called for a device, at the point of when a
593  * GENPD_NOTIFY_PRE_OFF notification has been sent for it.
594  *
595  * Returns the aggregated value of the genpd's next hrtimer or KTIME_MAX if no
596  * valid value has been set.
597  */
598 ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
599 {
600 	struct generic_pm_domain *genpd;
601 
602 	genpd = dev_to_genpd_safe(dev);
603 	if (!genpd)
604 		return KTIME_MAX;
605 
606 	if (genpd->gd)
607 		return genpd->gd->next_hrtimer;
608 
609 	return KTIME_MAX;
610 }
611 EXPORT_SYMBOL_GPL(dev_pm_genpd_get_next_hrtimer);
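/*
 * Sketch of a genpd power notifier (hypothetical consumer, foo_dev/foo_nb
 * assumed) that reads the aggregated next hrtimer when the domain is about
 * to be powered off, e.g. to decide whether a deep state is worthwhile:
 *
 *	static int foo_pd_notifier(struct notifier_block *nb,
 *				   unsigned long action, void *data)
 *	{
 *		if (action == GENPD_NOTIFY_PRE_OFF) {
 *			ktime_t next = dev_pm_genpd_get_next_hrtimer(foo_dev);
 *			...
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	ret = dev_pm_genpd_add_notifier(foo_dev, &foo_nb);
 */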
612 
613 /*
614  * dev_pm_genpd_synced_poweroff - Next power off should be synchronous
615  *
616  * @dev: A device that is attached to the genpd.
617  *
618  * Allows a consumer of the genpd to notify the provider that the next power off
619  * should be synchronous.
620  *
621  * It is assumed that the users guarantee that the genpd wouldn't be detached
622  * while this routine is getting called.
623  */
624 void dev_pm_genpd_synced_poweroff(struct device *dev)
625 {
626 	struct generic_pm_domain *genpd;
627 
628 	genpd = dev_to_genpd_safe(dev);
629 	if (!genpd)
630 		return;
631 
632 	genpd_lock(genpd);
633 	genpd->synced_poweroff = true;
634 	genpd_unlock(genpd);
635 }
636 EXPORT_SYMBOL_GPL(dev_pm_genpd_synced_poweroff);
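/*
 * Consumer-side sketch (hypothetical sequence): request that the upcoming
 * power off is carried out synchronously before dropping the last runtime
 * PM reference:
 *
 *	dev_pm_genpd_synced_poweroff(dev);
 *	pm_runtime_put_sync(dev);
 */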
637 
638 /**
639  * dev_pm_genpd_set_hwmode() - Set the HW mode for the device and its PM domain.
640  *
641  * @dev: Device for which the HW-mode should be changed.
642  * @enable: Value to set or unset the HW-mode.
643  *
644  * Some PM domains can rely on HW signals to control the power for a device. To
645  * allow a consumer driver to switch the behaviour for its device at runtime,
646  * which may be beneficial from a latency or energy point of view, this function
647  * may be called.
648  *
649  * It is assumed that the users guarantee that the genpd wouldn't be detached
650  * while this routine is getting called.
651  *
652  * Return: Returns 0 on success and negative error values on failures.
653  */
654 int dev_pm_genpd_set_hwmode(struct device *dev, bool enable)
655 {
656 	struct generic_pm_domain *genpd;
657 	int ret = 0;
658 
659 	genpd = dev_to_genpd_safe(dev);
660 	if (!genpd)
661 		return -ENODEV;
662 
663 	if (!genpd->set_hwmode_dev)
664 		return -EOPNOTSUPP;
665 
666 	genpd_lock(genpd);
667 
668 	if (dev_gpd_data(dev)->hw_mode == enable)
669 		goto out;
670 
671 	ret = genpd->set_hwmode_dev(genpd, dev, enable);
672 	if (!ret)
673 		dev_gpd_data(dev)->hw_mode = enable;
674 
675 out:
676 	genpd_unlock(genpd);
677 	return ret;
678 }
679 EXPORT_SYMBOL_GPL(dev_pm_genpd_set_hwmode);
680 
681 /**
682  * dev_pm_genpd_get_hwmode() - Get the HW mode setting for the device.
683  *
684  * @dev: Device for which the current HW-mode setting should be fetched.
685  *
686  * This helper function allows consumer drivers to fetch the current HW mode
687  * setting of the device.
688  *
689  * It is assumed that the users guarantee that the genpd wouldn't be detached
690  * while this routine is getting called.
691  *
692  * Return: Returns the HW mode setting of device from SW cached hw_mode.
693  */
694 bool dev_pm_genpd_get_hwmode(struct device *dev)
695 {
696 	return dev_gpd_data(dev)->hw_mode;
697 }
698 EXPORT_SYMBOL_GPL(dev_pm_genpd_get_hwmode);
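/*
 * Consumer-side sketch: switch the device's power control to HW mode while
 * it is in active use (hypothetical condition), switch back afterwards and
 * read the cached setting:
 *
 *	ret = dev_pm_genpd_set_hwmode(dev, true);
 *	...
 *	ret = dev_pm_genpd_set_hwmode(dev, false);
 *	hw_mode = dev_pm_genpd_get_hwmode(dev);
 */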
699 
700 static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
701 {
702 	unsigned int state_idx = genpd->state_idx;
703 	ktime_t time_start;
704 	s64 elapsed_ns;
705 	int ret;
706 
707 	/* Notify consumers that we are about to power on. */
708 	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
709 					     GENPD_NOTIFY_PRE_ON,
710 					     GENPD_NOTIFY_OFF, NULL);
711 	ret = notifier_to_errno(ret);
712 	if (ret)
713 		return ret;
714 
715 	if (!genpd->power_on)
716 		goto out;
717 
718 	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
719 	if (!timed) {
720 		ret = genpd->power_on(genpd);
721 		if (ret)
722 			goto err;
723 
724 		goto out;
725 	}
726 
727 	time_start = ktime_get();
728 	ret = genpd->power_on(genpd);
729 	if (ret)
730 		goto err;
731 
732 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
733 	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
734 		goto out;
735 
736 	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
737 	genpd->gd->max_off_time_changed = true;
738 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
739 		 dev_name(&genpd->dev), "on", elapsed_ns);
740 
741 out:
742 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
743 	genpd->synced_poweroff = false;
744 	return 0;
745 err:
746 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
747 				NULL);
748 	return ret;
749 }
750 
751 static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
752 {
753 	unsigned int state_idx = genpd->state_idx;
754 	ktime_t time_start;
755 	s64 elapsed_ns;
756 	int ret;
757 
758 	/* Notify consumers that we are about to power off. */
759 	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
760 					     GENPD_NOTIFY_PRE_OFF,
761 					     GENPD_NOTIFY_ON, NULL);
762 	ret = notifier_to_errno(ret);
763 	if (ret)
764 		return ret;
765 
766 	if (!genpd->power_off)
767 		goto out;
768 
769 	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
770 	if (!timed) {
771 		ret = genpd->power_off(genpd);
772 		if (ret)
773 			goto busy;
774 
775 		goto out;
776 	}
777 
778 	time_start = ktime_get();
779 	ret = genpd->power_off(genpd);
780 	if (ret)
781 		goto busy;
782 
783 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
784 	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
785 		goto out;
786 
787 	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
788 	genpd->gd->max_off_time_changed = true;
789 	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
790 		 dev_name(&genpd->dev), "off", elapsed_ns);
791 
792 out:
793 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
794 				NULL);
795 	return 0;
796 busy:
797 	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
798 	return ret;
799 }
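/*
 * Provider-side sketch (hypothetical foo_pd driver and registers): the
 * ->power_on() and ->power_off() callbacks invoked above return 0 on
 * success; a non-zero return from ->power_off() is treated as "busy" and
 * keeps the domain on, as the error path above shows:
 *
 *	static int foo_pd_power_on(struct generic_pm_domain *domain)
 *	{
 *		struct foo_pd *pd = container_of(domain, struct foo_pd, genpd);
 *
 *		writel(FOO_PD_ENABLE, pd->base + FOO_PD_CTRL);
 *		return 0;
 *	}
 */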
800 
801 /**
802  * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
803  * @genpd: PM domain to power off.
804  *
805  * Queue up the execution of genpd_power_off() unless it's already been done
806  * before.
807  */
808 static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
809 {
810 	queue_work(pm_wq, &genpd->power_off_work);
811 }
812 
813 /**
814  * genpd_power_off - Remove power from a given PM domain.
815  * @genpd: PM domain to power down.
816  * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
817  * RPM status of the related device is in an intermediate state, not yet turned
818  * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
819  * be RPM_SUSPENDED, while it tries to power off the PM domain.
820  * @depth: nesting count for lockdep.
821  *
822  * If all of the @genpd's devices have been suspended and all of its subdomains
823  * have been powered down, remove power from @genpd.
824  */
825 static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
826 			   unsigned int depth)
827 {
828 	struct pm_domain_data *pdd;
829 	struct gpd_link *link;
830 	unsigned int not_suspended = 0;
831 	int ret;
832 
833 	/*
834 	 * Do not try to power off the domain in the following situations:
835 	 * (1) The domain is already in the "power off" state.
836 	 * (2) System suspend is in progress.
837 	 */
838 	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
839 		return 0;
840 
841 	/*
842 	 * Abort power off for the PM domain in the following situations:
843 	 * (1) The domain is configured as always on.
844 	 * (2) When the domain has a subdomain being powered on.
845 	 */
846 	if (genpd_is_always_on(genpd) ||
847 			genpd_is_rpm_always_on(genpd) ||
848 			atomic_read(&genpd->sd_count) > 0)
849 		return -EBUSY;
850 
851 	/*
852 	 * The children must be in their deepest (powered-off) states to allow
853 	 * the parent to be powered off. Note that there's no need for
854 	 * additional locking, as powering on a child requires the parent's
855 	 * lock to be acquired first.
856 	 */
857 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
858 		struct generic_pm_domain *child = link->child;
859 		if (child->state_idx < child->state_count - 1)
860 			return -EBUSY;
861 	}
862 
863 	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
864 		/*
865 		 * Do not allow PM domain to be powered off, when an IRQ safe
866 		 * device is part of a non-IRQ safe domain.
867 		 */
868 		if (!pm_runtime_suspended(pdd->dev) ||
869 			irq_safe_dev_in_sleep_domain(pdd->dev, genpd))
870 			not_suspended++;
871 	}
872 
873 	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
874 		return -EBUSY;
875 
876 	if (genpd->gov && genpd->gov->power_down_ok) {
877 		if (!genpd->gov->power_down_ok(&genpd->domain))
878 			return -EAGAIN;
879 	}
880 
881 	/* Default to shallowest state. */
882 	if (!genpd->gov)
883 		genpd->state_idx = 0;
884 
885 	/* Don't power off, if a child domain is waiting to power on. */
886 	if (atomic_read(&genpd->sd_count) > 0)
887 		return -EBUSY;
888 
889 	ret = _genpd_power_off(genpd, true);
890 	if (ret) {
891 		genpd->states[genpd->state_idx].rejected++;
892 		return ret;
893 	}
894 
895 	genpd->status = GENPD_STATE_OFF;
896 	genpd_update_accounting(genpd);
897 	genpd->states[genpd->state_idx].usage++;
898 
899 	list_for_each_entry(link, &genpd->child_links, child_node) {
900 		genpd_sd_counter_dec(link->parent);
901 		genpd_lock_nested(link->parent, depth + 1);
902 		genpd_power_off(link->parent, false, depth + 1);
903 		genpd_unlock(link->parent);
904 	}
905 
906 	return 0;
907 }
908 
909 /**
910  * genpd_power_on - Restore power to a given PM domain and its parents.
911  * @genpd: PM domain to power up.
912  * @depth: nesting count for lockdep.
913  *
914  * Restore power to @genpd and all of its parents so that it is possible to
915  * resume a device belonging to it.
916  */
917 static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
918 {
919 	struct gpd_link *link;
920 	int ret = 0;
921 
922 	if (genpd_status_on(genpd))
923 		return 0;
924 
925 	/*
926 	 * The list is guaranteed not to change while the loop below is being
927 	 * executed, unless one of the parents' .power_on() callbacks fiddles
928 	 * with it.
929 	 */
930 	list_for_each_entry(link, &genpd->child_links, child_node) {
931 		struct generic_pm_domain *parent = link->parent;
932 
933 		genpd_sd_counter_inc(parent);
934 
935 		genpd_lock_nested(parent, depth + 1);
936 		ret = genpd_power_on(parent, depth + 1);
937 		genpd_unlock(parent);
938 
939 		if (ret) {
940 			genpd_sd_counter_dec(parent);
941 			goto err;
942 		}
943 	}
944 
945 	ret = _genpd_power_on(genpd, true);
946 	if (ret)
947 		goto err;
948 
949 	genpd->status = GENPD_STATE_ON;
950 	genpd_update_accounting(genpd);
951 
952 	return 0;
953 
954  err:
955 	list_for_each_entry_continue_reverse(link,
956 					&genpd->child_links,
957 					child_node) {
958 		genpd_sd_counter_dec(link->parent);
959 		genpd_lock_nested(link->parent, depth + 1);
960 		genpd_power_off(link->parent, false, depth + 1);
961 		genpd_unlock(link->parent);
962 	}
963 
964 	return ret;
965 }
966 
967 static int genpd_dev_pm_start(struct device *dev)
968 {
969 	struct generic_pm_domain *genpd = dev_to_genpd(dev);
970 
971 	return genpd_start_dev(genpd, dev);
972 }
973 
974 static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
975 				     unsigned long val, void *ptr)
976 {
977 	struct generic_pm_domain_data *gpd_data;
978 	struct device *dev;
979 
980 	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
981 	dev = gpd_data->base.dev;
982 
983 	for (;;) {
984 		struct generic_pm_domain *genpd = ERR_PTR(-ENODATA);
985 		struct pm_domain_data *pdd;
986 		struct gpd_timing_data *td;
987 
988 		spin_lock_irq(&dev->power.lock);
989 
990 		pdd = dev->power.subsys_data ?
991 				dev->power.subsys_data->domain_data : NULL;
992 		if (pdd) {
993 			td = to_gpd_data(pdd)->td;
994 			if (td) {
995 				td->constraint_changed = true;
996 				genpd = dev_to_genpd(dev);
997 			}
998 		}
999 
1000 		spin_unlock_irq(&dev->power.lock);
1001 
1002 		if (!IS_ERR(genpd)) {
1003 			genpd_lock(genpd);
1004 			genpd->gd->max_off_time_changed = true;
1005 			genpd_unlock(genpd);
1006 		}
1007 
1008 		dev = dev->parent;
1009 		if (!dev || dev->power.ignore_children)
1010 			break;
1011 	}
1012 
1013 	return NOTIFY_DONE;
1014 }
1015 
1016 /**
1017  * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
1018  * @work: Work structure used for scheduling the execution of this function.
1019  */
1020 static void genpd_power_off_work_fn(struct work_struct *work)
1021 {
1022 	struct generic_pm_domain *genpd;
1023 
1024 	genpd = container_of(work, struct generic_pm_domain, power_off_work);
1025 
1026 	genpd_lock(genpd);
1027 	genpd_power_off(genpd, false, 0);
1028 	genpd_unlock(genpd);
1029 }
1030 
1031 /**
1032  * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
1033  * @dev: Device to handle.
1034  */
1035 static int __genpd_runtime_suspend(struct device *dev)
1036 {
1037 	int (*cb)(struct device *__dev);
1038 
1039 	if (dev->type && dev->type->pm)
1040 		cb = dev->type->pm->runtime_suspend;
1041 	else if (dev->class && dev->class->pm)
1042 		cb = dev->class->pm->runtime_suspend;
1043 	else if (dev->bus && dev->bus->pm)
1044 		cb = dev->bus->pm->runtime_suspend;
1045 	else
1046 		cb = NULL;
1047 
1048 	if (!cb && dev->driver && dev->driver->pm)
1049 		cb = dev->driver->pm->runtime_suspend;
1050 
1051 	return cb ? cb(dev) : 0;
1052 }
1053 
1054 /**
1055  * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
1056  * @dev: Device to handle.
1057  */
1058 static int __genpd_runtime_resume(struct device *dev)
1059 {
1060 	int (*cb)(struct device *__dev);
1061 
1062 	if (dev->type && dev->type->pm)
1063 		cb = dev->type->pm->runtime_resume;
1064 	else if (dev->class && dev->class->pm)
1065 		cb = dev->class->pm->runtime_resume;
1066 	else if (dev->bus && dev->bus->pm)
1067 		cb = dev->bus->pm->runtime_resume;
1068 	else
1069 		cb = NULL;
1070 
1071 	if (!cb && dev->driver && dev->driver->pm)
1072 		cb = dev->driver->pm->runtime_resume;
1073 
1074 	return cb ? cb(dev) : 0;
1075 }
1076 
1077 /**
1078  * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
1079  * @dev: Device to suspend.
1080  *
1081  * Carry out a runtime suspend of a device under the assumption that its
1082  * pm_domain field points to the domain member of an object of type
1083  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
1084  */
1085 static int genpd_runtime_suspend(struct device *dev)
1086 {
1087 	struct generic_pm_domain *genpd;
1088 	bool (*suspend_ok)(struct device *__dev);
1089 	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
1090 	struct gpd_timing_data *td = gpd_data->td;
1091 	bool runtime_pm = pm_runtime_enabled(dev);
1092 	ktime_t time_start = 0;
1093 	s64 elapsed_ns;
1094 	int ret;
1095 
1096 	dev_dbg(dev, "%s()\n", __func__);
1097 
1098 	genpd = dev_to_genpd(dev);
1099 	if (IS_ERR(genpd))
1100 		return -EINVAL;
1101 
1102 	/*
1103 	 * A runtime PM centric subsystem/driver may re-use the runtime PM
1104 	 * callbacks for other purposes than runtime PM. In those scenarios
1105 	 * runtime PM is disabled. Under these circumstances, we shall skip
1106 	 * validating/measuring the PM QoS latency.
1107 	 */
1108 	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
1109 	if (runtime_pm && suspend_ok && !suspend_ok(dev))
1110 		return -EBUSY;
1111 
1112 	/* Measure suspend latency. */
1113 	if (td && runtime_pm)
1114 		time_start = ktime_get();
1115 
1116 	ret = __genpd_runtime_suspend(dev);
1117 	if (ret)
1118 		return ret;
1119 
1120 	ret = genpd_stop_dev(genpd, dev);
1121 	if (ret) {
1122 		__genpd_runtime_resume(dev);
1123 		return ret;
1124 	}
1125 
1126 	/* Update suspend latency value if the measured time exceeds it. */
1127 	if (td && runtime_pm) {
1128 		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
1129 		if (elapsed_ns > td->suspend_latency_ns) {
1130 			td->suspend_latency_ns = elapsed_ns;
1131 			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
1132 				elapsed_ns);
1133 			genpd->gd->max_off_time_changed = true;
1134 			td->constraint_changed = true;
1135 		}
1136 	}
1137 
1138 	/*
1139 	 * If power.irq_safe is set, this routine may be run with
1140 	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
1141 	 */
1142 	if (irq_safe_dev_in_sleep_domain(dev, genpd))
1143 		return 0;
1144 
1145 	genpd_lock(genpd);
1146 	genpd_power_off(genpd, true, 0);
1147 	gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
1148 	genpd_unlock(genpd);
1149 
1150 	return 0;
1151 }
1152 
1153 /**
1154  * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
1155  * @dev: Device to resume.
1156  *
1157  * Carry out a runtime resume of a device under the assumption that its
1158  * pm_domain field points to the domain member of an object of type
1159  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
1160  */
1161 static int genpd_runtime_resume(struct device *dev)
1162 {
1163 	struct generic_pm_domain *genpd;
1164 	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
1165 	struct gpd_timing_data *td = gpd_data->td;
1166 	bool timed = td && pm_runtime_enabled(dev);
1167 	ktime_t time_start = 0;
1168 	s64 elapsed_ns;
1169 	int ret;
1170 
1171 	dev_dbg(dev, "%s()\n", __func__);
1172 
1173 	genpd = dev_to_genpd(dev);
1174 	if (IS_ERR(genpd))
1175 		return -EINVAL;
1176 
1177 	/*
1178 	 * As we don't power off a non-IRQ safe domain that holds
1179 	 * an IRQ safe device, we don't need to restore power to it.
1180 	 */
1181 	if (irq_safe_dev_in_sleep_domain(dev, genpd))
1182 		goto out;
1183 
1184 	genpd_lock(genpd);
1185 	genpd_restore_performance_state(dev, gpd_data->rpm_pstate);
1186 	ret = genpd_power_on(genpd, 0);
1187 	genpd_unlock(genpd);
1188 
1189 	if (ret)
1190 		return ret;
1191 
1192  out:
1193 	/* Measure resume latency. */
1194 	if (timed)
1195 		time_start = ktime_get();
1196 
1197 	ret = genpd_start_dev(genpd, dev);
1198 	if (ret)
1199 		goto err_poweroff;
1200 
1201 	ret = __genpd_runtime_resume(dev);
1202 	if (ret)
1203 		goto err_stop;
1204 
1205 	/* Update resume latency value if the measured time exceeds it. */
1206 	if (timed) {
1207 		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
1208 		if (elapsed_ns > td->resume_latency_ns) {
1209 			td->resume_latency_ns = elapsed_ns;
1210 			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
1211 				elapsed_ns);
1212 			genpd->gd->max_off_time_changed = true;
1213 			td->constraint_changed = true;
1214 		}
1215 	}
1216 
1217 	return 0;
1218 
1219 err_stop:
1220 	genpd_stop_dev(genpd, dev);
1221 err_poweroff:
1222 	if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
1223 		genpd_lock(genpd);
1224 		genpd_power_off(genpd, true, 0);
1225 		gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
1226 		genpd_unlock(genpd);
1227 	}
1228 
1229 	return ret;
1230 }
1231 
1232 static bool pd_ignore_unused;
1233 static int __init pd_ignore_unused_setup(char *__unused)
1234 {
1235 	pd_ignore_unused = true;
1236 	return 1;
1237 }
1238 __setup("pd_ignore_unused", pd_ignore_unused_setup);
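/*
 * Booting with "pd_ignore_unused" on the kernel command line sets the flag
 * above, so genpd_power_off_unused() below leaves otherwise unused domains
 * powered on. This is useful when debugging a platform where a consumer
 * driver is missing.
 */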
1239 
1240 /**
1241  * genpd_power_off_unused - Power off all PM domains with no devices in use.
1242  */
1243 static int __init genpd_power_off_unused(void)
1244 {
1245 	struct generic_pm_domain *genpd;
1246 
1247 	if (pd_ignore_unused) {
1248 		pr_warn("genpd: Not disabling unused power domains\n");
1249 		return 0;
1250 	}
1251 
1252 	pr_info("genpd: Disabling unused power domains\n");
1253 	mutex_lock(&gpd_list_lock);
1254 
1255 	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
1256 		genpd_queue_power_off_work(genpd);
1257 
1258 	mutex_unlock(&gpd_list_lock);
1259 
1260 	return 0;
1261 }
1262 late_initcall_sync(genpd_power_off_unused);
1263 
1264 #ifdef CONFIG_PM_SLEEP
1265 
1266 /**
1267  * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
1268  * @genpd: PM domain to power off, if possible.
1269  * @use_lock: use the lock.
1270  * @depth: nesting count for lockdep.
1271  *
1272  * Check if the given PM domain can be powered off (during system suspend or
1273  * hibernation) and do that if so.  Also, in that case propagate to its parents.
1274  *
1275  * This function is only called in "noirq" and "syscore" stages of system power
1276  * transitions. The "noirq" callbacks may be executed asynchronously, thus in
1277  * these cases the lock must be held.
1278  */
1279 static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
1280 				 unsigned int depth)
1281 {
1282 	struct gpd_link *link;
1283 
1284 	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
1285 		return;
1286 
1287 	if (genpd->suspended_count != genpd->device_count
1288 	    || atomic_read(&genpd->sd_count) > 0)
1289 		return;
1290 
1291 	/* Check that the children are in their deepest (powered-off) state. */
1292 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
1293 		struct generic_pm_domain *child = link->child;
1294 		if (child->state_idx < child->state_count - 1)
1295 			return;
1296 	}
1297 
1298 	/* Choose the deepest state when suspending */
1299 	genpd->state_idx = genpd->state_count - 1;
1300 	if (_genpd_power_off(genpd, false)) {
1301 		genpd->states[genpd->state_idx].rejected++;
1302 		return;
1303 	} else {
1304 		genpd->states[genpd->state_idx].usage++;
1305 	}
1306 
1307 	genpd->status = GENPD_STATE_OFF;
1308 
1309 	list_for_each_entry(link, &genpd->child_links, child_node) {
1310 		genpd_sd_counter_dec(link->parent);
1311 
1312 		if (use_lock)
1313 			genpd_lock_nested(link->parent, depth + 1);
1314 
1315 		genpd_sync_power_off(link->parent, use_lock, depth + 1);
1316 
1317 		if (use_lock)
1318 			genpd_unlock(link->parent);
1319 	}
1320 }
1321 
1322 /**
1323  * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
1324  * @genpd: PM domain to power on.
1325  * @use_lock: use the lock.
1326  * @depth: nesting count for lockdep.
1327  *
1328  * This function is only called in "noirq" and "syscore" stages of system power
1329  * transitions. The "noirq" callbacks may be executed asynchronously, thus in
1330  * these cases the lock must be held.
1331  */
1332 static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
1333 				unsigned int depth)
1334 {
1335 	struct gpd_link *link;
1336 
1337 	if (genpd_status_on(genpd))
1338 		return;
1339 
1340 	list_for_each_entry(link, &genpd->child_links, child_node) {
1341 		genpd_sd_counter_inc(link->parent);
1342 
1343 		if (use_lock)
1344 			genpd_lock_nested(link->parent, depth + 1);
1345 
1346 		genpd_sync_power_on(link->parent, use_lock, depth + 1);
1347 
1348 		if (use_lock)
1349 			genpd_unlock(link->parent);
1350 	}
1351 
1352 	_genpd_power_on(genpd, false);
1353 	genpd->status = GENPD_STATE_ON;
1354 }
1355 
1356 /**
1357  * genpd_prepare - Start power transition of a device in a PM domain.
1358  * @dev: Device to start the transition of.
1359  *
1360  * Start a power transition of a device (during a system-wide power transition)
1361  * under the assumption that its pm_domain field points to the domain member of
1362  * an object of type struct generic_pm_domain representing a PM domain
1363  * consisting of I/O devices.
1364  */
1365 static int genpd_prepare(struct device *dev)
1366 {
1367 	struct generic_pm_domain *genpd;
1368 	int ret;
1369 
1370 	dev_dbg(dev, "%s()\n", __func__);
1371 
1372 	genpd = dev_to_genpd(dev);
1373 	if (IS_ERR(genpd))
1374 		return -EINVAL;
1375 
1376 	genpd_lock(genpd);
1377 	genpd->prepared_count++;
1378 	genpd_unlock(genpd);
1379 
1380 	ret = pm_generic_prepare(dev);
1381 	if (ret < 0) {
1382 		genpd_lock(genpd);
1383 
1384 		genpd->prepared_count--;
1385 
1386 		genpd_unlock(genpd);
1387 	}
1388 
1389 	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
1390 	return ret >= 0 ? 0 : ret;
1391 }
1392 
1393 /**
1394  * genpd_finish_suspend - Completion of suspend or hibernation of device in an
1395  *   I/O pm domain.
1396  * @dev: Device to suspend.
1397  * @suspend_noirq: Generic suspend_noirq callback.
1398  * @resume_noirq: Generic resume_noirq callback.
1399  *
1400  * Stop the device and remove power from the domain if all devices in it have
1401  * been stopped.
1402  */
1403 static int genpd_finish_suspend(struct device *dev,
1404 				int (*suspend_noirq)(struct device *dev),
1405 				int (*resume_noirq)(struct device *dev))
1406 {
1407 	struct generic_pm_domain *genpd;
1408 	int ret = 0;
1409 
1410 	genpd = dev_to_genpd(dev);
1411 	if (IS_ERR(genpd))
1412 		return -EINVAL;
1413 
1414 	ret = suspend_noirq(dev);
1415 	if (ret)
1416 		return ret;
1417 
1418 	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
1419 		return 0;
1420 
1421 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1422 	    !pm_runtime_status_suspended(dev)) {
1423 		ret = genpd_stop_dev(genpd, dev);
1424 		if (ret) {
1425 			resume_noirq(dev);
1426 			return ret;
1427 		}
1428 	}
1429 
1430 	genpd_lock(genpd);
1431 	genpd->suspended_count++;
1432 	genpd_sync_power_off(genpd, true, 0);
1433 	genpd_unlock(genpd);
1434 
1435 	return 0;
1436 }
1437 
1438 /**
1439  * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
1440  * @dev: Device to suspend.
1441  *
1442  * Stop the device and remove power from the domain if all devices in it have
1443  * been stopped.
1444  */
1445 static int genpd_suspend_noirq(struct device *dev)
1446 {
1447 	dev_dbg(dev, "%s()\n", __func__);
1448 
1449 	return genpd_finish_suspend(dev,
1450 				    pm_generic_suspend_noirq,
1451 				    pm_generic_resume_noirq);
1452 }
1453 
1454 /**
1455  * genpd_finish_resume - Completion of resume of device in an I/O PM domain.
1456  * @dev: Device to resume.
1457  * @resume_noirq: Generic resume_noirq callback.
1458  *
1459  * Restore power to the device's PM domain, if necessary, and start the device.
1460  */
1461 static int genpd_finish_resume(struct device *dev,
1462 			       int (*resume_noirq)(struct device *dev))
1463 {
1464 	struct generic_pm_domain *genpd;
1465 	int ret;
1466 
1467 	dev_dbg(dev, "%s()\n", __func__);
1468 
1469 	genpd = dev_to_genpd(dev);
1470 	if (IS_ERR(genpd))
1471 		return -EINVAL;
1472 
1473 	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
1474 		return resume_noirq(dev);
1475 
1476 	genpd_lock(genpd);
1477 	genpd_sync_power_on(genpd, true, 0);
1478 	genpd->suspended_count--;
1479 	genpd_unlock(genpd);
1480 
1481 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1482 	    !pm_runtime_status_suspended(dev)) {
1483 		ret = genpd_start_dev(genpd, dev);
1484 		if (ret)
1485 			return ret;
1486 	}
1487 
1488 	return resume_noirq(dev);
1489 }
1490 
1491 /**
1492  * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
1493  * @dev: Device to resume.
1494  *
1495  * Restore power to the device's PM domain, if necessary, and start the device.
1496  */
1497 static int genpd_resume_noirq(struct device *dev)
1498 {
1499 	dev_dbg(dev, "%s()\n", __func__);
1500 
1501 	return genpd_finish_resume(dev, pm_generic_resume_noirq);
1502 }
1503 
1504 /**
1505  * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
1506  * @dev: Device to freeze.
1507  *
1508  * Carry out a late freeze of a device under the assumption that its
1509  * pm_domain field points to the domain member of an object of type
1510  * struct generic_pm_domain representing a power domain consisting of I/O
1511  * devices.
1512  */
1513 static int genpd_freeze_noirq(struct device *dev)
1514 {
1515 	dev_dbg(dev, "%s()\n", __func__);
1516 
1517 	return genpd_finish_suspend(dev,
1518 				    pm_generic_freeze_noirq,
1519 				    pm_generic_thaw_noirq);
1520 }
1521 
1522 /**
1523  * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
1524  * @dev: Device to thaw.
1525  *
1526  * Start the device, unless power has been removed from the domain already
1527  * before the system transition.
1528  */
1529 static int genpd_thaw_noirq(struct device *dev)
1530 {
1531 	dev_dbg(dev, "%s()\n", __func__);
1532 
1533 	return genpd_finish_resume(dev, pm_generic_thaw_noirq);
1534 }
1535 
1536 /**
1537  * genpd_poweroff_noirq - Completion of hibernation of device in an
1538  *   I/O PM domain.
1539  * @dev: Device to poweroff.
1540  *
1541  * Stop the device and remove power from the domain if all devices in it have
1542  * been stopped.
1543  */
1544 static int genpd_poweroff_noirq(struct device *dev)
1545 {
1546 	dev_dbg(dev, "%s()\n", __func__);
1547 
1548 	return genpd_finish_suspend(dev,
1549 				    pm_generic_poweroff_noirq,
1550 				    pm_generic_restore_noirq);
1551 }
1552 
1553 /**
1554  * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
1555  * @dev: Device to resume.
1556  *
1557  * Make sure the domain will be in the same power state as before the
1558  * hibernation the system is resuming from and start the device if necessary.
1559  */
1560 static int genpd_restore_noirq(struct device *dev)
1561 {
1562 	dev_dbg(dev, "%s()\n", __func__);
1563 
1564 	return genpd_finish_resume(dev, pm_generic_restore_noirq);
1565 }
1566 
1567 /**
1568  * genpd_complete - Complete power transition of a device in a power domain.
1569  * @dev: Device to complete the transition of.
1570  *
1571  * Complete a power transition of a device (during a system-wide power
1572  * transition) under the assumption that its pm_domain field points to the
1573  * domain member of an object of type struct generic_pm_domain representing
1574  * a power domain consisting of I/O devices.
1575  */
1576 static void genpd_complete(struct device *dev)
1577 {
1578 	struct generic_pm_domain *genpd;
1579 
1580 	dev_dbg(dev, "%s()\n", __func__);
1581 
1582 	genpd = dev_to_genpd(dev);
1583 	if (IS_ERR(genpd))
1584 		return;
1585 
1586 	pm_generic_complete(dev);
1587 
1588 	genpd_lock(genpd);
1589 
1590 	genpd->prepared_count--;
1591 	if (!genpd->prepared_count)
1592 		genpd_queue_power_off_work(genpd);
1593 
1594 	genpd_unlock(genpd);
1595 }
1596 
1597 static void genpd_switch_state(struct device *dev, bool suspend)
1598 {
1599 	struct generic_pm_domain *genpd;
1600 	bool use_lock;
1601 
1602 	genpd = dev_to_genpd_safe(dev);
1603 	if (!genpd)
1604 		return;
1605 
1606 	use_lock = genpd_is_irq_safe(genpd);
1607 
1608 	if (use_lock)
1609 		genpd_lock(genpd);
1610 
1611 	if (suspend) {
1612 		genpd->suspended_count++;
1613 		genpd_sync_power_off(genpd, use_lock, 0);
1614 	} else {
1615 		genpd_sync_power_on(genpd, use_lock, 0);
1616 		genpd->suspended_count--;
1617 	}
1618 
1619 	if (use_lock)
1620 		genpd_unlock(genpd);
1621 }
1622 
1623 /**
1624  * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
1625  * @dev: The device that is attached to the genpd, that can be suspended.
1626  *
1627  * This routine should typically be called for a device that needs to be
1628  * suspended during the syscore suspend phase. It may also be called during
1629  * suspend-to-idle to suspend a corresponding CPU device that is attached to a
1630  * genpd.
1631  */
1632 void dev_pm_genpd_suspend(struct device *dev)
1633 {
1634 	genpd_switch_state(dev, true);
1635 }
1636 EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);
1637 
1638 /**
1639  * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
1640  * @dev: The device that is attached to the genpd, which needs to be resumed.
1641  *
1642  * This routine should typically be called for a device that needs to be resumed
1643  * during the syscore resume phase. It may also be called during suspend-to-idle
1644  * to resume a corresponding CPU device that is attached to a genpd.
1645  */
1646 void dev_pm_genpd_resume(struct device *dev)
1647 {
1648 	genpd_switch_state(dev, false);
1649 }
1650 EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);
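/*
 * Sketch of the typical syscore usage (hypothetical driver, foo_dev assumed):
 *
 *	static int foo_syscore_suspend(void)
 *	{
 *		dev_pm_genpd_suspend(foo_dev);
 *		return 0;
 *	}
 *
 *	static void foo_syscore_resume(void)
 *	{
 *		dev_pm_genpd_resume(foo_dev);
 *	}
 *
 *	static struct syscore_ops foo_syscore_ops = {
 *		.suspend = foo_syscore_suspend,
 *		.resume  = foo_syscore_resume,
 *	};
 *
 *	register_syscore_ops(&foo_syscore_ops);
 */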
1651 
1652 #else /* !CONFIG_PM_SLEEP */
1653 
1654 #define genpd_prepare		NULL
1655 #define genpd_suspend_noirq	NULL
1656 #define genpd_resume_noirq	NULL
1657 #define genpd_freeze_noirq	NULL
1658 #define genpd_thaw_noirq	NULL
1659 #define genpd_poweroff_noirq	NULL
1660 #define genpd_restore_noirq	NULL
1661 #define genpd_complete		NULL
1662 
1663 #endif /* CONFIG_PM_SLEEP */
1664 
1665 static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
1666 							   bool has_governor)
1667 {
1668 	struct generic_pm_domain_data *gpd_data;
1669 	struct gpd_timing_data *td;
1670 	int ret;
1671 
1672 	ret = dev_pm_get_subsys_data(dev);
1673 	if (ret)
1674 		return ERR_PTR(ret);
1675 
1676 	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1677 	if (!gpd_data) {
1678 		ret = -ENOMEM;
1679 		goto err_put;
1680 	}
1681 
1682 	gpd_data->base.dev = dev;
1683 	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1684 
1685 	/* Allocate data used by a governor. */
1686 	if (has_governor) {
1687 		td = kzalloc(sizeof(*td), GFP_KERNEL);
1688 		if (!td) {
1689 			ret = -ENOMEM;
1690 			goto err_free;
1691 		}
1692 
1693 		td->constraint_changed = true;
1694 		td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
1695 		td->next_wakeup = KTIME_MAX;
1696 		gpd_data->td = td;
1697 	}
1698 
1699 	spin_lock_irq(&dev->power.lock);
1700 
1701 	if (dev->power.subsys_data->domain_data)
1702 		ret = -EINVAL;
1703 	else
1704 		dev->power.subsys_data->domain_data = &gpd_data->base;
1705 
1706 	spin_unlock_irq(&dev->power.lock);
1707 
1708 	if (ret)
1709 		goto err_free;
1710 
1711 	return gpd_data;
1712 
1713  err_free:
1714 	kfree(gpd_data->td);
1715 	kfree(gpd_data);
1716  err_put:
1717 	dev_pm_put_subsys_data(dev);
1718 	return ERR_PTR(ret);
1719 }
1720 
1721 static void genpd_free_dev_data(struct device *dev,
1722 				struct generic_pm_domain_data *gpd_data)
1723 {
1724 	spin_lock_irq(&dev->power.lock);
1725 
1726 	dev->power.subsys_data->domain_data = NULL;
1727 
1728 	spin_unlock_irq(&dev->power.lock);
1729 
1730 	dev_pm_opp_clear_config(gpd_data->opp_token);
1731 	kfree(gpd_data->td);
1732 	kfree(gpd_data);
1733 	dev_pm_put_subsys_data(dev);
1734 }
1735 
1736 static void genpd_update_cpumask(struct generic_pm_domain *genpd,
1737 				 int cpu, bool set, unsigned int depth)
1738 {
1739 	struct gpd_link *link;
1740 
1741 	if (!genpd_is_cpu_domain(genpd))
1742 		return;
1743 
1744 	list_for_each_entry(link, &genpd->child_links, child_node) {
1745 		struct generic_pm_domain *parent = link->parent;
1746 
1747 		genpd_lock_nested(parent, depth + 1);
1748 		genpd_update_cpumask(parent, cpu, set, depth + 1);
1749 		genpd_unlock(parent);
1750 	}
1751 
1752 	if (set)
1753 		cpumask_set_cpu(cpu, genpd->cpus);
1754 	else
1755 		cpumask_clear_cpu(cpu, genpd->cpus);
1756 }
1757 
1758 static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
1759 {
1760 	if (cpu >= 0)
1761 		genpd_update_cpumask(genpd, cpu, true, 0);
1762 }
1763 
1764 static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
1765 {
1766 	if (cpu >= 0)
1767 		genpd_update_cpumask(genpd, cpu, false, 0);
1768 }
1769 
1770 static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
1771 {
1772 	int cpu;
1773 
1774 	if (!genpd_is_cpu_domain(genpd))
1775 		return -1;
1776 
1777 	for_each_possible_cpu(cpu) {
1778 		if (get_cpu_device(cpu) == dev)
1779 			return cpu;
1780 	}
1781 
1782 	return -1;
1783 }
1784 
1785 static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1786 			    struct device *base_dev)
1787 {
1788 	struct genpd_governor_data *gd = genpd->gd;
1789 	struct generic_pm_domain_data *gpd_data;
1790 	int ret;
1791 
1792 	dev_dbg(dev, "%s()\n", __func__);
1793 
1794 	gpd_data = genpd_alloc_dev_data(dev, gd);
1795 	if (IS_ERR(gpd_data))
1796 		return PTR_ERR(gpd_data);
1797 
1798 	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);
1799 
1800 	gpd_data->hw_mode = genpd->get_hwmode_dev ? genpd->get_hwmode_dev(genpd, dev) : false;
1801 
1802 	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1803 	if (ret)
1804 		goto out;
1805 
1806 	genpd_lock(genpd);
1807 
1808 	genpd_set_cpumask(genpd, gpd_data->cpu);
1809 
1810 	genpd->device_count++;
1811 	if (gd)
1812 		gd->max_off_time_changed = true;
1813 
1814 	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1815 
1816 	genpd_unlock(genpd);
1817 	dev_pm_domain_set(dev, &genpd->domain);
1818  out:
1819 	if (ret)
1820 		genpd_free_dev_data(dev, gpd_data);
1821 	else
1822 		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
1823 					DEV_PM_QOS_RESUME_LATENCY);
1824 
1825 	return ret;
1826 }
1827 
1828 /**
1829  * pm_genpd_add_device - Add a device to an I/O PM domain.
1830  * @genpd: PM domain to add the device to.
1831  * @dev: Device to be added.
1832  */
1833 int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
1834 {
1835 	int ret;
1836 
1837 	if (!genpd || !dev)
1838 		return -EINVAL;
1839 
1840 	mutex_lock(&gpd_list_lock);
1841 	ret = genpd_add_device(genpd, dev, dev);
1842 	mutex_unlock(&gpd_list_lock);
1843 
1844 	return ret;
1845 }
1846 EXPORT_SYMBOL_GPL(pm_genpd_add_device);
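/*
 * Illustrative sketch, assuming foo_pd is a domain that has already been
 * registered with pm_genpd_init() and pdev is a platform device that should
 * be power-managed by it:
 *
 *	ret = pm_genpd_add_device(&foo_pd, &pdev->dev);
 *	if (ret)
 *		return ret;
 *
 * The device can later be taken out of the domain again with
 * pm_genpd_remove_device(&pdev->dev).
 */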
1847 
1848 static int genpd_remove_device(struct generic_pm_domain *genpd,
1849 			       struct device *dev)
1850 {
1851 	struct generic_pm_domain_data *gpd_data;
1852 	struct pm_domain_data *pdd;
1853 	int ret = 0;
1854 
1855 	dev_dbg(dev, "%s()\n", __func__);
1856 
1857 	pdd = dev->power.subsys_data->domain_data;
1858 	gpd_data = to_gpd_data(pdd);
1859 	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
1860 				   DEV_PM_QOS_RESUME_LATENCY);
1861 
1862 	genpd_lock(genpd);
1863 
1864 	if (genpd->prepared_count > 0) {
1865 		ret = -EAGAIN;
1866 		goto out;
1867 	}
1868 
1869 	genpd->device_count--;
1870 	if (genpd->gd)
1871 		genpd->gd->max_off_time_changed = true;
1872 
1873 	genpd_clear_cpumask(genpd, gpd_data->cpu);
1874 
1875 	list_del_init(&pdd->list_node);
1876 
1877 	genpd_unlock(genpd);
1878 
1879 	dev_pm_domain_set(dev, NULL);
1880 
1881 	if (genpd->detach_dev)
1882 		genpd->detach_dev(genpd, dev);
1883 
1884 	genpd_free_dev_data(dev, gpd_data);
1885 
1886 	return 0;
1887 
1888  out:
1889 	genpd_unlock(genpd);
1890 	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);
1891 
1892 	return ret;
1893 }
1894 
1895 /**
1896  * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1897  * @dev: Device to be removed.
1898  */
1899 int pm_genpd_remove_device(struct device *dev)
1900 {
1901 	struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);
1902 
1903 	if (!genpd)
1904 		return -EINVAL;
1905 
1906 	return genpd_remove_device(genpd, dev);
1907 }
1908 EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
1909 
1910 /**
1911  * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
1912  *
1913  * @dev: Device that should be associated with the notifier
1914  * @nb: The notifier block to register
1915  *
1916  * Users may call this function to add a genpd power on/off notifier for an
1917  * attached @dev. Only one notifier per device is allowed. The notifier is
1918  * sent when genpd is powering on/off the PM domain.
1919  *
1920  * It is assumed that the caller guarantees that the genpd won't be detached
1921  * while this routine is being called.
1922  *
1923  * Returns 0 on success and negative error values on failures.
1924  */
1925 int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
1926 {
1927 	struct generic_pm_domain *genpd;
1928 	struct generic_pm_domain_data *gpd_data;
1929 	int ret;
1930 
1931 	genpd = dev_to_genpd_safe(dev);
1932 	if (!genpd)
1933 		return -ENODEV;
1934 
1935 	if (WARN_ON(!dev->power.subsys_data ||
1936 		     !dev->power.subsys_data->domain_data))
1937 		return -EINVAL;
1938 
1939 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1940 	if (gpd_data->power_nb)
1941 		return -EEXIST;
1942 
1943 	genpd_lock(genpd);
1944 	ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
1945 	genpd_unlock(genpd);
1946 
1947 	if (ret) {
1948 		dev_warn(dev, "failed to add notifier for PM domain %s\n",
1949 			 dev_name(&genpd->dev));
1950 		return ret;
1951 	}
1952 
1953 	gpd_data->power_nb = nb;
1954 	return 0;
1955 }
1956 EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
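/*
 * Illustrative sketch of a power on/off notifier for an attached device. The
 * names foo_genpd_notifier, foo_nb, foo_save_context() and
 * foo_restore_context() are assumptions made up for the example:
 *
 *	static int foo_genpd_notifier(struct notifier_block *nb,
 *				      unsigned long action, void *data)
 *	{
 *		if (action == GENPD_NOTIFY_PRE_OFF)
 *			foo_save_context();
 *		else if (action == GENPD_NOTIFY_ON)
 *			foo_restore_context();
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_genpd_notifier,
 *	};
 *
 *	ret = dev_pm_genpd_add_notifier(dev, &foo_nb);
 */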
1957 
1958 /**
1959  * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
1960  *
1961  * @dev: Device that is associated with the notifier
1962  *
1963  * Users may call this function to remove a genpd power on/off notifier for an
1964  * attached @dev.
1965  *
1966  * It is assumed that the caller guarantees that the genpd won't be detached
1967  * while this routine is being called.
1968  *
1969  * Returns 0 on success and negative error values on failures.
1970  */
1971 int dev_pm_genpd_remove_notifier(struct device *dev)
1972 {
1973 	struct generic_pm_domain *genpd;
1974 	struct generic_pm_domain_data *gpd_data;
1975 	int ret;
1976 
1977 	genpd = dev_to_genpd_safe(dev);
1978 	if (!genpd)
1979 		return -ENODEV;
1980 
1981 	if (WARN_ON(!dev->power.subsys_data ||
1982 		     !dev->power.subsys_data->domain_data))
1983 		return -EINVAL;
1984 
1985 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1986 	if (!gpd_data->power_nb)
1987 		return -ENODEV;
1988 
1989 	genpd_lock(genpd);
1990 	ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
1991 					    gpd_data->power_nb);
1992 	genpd_unlock(genpd);
1993 
1994 	if (ret) {
1995 		dev_warn(dev, "failed to remove notifier for PM domain %s\n",
1996 			 dev_name(&genpd->dev));
1997 		return ret;
1998 	}
1999 
2000 	gpd_data->power_nb = NULL;
2001 	return 0;
2002 }
2003 EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);
2004 
2005 static int genpd_add_subdomain(struct generic_pm_domain *genpd,
2006 			       struct generic_pm_domain *subdomain)
2007 {
2008 	struct gpd_link *link, *itr;
2009 	int ret = 0;
2010 
2011 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
2012 	    || genpd == subdomain)
2013 		return -EINVAL;
2014 
2015 	/*
2016 	 * If the domain can be powered on/off in an IRQ safe
2017 	 * context, ensure that the subdomain can also be
2018 	 * powered on/off in that context.
2019 	 */
2020 	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
2021 		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
2022 		     dev_name(&genpd->dev), subdomain->name);
2023 		return -EINVAL;
2024 	}
2025 
2026 	link = kzalloc(sizeof(*link), GFP_KERNEL);
2027 	if (!link)
2028 		return -ENOMEM;
2029 
2030 	genpd_lock(subdomain);
2031 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
2032 
2033 	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
2034 		ret = -EINVAL;
2035 		goto out;
2036 	}
2037 
2038 	list_for_each_entry(itr, &genpd->parent_links, parent_node) {
2039 		if (itr->child == subdomain && itr->parent == genpd) {
2040 			ret = -EINVAL;
2041 			goto out;
2042 		}
2043 	}
2044 
2045 	link->parent = genpd;
2046 	list_add_tail(&link->parent_node, &genpd->parent_links);
2047 	link->child = subdomain;
2048 	list_add_tail(&link->child_node, &subdomain->child_links);
2049 	if (genpd_status_on(subdomain))
2050 		genpd_sd_counter_inc(genpd);
2051 
2052  out:
2053 	genpd_unlock(genpd);
2054 	genpd_unlock(subdomain);
2055 	if (ret)
2056 		kfree(link);
2057 	return ret;
2058 }
2059 
2060 /**
2061  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2062  * @genpd: Leader PM domain to add the subdomain to.
2063  * @subdomain: Subdomain to be added.
2064  */
2065 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
2066 			   struct generic_pm_domain *subdomain)
2067 {
2068 	int ret;
2069 
2070 	mutex_lock(&gpd_list_lock);
2071 	ret = genpd_add_subdomain(genpd, subdomain);
2072 	mutex_unlock(&gpd_list_lock);
2073 
2074 	return ret;
2075 }
2076 EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
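/*
 * Illustrative sketch: making one registered domain a child of another, where
 * foo_parent_pd and foo_child_pd are made-up names and both domains must
 * already have been initialized with pm_genpd_init():
 *
 *	ret = pm_genpd_add_subdomain(&foo_parent_pd, &foo_child_pd);
 *
 * Once linked, the parent is kept powered on for as long as the child domain
 * is on.
 */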
2077 
2078 /**
2079  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
2080  * @genpd: Leader PM domain to remove the subdomain from.
2081  * @subdomain: Subdomain to be removed.
2082  */
2083 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
2084 			      struct generic_pm_domain *subdomain)
2085 {
2086 	struct gpd_link *l, *link;
2087 	int ret = -EINVAL;
2088 
2089 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
2090 		return -EINVAL;
2091 
2092 	genpd_lock(subdomain);
2093 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
2094 
2095 	if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
2096 		pr_warn("%s: unable to remove subdomain %s\n",
2097 			dev_name(&genpd->dev), subdomain->name);
2098 		ret = -EBUSY;
2099 		goto out;
2100 	}
2101 
2102 	list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
2103 		if (link->child != subdomain)
2104 			continue;
2105 
2106 		list_del(&link->parent_node);
2107 		list_del(&link->child_node);
2108 		kfree(link);
2109 		if (genpd_status_on(subdomain))
2110 			genpd_sd_counter_dec(genpd);
2111 
2112 		ret = 0;
2113 		break;
2114 	}
2115 
2116 out:
2117 	genpd_unlock(genpd);
2118 	genpd_unlock(subdomain);
2119 
2120 	return ret;
2121 }
2122 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
2123 
2124 static void genpd_free_default_power_state(struct genpd_power_state *states,
2125 					   unsigned int state_count)
2126 {
2127 	kfree(states);
2128 }
2129 
2130 static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
2131 {
2132 	struct genpd_power_state *state;
2133 
2134 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2135 	if (!state)
2136 		return -ENOMEM;
2137 
2138 	genpd->states = state;
2139 	genpd->state_count = 1;
2140 	genpd->free_states = genpd_free_default_power_state;
2141 
2142 	return 0;
2143 }
2144 
2145 static void genpd_provider_release(struct device *dev)
2146 {
2147 	/* nothing to be done here */
2148 }
2149 
2150 static int genpd_alloc_data(struct generic_pm_domain *genpd)
2151 {
2152 	struct genpd_governor_data *gd = NULL;
2153 	int ret;
2154 
2155 	if (genpd_is_cpu_domain(genpd) &&
2156 	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
2157 		return -ENOMEM;
2158 
2159 	if (genpd->gov) {
2160 		gd = kzalloc(sizeof(*gd), GFP_KERNEL);
2161 		if (!gd) {
2162 			ret = -ENOMEM;
2163 			goto free;
2164 		}
2165 
2166 		gd->max_off_time_ns = -1;
2167 		gd->max_off_time_changed = true;
2168 		gd->next_wakeup = KTIME_MAX;
2169 		gd->next_hrtimer = KTIME_MAX;
2170 	}
2171 
2172 	/* Use only one "off" state if there were no states declared */
2173 	if (genpd->state_count == 0) {
2174 		ret = genpd_set_default_power_state(genpd);
2175 		if (ret)
2176 			goto free;
2177 	}
2178 
2179 	genpd->gd = gd;
2180 	device_initialize(&genpd->dev);
2181 	genpd->dev.release = genpd_provider_release;
2182 
2183 	if (!genpd_is_dev_name_fw(genpd)) {
2184 		dev_set_name(&genpd->dev, "%s", genpd->name);
2185 	} else {
2186 		ret = ida_alloc(&genpd_ida, GFP_KERNEL);
2187 		if (ret < 0)
2188 			goto put;
2189 
2190 		genpd->device_id = ret;
2191 		dev_set_name(&genpd->dev, "%s_%u", genpd->name, genpd->device_id);
2192 	}
2193 
2194 	return 0;
2195 put:
2196 	put_device(&genpd->dev);
2197 	if (genpd->free_states == genpd_free_default_power_state)
2198 		kfree(genpd->states);
2199 free:
2200 	if (genpd_is_cpu_domain(genpd))
2201 		free_cpumask_var(genpd->cpus);
2202 	kfree(gd);
2203 	return ret;
2204 }
2205 
2206 static void genpd_free_data(struct generic_pm_domain *genpd)
2207 {
2208 	put_device(&genpd->dev);
2209 	if (genpd->device_id != -ENXIO)
2210 		ida_free(&genpd_ida, genpd->device_id);
2211 	if (genpd_is_cpu_domain(genpd))
2212 		free_cpumask_var(genpd->cpus);
2213 	if (genpd->free_states)
2214 		genpd->free_states(genpd->states, genpd->state_count);
2215 	kfree(genpd->gd);
2216 }
2217 
2218 static void genpd_lock_init(struct generic_pm_domain *genpd)
2219 {
2220 	if (genpd_is_cpu_domain(genpd)) {
2221 		raw_spin_lock_init(&genpd->raw_slock);
2222 		genpd->lock_ops = &genpd_raw_spin_ops;
2223 	} else if (genpd_is_irq_safe(genpd)) {
2224 		spin_lock_init(&genpd->slock);
2225 		genpd->lock_ops = &genpd_spin_ops;
2226 	} else {
2227 		mutex_init(&genpd->mlock);
2228 		genpd->lock_ops = &genpd_mtx_ops;
2229 	}
2230 }
2231 
2232 /**
2233  * pm_genpd_init - Initialize a generic I/O PM domain object.
2234  * @genpd: PM domain object to initialize.
2235  * @gov: PM domain governor to associate with the domain (may be NULL).
2236  * @is_off: Initial value of the domain's power_is_off field.
2237  *
2238  * Returns 0 on successful initialization, else a negative error code.
2239  */
2240 int pm_genpd_init(struct generic_pm_domain *genpd,
2241 		  struct dev_power_governor *gov, bool is_off)
2242 {
2243 	int ret;
2244 
2245 	if (IS_ERR_OR_NULL(genpd))
2246 		return -EINVAL;
2247 
2248 	INIT_LIST_HEAD(&genpd->parent_links);
2249 	INIT_LIST_HEAD(&genpd->child_links);
2250 	INIT_LIST_HEAD(&genpd->dev_list);
2251 	RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
2252 	genpd_lock_init(genpd);
2253 	genpd->gov = gov;
2254 	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
2255 	atomic_set(&genpd->sd_count, 0);
2256 	genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
2257 	genpd->device_count = 0;
2258 	genpd->provider = NULL;
2259 	genpd->device_id = -ENXIO;
2260 	genpd->has_provider = false;
2261 	genpd->accounting_time = ktime_get_mono_fast_ns();
2262 	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
2263 	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
2264 	genpd->domain.ops.prepare = genpd_prepare;
2265 	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
2266 	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
2267 	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
2268 	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
2269 	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
2270 	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
2271 	genpd->domain.ops.complete = genpd_complete;
2272 	genpd->domain.start = genpd_dev_pm_start;
2273 	genpd->domain.set_performance_state = genpd_dev_pm_set_performance_state;
2274 
2275 	if (genpd->flags & GENPD_FLAG_PM_CLK) {
2276 		genpd->dev_ops.stop = pm_clk_suspend;
2277 		genpd->dev_ops.start = pm_clk_resume;
2278 	}
2279 
2280 	/* The always-on governor works better with the corresponding flag. */
2281 	if (gov == &pm_domain_always_on_gov)
2282 		genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
2283 
2284 	/* Always-on domains must be powered on at initialization. */
2285 	if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
2286 			!genpd_status_on(genpd)) {
2287 		pr_err("always-on PM domain %s is not on\n", genpd->name);
2288 		return -EINVAL;
2289 	}
2290 
2291 	/* Multiple states but no governor doesn't make sense. */
2292 	if (!gov && genpd->state_count > 1)
2293 		pr_warn("%s: no governor for states\n", genpd->name);
2294 
2295 	ret = genpd_alloc_data(genpd);
2296 	if (ret)
2297 		return ret;
2298 
2299 	mutex_lock(&gpd_list_lock);
2300 	list_add(&genpd->gpd_list_node, &gpd_list);
2301 	mutex_unlock(&gpd_list_lock);
2302 	genpd_debug_add(genpd);
2303 
2304 	return 0;
2305 }
2306 EXPORT_SYMBOL_GPL(pm_genpd_init);
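/*
 * Illustrative sketch of a minimal domain registration. All foo_* names are
 * assumptions made up for the example; real callbacks would toggle the power
 * rail or clocks for the domain instead of just returning 0:
 *
 *	static int foo_power_on(struct generic_pm_domain *domain)
 *	{
 *		return 0;
 *	}
 *
 *	static int foo_power_off(struct generic_pm_domain *domain)
 *	{
 *		return 0;
 *	}
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "foo",
 *		.power_on = foo_power_on,
 *		.power_off = foo_power_off,
 *	};
 *
 *	ret = pm_genpd_init(&foo_pd, NULL, true);
 *
 * Passing true for is_off registers the domain in the powered-off state.
 */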
2307 
2308 static int genpd_remove(struct generic_pm_domain *genpd)
2309 {
2310 	struct gpd_link *l, *link;
2311 
2312 	if (IS_ERR_OR_NULL(genpd))
2313 		return -EINVAL;
2314 
2315 	genpd_lock(genpd);
2316 
2317 	if (genpd->has_provider) {
2318 		genpd_unlock(genpd);
2319 		pr_err("Provider present, unable to remove %s\n", dev_name(&genpd->dev));
2320 		return -EBUSY;
2321 	}
2322 
2323 	if (!list_empty(&genpd->parent_links) || genpd->device_count) {
2324 		genpd_unlock(genpd);
2325 		pr_err("%s: unable to remove %s\n", __func__, dev_name(&genpd->dev));
2326 		return -EBUSY;
2327 	}
2328 
2329 	list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
2330 		list_del(&link->parent_node);
2331 		list_del(&link->child_node);
2332 		kfree(link);
2333 	}
2334 
2335 	list_del(&genpd->gpd_list_node);
2336 	genpd_unlock(genpd);
2337 	genpd_debug_remove(genpd);
2338 	cancel_work_sync(&genpd->power_off_work);
2339 	genpd_free_data(genpd);
2340 
2341 	pr_debug("%s: removed %s\n", __func__, dev_name(&genpd->dev));
2342 
2343 	return 0;
2344 }
2345 
2346 /**
2347  * pm_genpd_remove - Remove a generic I/O PM domain
2348  * @genpd: Pointer to PM domain that is to be removed.
2349  *
2350  * To remove the PM domain, this function:
2351  *  - Removes the PM domain as a subdomain to any parent domains,
2352  *    if it was added.
2353  *  - Removes the PM domain from the list of registered PM domains.
2354  *
2355  * The PM domain will only be removed if the associated provider has
2356  * been removed, it is not a parent to any other PM domain, and it has
2357  * no devices associated with it.
2358  */
2359 int pm_genpd_remove(struct generic_pm_domain *genpd)
2360 {
2361 	int ret;
2362 
2363 	mutex_lock(&gpd_list_lock);
2364 	ret = genpd_remove(genpd);
2365 	mutex_unlock(&gpd_list_lock);
2366 
2367 	return ret;
2368 }
2369 EXPORT_SYMBOL_GPL(pm_genpd_remove);
2370 
2371 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
2372 
2373 /*
2374  * Device Tree based PM domain providers.
2375  *
2376  * The code below implements generic device tree based PM domain providers that
2377  * bind device tree nodes with generic PM domains registered in the system.
2378  *
2379  * Any driver that registers generic PM domains and needs to support binding of
2380  * devices to these domains is supposed to register a PM domain provider, which
2381  * maps a PM domain specifier retrieved from the device tree to a PM domain.
2382  *
2383  * Two simple mapping functions have been provided for convenience:
2384  *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
2385  *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
2386  *    index.
2387  */
2388 
2389 /**
2390  * struct of_genpd_provider - PM domain provider registration structure
2391  * @link: Entry in global list of PM domain providers
2392  * @node: Pointer to device tree node of PM domain provider
2393  * @xlate: Provider-specific xlate callback mapping a set of specifier cells
2394  *         into a PM domain.
2395  * @data: context pointer to be passed into @xlate callback
2396  */
2397 struct of_genpd_provider {
2398 	struct list_head link;
2399 	struct device_node *node;
2400 	genpd_xlate_t xlate;
2401 	void *data;
2402 };
2403 
2404 /* List of registered PM domain providers. */
2405 static LIST_HEAD(of_genpd_providers);
2406 /* Mutex to protect the list above. */
2407 static DEFINE_MUTEX(of_genpd_mutex);
2408 
2409 /**
2410  * genpd_xlate_simple() - Xlate function for direct node-domain mapping
2411  * @genpdspec: OF phandle args to map into a PM domain
2412  * @data: xlate function private data - pointer to struct generic_pm_domain
2413  *
2414  * This is a generic xlate function that can be used to model PM domains that
2415  * have their own device tree nodes. The private data of xlate function needs
2416  * to be a valid pointer to struct generic_pm_domain.
2417  */
2418 static struct generic_pm_domain *genpd_xlate_simple(
2419 					const struct of_phandle_args *genpdspec,
2420 					void *data)
2421 {
2422 	return data;
2423 }
2424 
2425 /**
2426  * genpd_xlate_onecell() - Xlate function using a single index.
2427  * @genpdspec: OF phandle args to map into a PM domain
2428  * @data: xlate function private data - pointer to struct genpd_onecell_data
2429  *
2430  * This is a generic xlate function that can be used to model simple PM domain
2431  * controllers that have one device tree node and provide multiple PM domains.
2432  * A single cell is used as an index into an array of PM domains specified in
2433  * the genpd_onecell_data struct when registering the provider.
2434  */
2435 static struct generic_pm_domain *genpd_xlate_onecell(
2436 					const struct of_phandle_args *genpdspec,
2437 					void *data)
2438 {
2439 	struct genpd_onecell_data *genpd_data = data;
2440 	unsigned int idx = genpdspec->args[0];
2441 
2442 	if (genpdspec->args_count != 1)
2443 		return ERR_PTR(-EINVAL);
2444 
2445 	if (idx >= genpd_data->num_domains) {
2446 		pr_err("%s: invalid domain index %u\n", __func__, idx);
2447 		return ERR_PTR(-EINVAL);
2448 	}
2449 
2450 	if (!genpd_data->domains[idx])
2451 		return ERR_PTR(-ENOENT);
2452 
2453 	return genpd_data->domains[idx];
2454 }
2455 
2456 /**
2457  * genpd_add_provider() - Register a PM domain provider for a node
2458  * @np: Device node pointer associated with the PM domain provider.
2459  * @xlate: Callback for decoding PM domain from phandle arguments.
2460  * @data: Context pointer for @xlate callback.
2461  */
2462 static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
2463 			      void *data)
2464 {
2465 	struct of_genpd_provider *cp;
2466 
2467 	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2468 	if (!cp)
2469 		return -ENOMEM;
2470 
2471 	cp->node = of_node_get(np);
2472 	cp->data = data;
2473 	cp->xlate = xlate;
2474 	fwnode_dev_initialized(&np->fwnode, true);
2475 
2476 	mutex_lock(&of_genpd_mutex);
2477 	list_add(&cp->link, &of_genpd_providers);
2478 	mutex_unlock(&of_genpd_mutex);
2479 	pr_debug("Added domain provider from %pOF\n", np);
2480 
2481 	return 0;
2482 }
2483 
2484 static bool genpd_present(const struct generic_pm_domain *genpd)
2485 {
2486 	bool ret = false;
2487 	const struct generic_pm_domain *gpd;
2488 
2489 	mutex_lock(&gpd_list_lock);
2490 	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2491 		if (gpd == genpd) {
2492 			ret = true;
2493 			break;
2494 		}
2495 	}
2496 	mutex_unlock(&gpd_list_lock);
2497 
2498 	return ret;
2499 }
2500 
2501 /**
2502  * of_genpd_add_provider_simple() - Register a simple PM domain provider
2503  * @np: Device node pointer associated with the PM domain provider.
2504  * @genpd: Pointer to PM domain associated with the PM domain provider.
2505  */
2506 int of_genpd_add_provider_simple(struct device_node *np,
2507 				 struct generic_pm_domain *genpd)
2508 {
2509 	int ret;
2510 
2511 	if (!np || !genpd)
2512 		return -EINVAL;
2513 
2514 	if (!genpd_present(genpd))
2515 		return -EINVAL;
2516 
2517 	genpd->dev.of_node = np;
2518 
2519 	/* Parse genpd OPP table */
2520 	if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
2521 		ret = dev_pm_opp_of_add_table(&genpd->dev);
2522 		if (ret)
2523 			return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n");
2524 
2525 		/*
2526 		 * Save table for faster processing while setting performance
2527 		 * state.
2528 		 */
2529 		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2530 		WARN_ON(IS_ERR(genpd->opp_table));
2531 	}
2532 
2533 	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
2534 	if (ret) {
2535 		if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
2536 			dev_pm_opp_put_opp_table(genpd->opp_table);
2537 			dev_pm_opp_of_remove_table(&genpd->dev);
2538 		}
2539 
2540 		return ret;
2541 	}
2542 
2543 	genpd->provider = &np->fwnode;
2544 	genpd->has_provider = true;
2545 
2546 	return 0;
2547 }
2548 EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
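/*
 * Illustrative sketch, assuming foo_pd has already been registered with
 * pm_genpd_init() and pdev is the provider's platform device:
 *
 *	ret = of_genpd_add_provider_simple(pdev->dev.of_node, &foo_pd);
 *
 * Consumers can then reference the provider node from their "power-domains"
 * property; a simple provider uses #power-domain-cells = <0>.
 */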
2549 
2550 /**
2551  * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
2552  * @np: Device node pointer associated with the PM domain provider.
2553  * @data: Pointer to the data associated with the PM domain provider.
2554  */
2555 int of_genpd_add_provider_onecell(struct device_node *np,
2556 				  struct genpd_onecell_data *data)
2557 {
2558 	struct generic_pm_domain *genpd;
2559 	unsigned int i;
2560 	int ret = -EINVAL;
2561 
2562 	if (!np || !data)
2563 		return -EINVAL;
2564 
2565 	if (!data->xlate)
2566 		data->xlate = genpd_xlate_onecell;
2567 
2568 	for (i = 0; i < data->num_domains; i++) {
2569 		genpd = data->domains[i];
2570 
2571 		if (!genpd)
2572 			continue;
2573 		if (!genpd_present(genpd))
2574 			goto error;
2575 
2576 		genpd->dev.of_node = np;
2577 
2578 		/* Parse genpd OPP table */
2579 		if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
2580 			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
2581 			if (ret) {
2582 				dev_err_probe(&genpd->dev, ret,
2583 					      "Failed to add OPP table for index %d\n", i);
2584 				goto error;
2585 			}
2586 
2587 			/*
2588 			 * Save table for faster processing while setting
2589 			 * performance state.
2590 			 */
2591 			genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2592 			WARN_ON(IS_ERR(genpd->opp_table));
2593 		}
2594 
2595 		genpd->provider = &np->fwnode;
2596 		genpd->has_provider = true;
2597 	}
2598 
2599 	ret = genpd_add_provider(np, data->xlate, data);
2600 	if (ret < 0)
2601 		goto error;
2602 
2603 	return 0;
2604 
2605 error:
2606 	while (i--) {
2607 		genpd = data->domains[i];
2608 
2609 		if (!genpd)
2610 			continue;
2611 
2612 		genpd->provider = NULL;
2613 		genpd->has_provider = false;
2614 
2615 		if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
2616 			dev_pm_opp_put_opp_table(genpd->opp_table);
2617 			dev_pm_opp_of_remove_table(&genpd->dev);
2618 		}
2619 	}
2620 
2621 	return ret;
2622 }
2623 EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
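/*
 * Illustrative sketch of a onecell provider exposing two domains. All foo_*
 * names are assumptions made up for the example and both domains must already
 * have been initialized with pm_genpd_init():
 *
 *	static struct generic_pm_domain *foo_domains[] = {
 *		&foo_pd_a,
 *		&foo_pd_b,
 *	};
 *
 *	static struct genpd_onecell_data foo_onecell_data = {
 *		.domains = foo_domains,
 *		.num_domains = ARRAY_SIZE(foo_domains),
 *	};
 *
 *	ret = of_genpd_add_provider_onecell(pdev->dev.of_node,
 *					    &foo_onecell_data);
 *
 * Consumers then select a domain by index, with #power-domain-cells = <1> in
 * the provider node.
 */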
2624 
2625 /**
2626  * of_genpd_del_provider() - Remove a previously registered PM domain provider
2627  * @np: Device node pointer associated with the PM domain provider
2628  */
2629 void of_genpd_del_provider(struct device_node *np)
2630 {
2631 	struct of_genpd_provider *cp, *tmp;
2632 	struct generic_pm_domain *gpd;
2633 
2634 	mutex_lock(&gpd_list_lock);
2635 	mutex_lock(&of_genpd_mutex);
2636 	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
2637 		if (cp->node == np) {
2638 			/*
2639 			 * For each PM domain associated with the
2640 			 * provider, set the 'has_provider' to false
2641 			 * so that the PM domain can be safely removed.
2642 			 */
2643 			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2644 				if (gpd->provider == &np->fwnode) {
2645 					gpd->has_provider = false;
2646 
2647 					if (genpd_is_opp_table_fw(gpd) || !gpd->set_performance_state)
2648 						continue;
2649 
2650 					dev_pm_opp_put_opp_table(gpd->opp_table);
2651 					dev_pm_opp_of_remove_table(&gpd->dev);
2652 				}
2653 			}
2654 
2655 			fwnode_dev_initialized(&cp->node->fwnode, false);
2656 			list_del(&cp->link);
2657 			of_node_put(cp->node);
2658 			kfree(cp);
2659 			break;
2660 		}
2661 	}
2662 	mutex_unlock(&of_genpd_mutex);
2663 	mutex_unlock(&gpd_list_lock);
2664 }
2665 EXPORT_SYMBOL_GPL(of_genpd_del_provider);
2666 
2667 /**
2668  * genpd_get_from_provider() - Look-up PM domain
2669  * @genpdspec: OF phandle args to use for look-up
2670  *
2671  * Looks for a PM domain provider under the node specified by @genpdspec and,
2672  * if found, uses the provider's xlate function to map the phandle args to a
2673  * PM domain.
2674  *
2675  * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2676  * on failure.
2677  */
2678 static struct generic_pm_domain *genpd_get_from_provider(
2679 					const struct of_phandle_args *genpdspec)
2680 {
2681 	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2682 	struct of_genpd_provider *provider;
2683 
2684 	if (!genpdspec)
2685 		return ERR_PTR(-EINVAL);
2686 
2687 	mutex_lock(&of_genpd_mutex);
2688 
2689 	/* Check if we have such a provider in our array */
2690 	list_for_each_entry(provider, &of_genpd_providers, link) {
2691 		if (provider->node == genpdspec->np)
2692 			genpd = provider->xlate(genpdspec, provider->data);
2693 		if (!IS_ERR(genpd))
2694 			break;
2695 	}
2696 
2697 	mutex_unlock(&of_genpd_mutex);
2698 
2699 	return genpd;
2700 }
2701 
2702 /**
2703  * of_genpd_add_device() - Add a device to an I/O PM domain
2704  * @genpdspec: OF phandle args to use for look-up PM domain
2705  * @dev: Device to be added.
2706  *
2707  * Looks up an I/O PM domain based upon the phandle args provided and adds
2708  * the device to the PM domain. Returns a negative error code on failure.
2709  */
2710 int of_genpd_add_device(const struct of_phandle_args *genpdspec, struct device *dev)
2711 {
2712 	struct generic_pm_domain *genpd;
2713 	int ret;
2714 
2715 	if (!dev)
2716 		return -EINVAL;
2717 
2718 	mutex_lock(&gpd_list_lock);
2719 
2720 	genpd = genpd_get_from_provider(genpdspec);
2721 	if (IS_ERR(genpd)) {
2722 		ret = PTR_ERR(genpd);
2723 		goto out;
2724 	}
2725 
2726 	ret = genpd_add_device(genpd, dev, dev);
2727 
2728 out:
2729 	mutex_unlock(&gpd_list_lock);
2730 
2731 	return ret;
2732 }
2733 EXPORT_SYMBOL_GPL(of_genpd_add_device);
2734 
2735 /**
2736  * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2737  * @parent_spec: OF phandle args to use for parent PM domain look-up
2738  * @subdomain_spec: OF phandle args to use for subdomain look-up
2739  *
2740  * Looks up a parent PM domain and a subdomain based upon the phandle args
2741  * provided and adds the subdomain to the parent PM domain. Returns a
2742  * negative error code on failure.
2743  */
2744 int of_genpd_add_subdomain(const struct of_phandle_args *parent_spec,
2745 			   const struct of_phandle_args *subdomain_spec)
2746 {
2747 	struct generic_pm_domain *parent, *subdomain;
2748 	int ret;
2749 
2750 	mutex_lock(&gpd_list_lock);
2751 
2752 	parent = genpd_get_from_provider(parent_spec);
2753 	if (IS_ERR(parent)) {
2754 		ret = PTR_ERR(parent);
2755 		goto out;
2756 	}
2757 
2758 	subdomain = genpd_get_from_provider(subdomain_spec);
2759 	if (IS_ERR(subdomain)) {
2760 		ret = PTR_ERR(subdomain);
2761 		goto out;
2762 	}
2763 
2764 	ret = genpd_add_subdomain(parent, subdomain);
2765 
2766 out:
2767 	mutex_unlock(&gpd_list_lock);
2768 
2769 	return ret == -ENOENT ? -EPROBE_DEFER : ret;
2770 }
2771 EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
2772 
2773 /**
2774  * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
2775  * @parent_spec: OF phandle args to use for parent PM domain look-up
2776  * @subdomain_spec: OF phandle args to use for subdomain look-up
2777  *
2778  * Looks up a parent PM domain and a subdomain based upon the phandle args
2779  * provided and removes the subdomain from the parent PM domain. Returns a
2780  * negative error code on failure.
2781  */
2782 int of_genpd_remove_subdomain(const struct of_phandle_args *parent_spec,
2783 			      const struct of_phandle_args *subdomain_spec)
2784 {
2785 	struct generic_pm_domain *parent, *subdomain;
2786 	int ret;
2787 
2788 	mutex_lock(&gpd_list_lock);
2789 
2790 	parent = genpd_get_from_provider(parent_spec);
2791 	if (IS_ERR(parent)) {
2792 		ret = PTR_ERR(parent);
2793 		goto out;
2794 	}
2795 
2796 	subdomain = genpd_get_from_provider(subdomain_spec);
2797 	if (IS_ERR(subdomain)) {
2798 		ret = PTR_ERR(subdomain);
2799 		goto out;
2800 	}
2801 
2802 	ret = pm_genpd_remove_subdomain(parent, subdomain);
2803 
2804 out:
2805 	mutex_unlock(&gpd_list_lock);
2806 
2807 	return ret;
2808 }
2809 EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
2810 
2811 /**
2812  * of_genpd_remove_last - Remove the last PM domain registered for a provider
2813  * @np: Pointer to device node associated with provider
2814  *
2815  * Find the last PM domain that was added by a particular provider and
2816  * remove this PM domain from the list of PM domains. The provider is
2817  * identified by the device node that is passed. The PM domain will only
2818  * be removed if the provider associated with the domain has been
2819  * removed.
2820  *
2821  * Returns a valid pointer to struct generic_pm_domain on success or
2822  * ERR_PTR() on failure.
2823  */
2824 struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
2825 {
2826 	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
2827 	int ret;
2828 
2829 	if (IS_ERR_OR_NULL(np))
2830 		return ERR_PTR(-EINVAL);
2831 
2832 	mutex_lock(&gpd_list_lock);
2833 	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
2834 		if (gpd->provider == &np->fwnode) {
2835 			ret = genpd_remove(gpd);
2836 			genpd = ret ? ERR_PTR(ret) : gpd;
2837 			break;
2838 		}
2839 	}
2840 	mutex_unlock(&gpd_list_lock);
2841 
2842 	return genpd;
2843 }
2844 EXPORT_SYMBOL_GPL(of_genpd_remove_last);
2845 
2846 static void genpd_release_dev(struct device *dev)
2847 {
2848 	of_node_put(dev->of_node);
2849 	kfree(dev);
2850 }
2851 
2852 static const struct bus_type genpd_bus_type = {
2853 	.name		= "genpd",
2854 };
2855 
2856 /**
2857  * genpd_dev_pm_detach - Detach a device from its PM domain.
2858  * @dev: Device to detach.
2859  * @power_off: Currently not used
2860  *
2861  * Try to locate a corresponding generic PM domain, which the device was
2862  * attached to previously. If such is found, the device is detached from it.
2863  */
2864 static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2865 {
2866 	struct generic_pm_domain *pd;
2867 	unsigned int i;
2868 	int ret = 0;
2869 
2870 	pd = dev_to_genpd(dev);
2871 	if (IS_ERR(pd))
2872 		return;
2873 
2874 	dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2875 
2876 	/* Drop the default performance state */
2877 	if (dev_gpd_data(dev)->default_pstate) {
2878 		dev_pm_genpd_set_performance_state(dev, 0);
2879 		dev_gpd_data(dev)->default_pstate = 0;
2880 	}
2881 
2882 	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2883 		ret = genpd_remove_device(pd, dev);
2884 		if (ret != -EAGAIN)
2885 			break;
2886 
2887 		mdelay(i);
2888 		cond_resched();
2889 	}
2890 
2891 	if (ret < 0) {
2892 		dev_err(dev, "failed to remove from PM domain %s: %d\n",
2893 			pd->name, ret);
2894 		return;
2895 	}
2896 
2897 	/* Check if PM domain can be powered off after removing this device. */
2898 	genpd_queue_power_off_work(pd);
2899 
2900 	/* Unregister the device if it was created by genpd. */
2901 	if (dev->bus == &genpd_bus_type)
2902 		device_unregister(dev);
2903 }
2904 
2905 static void genpd_dev_pm_sync(struct device *dev)
2906 {
2907 	struct generic_pm_domain *pd;
2908 
2909 	pd = dev_to_genpd(dev);
2910 	if (IS_ERR(pd))
2911 		return;
2912 
2913 	genpd_queue_power_off_work(pd);
2914 }
2915 
2916 static int genpd_set_required_opp_dev(struct device *dev,
2917 				      struct device *base_dev)
2918 {
2919 	struct dev_pm_opp_config config = {
2920 		.required_dev = dev,
2921 	};
2922 	int ret;
2923 
2924 	/* Limit support to non-providers for now. */
2925 	if (of_property_present(base_dev->of_node, "#power-domain-cells"))
2926 		return 0;
2927 
2928 	if (!dev_pm_opp_of_has_required_opp(base_dev))
2929 		return 0;
2930 
2931 	ret = dev_pm_opp_set_config(base_dev, &config);
2932 	if (ret < 0)
2933 		return ret;
2934 
2935 	dev_gpd_data(dev)->opp_token = ret;
2936 	return 0;
2937 }
2938 
2939 static int genpd_set_required_opp(struct device *dev, unsigned int index)
2940 {
2941 	int ret, pstate;
2942 
2943 	/* Set the default performance state */
2944 	pstate = of_get_required_opp_performance_state(dev->of_node, index);
2945 	if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) {
2946 		ret = pstate;
2947 		goto err;
2948 	} else if (pstate > 0) {
2949 		ret = dev_pm_genpd_set_performance_state(dev, pstate);
2950 		if (ret)
2951 			goto err;
2952 		dev_gpd_data(dev)->default_pstate = pstate;
2953 	}
2954 
2955 	return 0;
2956 err:
2957 	dev_err(dev, "failed to set required performance state for power-domain %s: %d\n",
2958 		dev_to_genpd(dev)->name, ret);
2959 	return ret;
2960 }
2961 
2962 static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
2963 				 unsigned int index, unsigned int num_domains,
2964 				 bool power_on)
2965 {
2966 	struct of_phandle_args pd_args;
2967 	struct generic_pm_domain *pd;
2968 	int ret;
2969 
2970 	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
2971 				"#power-domain-cells", index, &pd_args);
2972 	if (ret < 0)
2973 		return ret;
2974 
2975 	mutex_lock(&gpd_list_lock);
2976 	pd = genpd_get_from_provider(&pd_args);
2977 	of_node_put(pd_args.np);
2978 	if (IS_ERR(pd)) {
2979 		mutex_unlock(&gpd_list_lock);
2980 		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2981 			__func__, PTR_ERR(pd));
2982 		return driver_deferred_probe_check_state(base_dev);
2983 	}
2984 
2985 	dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2986 
2987 	ret = genpd_add_device(pd, dev, base_dev);
2988 	mutex_unlock(&gpd_list_lock);
2989 
2990 	if (ret < 0)
2991 		return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name);
2992 
2993 	dev->pm_domain->detach = genpd_dev_pm_detach;
2994 	dev->pm_domain->sync = genpd_dev_pm_sync;
2995 
2996 	/*
2997 	 * For a single PM domain the index of the required OPP must be zero, so
2998 	 * let's try to assign a required dev in that case. In the multiple PM
2999 	 * domains case, we need platform code to specify the index.
3000 	 */
3001 	if (num_domains == 1) {
3002 		ret = genpd_set_required_opp_dev(dev, base_dev);
3003 		if (ret)
3004 			goto err;
3005 	}
3006 
3007 	ret = genpd_set_required_opp(dev, index);
3008 	if (ret)
3009 		goto err;
3010 
3011 	if (power_on) {
3012 		genpd_lock(pd);
3013 		ret = genpd_power_on(pd, 0);
3014 		genpd_unlock(pd);
3015 	}
3016 
3017 	if (ret) {
3018 		/* Drop the default performance state */
3019 		if (dev_gpd_data(dev)->default_pstate) {
3020 			dev_pm_genpd_set_performance_state(dev, 0);
3021 			dev_gpd_data(dev)->default_pstate = 0;
3022 		}
3023 
3024 		genpd_remove_device(pd, dev);
3025 		return -EPROBE_DEFER;
3026 	}
3027 
3028 	return 1;
3029 
3030 err:
3031 	genpd_remove_device(pd, dev);
3032 	return ret;
3033 }
3034 
3035 /**
3036  * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
3037  * @dev: Device to attach.
3038  *
3039  * Parse the device's OF node to find a PM domain specifier. If one is found,
3040  * attach the device to the retrieved pm_domain ops.
3041  *
3042  * Returns 1 when a PM domain has been attached successfully, 0 when the device
3043  * doesn't need a PM domain or when multiple power-domains exist for it, else a
3044  * negative error code. Note that if a power-domain exists for the device, but
3045  * it cannot be found or turned on, then -EPROBE_DEFER is returned to ensure
3046  * that the device is not probed and to retry again later.
3047  */
3048 int genpd_dev_pm_attach(struct device *dev)
3049 {
3050 	if (!dev->of_node)
3051 		return 0;
3052 
3053 	/*
3054 	 * Devices with multiple PM domains must be attached separately, as we
3055 	 * can only attach one PM domain per device.
3056 	 */
3057 	if (of_count_phandle_with_args(dev->of_node, "power-domains",
3058 				       "#power-domain-cells") != 1)
3059 		return 0;
3060 
3061 	return __genpd_dev_pm_attach(dev, dev, 0, 1, true);
3062 }
3063 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
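/*
 * Illustrative sketch: bus code normally reaches this helper indirectly, via
 * dev_pm_domain_attach(), while probing a device. A hypothetical probe path
 * could do roughly:
 *
 *	ret = genpd_dev_pm_attach(dev);
 *	if (ret < 0)
 *		return ret;
 *
 * A negative value (typically -EPROBE_DEFER) aborts the probe, 0 means no
 * single PM domain was found and probing continues without genpd, and 1 means
 * the device is now part of a PM domain.
 */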
3064 
3065 /**
3066  * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
3067  * @dev: The device used to lookup the PM domain.
3068  * @index: The index of the PM domain.
3069  *
3070  * Parse device's OF node to find a PM domain specifier at the provided @index.
3071  * If such is found, creates a virtual device and attaches it to the retrieved
3072  * pm_domain ops. To deal with detaching of the virtual device, the ->detach()
3073  * callback in the struct dev_pm_domain is assigned to genpd_dev_pm_detach().
3074  *
3075  * Returns the created virtual device when a PM domain has been attached
3076  * successfully, NULL when the device doesn't need a PM domain, else an
3077  * ERR_PTR() in case of failures. If a power-domain exists for the device, but
3078  * cannot be found or turned on, then ERR_PTR(-EPROBE_DEFER) is returned to
3079  * ensure that the device is not probed and to retry again later.
3080  */
3081 struct device *genpd_dev_pm_attach_by_id(struct device *dev,
3082 					 unsigned int index)
3083 {
3084 	struct device *virt_dev;
3085 	int num_domains;
3086 	int ret;
3087 
3088 	if (!dev->of_node)
3089 		return NULL;
3090 
3091 	/* Verify that the index is within a valid range. */
3092 	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
3093 						 "#power-domain-cells");
3094 	if (index >= num_domains)
3095 		return NULL;
3096 
3097 	/* Allocate and register device on the genpd bus. */
3098 	virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
3099 	if (!virt_dev)
3100 		return ERR_PTR(-ENOMEM);
3101 
3102 	dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
3103 	virt_dev->bus = &genpd_bus_type;
3104 	virt_dev->release = genpd_release_dev;
3105 	virt_dev->of_node = of_node_get(dev->of_node);
3106 
3107 	ret = device_register(virt_dev);
3108 	if (ret) {
3109 		put_device(virt_dev);
3110 		return ERR_PTR(ret);
3111 	}
3112 
3113 	/* Try to attach the device to the PM domain at the specified index. */
3114 	ret = __genpd_dev_pm_attach(virt_dev, dev, index, num_domains, false);
3115 	if (ret < 1) {
3116 		device_unregister(virt_dev);
3117 		return ret ? ERR_PTR(ret) : NULL;
3118 	}
3119 
3120 	pm_runtime_enable(virt_dev);
3121 	genpd_queue_power_off_work(dev_to_genpd(virt_dev));
3122 
3123 	return virt_dev;
3124 }
3125 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
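/*
 * Illustrative sketch for a consumer listing multiple power-domains in DT.
 * The index 1 and priv->perf_dev are assumptions made up for the example:
 *
 *	priv->perf_dev = genpd_dev_pm_attach_by_id(dev, 1);
 *	if (IS_ERR(priv->perf_dev))
 *		return PTR_ERR(priv->perf_dev);
 *
 * The returned virtual device (which may also be NULL when no PM domain is
 * needed) is what the consumer subsequently runtime-PM controls on behalf of
 * that specific power domain.
 */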
3126 
3127 /**
3128  * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
3129  * @dev: The device used to lookup the PM domain.
3130  * @name: The name of the PM domain.
3131  *
3132  * Parse device's OF node to find a PM domain specifier using the
3133  * power-domain-names DT property. For further description see
3134  * genpd_dev_pm_attach_by_id().
3135  */
3136 struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
3137 {
3138 	int index;
3139 
3140 	if (!dev->of_node)
3141 		return NULL;
3142 
3143 	index = of_property_match_string(dev->of_node, "power-domain-names",
3144 					 name);
3145 	if (index < 0)
3146 		return NULL;
3147 
3148 	return genpd_dev_pm_attach_by_id(dev, index);
3149 }
3150 
3151 static const struct of_device_id idle_state_match[] = {
3152 	{ .compatible = "domain-idle-state", },
3153 	{ }
3154 };
3155 
3156 static int genpd_parse_state(struct genpd_power_state *genpd_state,
3157 				    struct device_node *state_node)
3158 {
3159 	int err;
3160 	u32 residency;
3161 	u32 entry_latency, exit_latency;
3162 
3163 	err = of_property_read_u32(state_node, "entry-latency-us",
3164 						&entry_latency);
3165 	if (err) {
3166 		pr_debug(" * %pOF missing entry-latency-us property\n",
3167 			 state_node);
3168 		return -EINVAL;
3169 	}
3170 
3171 	err = of_property_read_u32(state_node, "exit-latency-us",
3172 						&exit_latency);
3173 	if (err) {
3174 		pr_debug(" * %pOF missing exit-latency-us property\n",
3175 			 state_node);
3176 		return -EINVAL;
3177 	}
3178 
3179 	err = of_property_read_u32(state_node, "min-residency-us", &residency);
3180 	if (!err)
3181 		genpd_state->residency_ns = 1000LL * residency;
3182 
3183 	genpd_state->power_on_latency_ns = 1000LL * exit_latency;
3184 	genpd_state->power_off_latency_ns = 1000LL * entry_latency;
3185 	genpd_state->fwnode = &state_node->fwnode;
3186 
3187 	return 0;
3188 }
3189 
3190 static int genpd_iterate_idle_states(struct device_node *dn,
3191 				     struct genpd_power_state *states)
3192 {
3193 	int ret;
3194 	struct of_phandle_iterator it;
3195 	struct device_node *np;
3196 	int i = 0;
3197 
3198 	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
3199 	if (ret <= 0)
3200 		return ret == -ENOENT ? 0 : ret;
3201 
3202 	/* Loop over the phandles until all the requested entries are found */
3203 	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
3204 		np = it.node;
3205 		if (!of_match_node(idle_state_match, np))
3206 			continue;
3207 
3208 		if (!of_device_is_available(np))
3209 			continue;
3210 
3211 		if (states) {
3212 			ret = genpd_parse_state(&states[i], np);
3213 			if (ret) {
3214 				pr_err("Parsing idle state node %pOF failed with err %d\n",
3215 				       np, ret);
3216 				of_node_put(np);
3217 				return ret;
3218 			}
3219 		}
3220 		i++;
3221 	}
3222 
3223 	return i;
3224 }
3225 
3226 /**
3227  * of_genpd_parse_idle_states: Return array of idle states for the genpd.
3228  *
3229  * @dn: The genpd device node
3230  * @states: The pointer to which the state array will be saved.
3231  * @n: The count of elements in the array returned from this function.
3232  *
3233  * Returns the device states parsed from the OF node. The memory for the states
3234  * is allocated by this function and is the responsibility of the caller to
3235  * free the memory after use. If any or zero compatible domain idle states is
3236  * found it returns 0 and in case of errors, a negative error code is returned.
3237  */
3238 int of_genpd_parse_idle_states(struct device_node *dn,
3239 			struct genpd_power_state **states, int *n)
3240 {
3241 	struct genpd_power_state *st;
3242 	int ret;
3243 
3244 	ret = genpd_iterate_idle_states(dn, NULL);
3245 	if (ret < 0)
3246 		return ret;
3247 
3248 	if (!ret) {
3249 		*states = NULL;
3250 		*n = 0;
3251 		return 0;
3252 	}
3253 
3254 	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
3255 	if (!st)
3256 		return -ENOMEM;
3257 
3258 	ret = genpd_iterate_idle_states(dn, st);
3259 	if (ret <= 0) {
3260 		kfree(st);
3261 		return ret < 0 ? ret : -EINVAL;
3262 	}
3263 
3264 	*states = st;
3265 	*n = ret;
3266 
3267 	return 0;
3268 }
3269 EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
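/*
 * Illustrative sketch, assuming np is the provider's device node carrying a
 * "domain-idle-states" property and foo_pd is the domain about to be
 * initialized:
 *
 *	struct genpd_power_state *states;
 *	int nr_states, ret;
 *
 *	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
 *	if (ret)
 *		return ret;
 *
 *	if (nr_states) {
 *		foo_pd.states = states;
 *		foo_pd.state_count = nr_states;
 *	}
 *
 *	ret = pm_genpd_init(&foo_pd, NULL, false);
 *
 * The caller owns the returned array and is responsible for freeing it once
 * it is no longer used.
 */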
3270 
3271 static int __init genpd_bus_init(void)
3272 {
3273 	return bus_register(&genpd_bus_type);
3274 }
3275 core_initcall(genpd_bus_init);
3276 
3277 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
3278 
3279 
3280 /***        debugfs support        ***/
3281 
3282 #ifdef CONFIG_DEBUG_FS
3283 /*
3284  * TODO: This function is a slightly modified version of rtpm_status_show
3285  * from sysfs.c, so generalize it.
3286  */
3287 static void rtpm_status_str(struct seq_file *s, struct device *dev)
3288 {
3289 	static const char * const status_lookup[] = {
3290 		[RPM_ACTIVE] = "active",
3291 		[RPM_RESUMING] = "resuming",
3292 		[RPM_SUSPENDED] = "suspended",
3293 		[RPM_SUSPENDING] = "suspending"
3294 	};
3295 	const char *p = "";
3296 
3297 	if (dev->power.runtime_error)
3298 		p = "error";
3299 	else if (dev->power.disable_depth)
3300 		p = "unsupported";
3301 	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
3302 		p = status_lookup[dev->power.runtime_status];
3303 	else
3304 		WARN_ON(1);
3305 
3306 	seq_printf(s, "%-26s  ", p);
3307 }
3308 
3309 static void perf_status_str(struct seq_file *s, struct device *dev)
3310 {
3311 	struct generic_pm_domain_data *gpd_data;
3312 
3313 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
3314 
3315 	seq_printf(s, "%-10u  ", gpd_data->performance_state);
3316 }
3317 
3318 static void mode_status_str(struct seq_file *s, struct device *dev)
3319 {
3320 	struct generic_pm_domain_data *gpd_data;
3321 
3322 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
3323 
3324 	seq_printf(s, "%2s", gpd_data->hw_mode ? "HW" : "SW");
3325 }
3326 
3327 static int genpd_summary_one(struct seq_file *s,
3328 			struct generic_pm_domain *genpd)
3329 {
3330 	static const char * const status_lookup[] = {
3331 		[GENPD_STATE_ON] = "on",
3332 		[GENPD_STATE_OFF] = "off"
3333 	};
3334 	struct pm_domain_data *pm_data;
3335 	struct gpd_link *link;
3336 	char state[16];
3337 	int ret;
3338 
3339 	ret = genpd_lock_interruptible(genpd);
3340 	if (ret)
3341 		return -ERESTARTSYS;
3342 
3343 	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
3344 		goto exit;
3345 	if (!genpd_status_on(genpd))
3346 		snprintf(state, sizeof(state), "%s-%u",
3347 			 status_lookup[genpd->status], genpd->state_idx);
3348 	else
3349 		snprintf(state, sizeof(state), "%s",
3350 			 status_lookup[genpd->status]);
3351 	seq_printf(s, "%-30s  %-30s  %u", dev_name(&genpd->dev), state, genpd->performance_state);
3352 
3353 	/*
3354 	 * Modifications on the list require holding locks on both
3355 	 * parent and child, so we are safe.
3356 	 * Also the device name is immutable.
3357 	 */
3358 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
3359 		if (list_is_first(&link->parent_node, &genpd->parent_links))
3360 			seq_printf(s, "\n%48s", " ");
3361 		seq_printf(s, "%s", link->child->name);
3362 		if (!list_is_last(&link->parent_node, &genpd->parent_links))
3363 			seq_puts(s, ", ");
3364 	}
3365 
3366 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3367 		seq_printf(s, "\n    %-30s  ", dev_name(pm_data->dev));
3368 		rtpm_status_str(s, pm_data->dev);
3369 		perf_status_str(s, pm_data->dev);
3370 		mode_status_str(s, pm_data->dev);
3371 	}
3372 
3373 	seq_puts(s, "\n");
3374 exit:
3375 	genpd_unlock(genpd);
3376 
3377 	return 0;
3378 }
3379 
3380 static int summary_show(struct seq_file *s, void *data)
3381 {
3382 	struct generic_pm_domain *genpd;
3383 	int ret = 0;
3384 
3385 	seq_puts(s, "domain                          status          children        performance\n");
3386 	seq_puts(s, "    /device                         runtime status                  managed by\n");
3387 	seq_puts(s, "------------------------------------------------------------------------------\n");
3388 
3389 	ret = mutex_lock_interruptible(&gpd_list_lock);
3390 	if (ret)
3391 		return -ERESTARTSYS;
3392 
3393 	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
3394 		ret = genpd_summary_one(s, genpd);
3395 		if (ret)
3396 			break;
3397 	}
3398 	mutex_unlock(&gpd_list_lock);
3399 
3400 	return ret;
3401 }
3402 
3403 static int status_show(struct seq_file *s, void *data)
3404 {
3405 	static const char * const status_lookup[] = {
3406 		[GENPD_STATE_ON] = "on",
3407 		[GENPD_STATE_OFF] = "off"
3408 	};
3409 
3410 	struct generic_pm_domain *genpd = s->private;
3411 	int ret = 0;
3412 
3413 	ret = genpd_lock_interruptible(genpd);
3414 	if (ret)
3415 		return -ERESTARTSYS;
3416 
3417 	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
3418 		goto exit;
3419 
3420 	if (genpd->status == GENPD_STATE_OFF)
3421 		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
3422 			genpd->state_idx);
3423 	else
3424 		seq_printf(s, "%s\n", status_lookup[genpd->status]);
3425 exit:
3426 	genpd_unlock(genpd);
3427 	return ret;
3428 }
3429 
3430 static int sub_domains_show(struct seq_file *s, void *data)
3431 {
3432 	struct generic_pm_domain *genpd = s->private;
3433 	struct gpd_link *link;
3434 	int ret = 0;
3435 
3436 	ret = genpd_lock_interruptible(genpd);
3437 	if (ret)
3438 		return -ERESTARTSYS;
3439 
3440 	list_for_each_entry(link, &genpd->parent_links, parent_node)
3441 		seq_printf(s, "%s\n", link->child->name);
3442 
3443 	genpd_unlock(genpd);
3444 	return ret;
3445 }
3446 
3447 static int idle_states_show(struct seq_file *s, void *data)
3448 {
3449 	struct generic_pm_domain *genpd = s->private;
3450 	u64 now, delta, idle_time = 0;
3451 	unsigned int i;
3452 	int ret = 0;
3453 
3454 	ret = genpd_lock_interruptible(genpd);
3455 	if (ret)
3456 		return -ERESTARTSYS;
3457 
3458 	seq_puts(s, "State          Time Spent(ms) Usage          Rejected\n");
3459 
3460 	for (i = 0; i < genpd->state_count; i++) {
3461 		idle_time += genpd->states[i].idle_time;
3462 
3463 		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3464 			now = ktime_get_mono_fast_ns();
3465 			if (now > genpd->accounting_time) {
3466 				delta = now - genpd->accounting_time;
3467 				idle_time += delta;
3468 			}
3469 		}
3470 
3471 		do_div(idle_time, NSEC_PER_MSEC);
3472 		seq_printf(s, "S%-13i %-14llu %-14llu %llu\n", i, idle_time,
3473 			   genpd->states[i].usage, genpd->states[i].rejected);
3474 	}
3475 
3476 	genpd_unlock(genpd);
3477 	return ret;
3478 }
3479 
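/*
 * active_time_show - Print how long the domain has been powered on, in ms.
 * If the domain is currently on, the running interval since the last
 * accounting update is included. Backs the per-domain debugfs file
 * "active_time".
 */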
static int active_time_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	u64 now, on_time, delta = 0;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (genpd->status == GENPD_STATE_ON) {
		now = ktime_get_mono_fast_ns();
		if (now > genpd->accounting_time)
			delta = now - genpd->accounting_time;
	}

	on_time = genpd->on_time + delta;
	do_div(on_time, NSEC_PER_MSEC);
	seq_printf(s, "%llu ms\n", on_time);

	genpd_unlock(genpd);
	return ret;
}

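/*
 * total_idle_time_show - Print the total time the domain has spent powered
 * off, summed over all of its idle states, in ms. Backs the per-domain
 * debugfs file "total_idle_time".
 */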
static int total_idle_time_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	u64 now, delta, total = 0;
	unsigned int i;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	for (i = 0; i < genpd->state_count; i++) {
		total += genpd->states[i].idle_time;

		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
			now = ktime_get_mono_fast_ns();
			if (now > genpd->accounting_time) {
				delta = now - genpd->accounting_time;
				total += delta;
			}
		}
	}

	do_div(total, NSEC_PER_MSEC);
	seq_printf(s, "%llu ms\n", total);

	genpd_unlock(genpd);
	return ret;
}

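/*
 * devices_show - List the names of all devices attached to the domain, one
 * per line. Backs the per-domain debugfs file "devices".
 */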
static int devices_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	struct pm_domain_data *pm_data;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(pm_data, &genpd->dev_list, list_node)
		seq_printf(s, "%s\n", dev_name(pm_data->dev));

	genpd_unlock(genpd);
	return ret;
}

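/*
 * perf_state_show - Print the currently aggregated performance state of the
 * domain. Backs the per-domain debugfs file "perf_state", which is only
 * created for domains that implement ->set_performance_state().
 */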
static int perf_state_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;

	if (genpd_lock_interruptible(genpd))
		return -ERESTARTSYS;

	seq_printf(s, "%u\n", genpd->performance_state);

	genpd_unlock(genpd);
	return 0;
}

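/*
 * DEFINE_SHOW_ATTRIBUTE() generates the <name>_open() wrapper and the
 * <name>_fops file_operations for each of the single-open seq_file show
 * functions above.
 */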
DEFINE_SHOW_ATTRIBUTE(summary);
DEFINE_SHOW_ATTRIBUTE(status);
DEFINE_SHOW_ATTRIBUTE(sub_domains);
DEFINE_SHOW_ATTRIBUTE(idle_states);
DEFINE_SHOW_ATTRIBUTE(active_time);
DEFINE_SHOW_ATTRIBUTE(total_idle_time);
DEFINE_SHOW_ATTRIBUTE(devices);
DEFINE_SHOW_ATTRIBUTE(perf_state);

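/*
 * genpd_debug_add - Create the per-domain debugfs directory (named after the
 * genpd device) and populate it with the read-only files defined above. The
 * "perf_state" file is only created when the domain supports performance
 * states. Presumably this is also called when a domain is registered after
 * genpd_debug_init() has run, so late domains show up under pm_genpd too.
 */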
static void genpd_debug_add(struct generic_pm_domain *genpd)
{
	struct dentry *d;

	if (!genpd_debugfs_dir)
		return;

	d = debugfs_create_dir(dev_name(&genpd->dev), genpd_debugfs_dir);

	debugfs_create_file("current_state", 0444,
			    d, genpd, &status_fops);
	debugfs_create_file("sub_domains", 0444,
			    d, genpd, &sub_domains_fops);
	debugfs_create_file("idle_states", 0444,
			    d, genpd, &idle_states_fops);
	debugfs_create_file("active_time", 0444,
			    d, genpd, &active_time_fops);
	debugfs_create_file("total_idle_time", 0444,
			    d, genpd, &total_idle_time_fops);
	debugfs_create_file("devices", 0444,
			    d, genpd, &devices_fops);
	if (genpd->set_performance_state)
		debugfs_create_file("perf_state", 0444,
				    d, genpd, &perf_state_fops);
}

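/*
 * genpd_debug_init - Late initcall that creates the top-level "pm_genpd"
 * debugfs directory and the global "pm_genpd_summary" file, then adds a
 * sub-directory for every genpd registered so far.
 */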
static int __init genpd_debug_init(void)
{
	struct generic_pm_domain *genpd;

	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);

	debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
			    NULL, &summary_fops);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_debug_add(genpd);

	return 0;
}
late_initcall(genpd_debug_init);

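/* Tear down the entire "pm_genpd" debugfs hierarchy on exit. */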
static void __exit genpd_debug_exit(void)
{
	debugfs_remove_recursive(genpd_debugfs_dir);
}
__exitcall(genpd_debug_exit);
#endif /* CONFIG_DEBUG_FS */