1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * drivers/base/power/domain.c - Common code related to device power domains.
4 *
5 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
6 */
7 #define pr_fmt(fmt) "PM: " fmt
8
9 #include <linux/delay.h>
10 #include <linux/idr.h>
11 #include <linux/kernel.h>
12 #include <linux/io.h>
13 #include <linux/platform_device.h>
14 #include <linux/pm_opp.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/pm_domain.h>
17 #include <linux/pm_qos.h>
18 #include <linux/pm_clock.h>
19 #include <linux/slab.h>
20 #include <linux/err.h>
21 #include <linux/sched.h>
22 #include <linux/suspend.h>
23 #include <linux/export.h>
24 #include <linux/cpu.h>
25 #include <linux/debugfs.h>
26
27 /* Provides a unique ID for each genpd device */
28 static DEFINE_IDA(genpd_ida);
29
30 #define GENPD_RETRY_MAX_MS 250 /* Approximate */
31
/*
 * Invoke a per-device genpd callback (e.g. ->stop() or ->start()) from
 * genpd->dev_ops for @dev. Evaluates to the callback's return value, or to
 * (type)0 when the domain does not provide that callback.
 */
#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	}							\
	__ret;							\
})
43
/* Global list of registered generic PM domains, protected by gpd_list_lock. */
static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);
46
/*
 * Lock operations for a genpd. Three implementations exist below, backed by
 * a mutex, a spinlock or a raw spinlock respectively; a domain selects one
 * via its ->lock_ops pointer.
 */
struct genpd_lock_ops {
	void (*lock)(struct generic_pm_domain *genpd);
	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
	int (*lock_interruptible)(struct generic_pm_domain *genpd);
	void (*unlock)(struct generic_pm_domain *genpd);
};
53
/* Acquire the genpd lock - mutex-based variant. */
static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
	mutex_lock(&genpd->mlock);
}
58
/* Acquire the genpd mutex with lockdep subclass @depth (for parent chains). */
static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
					int depth)
{
	mutex_lock_nested(&genpd->mlock, depth);
}
64
/* Interruptibly acquire the genpd mutex; returns 0 or a negative errno. */
static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
	return mutex_lock_interruptible(&genpd->mlock);
}
69
genpd_unlock_mtx(struct generic_pm_domain * genpd)70 static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
71 {
72 return mutex_unlock(&genpd->mlock);
73 }
74
/* Mutex-based lock operations (callbacks may sleep). */
static const struct genpd_lock_ops genpd_mtx_ops = {
	.lock = genpd_lock_mtx,
	.lock_nested = genpd_lock_nested_mtx,
	.lock_interruptible = genpd_lock_interruptible_mtx,
	.unlock = genpd_unlock_mtx,
};
81
/*
 * Acquire the genpd lock - spinlock-based variant. The saved IRQ flags are
 * stashed in the genpd so the matching unlock can restore them.
 */
static void genpd_lock_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
}
90
/* Acquire the genpd spinlock with lockdep subclass @depth. */
static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
					int depth)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
	genpd->lock_flags = flags;
}
100
/*
 * "Interruptible" acquire of the genpd spinlock. Spinlocks cannot be taken
 * interruptibly, so this always succeeds and returns 0.
 */
static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
	return 0;
}
110
/* Release the genpd spinlock, restoring the IRQ flags saved at lock time. */
static void genpd_unlock_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->slock)
{
	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}
116
/* Spinlock-based lock operations (safe in atomic context). */
static const struct genpd_lock_ops genpd_spin_ops = {
	.lock = genpd_lock_spin,
	.lock_nested = genpd_lock_nested_spin,
	.lock_interruptible = genpd_lock_interruptible_spin,
	.unlock = genpd_unlock_spin,
};
123
/* Acquire the genpd lock - raw-spinlock-based variant. */
static void genpd_lock_raw_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->raw_slock)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&genpd->raw_slock, flags);
	genpd->raw_lock_flags = flags;
}
132
/* Acquire the genpd raw spinlock with lockdep subclass @depth. */
static void genpd_lock_nested_raw_spin(struct generic_pm_domain *genpd,
					int depth)
	__acquires(&genpd->raw_slock)
{
	unsigned long flags;

	raw_spin_lock_irqsave_nested(&genpd->raw_slock, flags, depth);
	genpd->raw_lock_flags = flags;
}
142
/*
 * "Interruptible" acquire of the genpd raw spinlock. As with the plain
 * spinlock variant, this cannot actually be interrupted; always returns 0.
 */
static int genpd_lock_interruptible_raw_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->raw_slock)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&genpd->raw_slock, flags);
	genpd->raw_lock_flags = flags;
	return 0;
}
152
/* Release the genpd raw spinlock, restoring the saved IRQ flags. */
static void genpd_unlock_raw_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->raw_slock)
{
	raw_spin_unlock_irqrestore(&genpd->raw_slock, genpd->raw_lock_flags);
}
158
/* Raw-spinlock-based lock operations. */
static const struct genpd_lock_ops genpd_raw_spin_ops = {
	.lock = genpd_lock_raw_spin,
	.lock_nested = genpd_lock_nested_raw_spin,
	.lock_interruptible = genpd_lock_interruptible_raw_spin,
	.unlock = genpd_unlock_raw_spin,
};
165
/* Dispatch through the domain's selected lock implementation. */
#define genpd_lock(p)			p->lock_ops->lock(p)
#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
#define genpd_unlock(p)			p->lock_ops->unlock(p)

/* Status and GENPD_FLAG_* flag test helpers. */
#define genpd_status_on(genpd)		(genpd->status == GENPD_STATE_ON)
#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
#define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
#define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
#define genpd_is_opp_table_fw(genpd)	(genpd->flags & GENPD_FLAG_OPP_TABLE_FW)
#define genpd_is_dev_name_fw(genpd)	(genpd->flags & GENPD_FLAG_DEV_NAME_FW)
179
irq_safe_dev_in_sleep_domain(struct device * dev,const struct generic_pm_domain * genpd)180 static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
181 const struct generic_pm_domain *genpd)
182 {
183 bool ret;
184
185 ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
186
187 /*
188 * Warn once if an IRQ safe device is attached to a domain, which
189 * callbacks are allowed to sleep. This indicates a suboptimal
190 * configuration for PM, but it doesn't matter for an always on domain.
191 */
192 if (genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd))
193 return ret;
194
195 if (ret)
196 dev_warn_once(dev, "PM domain %s will not be powered off\n",
197 dev_name(&genpd->dev));
198
199 return ret;
200 }
201
202 static int genpd_runtime_suspend(struct device *dev);
203
204 /*
205 * Get the generic PM domain for a particular struct device.
206 * This validates the struct device pointer, the PM domain pointer,
207 * and checks that the PM domain pointer is a real generic PM domain.
208 * Any failure results in NULL being returned.
209 */
dev_to_genpd_safe(struct device * dev)210 static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
211 {
212 if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
213 return NULL;
214
215 /* A genpd's always have its ->runtime_suspend() callback assigned. */
216 if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
217 return pd_to_genpd(dev->pm_domain);
218
219 return NULL;
220 }
221
/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain. Unlike dev_to_genpd_safe(),
 * no validation of the domain type is performed.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}
233
/**
 * dev_to_genpd_dev - Get the struct device of the genpd that @dev belongs to.
 * @dev: A device that is attached to a genpd.
 *
 * Return: The genpd's &struct device on success, or an ERR_PTR() when @dev
 * has no valid PM domain attached.
 */
struct device *dev_to_genpd_dev(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);

	if (IS_ERR(genpd))
		return ERR_CAST(genpd);

	return &genpd->dev;
}
243
/* Invoke the domain's per-device ->stop() callback for @dev, if provided. */
static int genpd_stop_dev(const struct generic_pm_domain *genpd,
			  struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}
249
/* Invoke the domain's per-device ->start() callback for @dev, if provided. */
static int genpd_start_dev(const struct generic_pm_domain *genpd,
			   struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}
255
genpd_sd_counter_dec(struct generic_pm_domain * genpd)256 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
257 {
258 bool ret = false;
259
260 if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
261 ret = !!atomic_dec_and_test(&genpd->sd_count);
262
263 return ret;
264 }
265
/* Increment the subdomain counter, ordering it before subsequent accesses. */
static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}
271
272 #ifdef CONFIG_DEBUG_FS
273 static struct dentry *genpd_debugfs_dir;
274
275 static void genpd_debug_add(struct generic_pm_domain *genpd);
276
/* Remove the debugfs directory for @genpd, if debugfs has been set up. */
static void genpd_debug_remove(struct generic_pm_domain *genpd)
{
	if (!genpd_debugfs_dir)
		return;

	debugfs_lookup_and_remove(dev_name(&genpd->dev), genpd_debugfs_dir);
}
284
genpd_update_accounting(struct generic_pm_domain * genpd)285 static void genpd_update_accounting(struct generic_pm_domain *genpd)
286 {
287 u64 delta, now;
288
289 now = ktime_get_mono_fast_ns();
290 if (now <= genpd->accounting_time)
291 return;
292
293 delta = now - genpd->accounting_time;
294
295 /*
296 * If genpd->status is active, it means we are just
297 * out of off and so update the idle time and vice
298 * versa.
299 */
300 if (genpd->status == GENPD_STATE_ON)
301 genpd->states[genpd->state_idx].idle_time += delta;
302 else
303 genpd->on_time += delta;
304
305 genpd->accounting_time = now;
306 }
307
genpd_reflect_residency(struct generic_pm_domain * genpd)308 static void genpd_reflect_residency(struct generic_pm_domain *genpd)
309 {
310 struct genpd_governor_data *gd = genpd->gd;
311 struct genpd_power_state *state, *next_state;
312 unsigned int state_idx;
313 s64 sleep_ns, target_ns;
314
315 if (!gd || !gd->reflect_residency)
316 return;
317
318 sleep_ns = ktime_to_ns(ktime_sub(ktime_get(), gd->last_enter));
319 state_idx = genpd->state_idx;
320 state = &genpd->states[state_idx];
321 target_ns = state->power_off_latency_ns + state->residency_ns;
322
323 if (sleep_ns < target_ns) {
324 state->above++;
325 } else if (state_idx < (genpd->state_count -1)) {
326 next_state = &genpd->states[state_idx + 1];
327 target_ns = next_state->power_off_latency_ns +
328 next_state->residency_ns;
329
330 if (sleep_ns >= target_ns)
331 state->below++;
332 }
333
334 gd->reflect_residency = false;
335 }
336 #else
/* !CONFIG_DEBUG_FS: debugfs and accounting bookkeeping compile out to no-ops. */
static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
static inline void genpd_reflect_residency(struct generic_pm_domain *genpd) {}
341 #endif
342
/*
 * Re-evaluate the aggregated performance state for @genpd, given that one
 * vote has changed to @state. Returns the new aggregate, i.e. the maximum
 * of @state and the votes of all attached devices and subdomains.
 *
 * Must be called with the genpd's lock held.
 */
static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
					   unsigned int state)
{
	struct generic_pm_domain_data *pd_data;
	struct pm_domain_data *pdd;
	struct gpd_link *link;

	/* New requested state is same as Max requested state */
	if (state == genpd->performance_state)
		return state;

	/* New requested state is higher than Max requested state */
	if (state > genpd->performance_state)
		return state;

	/* Traverse all devices within the domain */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		pd_data = to_gpd_data(pdd);

		if (pd_data->performance_state > state)
			state = pd_data->performance_state;
	}

	/*
	 * Traverse all sub-domains within the domain. This can be
	 * done without any additional locking as the link->performance_state
	 * field is protected by the parent genpd->lock, which is already taken.
	 *
	 * Also note that link->performance_state (subdomain's performance state
	 * requirement to parent domain) is different from
	 * link->child->performance_state (current performance state requirement
	 * of the devices/sub-domains of the subdomain) and so can have a
	 * different value.
	 *
	 * Note that we also take vote from powered-off sub-domains into account
	 * as the same is done for devices right now.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		if (link->performance_state > state)
			state = link->performance_state;
	}

	return state;
}
387
/*
 * Translate @genpd's performance state @pstate into the corresponding state
 * of @parent via their OPP tables. A parent without a
 * ->set_performance_state() callback just passes the state through.
 */
static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
					 struct generic_pm_domain *parent,
					 unsigned int pstate)
{
	if (!parent->set_performance_state)
		return pstate;

	return dev_pm_opp_xlate_performance_state(genpd->opp_table,
						  parent->opp_table,
						  pstate);
}
399
400 static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
401 unsigned int state, int depth);
402
/*
 * Undo a parent performance-state vote set by _genpd_set_parent_state(),
 * restoring link->prev_performance_state and re-propagating the aggregate
 * up the hierarchy. Used on error paths of _genpd_set_performance_state().
 */
static void _genpd_rollback_parent_state(struct gpd_link *link, int depth)
{
	struct generic_pm_domain *parent = link->parent;
	int parent_state;

	genpd_lock_nested(parent, depth + 1);

	parent_state = link->prev_performance_state;
	link->performance_state = parent_state;

	parent_state = _genpd_reeval_performance_state(parent, parent_state);
	if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
		/* Nothing more we can do here but report the failure. */
		pr_err("%s: Failed to roll back to %d performance state\n",
		       parent->name, parent_state);
	}

	genpd_unlock(parent);
}
421
/*
 * Propagate @genpd's new performance state @state to @parent through @link:
 * translate the state, record the previous vote for rollback, then apply
 * the re-evaluated aggregate to the parent. Returns 0 or a negative errno;
 * on failure the link's previous vote is restored.
 */
static int _genpd_set_parent_state(struct generic_pm_domain *genpd,
				   struct gpd_link *link,
				   unsigned int state, int depth)
{
	struct generic_pm_domain *parent = link->parent;
	int parent_state, ret;

	/* Find parent's performance state */
	ret = genpd_xlate_performance_state(genpd, parent, state);
	if (unlikely(ret < 0))
		return ret;

	parent_state = ret;

	genpd_lock_nested(parent, depth + 1);

	/* Keep the old vote so callers can roll back on later failures. */
	link->prev_performance_state = link->performance_state;
	link->performance_state = parent_state;

	parent_state = _genpd_reeval_performance_state(parent, parent_state);
	ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
	if (ret)
		link->performance_state = link->prev_performance_state;

	genpd_unlock(parent);

	return ret;
}
450
/*
 * Apply performance state @state to @genpd and propagate it to its parents.
 * The propagation order depends on direction: when scaling up, parents are
 * raised first; when scaling down, they are lowered last. Any failure rolls
 * back the parent votes already changed. Caller holds the genpd's lock;
 * @depth is the lockdep nesting level.
 */
static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state, int depth)
{
	struct gpd_link *link = NULL;
	int ret;

	if (state == genpd->performance_state)
		return 0;

	/* When scaling up, propagate to parents first in normal order */
	if (state > genpd->performance_state) {
		list_for_each_entry(link, &genpd->child_links, child_node) {
			ret = _genpd_set_parent_state(genpd, link, state, depth);
			if (ret)
				goto rollback_parents_up;
		}
	}

	if (genpd->set_performance_state) {
		ret = genpd->set_performance_state(genpd, state);
		if (ret) {
			/* Undo the parent votes only if we raised them above. */
			if (link)
				goto rollback_parents_up;
			return ret;
		}
	}

	/* When scaling down, propagate to parents last in reverse order */
	if (state < genpd->performance_state) {
		list_for_each_entry_reverse(link, &genpd->child_links, child_node) {
			ret = _genpd_set_parent_state(genpd, link, state, depth);
			if (ret)
				goto rollback_parents_down;
		}
	}

	genpd->performance_state = state;
	return 0;

rollback_parents_up:
	list_for_each_entry_continue_reverse(link, &genpd->child_links, child_node)
		_genpd_rollback_parent_state(link, depth);
	return ret;
rollback_parents_down:
	list_for_each_entry_continue(link, &genpd->child_links, child_node)
		_genpd_rollback_parent_state(link, depth);
	return ret;
}
499
/*
 * Update @dev's performance-state vote to @state and apply the re-evaluated
 * aggregate to its genpd. On failure the device's previous vote is restored.
 * Caller holds the genpd's lock.
 */
static int genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	unsigned int prev_state;
	int ret;

	prev_state = gpd_data->performance_state;
	if (prev_state == state)
		return 0;

	gpd_data->performance_state = state;
	state = _genpd_reeval_performance_state(genpd, state);

	ret = _genpd_set_performance_state(genpd, state, 0);
	if (ret)
		gpd_data->performance_state = prev_state;

	return ret;
}
520
genpd_drop_performance_state(struct device * dev)521 static int genpd_drop_performance_state(struct device *dev)
522 {
523 unsigned int prev_state = dev_gpd_data(dev)->performance_state;
524
525 if (!genpd_set_performance_state(dev, 0))
526 return prev_state;
527
528 return 0;
529 }
530
/*
 * Restore a previously dropped performance-state vote for @dev. A @state of
 * zero means there is nothing to restore.
 */
static void genpd_restore_performance_state(struct device *dev,
					    unsigned int state)
{
	if (!state)
		return;

	genpd_set_performance_state(dev, state);
}
537
/*
 * Set @dev's performance state under the genpd lock. For a runtime-suspended
 * device the request is only cached in rpm_pstate, to be applied when the
 * device resumes; otherwise it is applied immediately.
 */
static int genpd_dev_pm_set_performance_state(struct device *dev,
					      unsigned int state)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);
	int ret = 0;

	genpd_lock(genpd);
	if (pm_runtime_suspended(dev)) {
		dev_gpd_data(dev)->rpm_pstate = state;
	} else {
		ret = genpd_set_performance_state(dev, state);
		if (!ret)
			dev_gpd_data(dev)->rpm_pstate = 0;
	}
	genpd_unlock(genpd);

	return ret;
}
556
/**
 * dev_pm_genpd_set_performance_state - Set performance state of device's power
 * domain.
 *
 * @dev: Device for which the performance-state needs to be set.
 * @state: Target performance state of the device. This can be set as 0 when the
 *	   device doesn't have any performance state constraints left (And so
 *	   the device wouldn't participate anymore to find the target
 *	   performance state of the genpd).
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	/* A device attached to a genpd always has its domain_data set. */
	if (WARN_ON(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data))
		return -EINVAL;

	return genpd_dev_pm_set_performance_state(dev, state);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
587
/**
 * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
 *
 * @dev: Device to handle
 * @next: impending interrupt/wakeup for the device
 *
 * Allow devices to inform of the next wakeup. It's assumed that the users
 * guarantee that the genpd wouldn't be detached while this routine is getting
 * called. Additionally, it's also assumed that @dev isn't runtime suspended
 * (RPM_SUSPENDED).
 * Although devices are expected to update the next_wakeup after the end of
 * their usecase as well, it is possible the devices themselves may not know
 * about that, so stale @next will be ignored when powering off the domain.
 */
void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	/* Timing data is only present when the governor tracks it. */
	td = to_gpd_data(dev->power.subsys_data->domain_data)->td;
	if (td)
		td->next_wakeup = next;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
617
618 /**
619 * dev_pm_genpd_get_next_hrtimer - Return the next_hrtimer for the genpd
620 * @dev: A device that is attached to the genpd.
621 *
622 * This routine should typically be called for a device, at the point of when a
623 * GENPD_NOTIFY_PRE_OFF notification has been sent for it.
624 *
625 * Returns the aggregated value of the genpd's next hrtimer or KTIME_MAX if no
626 * valid value have been set.
627 */
dev_pm_genpd_get_next_hrtimer(struct device * dev)628 ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
629 {
630 struct generic_pm_domain *genpd;
631
632 genpd = dev_to_genpd_safe(dev);
633 if (!genpd)
634 return KTIME_MAX;
635
636 if (genpd->gd)
637 return genpd->gd->next_hrtimer;
638
639 return KTIME_MAX;
640 }
641 EXPORT_SYMBOL_GPL(dev_pm_genpd_get_next_hrtimer);
642
/**
 * dev_pm_genpd_synced_poweroff - Next power off should be synchronous
 *
 * @dev: A device that is attached to the genpd.
 *
 * Allows a consumer of the genpd to notify the provider that the next power off
 * should be synchronous.
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 */
void dev_pm_genpd_synced_poweroff(struct device *dev)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	genpd_lock(genpd);
	genpd->synced_poweroff = true;
	genpd_unlock(genpd);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_synced_poweroff);
667
/**
 * dev_pm_genpd_set_hwmode() - Set the HW mode for the device and its PM domain.
 *
 * @dev: Device for which the HW-mode should be changed.
 * @enable: Value to set or unset the HW-mode.
 *
 * Some PM domains can rely on HW signals to control the power for a device. To
 * allow a consumer driver to switch the behaviour for its device in runtime,
 * which may be beneficial from a latency or energy point of view, this function
 * may be called.
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Return: Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_set_hwmode(struct device *dev, bool enable)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (!genpd->set_hwmode_dev)
		return -EOPNOTSUPP;

	genpd_lock(genpd);

	/* Nothing to do if the requested mode is already set. */
	if (dev_gpd_data(dev)->hw_mode == enable)
		goto out;

	ret = genpd->set_hwmode_dev(genpd, dev, enable);
	if (!ret)
		dev_gpd_data(dev)->hw_mode = enable;

out:
	genpd_unlock(genpd);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_hwmode);
710
/**
 * dev_pm_genpd_get_hwmode() - Get the HW mode setting for the device.
 *
 * @dev: Device for which the current HW-mode setting should be fetched.
 *
 * This helper function allows consumer drivers to fetch the current HW mode
 * setting of the device.
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Return: Returns the HW mode setting of device from SW cached hw_mode.
 */
bool dev_pm_genpd_get_hwmode(struct device *dev)
{
	return dev_gpd_data(dev)->hw_mode;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_get_hwmode);
729
/**
 * dev_pm_genpd_rpm_always_on() - Control if the PM domain can be powered off.
 *
 * @dev: Device for which the PM domain may need to stay on for.
 * @on: Value to set or unset for the condition.
 *
 * For some usecases a consumer driver requires its device to remain power-on
 * from the PM domain perspective during runtime. This function allows the
 * behaviour to be dynamically controlled for a device attached to a genpd.
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Return: Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_rpm_always_on(struct device *dev, bool on)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	/* The flag is checked under the genpd lock in genpd_power_off(). */
	genpd_lock(genpd);
	dev_gpd_data(dev)->rpm_always_on = on;
	genpd_unlock(genpd);

	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_rpm_always_on);
760
761 /**
762 * pm_genpd_inc_rejected() - Adjust the rejected/usage counts for an idle-state.
763 *
764 * @genpd: The PM domain the idle-state belongs to.
765 * @state_idx: The index of the idle-state that failed.
766 *
767 * In some special cases the ->power_off() callback is asynchronously powering
768 * off the PM domain, leading to that it may return zero to indicate success,
769 * even though the actual power-off could fail. To account for this correctly in
770 * the rejected/usage counts for the idle-state statistics, users can call this
771 * function to adjust the values.
772 *
773 * It is assumed that the users guarantee that the genpd doesn't get removed
774 * while this routine is getting called.
775 */
pm_genpd_inc_rejected(struct generic_pm_domain * genpd,unsigned int state_idx)776 void pm_genpd_inc_rejected(struct generic_pm_domain *genpd,
777 unsigned int state_idx)
778 {
779 genpd_lock(genpd);
780 genpd->states[genpd->state_idx].rejected++;
781 genpd->states[genpd->state_idx].usage--;
782 genpd_unlock(genpd);
783 }
784 EXPORT_SYMBOL_GPL(pm_genpd_inc_rejected);
785
/*
 * Power on @genpd: notify consumers (PRE_ON), invoke the ->power_on()
 * callback and, when @timed and a governor is present, measure the latency
 * and record a new worst case. Sends GENPD_NOTIFY_ON on success and
 * GENPD_NOTIFY_OFF on failure. Returns 0 or a negative errno.
 */
static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power on. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_ON,
					     GENPD_NOTIFY_OFF, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_on)
		goto out;

	/* Skip latency measurements for firmware-described states. */
	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
	if (!timed) {
		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		goto err;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		goto out;

	/* New worst-case latency; let the governor re-evaluate. */
	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->gd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 dev_name(&genpd->dev), "on", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	genpd->synced_poweroff = false;
	return 0;
err:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return ret;
}
836
/*
 * Power off @genpd: notify consumers (PRE_OFF), invoke the ->power_off()
 * callback and, when @timed and a governor is present, measure the latency
 * and record a new worst case. Sends GENPD_NOTIFY_OFF on success and
 * GENPD_NOTIFY_ON when the power-off is rejected. Returns 0 or an errno.
 */
static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power off. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_OFF,
					     GENPD_NOTIFY_ON, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_off)
		goto out;

	/* Skip latency measurements for firmware-described states. */
	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
	if (!timed) {
		ret = genpd->power_off(genpd);
		if (ret)
			goto busy;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret)
		goto busy;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		goto out;

	/* New worst-case latency; let the governor re-evaluate. */
	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->gd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 dev_name(&genpd->dev), "off", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return 0;
busy:
	/* The domain stays on; tell consumers it is still powered. */
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	return ret;
}
886
/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_power_off() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	/* pm_wq serializes the work; re-queuing while pending is a no-op. */
	queue_work(pm_wq, &genpd->power_off_work);
}
898
/**
 * genpd_power_off - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
 * RPM status of the releated device is in an intermediate state, not yet turned
 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
 * be RPM_SUSPENDED, while it tries to power off the PM domain.
 * @depth: nesting count for lockdep.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static void genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
			    unsigned int depth)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;

	/*
	 * Do not try to power off the domain in the following situations:
	 * The domain is already in the "power off" state.
	 * System suspend is in progress.
	 * The domain is configured as always on.
	 * The domain has a subdomain being powered on.
	 */
	if (!genpd_status_on(genpd) || genpd->prepared_count > 0 ||
	    genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd) ||
	    atomic_read(&genpd->sd_count) > 0)
		return;

	/*
	 * The children must be in their deepest (powered-off) states to allow
	 * the parent to be powered off. Note that, there's no need for
	 * additional locking, as powering on a child, requires the parent's
	 * lock to be acquired first.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct generic_pm_domain *child = link->child;
		if (child->state_idx < child->state_count - 1)
			return;
	}

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		/*
		 * Do not allow PM domain to be powered off, when an IRQ safe
		 * device is part of a non-IRQ safe domain.
		 */
		if (!pm_runtime_suspended(pdd->dev) ||
			irq_safe_dev_in_sleep_domain(pdd->dev, genpd))
			not_suspended++;

		/* The device may need its PM domain to stay powered on. */
		if (to_gpd_data(pdd)->rpm_always_on)
			return;
	}

	/* Allow exactly one non-suspended device when called from RPM paths. */
	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
		return;

	/* Let the governor veto the power off. */
	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return;
	}

	/* Default to shallowest state. */
	if (!genpd->gov)
		genpd->state_idx = 0;

	/* Don't power off, if a child domain is waiting to power on. */
	if (atomic_read(&genpd->sd_count) > 0)
		return;

	if (_genpd_power_off(genpd, true)) {
		genpd->states[genpd->state_idx].rejected++;
		return;
	}

	genpd->status = GENPD_STATE_OFF;
	genpd_update_accounting(genpd);
	genpd->states[genpd->state_idx].usage++;

	/* Propagate upwards: a parent may now be able to power off too. */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}
}
988
989 /**
990 * genpd_power_on - Restore power to a given PM domain and its parents.
991 * @genpd: PM domain to power up.
992 * @depth: nesting count for lockdep.
993 *
994 * Restore power to @genpd and all of its parents so that it is possible to
995 * resume a device belonging to it.
996 */
genpd_power_on(struct generic_pm_domain * genpd,unsigned int depth)997 static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
998 {
999 struct gpd_link *link;
1000 int ret = 0;
1001
1002 if (genpd_status_on(genpd))
1003 return 0;
1004
1005 /* Reflect over the entered idle-states residency for debugfs. */
1006 genpd_reflect_residency(genpd);
1007
1008 /*
1009 * The list is guaranteed not to change while the loop below is being
1010 * executed, unless one of the parents' .power_on() callbacks fiddles
1011 * with it.
1012 */
1013 list_for_each_entry(link, &genpd->child_links, child_node) {
1014 struct generic_pm_domain *parent = link->parent;
1015
1016 genpd_sd_counter_inc(parent);
1017
1018 genpd_lock_nested(parent, depth + 1);
1019 ret = genpd_power_on(parent, depth + 1);
1020 genpd_unlock(parent);
1021
1022 if (ret) {
1023 genpd_sd_counter_dec(parent);
1024 goto err;
1025 }
1026 }
1027
1028 ret = _genpd_power_on(genpd, true);
1029 if (ret)
1030 goto err;
1031
1032 genpd->status = GENPD_STATE_ON;
1033 genpd_update_accounting(genpd);
1034
1035 return 0;
1036
1037 err:
1038 list_for_each_entry_continue_reverse(link,
1039 &genpd->child_links,
1040 child_node) {
1041 genpd_sd_counter_dec(link->parent);
1042 genpd_lock_nested(link->parent, depth + 1);
1043 genpd_power_off(link->parent, false, depth + 1);
1044 genpd_unlock(link->parent);
1045 }
1046
1047 return ret;
1048 }
1049
/* ->start() hook of the PM domain: kick the device's clocks/controls on. */
static int genpd_dev_pm_start(struct device *dev)
{
	struct generic_pm_domain *pd = dev_to_genpd(dev);

	return genpd_start_dev(pd, dev);
}
1056
/*
 * PM QoS resume-latency notifier: mark the timing data of @dev and of every
 * ancestor device attached to a genpd as changed, so the governors
 * re-evaluate their power-off decisions on the next opportunity.
 */
static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	/* Walk up the device hierarchy until a device without a parent,
	 * or one that ignores its children, is reached. */
	for (;;) {
		/* ERR_PTR sentinel: only set when this dev has timing data. */
		struct generic_pm_domain *genpd = ERR_PTR(-ENODATA);
		struct pm_domain_data *pdd;
		struct gpd_timing_data *td;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd) {
			td = to_gpd_data(pdd)->td;
			if (td) {
				td->constraint_changed = true;
				genpd = dev_to_genpd(dev);
			}
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			/* Taken outside dev->power.lock to respect lock order. */
			genpd_lock(genpd);
			genpd->gd->max_off_time_changed = true;
			genpd_unlock(genpd);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}
1098
1099 /**
1100 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
1101 * @work: Work structure used for scheduling the execution of this function.
1102 */
genpd_power_off_work_fn(struct work_struct * work)1103 static void genpd_power_off_work_fn(struct work_struct *work)
1104 {
1105 struct generic_pm_domain *genpd;
1106
1107 genpd = container_of(work, struct generic_pm_domain, power_off_work);
1108
1109 genpd_lock(genpd);
1110 genpd_power_off(genpd, false, 0);
1111 genpd_unlock(genpd);
1112 }
1113
1114 /**
1115 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
1116 * @dev: Device to handle.
1117 */
__genpd_runtime_suspend(struct device * dev)1118 static int __genpd_runtime_suspend(struct device *dev)
1119 {
1120 int (*cb)(struct device *__dev);
1121
1122 if (dev->type && dev->type->pm)
1123 cb = dev->type->pm->runtime_suspend;
1124 else if (dev->class && dev->class->pm)
1125 cb = dev->class->pm->runtime_suspend;
1126 else if (dev->bus && dev->bus->pm)
1127 cb = dev->bus->pm->runtime_suspend;
1128 else
1129 cb = NULL;
1130
1131 if (!cb && dev->driver && dev->driver->pm)
1132 cb = dev->driver->pm->runtime_suspend;
1133
1134 return cb ? cb(dev) : 0;
1135 }
1136
1137 /**
1138 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
1139 * @dev: Device to handle.
1140 */
__genpd_runtime_resume(struct device * dev)1141 static int __genpd_runtime_resume(struct device *dev)
1142 {
1143 int (*cb)(struct device *__dev);
1144
1145 if (dev->type && dev->type->pm)
1146 cb = dev->type->pm->runtime_resume;
1147 else if (dev->class && dev->class->pm)
1148 cb = dev->class->pm->runtime_resume;
1149 else if (dev->bus && dev->bus->pm)
1150 cb = dev->bus->pm->runtime_resume;
1151 else
1152 cb = NULL;
1153
1154 if (!cb && dev->driver && dev->driver->pm)
1155 cb = dev->driver->pm->runtime_resume;
1156
1157 return cb ? cb(dev) : 0;
1158 }
1159
1160 /**
1161 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
1162 * @dev: Device to suspend.
1163 *
1164 * Carry out a runtime suspend of a device under the assumption that its
1165 * pm_domain field points to the domain member of an object of type
1166 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
1167 */
genpd_runtime_suspend(struct device * dev)1168 static int genpd_runtime_suspend(struct device *dev)
1169 {
1170 struct generic_pm_domain *genpd;
1171 bool (*suspend_ok)(struct device *__dev);
1172 struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
1173 struct gpd_timing_data *td = gpd_data->td;
1174 bool runtime_pm = pm_runtime_enabled(dev);
1175 ktime_t time_start = 0;
1176 s64 elapsed_ns;
1177 int ret;
1178
1179 dev_dbg(dev, "%s()\n", __func__);
1180
1181 genpd = dev_to_genpd(dev);
1182 if (IS_ERR(genpd))
1183 return -EINVAL;
1184
1185 /*
1186 * A runtime PM centric subsystem/driver may re-use the runtime PM
1187 * callbacks for other purposes than runtime PM. In those scenarios
1188 * runtime PM is disabled. Under these circumstances, we shall skip
1189 * validating/measuring the PM QoS latency.
1190 */
1191 suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
1192 if (runtime_pm && suspend_ok && !suspend_ok(dev))
1193 return -EBUSY;
1194
1195 /* Measure suspend latency. */
1196 if (td && runtime_pm)
1197 time_start = ktime_get();
1198
1199 ret = __genpd_runtime_suspend(dev);
1200 if (ret)
1201 return ret;
1202
1203 ret = genpd_stop_dev(genpd, dev);
1204 if (ret) {
1205 __genpd_runtime_resume(dev);
1206 return ret;
1207 }
1208
1209 /* Update suspend latency value if the measured time exceeds it. */
1210 if (td && runtime_pm) {
1211 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
1212 if (elapsed_ns > td->suspend_latency_ns) {
1213 td->suspend_latency_ns = elapsed_ns;
1214 dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
1215 elapsed_ns);
1216 genpd->gd->max_off_time_changed = true;
1217 td->constraint_changed = true;
1218 }
1219 }
1220
1221 /*
1222 * If power.irq_safe is set, this routine may be run with
1223 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
1224 */
1225 if (irq_safe_dev_in_sleep_domain(dev, genpd))
1226 return 0;
1227
1228 genpd_lock(genpd);
1229 genpd_power_off(genpd, true, 0);
1230 gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
1231 genpd_unlock(genpd);
1232
1233 return 0;
1234 }
1235
1236 /**
1237 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
1238 * @dev: Device to resume.
1239 *
1240 * Carry out a runtime resume of a device under the assumption that its
1241 * pm_domain field points to the domain member of an object of type
1242 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
1243 */
genpd_runtime_resume(struct device * dev)1244 static int genpd_runtime_resume(struct device *dev)
1245 {
1246 struct generic_pm_domain *genpd;
1247 struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
1248 struct gpd_timing_data *td = gpd_data->td;
1249 bool timed = td && pm_runtime_enabled(dev);
1250 ktime_t time_start = 0;
1251 s64 elapsed_ns;
1252 int ret;
1253
1254 dev_dbg(dev, "%s()\n", __func__);
1255
1256 genpd = dev_to_genpd(dev);
1257 if (IS_ERR(genpd))
1258 return -EINVAL;
1259
1260 /*
1261 * As we don't power off a non IRQ safe domain, which holds
1262 * an IRQ safe device, we don't need to restore power to it.
1263 */
1264 if (irq_safe_dev_in_sleep_domain(dev, genpd))
1265 goto out;
1266
1267 genpd_lock(genpd);
1268 genpd_restore_performance_state(dev, gpd_data->rpm_pstate);
1269 ret = genpd_power_on(genpd, 0);
1270 genpd_unlock(genpd);
1271
1272 if (ret)
1273 return ret;
1274
1275 out:
1276 /* Measure resume latency. */
1277 if (timed)
1278 time_start = ktime_get();
1279
1280 ret = genpd_start_dev(genpd, dev);
1281 if (ret)
1282 goto err_poweroff;
1283
1284 ret = __genpd_runtime_resume(dev);
1285 if (ret)
1286 goto err_stop;
1287
1288 /* Update resume latency value if the measured time exceeds it. */
1289 if (timed) {
1290 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
1291 if (elapsed_ns > td->resume_latency_ns) {
1292 td->resume_latency_ns = elapsed_ns;
1293 dev_dbg(dev, "resume latency exceeded, %lld ns\n",
1294 elapsed_ns);
1295 genpd->gd->max_off_time_changed = true;
1296 td->constraint_changed = true;
1297 }
1298 }
1299
1300 return 0;
1301
1302 err_stop:
1303 genpd_stop_dev(genpd, dev);
1304 err_poweroff:
1305 if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
1306 genpd_lock(genpd);
1307 genpd_power_off(genpd, true, 0);
1308 gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
1309 genpd_unlock(genpd);
1310 }
1311
1312 return ret;
1313 }
1314
/* Set by the "pd_ignore_unused" kernel command line parameter. */
static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;	/* non-zero: option consumed */
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);
1322
1323 /**
1324 * genpd_power_off_unused - Power off all PM domains with no devices in use.
1325 */
genpd_power_off_unused(void)1326 static int __init genpd_power_off_unused(void)
1327 {
1328 struct generic_pm_domain *genpd;
1329
1330 if (pd_ignore_unused) {
1331 pr_warn("genpd: Not disabling unused power domains\n");
1332 return 0;
1333 }
1334
1335 pr_info("genpd: Disabling unused power domains\n");
1336 mutex_lock(&gpd_list_lock);
1337
1338 list_for_each_entry(genpd, &gpd_list, gpd_list_node)
1339 genpd_queue_power_off_work(genpd);
1340
1341 mutex_unlock(&gpd_list_lock);
1342
1343 return 0;
1344 }
1345 late_initcall_sync(genpd_power_off_unused);
1346
1347 #ifdef CONFIG_PM_SLEEP
1348
1349 /**
1350 * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
1351 * @genpd: PM domain to power off, if possible.
1352 * @use_lock: use the lock.
1353 * @depth: nesting count for lockdep.
1354 *
1355 * Check if the given PM domain can be powered off (during system suspend or
1356 * hibernation) and do that if so. Also, in that case propagate to its parents.
1357 *
1358 * This function is only called in "noirq" and "syscore" stages of system power
1359 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
1360 * these cases the lock must be held.
1361 */
genpd_sync_power_off(struct generic_pm_domain * genpd,bool use_lock,unsigned int depth)1362 static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
1363 unsigned int depth)
1364 {
1365 struct gpd_link *link;
1366
1367 if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
1368 return;
1369
1370 if (genpd->suspended_count != genpd->device_count
1371 || atomic_read(&genpd->sd_count) > 0)
1372 return;
1373
1374 /* Check that the children are in their deepest (powered-off) state. */
1375 list_for_each_entry(link, &genpd->parent_links, parent_node) {
1376 struct generic_pm_domain *child = link->child;
1377 if (child->state_idx < child->state_count - 1)
1378 return;
1379 }
1380
1381 /* Choose the deepest state when suspending */
1382 genpd->state_idx = genpd->state_count - 1;
1383 if (_genpd_power_off(genpd, false)) {
1384 genpd->states[genpd->state_idx].rejected++;
1385 return;
1386 } else {
1387 genpd->states[genpd->state_idx].usage++;
1388 }
1389
1390 genpd->status = GENPD_STATE_OFF;
1391
1392 list_for_each_entry(link, &genpd->child_links, child_node) {
1393 genpd_sd_counter_dec(link->parent);
1394
1395 if (use_lock)
1396 genpd_lock_nested(link->parent, depth + 1);
1397
1398 genpd_sync_power_off(link->parent, use_lock, depth + 1);
1399
1400 if (use_lock)
1401 genpd_unlock(link->parent);
1402 }
1403 }
1404
1405 /**
1406 * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
1407 * @genpd: PM domain to power on.
1408 * @use_lock: use the lock.
1409 * @depth: nesting count for lockdep.
1410 *
1411 * This function is only called in "noirq" and "syscore" stages of system power
1412 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
1413 * these cases the lock must be held.
1414 */
genpd_sync_power_on(struct generic_pm_domain * genpd,bool use_lock,unsigned int depth)1415 static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
1416 unsigned int depth)
1417 {
1418 struct gpd_link *link;
1419
1420 if (genpd_status_on(genpd))
1421 return;
1422
1423 list_for_each_entry(link, &genpd->child_links, child_node) {
1424 genpd_sd_counter_inc(link->parent);
1425
1426 if (use_lock)
1427 genpd_lock_nested(link->parent, depth + 1);
1428
1429 genpd_sync_power_on(link->parent, use_lock, depth + 1);
1430
1431 if (use_lock)
1432 genpd_unlock(link->parent);
1433 }
1434
1435 _genpd_power_on(genpd, false);
1436 genpd->status = GENPD_STATE_ON;
1437 }
1438
1439 /**
1440 * genpd_prepare - Start power transition of a device in a PM domain.
1441 * @dev: Device to start the transition of.
1442 *
1443 * Start a power transition of a device (during a system-wide power transition)
1444 * under the assumption that its pm_domain field points to the domain member of
1445 * an object of type struct generic_pm_domain representing a PM domain
1446 * consisting of I/O devices.
1447 */
genpd_prepare(struct device * dev)1448 static int genpd_prepare(struct device *dev)
1449 {
1450 struct generic_pm_domain *genpd;
1451 int ret;
1452
1453 dev_dbg(dev, "%s()\n", __func__);
1454
1455 genpd = dev_to_genpd(dev);
1456 if (IS_ERR(genpd))
1457 return -EINVAL;
1458
1459 genpd_lock(genpd);
1460 genpd->prepared_count++;
1461 genpd_unlock(genpd);
1462
1463 ret = pm_generic_prepare(dev);
1464 if (ret < 0) {
1465 genpd_lock(genpd);
1466
1467 genpd->prepared_count--;
1468
1469 genpd_unlock(genpd);
1470 }
1471
1472 /* Never return 1, as genpd don't cope with the direct_complete path. */
1473 return ret >= 0 ? 0 : ret;
1474 }
1475
1476 /**
1477 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
1478 * I/O pm domain.
1479 * @dev: Device to suspend.
1480 * @suspend_noirq: Generic suspend_noirq callback.
1481 * @resume_noirq: Generic resume_noirq callback.
1482 *
1483 * Stop the device and remove power from the domain if all devices in it have
1484 * been stopped.
1485 */
genpd_finish_suspend(struct device * dev,int (* suspend_noirq)(struct device * dev),int (* resume_noirq)(struct device * dev))1486 static int genpd_finish_suspend(struct device *dev,
1487 int (*suspend_noirq)(struct device *dev),
1488 int (*resume_noirq)(struct device *dev))
1489 {
1490 struct generic_pm_domain *genpd;
1491 int ret = 0;
1492
1493 genpd = dev_to_genpd(dev);
1494 if (IS_ERR(genpd))
1495 return -EINVAL;
1496
1497 ret = suspend_noirq(dev);
1498 if (ret)
1499 return ret;
1500
1501 if (device_awake_path(dev) && genpd_is_active_wakeup(genpd))
1502 return 0;
1503
1504 if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1505 !pm_runtime_status_suspended(dev)) {
1506 ret = genpd_stop_dev(genpd, dev);
1507 if (ret) {
1508 resume_noirq(dev);
1509 return ret;
1510 }
1511 }
1512
1513 genpd_lock(genpd);
1514 genpd->suspended_count++;
1515 genpd_sync_power_off(genpd, true, 0);
1516 genpd_unlock(genpd);
1517
1518 return 0;
1519 }
1520
1521 /**
1522 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
1523 * @dev: Device to suspend.
1524 *
1525 * Stop the device and remove power from the domain if all devices in it have
1526 * been stopped.
1527 */
genpd_suspend_noirq(struct device * dev)1528 static int genpd_suspend_noirq(struct device *dev)
1529 {
1530 dev_dbg(dev, "%s()\n", __func__);
1531
1532 return genpd_finish_suspend(dev,
1533 pm_generic_suspend_noirq,
1534 pm_generic_resume_noirq);
1535 }
1536
1537 /**
1538 * genpd_finish_resume - Completion of resume of device in an I/O PM domain.
1539 * @dev: Device to resume.
1540 * @resume_noirq: Generic resume_noirq callback.
1541 *
1542 * Restore power to the device's PM domain, if necessary, and start the device.
1543 */
genpd_finish_resume(struct device * dev,int (* resume_noirq)(struct device * dev))1544 static int genpd_finish_resume(struct device *dev,
1545 int (*resume_noirq)(struct device *dev))
1546 {
1547 struct generic_pm_domain *genpd;
1548 int ret;
1549
1550 dev_dbg(dev, "%s()\n", __func__);
1551
1552 genpd = dev_to_genpd(dev);
1553 if (IS_ERR(genpd))
1554 return -EINVAL;
1555
1556 if (device_awake_path(dev) && genpd_is_active_wakeup(genpd))
1557 return resume_noirq(dev);
1558
1559 genpd_lock(genpd);
1560 genpd_sync_power_on(genpd, true, 0);
1561 genpd->suspended_count--;
1562 genpd_unlock(genpd);
1563
1564 if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1565 !pm_runtime_status_suspended(dev)) {
1566 ret = genpd_start_dev(genpd, dev);
1567 if (ret)
1568 return ret;
1569 }
1570
1571 return pm_generic_resume_noirq(dev);
1572 }
1573
1574 /**
1575 * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
1576 * @dev: Device to resume.
1577 *
1578 * Restore power to the device's PM domain, if necessary, and start the device.
1579 */
genpd_resume_noirq(struct device * dev)1580 static int genpd_resume_noirq(struct device *dev)
1581 {
1582 dev_dbg(dev, "%s()\n", __func__);
1583
1584 return genpd_finish_resume(dev, pm_generic_resume_noirq);
1585 }
1586
1587 /**
1588 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
1589 * @dev: Device to freeze.
1590 *
1591 * Carry out a late freeze of a device under the assumption that its
1592 * pm_domain field points to the domain member of an object of type
1593 * struct generic_pm_domain representing a power domain consisting of I/O
1594 * devices.
1595 */
genpd_freeze_noirq(struct device * dev)1596 static int genpd_freeze_noirq(struct device *dev)
1597 {
1598 dev_dbg(dev, "%s()\n", __func__);
1599
1600 return genpd_finish_suspend(dev,
1601 pm_generic_freeze_noirq,
1602 pm_generic_thaw_noirq);
1603 }
1604
1605 /**
1606 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
1607 * @dev: Device to thaw.
1608 *
1609 * Start the device, unless power has been removed from the domain already
1610 * before the system transition.
1611 */
genpd_thaw_noirq(struct device * dev)1612 static int genpd_thaw_noirq(struct device *dev)
1613 {
1614 dev_dbg(dev, "%s()\n", __func__);
1615
1616 return genpd_finish_resume(dev, pm_generic_thaw_noirq);
1617 }
1618
1619 /**
1620 * genpd_poweroff_noirq - Completion of hibernation of device in an
1621 * I/O PM domain.
1622 * @dev: Device to poweroff.
1623 *
1624 * Stop the device and remove power from the domain if all devices in it have
1625 * been stopped.
1626 */
genpd_poweroff_noirq(struct device * dev)1627 static int genpd_poweroff_noirq(struct device *dev)
1628 {
1629 dev_dbg(dev, "%s()\n", __func__);
1630
1631 return genpd_finish_suspend(dev,
1632 pm_generic_poweroff_noirq,
1633 pm_generic_restore_noirq);
1634 }
1635
1636 /**
1637 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
1638 * @dev: Device to resume.
1639 *
1640 * Make sure the domain will be in the same power state as before the
1641 * hibernation the system is resuming from and start the device if necessary.
1642 */
genpd_restore_noirq(struct device * dev)1643 static int genpd_restore_noirq(struct device *dev)
1644 {
1645 dev_dbg(dev, "%s()\n", __func__);
1646
1647 return genpd_finish_resume(dev, pm_generic_restore_noirq);
1648 }
1649
1650 /**
1651 * genpd_complete - Complete power transition of a device in a power domain.
1652 * @dev: Device to complete the transition of.
1653 *
1654 * Complete a power transition of a device (during a system-wide power
1655 * transition) under the assumption that its pm_domain field points to the
1656 * domain member of an object of type struct generic_pm_domain representing
1657 * a power domain consisting of I/O devices.
1658 */
genpd_complete(struct device * dev)1659 static void genpd_complete(struct device *dev)
1660 {
1661 struct generic_pm_domain *genpd;
1662
1663 dev_dbg(dev, "%s()\n", __func__);
1664
1665 genpd = dev_to_genpd(dev);
1666 if (IS_ERR(genpd))
1667 return;
1668
1669 pm_generic_complete(dev);
1670
1671 genpd_lock(genpd);
1672
1673 genpd->prepared_count--;
1674 if (!genpd->prepared_count)
1675 genpd_queue_power_off_work(genpd);
1676
1677 genpd_unlock(genpd);
1678 }
1679
/*
 * genpd_switch_state - Synchronously power the genpd of @dev off or on,
 * mirroring the noirq suspend/resume bookkeeping (suspended_count).
 *
 * NOTE(review): the lock is taken only for IRQ-safe domains; presumably
 * callers of the non-IRQ-safe case run with no concurrency (syscore /
 * suspend-to-idle stage) - confirm against the callers' contexts.
 */
static void genpd_switch_state(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;
	bool use_lock;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	use_lock = genpd_is_irq_safe(genpd);

	if (use_lock)
		genpd_lock(genpd);

	if (suspend) {
		genpd->suspended_count++;
		genpd_sync_power_off(genpd, use_lock, 0);
	} else {
		genpd_sync_power_on(genpd, use_lock, 0);
		genpd->suspended_count--;
	}

	if (use_lock)
		genpd_unlock(genpd);
}
1705
1706 /**
1707 * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
1708 * @dev: The device that is attached to the genpd, that can be suspended.
1709 *
1710 * This routine should typically be called for a device that needs to be
1711 * suspended during the syscore suspend phase. It may also be called during
1712 * suspend-to-idle to suspend a corresponding CPU device that is attached to a
1713 * genpd.
1714 */
dev_pm_genpd_suspend(struct device * dev)1715 void dev_pm_genpd_suspend(struct device *dev)
1716 {
1717 genpd_switch_state(dev, true);
1718 }
1719 EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);
1720
1721 /**
1722 * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
1723 * @dev: The device that is attached to the genpd, which needs to be resumed.
1724 *
1725 * This routine should typically be called for a device that needs to be resumed
1726 * during the syscore resume phase. It may also be called during suspend-to-idle
1727 * to resume a corresponding CPU device that is attached to a genpd.
1728 */
dev_pm_genpd_resume(struct device * dev)1729 void dev_pm_genpd_resume(struct device *dev)
1730 {
1731 genpd_switch_state(dev, false);
1732 }
1733 EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);
1734
1735 #else /* !CONFIG_PM_SLEEP */
1736
1737 #define genpd_prepare NULL
1738 #define genpd_suspend_noirq NULL
1739 #define genpd_resume_noirq NULL
1740 #define genpd_freeze_noirq NULL
1741 #define genpd_thaw_noirq NULL
1742 #define genpd_poweroff_noirq NULL
1743 #define genpd_restore_noirq NULL
1744 #define genpd_complete NULL
1745
1746 #endif /* CONFIG_PM_SLEEP */
1747
/*
 * genpd_alloc_dev_data - Allocate and install the per-device genpd data.
 *
 * Allocates the generic_pm_domain_data for @dev, optionally together with
 * the timing data used by a governor, and installs it as the device's
 * domain_data. Returns the new data or an ERR_PTR(); fails with -EINVAL if
 * the device already has domain_data installed.
 */
static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
							   bool has_governor)
{
	struct generic_pm_domain_data *gpd_data;
	struct gpd_timing_data *td;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	gpd_data->base.dev = dev;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	/* Allocate data used by a governor. */
	if (has_governor) {
		td = kzalloc(sizeof(*td), GFP_KERNEL);
		if (!td) {
			ret = -ENOMEM;
			goto err_free;
		}

		/* Start with no constraint known; force a re-evaluation. */
		td->constraint_changed = true;
		td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
		td->next_wakeup = KTIME_MAX;
		gpd_data->td = td;
	}

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data)
		ret = -EINVAL;	/* already attached to a domain */
	else
		dev->power.subsys_data->domain_data = &gpd_data->base;

	spin_unlock_irq(&dev->power.lock);

	if (ret)
		goto err_free;

	return gpd_data;

 err_free:
	kfree(gpd_data->td);
	kfree(gpd_data);
 err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}
1803
/*
 * genpd_free_dev_data - Tear down what genpd_alloc_dev_data() set up:
 * detach the domain_data under the power lock, then release the OPP
 * configuration, the allocations and the subsys_data reference.
 */
static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	spin_lock_irq(&dev->power.lock);

	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	dev_pm_opp_clear_config(gpd_data->opp_token);
	kfree(gpd_data->td);
	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}
1818
/*
 * genpd_update_cpumask - Set or clear @cpu in the cpumask of @genpd and,
 * recursively, of all of its parents. Only CPU domains carry a cpumask;
 * other domains are left untouched. Parents are updated first, under their
 * own locks, nested with increasing @depth for lockdep.
 */
static void genpd_update_cpumask(struct generic_pm_domain *genpd,
				 int cpu, bool set, unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_is_cpu_domain(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_lock_nested(parent, depth + 1);
		genpd_update_cpumask(parent, cpu, set, depth + 1);
		genpd_unlock(parent);
	}

	if (set)
		cpumask_set_cpu(cpu, genpd->cpus);
	else
		cpumask_clear_cpu(cpu, genpd->cpus);
}
1840
/* Add @cpu to the domain's cpumask; a negative @cpu means "not a CPU device"
 * (see genpd_get_cpu()) and is ignored. */
static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, true, 0);
}
1846
/* Remove @cpu from the domain's cpumask; a negative @cpu means "not a CPU
 * device" (see genpd_get_cpu()) and is ignored. */
static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, false, 0);
}
1852
/*
 * genpd_get_cpu - For a CPU domain, return the CPU number whose CPU device
 * is @dev, or -1 when @dev is not a CPU device or @genpd is not a CPU domain.
 */
static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
{
	int cpu;

	if (!genpd_is_cpu_domain(genpd))
		return -1;

	for_each_possible_cpu(cpu) {
		if (get_cpu_device(cpu) == dev)
			return cpu;
	}

	return -1;
}
1867
/*
 * genpd_add_device - Attach @dev to @genpd.
 *
 * Allocates the per-device data, invokes the domain's optional ->attach_dev()
 * callback, adds the device to the domain's device list and registers the
 * PM QoS resume-latency notifier. @base_dev is the device used for the CPU
 * lookup (it may differ from @dev for virtual devices). Called with
 * gpd_list_lock held by the callers.
 */
static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			    struct device *base_dev)
{
	struct genpd_governor_data *gd = genpd->gd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	/* Timing data is only needed when a governor is present. */
	gpd_data = genpd_alloc_dev_data(dev, gd);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);

	gpd_data->hw_mode = genpd->get_hwmode_dev ? genpd->get_hwmode_dev(genpd, dev) : false;

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd_lock(genpd);

	genpd_set_cpumask(genpd, gpd_data->cpu);

	genpd->device_count++;
	if (gd)
		gd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

	genpd_unlock(genpd);
	dev_pm_domain_set(dev, &genpd->domain);
 out:
	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
					DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}
1910
1911 /**
1912 * pm_genpd_add_device - Add a device to an I/O PM domain.
1913 * @genpd: PM domain to add the device to.
1914 * @dev: Device to be added.
1915 */
pm_genpd_add_device(struct generic_pm_domain * genpd,struct device * dev)1916 int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
1917 {
1918 int ret;
1919
1920 if (!genpd || !dev)
1921 return -EINVAL;
1922
1923 mutex_lock(&gpd_list_lock);
1924 ret = genpd_add_device(genpd, dev, dev);
1925 mutex_unlock(&gpd_list_lock);
1926
1927 return ret;
1928 }
1929 EXPORT_SYMBOL_GPL(pm_genpd_add_device);
1930
/*
 * genpd_remove_device - Detach @dev from @genpd.
 *
 * Unregisters the PM QoS notifier, removes the device from the domain's
 * list and frees the per-device data. Fails with -EAGAIN while a system
 * suspend transition is in progress (prepared_count > 0), in which case the
 * notifier is re-registered and the device stays attached.
 */
static int genpd_remove_device(struct generic_pm_domain *genpd,
			       struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
				   DEV_PM_QOS_RESUME_LATENCY);

	genpd_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	if (genpd->gd)
		genpd->gd->max_off_time_changed = true;

	genpd_clear_cpumask(genpd, gpd_data->cpu);

	list_del_init(&pdd->list_node);

	genpd_unlock(genpd);

	dev_pm_domain_set(dev, NULL);

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	genpd_unlock(genpd);
	/* Removal refused: restore the notifier removed above. */
	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}
1977
1978 /**
1979 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1980 * @dev: Device to be removed.
1981 */
pm_genpd_remove_device(struct device * dev)1982 int pm_genpd_remove_device(struct device *dev)
1983 {
1984 struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);
1985
1986 if (!genpd)
1987 return -EINVAL;
1988
1989 return genpd_remove_device(genpd, dev);
1990 }
1991 EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
1992
1993 /**
1994 * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
1995 *
1996 * @dev: Device that should be associated with the notifier
1997 * @nb: The notifier block to register
1998 *
1999 * Users may call this function to add a genpd power on/off notifier for an
2000 * attached @dev. Only one notifier per device is allowed. The notifier is
2001 * sent when genpd is powering on/off the PM domain.
2002 *
2003 * It is assumed that the user guarantee that the genpd wouldn't be detached
2004 * while this routine is getting called.
2005 *
2006 * Returns 0 on success and negative error values on failures.
2007 */
dev_pm_genpd_add_notifier(struct device * dev,struct notifier_block * nb)2008 int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
2009 {
2010 struct generic_pm_domain *genpd;
2011 struct generic_pm_domain_data *gpd_data;
2012 int ret;
2013
2014 genpd = dev_to_genpd_safe(dev);
2015 if (!genpd)
2016 return -ENODEV;
2017
2018 if (WARN_ON(!dev->power.subsys_data ||
2019 !dev->power.subsys_data->domain_data))
2020 return -EINVAL;
2021
2022 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
2023 if (gpd_data->power_nb)
2024 return -EEXIST;
2025
2026 genpd_lock(genpd);
2027 ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
2028 genpd_unlock(genpd);
2029
2030 if (ret) {
2031 dev_warn(dev, "failed to add notifier for PM domain %s\n",
2032 dev_name(&genpd->dev));
2033 return ret;
2034 }
2035
2036 gpd_data->power_nb = nb;
2037 return 0;
2038 }
2039 EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
2040
2041 /**
2042 * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
2043 *
2044 * @dev: Device that is associated with the notifier
2045 *
2046 * Users may call this function to remove a genpd power on/off notifier for an
2047 * attached @dev.
2048 *
2049 * It is assumed that the user guarantee that the genpd wouldn't be detached
2050 * while this routine is getting called.
2051 *
2052 * Returns 0 on success and negative error values on failures.
2053 */
dev_pm_genpd_remove_notifier(struct device * dev)2054 int dev_pm_genpd_remove_notifier(struct device *dev)
2055 {
2056 struct generic_pm_domain *genpd;
2057 struct generic_pm_domain_data *gpd_data;
2058 int ret;
2059
2060 genpd = dev_to_genpd_safe(dev);
2061 if (!genpd)
2062 return -ENODEV;
2063
2064 if (WARN_ON(!dev->power.subsys_data ||
2065 !dev->power.subsys_data->domain_data))
2066 return -EINVAL;
2067
2068 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
2069 if (!gpd_data->power_nb)
2070 return -ENODEV;
2071
2072 genpd_lock(genpd);
2073 ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
2074 gpd_data->power_nb);
2075 genpd_unlock(genpd);
2076
2077 if (ret) {
2078 dev_warn(dev, "failed to remove notifier for PM domain %s\n",
2079 dev_name(&genpd->dev));
2080 return ret;
2081 }
2082
2083 gpd_data->power_nb = NULL;
2084 return 0;
2085 }
2086 EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);
2087
/*
 * Link @subdomain as a child of @genpd. Returns 0 on success, -EINVAL on
 * invalid arguments, an IRQ-safety mismatch, a powered-on child under a
 * powered-off parent, or a duplicate link, and -ENOMEM on allocation failure.
 */
static int genpd_add_subdomain(struct generic_pm_domain *genpd,
			       struct generic_pm_domain *subdomain)
{
	struct gpd_link *link, *itr;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	/*
	 * If the domain can be powered on/off in an IRQ safe
	 * context, ensure that the subdomain can also be
	 * powered on/off in that context.
	 */
	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
		     dev_name(&genpd->dev), subdomain->name);
		return -EINVAL;
	}

	/* Allocate before taking the genpd locks: GFP_KERNEL may sleep. */
	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	/* Lock order: child (subdomain) first, then parent, nested. */
	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	/* A powered-on child must not be linked under a powered-off parent. */
	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
		ret = -EINVAL;
		goto out;
	}

	/* Reject an already-existing parent <-> child link. */
	list_for_each_entry(itr, &genpd->parent_links, parent_node) {
		if (itr->child == subdomain && itr->parent == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link->parent = genpd;
	list_add_tail(&link->parent_node, &genpd->parent_links);
	link->child = subdomain;
	list_add_tail(&link->child_node, &subdomain->child_links);
	/* A powered-on child keeps the parent's subdomain counter elevated. */
	if (genpd_status_on(subdomain))
		genpd_sd_counter_inc(genpd);

 out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);
	if (ret)
		kfree(link);
	return ret;
}
2142
/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Leader PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 *
 * Takes gpd_list_lock to serialize against concurrent domain registration
 * and removal, then delegates to genpd_add_subdomain(). Returns 0 on
 * success or a negative error code on failure.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_subdomain(genpd, subdomain);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
2160
/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Leader PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 *
 * Returns 0 on success, -EINVAL for invalid arguments or when no matching
 * link exists, and -EBUSY while @subdomain still has parents of its own or
 * devices attached.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *l, *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

	/* Lock order: child (subdomain) first, then parent, nested. */
	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	/* Refuse while the subdomain is itself a parent or still has devices. */
	if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
		pr_warn("%s: unable to remove subdomain %s\n",
			dev_name(&genpd->dev), subdomain->name);
		ret = -EBUSY;
		goto out;
	}

	/* Find and unlink the matching parent <-> child link, if any. */
	list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
		if (link->child != subdomain)
			continue;

		list_del(&link->parent_node);
		list_del(&link->child_node);
		kfree(link);
		/* Drop the count a powered-on child holds on the parent. */
		if (genpd_status_on(subdomain))
			genpd_sd_counter_dec(genpd);

		ret = 0;
		break;
	}

out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
2206
/*
 * Free callback for the single default power state allocated by
 * genpd_set_default_power_state(). @state_count is part of the
 * genpd->free_states signature but is unused here, since the default
 * state is a single kzalloc'ed element.
 */
static void genpd_free_default_power_state(struct genpd_power_state *states,
					   unsigned int state_count)
{
	kfree(states);
}
2212
genpd_set_default_power_state(struct generic_pm_domain * genpd)2213 static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
2214 {
2215 struct genpd_power_state *state;
2216
2217 state = kzalloc(sizeof(*state), GFP_KERNEL);
2218 if (!state)
2219 return -ENOMEM;
2220
2221 genpd->states = state;
2222 genpd->state_count = 1;
2223 genpd->free_states = genpd_free_default_power_state;
2224
2225 return 0;
2226 }
2227
/*
 * Release callback for &genpd->dev. The struct device is embedded in
 * struct generic_pm_domain (see genpd_alloc_data()), so there is nothing
 * for the driver core to free here.
 */
static void genpd_provider_release(struct device *dev)
{
	/* nothing to be done here */
}
2232
/*
 * Allocate the dynamic data a genpd needs: the CPU mask for CPU domains,
 * governor data (when a governor is set), a default power state (when none
 * were declared) and the genpd device's name, optionally with a unique ID.
 *
 * On failure everything allocated here is unwound and a negative error
 * code is returned.
 */
static int genpd_alloc_data(struct generic_pm_domain *genpd)
{
	struct genpd_governor_data *gd = NULL;
	int ret;

	if (genpd_is_cpu_domain(genpd) &&
	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
		return -ENOMEM;

	if (genpd->gov) {
		gd = kzalloc(sizeof(*gd), GFP_KERNEL);
		if (!gd) {
			ret = -ENOMEM;
			goto free;
		}

		gd->max_off_time_ns = -1;
		gd->max_off_time_changed = true;
		gd->next_wakeup = KTIME_MAX;
		gd->next_hrtimer = KTIME_MAX;
	}

	/* Use only one "off" state if there were no states declared */
	if (genpd->state_count == 0) {
		ret = genpd_set_default_power_state(genpd);
		if (ret)
			goto free;
	}

	genpd->gd = gd;
	device_initialize(&genpd->dev);
	genpd->dev.release = genpd_provider_release;

	if (!genpd_is_dev_name_fw(genpd)) {
		dev_set_name(&genpd->dev, "%s", genpd->name);
	} else {
		/* Firmware-provided names get a unique ID suffix from the IDA. */
		ret = ida_alloc(&genpd_ida, GFP_KERNEL);
		if (ret < 0)
			goto put;

		genpd->device_id = ret;
		dev_set_name(&genpd->dev, "%s_%u", genpd->name, genpd->device_id);
	}

	return 0;
put:
	put_device(&genpd->dev);
	/* Only free states this function allocated, never caller-provided ones. */
	if (genpd->free_states == genpd_free_default_power_state) {
		kfree(genpd->states);
		genpd->states = NULL;
	}
free:
	if (genpd_is_cpu_domain(genpd))
		free_cpumask_var(genpd->cpus);
	kfree(gd);
	return ret;
}
2290
/*
 * Undo genpd_alloc_data(): drop the device reference, release the IDA id
 * if one was allocated, and free the cpumask, power states and governor
 * data.
 */
static void genpd_free_data(struct generic_pm_domain *genpd)
{
	put_device(&genpd->dev);
	/* device_id stays -ENXIO (set in pm_genpd_init()) unless IDA-allocated. */
	if (genpd->device_id != -ENXIO)
		ida_free(&genpd_ida, genpd->device_id);
	if (genpd_is_cpu_domain(genpd))
		free_cpumask_var(genpd->cpus);
	if (genpd->free_states)
		genpd->free_states(genpd->states, genpd->state_count);
	kfree(genpd->gd);
}
2302
genpd_lock_init(struct generic_pm_domain * genpd)2303 static void genpd_lock_init(struct generic_pm_domain *genpd)
2304 {
2305 if (genpd_is_cpu_domain(genpd)) {
2306 raw_spin_lock_init(&genpd->raw_slock);
2307 genpd->lock_ops = &genpd_raw_spin_ops;
2308 } else if (genpd_is_irq_safe(genpd)) {
2309 spin_lock_init(&genpd->slock);
2310 genpd->lock_ops = &genpd_spin_ops;
2311 } else {
2312 mutex_init(&genpd->mlock);
2313 genpd->lock_ops = &genpd_mtx_ops;
2314 }
2315 }
2316
/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial value of the domain's power_is_off field.
 *
 * Initializes the domain's lists, lock, callbacks and bookkeeping, then
 * allocates its dynamic data and adds it to the global gpd_list.
 *
 * Returns 0 on successful initialization, else a negative error code.
 */
int pm_genpd_init(struct generic_pm_domain *genpd,
		  struct dev_power_governor *gov, bool is_off)
{
	int ret;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	INIT_LIST_HEAD(&genpd->parent_links);
	INIT_LIST_HEAD(&genpd->child_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
	genpd_lock_init(genpd);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
	genpd->device_count = 0;
	genpd->provider = NULL;
	/* -ENXIO marks "no IDA id allocated"; see genpd_free_data(). */
	genpd->device_id = -ENXIO;
	genpd->has_provider = false;
	genpd->opp_table = NULL;
	genpd->accounting_time = ktime_get_mono_fast_ns();
	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
	genpd->domain.ops.prepare = genpd_prepare;
	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
	genpd->domain.ops.complete = genpd_complete;
	genpd->domain.start = genpd_dev_pm_start;
	genpd->domain.set_performance_state = genpd_dev_pm_set_performance_state;

	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	/* The always-on governor works better with the corresponding flag. */
	if (gov == &pm_domain_always_on_gov)
		genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;

	/* Always-on domains must be powered on at initialization. */
	if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
	    !genpd_status_on(genpd)) {
		/* genpd->dev is not initialized yet, so use the raw name. */
		pr_err("always-on PM domain %s is not on\n", genpd->name);
		return -EINVAL;
	}

	/* Multiple states but no governor doesn't make sense. */
	if (!gov && genpd->state_count > 1)
		pr_warn("%s: no governor for states\n", genpd->name);

	ret = genpd_alloc_data(genpd);
	if (ret)
		return ret;

	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);
	genpd_debug_add(genpd);

	return 0;
}
EXPORT_SYMBOL_GPL(pm_genpd_init);
2393
genpd_remove(struct generic_pm_domain * genpd)2394 static int genpd_remove(struct generic_pm_domain *genpd)
2395 {
2396 struct gpd_link *l, *link;
2397
2398 if (IS_ERR_OR_NULL(genpd))
2399 return -EINVAL;
2400
2401 genpd_lock(genpd);
2402
2403 if (genpd->has_provider) {
2404 genpd_unlock(genpd);
2405 pr_err("Provider present, unable to remove %s\n", dev_name(&genpd->dev));
2406 return -EBUSY;
2407 }
2408
2409 if (!list_empty(&genpd->parent_links) || genpd->device_count) {
2410 genpd_unlock(genpd);
2411 pr_err("%s: unable to remove %s\n", __func__, dev_name(&genpd->dev));
2412 return -EBUSY;
2413 }
2414
2415 list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
2416 list_del(&link->parent_node);
2417 list_del(&link->child_node);
2418 kfree(link);
2419 }
2420
2421 list_del(&genpd->gpd_list_node);
2422 genpd_unlock(genpd);
2423 genpd_debug_remove(genpd);
2424 cancel_work_sync(&genpd->power_off_work);
2425 genpd_free_data(genpd);
2426
2427 pr_debug("%s: removed %s\n", __func__, dev_name(&genpd->dev));
2428
2429 return 0;
2430 }
2431
/**
 * pm_genpd_remove - Remove a generic I/O PM domain
 * @genpd: Pointer to PM domain that is to be removed.
 *
 * To remove the PM domain, this function:
 *  - Removes the PM domain as a subdomain to any parent domains,
 *    if it was added.
 *  - Removes the PM domain from the list of registered PM domains.
 *
 * The PM domain will only be removed, if the associated provider has
 * been removed, it is not a parent to any other PM domain and has no
 * devices associated with it.
 *
 * Takes gpd_list_lock around genpd_remove() to serialize against other
 * registration and removal paths. Returns 0 on success or a negative
 * error code (-EINVAL, -EBUSY) on failure.
 */
int pm_genpd_remove(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_remove(genpd);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove);
2456
2457 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
2458
2459 /*
2460 * Device Tree based PM domain providers.
2461 *
2462 * The code below implements generic device tree based PM domain providers that
2463 * bind device tree nodes with generic PM domains registered in the system.
2464 *
2465 * Any driver that registers generic PM domains and needs to support binding of
2466 * devices to these domains is supposed to register a PM domain provider, which
2467 * maps a PM domain specifier retrieved from the device tree to a PM domain.
2468 *
2469 * Two simple mapping functions have been provided for convenience:
2470 * - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
2471 * - genpd_xlate_onecell() for mapping of multiple PM domains per node by
2472 * index.
2473 */
2474
/**
 * struct of_genpd_provider - PM domain provider registration structure
 * @link: Entry in global list of PM domain providers
 * @node: Pointer to device tree node of PM domain provider
 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
 *         into a PM domain.
 * @data: context pointer to be passed into @xlate callback
 *
 * Instances live on of_genpd_providers and are protected by of_genpd_mutex.
 */
struct of_genpd_provider {
	struct list_head link;
	struct device_node *node;
	genpd_xlate_t xlate;
	void *data;
};
2489
2490 /* List of registered PM domain providers. */
2491 static LIST_HEAD(of_genpd_providers);
2492 /* Mutex to protect the list above. */
2493 static DEFINE_MUTEX(of_genpd_mutex);
2494
/**
 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct generic_pm_domain
 *
 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of xlate function needs
 * to be a valid pointer to struct generic_pm_domain.
 *
 * Return: @data cast back to the PM domain it was registered with.
 */
static struct generic_pm_domain *genpd_xlate_simple(
					const struct of_phandle_args *genpdspec,
					void *data)
{
	return data;
}
2510
2511 /**
2512 * genpd_xlate_onecell() - Xlate function using a single index.
2513 * @genpdspec: OF phandle args to map into a PM domain
2514 * @data: xlate function private data - pointer to struct genpd_onecell_data
2515 *
2516 * This is a generic xlate function that can be used to model simple PM domain
2517 * controllers that have one device tree node and provide multiple PM domains.
2518 * A single cell is used as an index into an array of PM domains specified in
2519 * the genpd_onecell_data struct when registering the provider.
2520 */
genpd_xlate_onecell(const struct of_phandle_args * genpdspec,void * data)2521 static struct generic_pm_domain *genpd_xlate_onecell(
2522 const struct of_phandle_args *genpdspec,
2523 void *data)
2524 {
2525 struct genpd_onecell_data *genpd_data = data;
2526 unsigned int idx = genpdspec->args[0];
2527
2528 if (genpdspec->args_count != 1)
2529 return ERR_PTR(-EINVAL);
2530
2531 if (idx >= genpd_data->num_domains) {
2532 pr_err("%s: invalid domain index %u\n", __func__, idx);
2533 return ERR_PTR(-EINVAL);
2534 }
2535
2536 if (!genpd_data->domains[idx])
2537 return ERR_PTR(-ENOENT);
2538
2539 return genpd_data->domains[idx];
2540 }
2541
2542 /**
2543 * genpd_add_provider() - Register a PM domain provider for a node
2544 * @np: Device node pointer associated with the PM domain provider.
2545 * @xlate: Callback for decoding PM domain from phandle arguments.
2546 * @data: Context pointer for @xlate callback.
2547 */
genpd_add_provider(struct device_node * np,genpd_xlate_t xlate,void * data)2548 static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
2549 void *data)
2550 {
2551 struct of_genpd_provider *cp;
2552
2553 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2554 if (!cp)
2555 return -ENOMEM;
2556
2557 cp->node = of_node_get(np);
2558 cp->data = data;
2559 cp->xlate = xlate;
2560 fwnode_dev_initialized(&np->fwnode, true);
2561
2562 mutex_lock(&of_genpd_mutex);
2563 list_add(&cp->link, &of_genpd_providers);
2564 mutex_unlock(&of_genpd_mutex);
2565 pr_debug("Added domain provider from %pOF\n", np);
2566
2567 return 0;
2568 }
2569
genpd_present(const struct generic_pm_domain * genpd)2570 static bool genpd_present(const struct generic_pm_domain *genpd)
2571 {
2572 bool ret = false;
2573 const struct generic_pm_domain *gpd;
2574
2575 mutex_lock(&gpd_list_lock);
2576 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2577 if (gpd == genpd) {
2578 ret = true;
2579 break;
2580 }
2581 }
2582 mutex_unlock(&gpd_list_lock);
2583
2584 return ret;
2585 }
2586
/**
 * of_genpd_add_provider_simple() - Register a simple PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @genpd: Pointer to PM domain associated with the PM domain provider.
 *
 * Returns 0 on success or a negative error code. The domain must already
 * have been registered via pm_genpd_init().
 */
int of_genpd_add_provider_simple(struct device_node *np,
				 struct generic_pm_domain *genpd)
{
	int ret;

	if (!np || !genpd)
		return -EINVAL;

	if (!genpd_present(genpd))
		return -EINVAL;

	genpd->dev.of_node = np;

	/* Parse genpd OPP table */
	if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
		ret = dev_pm_opp_of_add_table(&genpd->dev);
		if (ret)
			return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n");

		/*
		 * Save table for faster processing while setting performance
		 * state.
		 */
		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
		WARN_ON(IS_ERR(genpd->opp_table));
	}

	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
	if (ret) {
		/* Roll back the OPP table added above. */
		if (genpd->opp_table) {
			dev_pm_opp_put_opp_table(genpd->opp_table);
			dev_pm_opp_of_remove_table(&genpd->dev);
		}

		return ret;
	}

	genpd->provider = &np->fwnode;
	genpd->has_provider = true;

	return 0;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
2635
/**
 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @data: Pointer to the data associated with the PM domain provider.
 *
 * Registers every non-NULL domain in @data->domains with @np; on any
 * failure, all domains processed so far are unwound. Returns 0 on success
 * or a negative error code.
 */
int of_genpd_add_provider_onecell(struct device_node *np,
				  struct genpd_onecell_data *data)
{
	struct generic_pm_domain *genpd;
	unsigned int i;
	int ret = -EINVAL;

	if (!np || !data)
		return -EINVAL;

	/* Fall back to index-based lookup when no custom xlate is given. */
	if (!data->xlate)
		data->xlate = genpd_xlate_onecell;

	for (i = 0; i < data->num_domains; i++) {
		genpd = data->domains[i];

		/* NULL slots are allowed and simply skipped. */
		if (!genpd)
			continue;
		if (!genpd_present(genpd))
			goto error;

		genpd->dev.of_node = np;

		/* Parse genpd OPP table */
		if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
			if (ret) {
				dev_err_probe(&genpd->dev, ret,
					      "Failed to add OPP table for index %d\n", i);
				goto error;
			}

			/*
			 * Save table for faster processing while setting
			 * performance state.
			 */
			genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
			WARN_ON(IS_ERR(genpd->opp_table));
		}

		genpd->provider = &np->fwnode;
		genpd->has_provider = true;
	}

	ret = genpd_add_provider(np, data->xlate, data);
	if (ret < 0)
		goto error;

	return 0;

error:
	/* Unwind only the indices processed before the failure. */
	while (i--) {
		genpd = data->domains[i];

		if (!genpd)
			continue;

		genpd->provider = NULL;
		genpd->has_provider = false;

		if (genpd->opp_table) {
			dev_pm_opp_put_opp_table(genpd->opp_table);
			dev_pm_opp_of_remove_table(&genpd->dev);
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
2710
/**
 * of_genpd_del_provider() - Remove a previously registered PM domain provider
 * @np: Device node pointer associated with the PM domain provider
 *
 * Drops the provider registration for @np and clears the provider linkage
 * (and OPP tables) of every genpd that was bound to it.
 */
void of_genpd_del_provider(struct device_node *np)
{
	struct of_genpd_provider *cp, *tmp;
	struct generic_pm_domain *gpd;

	/* Lock order: gpd_list_lock first, then of_genpd_mutex. */
	mutex_lock(&gpd_list_lock);
	mutex_lock(&of_genpd_mutex);
	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
		if (cp->node == np) {
			/*
			 * For each PM domain associated with the
			 * provider, set the 'has_provider' to false
			 * so that the PM domain can be safely removed.
			 */
			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
				if (gpd->provider == &np->fwnode) {
					gpd->has_provider = false;

					if (gpd->opp_table) {
						dev_pm_opp_put_opp_table(gpd->opp_table);
						dev_pm_opp_of_remove_table(&gpd->dev);
					}
				}
			}

			/* Drop the node reference taken in genpd_add_provider(). */
			fwnode_dev_initialized(&cp->node->fwnode, false);
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_genpd_mutex);
	mutex_unlock(&gpd_list_lock);
}
EXPORT_SYMBOL_GPL(of_genpd_del_provider);
2751
/**
 * genpd_get_from_provider() - Look-up PM domain
 * @genpdspec: OF phandle args to use for look-up
 *
 * Looks for a PM domain provider under the node specified by @genpdspec and if
 * found, uses xlate function of the provider to map phandle args to a PM
 * domain.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
 * on failure.
 */
static struct generic_pm_domain *genpd_get_from_provider(
					const struct of_phandle_args *genpdspec)
{
	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
	struct of_genpd_provider *provider;

	if (!genpdspec)
		return ERR_PTR(-EINVAL);

	mutex_lock(&of_genpd_mutex);

	/* Check if we have such a provider in our array */
	list_for_each_entry(provider, &of_genpd_providers, link) {
		if (provider->node == genpdspec->np)
			genpd = provider->xlate(genpdspec, provider->data);
		/*
		 * genpd starts out as ERR_PTR(-ENOENT), so this only breaks
		 * once an xlate callback has produced a valid domain; a
		 * failing xlate leaves its error for the caller unless a
		 * later matching provider succeeds.
		 */
		if (!IS_ERR(genpd))
			break;
	}

	mutex_unlock(&of_genpd_mutex);

	return genpd;
}
2786
2787 /**
2788 * of_genpd_add_device() - Add a device to an I/O PM domain
2789 * @genpdspec: OF phandle args to use for look-up PM domain
2790 * @dev: Device to be added.
2791 *
2792 * Looks-up an I/O PM domain based upon phandle args provided and adds
2793 * the device to the PM domain. Returns a negative error code on failure.
2794 */
of_genpd_add_device(const struct of_phandle_args * genpdspec,struct device * dev)2795 int of_genpd_add_device(const struct of_phandle_args *genpdspec, struct device *dev)
2796 {
2797 struct generic_pm_domain *genpd;
2798 int ret;
2799
2800 if (!dev)
2801 return -EINVAL;
2802
2803 mutex_lock(&gpd_list_lock);
2804
2805 genpd = genpd_get_from_provider(genpdspec);
2806 if (IS_ERR(genpd)) {
2807 ret = PTR_ERR(genpd);
2808 goto out;
2809 }
2810
2811 ret = genpd_add_device(genpd, dev, dev);
2812
2813 out:
2814 mutex_unlock(&gpd_list_lock);
2815
2816 return ret;
2817 }
2818 EXPORT_SYMBOL_GPL(of_genpd_add_device);
2819
2820 /**
2821 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2822 * @parent_spec: OF phandle args to use for parent PM domain look-up
2823 * @subdomain_spec: OF phandle args to use for subdomain look-up
2824 *
2825 * Looks-up a parent PM domain and subdomain based upon phandle args
2826 * provided and adds the subdomain to the parent PM domain. Returns a
2827 * negative error code on failure.
2828 */
of_genpd_add_subdomain(const struct of_phandle_args * parent_spec,const struct of_phandle_args * subdomain_spec)2829 int of_genpd_add_subdomain(const struct of_phandle_args *parent_spec,
2830 const struct of_phandle_args *subdomain_spec)
2831 {
2832 struct generic_pm_domain *parent, *subdomain;
2833 int ret;
2834
2835 mutex_lock(&gpd_list_lock);
2836
2837 parent = genpd_get_from_provider(parent_spec);
2838 if (IS_ERR(parent)) {
2839 ret = PTR_ERR(parent);
2840 goto out;
2841 }
2842
2843 subdomain = genpd_get_from_provider(subdomain_spec);
2844 if (IS_ERR(subdomain)) {
2845 ret = PTR_ERR(subdomain);
2846 goto out;
2847 }
2848
2849 ret = genpd_add_subdomain(parent, subdomain);
2850
2851 out:
2852 mutex_unlock(&gpd_list_lock);
2853
2854 return ret == -ENOENT ? -EPROBE_DEFER : ret;
2855 }
2856 EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
2857
2858 /**
2859 * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
2860 * @parent_spec: OF phandle args to use for parent PM domain look-up
2861 * @subdomain_spec: OF phandle args to use for subdomain look-up
2862 *
2863 * Looks-up a parent PM domain and subdomain based upon phandle args
2864 * provided and removes the subdomain from the parent PM domain. Returns a
2865 * negative error code on failure.
2866 */
of_genpd_remove_subdomain(const struct of_phandle_args * parent_spec,const struct of_phandle_args * subdomain_spec)2867 int of_genpd_remove_subdomain(const struct of_phandle_args *parent_spec,
2868 const struct of_phandle_args *subdomain_spec)
2869 {
2870 struct generic_pm_domain *parent, *subdomain;
2871 int ret;
2872
2873 mutex_lock(&gpd_list_lock);
2874
2875 parent = genpd_get_from_provider(parent_spec);
2876 if (IS_ERR(parent)) {
2877 ret = PTR_ERR(parent);
2878 goto out;
2879 }
2880
2881 subdomain = genpd_get_from_provider(subdomain_spec);
2882 if (IS_ERR(subdomain)) {
2883 ret = PTR_ERR(subdomain);
2884 goto out;
2885 }
2886
2887 ret = pm_genpd_remove_subdomain(parent, subdomain);
2888
2889 out:
2890 mutex_unlock(&gpd_list_lock);
2891
2892 return ret;
2893 }
2894 EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
2895
2896 /**
2897 * of_genpd_remove_last - Remove the last PM domain registered for a provider
2898 * @np: Pointer to device node associated with provider
2899 *
2900 * Find the last PM domain that was added by a particular provider and
2901 * remove this PM domain from the list of PM domains. The provider is
2902 * identified by the 'provider' device structure that is passed. The PM
2903 * domain will only be removed, if the provider associated with domain
2904 * has been removed.
2905 *
2906 * Returns a valid pointer to struct generic_pm_domain on success or
2907 * ERR_PTR() on failure.
2908 */
of_genpd_remove_last(struct device_node * np)2909 struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
2910 {
2911 struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
2912 int ret;
2913
2914 if (IS_ERR_OR_NULL(np))
2915 return ERR_PTR(-EINVAL);
2916
2917 mutex_lock(&gpd_list_lock);
2918 list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
2919 if (gpd->provider == &np->fwnode) {
2920 ret = genpd_remove(gpd);
2921 genpd = ret ? ERR_PTR(ret) : gpd;
2922 break;
2923 }
2924 }
2925 mutex_unlock(&gpd_list_lock);
2926
2927 return genpd;
2928 }
2929 EXPORT_SYMBOL_GPL(of_genpd_remove_last);
2930
/*
 * Release callback for virtual devices created by genpd (on genpd_bus_type):
 * drop the OF node reference and free the device itself.
 */
static void genpd_release_dev(struct device *dev)
{
	of_node_put(dev->of_node);
	kfree(dev);
}
2936
/* Bus for virtual devices created by genpd; used by genpd_dev_pm_detach()
 * to recognize (and unregister) devices that genpd itself created. */
static const struct bus_type genpd_bus_type = {
	.name		= "genpd",
};
2940
/**
 * genpd_dev_pm_detach - Detach a device from its PM domain.
 * @dev: Device to detach.
 * @power_off: Currently not used
 *
 * Try to locate a corresponding generic PM domain, which the device was
 * attached to previously. If such is found, the device is detached from it.
 */
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret = 0;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	dev_dbg(dev, "removing from PM domain %s\n", pd->name);

	/* Drop the default performance state */
	if (dev_gpd_data(dev)->default_pstate) {
		dev_pm_genpd_set_performance_state(dev, 0);
		dev_gpd_data(dev)->default_pstate = 0;
	}

	/*
	 * Removal returns -EAGAIN while the domain has a non-zero
	 * prepared_count (see genpd_remove_device()); retry with an
	 * exponential busy-wait backoff (1, 2, 4, ... ms) until the delay
	 * would reach GENPD_RETRY_MAX_MS.
	 */
	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = genpd_remove_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to remove from PM domain %s: %d",
			pd->name, ret);
		return;
	}

	/* Check if PM domain can be powered off after removing this device. */
	genpd_queue_power_off_work(pd);

	/* Unregister the device if it was created by genpd. */
	if (dev->bus == &genpd_bus_type)
		device_unregister(dev);
}
2989
/*
 * PM-domain sync callback: queue a power-off check for the domain the
 * device belongs to. No-op when the device is not attached to a genpd.
 */
static void genpd_dev_pm_sync(struct device *dev)
{
	struct generic_pm_domain *pd = dev_to_genpd(dev);

	if (IS_ERR(pd))
		return;

	genpd_queue_power_off_work(pd);
}
3000
/*
 * Register @dev (the genpd's virtual device) as the required device in
 * @base_dev's OPP configuration, so required-opps in base_dev's table can
 * be resolved against this genpd.
 *
 * The positive return of dev_pm_opp_set_config() is stored as opp_token —
 * presumably for clearing the config later; the cleanup site is not
 * visible in this chunk (TODO confirm against genpd_free_dev_data()).
 */
static int genpd_set_required_opp_dev(struct device *dev,
				      struct device *base_dev)
{
	struct dev_pm_opp_config config = {
		.required_dev = dev,
	};
	int ret;

	/* Limit support to non-providers for now. */
	if (of_property_present(base_dev->of_node, "#power-domain-cells"))
		return 0;

	/* Nothing to do when base_dev's OPPs declare no required-opps. */
	if (!dev_pm_opp_of_has_required_opp(base_dev))
		return 0;

	ret = dev_pm_opp_set_config(base_dev, &config);
	if (ret < 0)
		return ret;

	dev_gpd_data(dev)->opp_token = ret;
	return 0;
}
3023
genpd_set_required_opp(struct device * dev,unsigned int index)3024 static int genpd_set_required_opp(struct device *dev, unsigned int index)
3025 {
3026 int ret, pstate;
3027
3028 /* Set the default performance state */
3029 pstate = of_get_required_opp_performance_state(dev->of_node, index);
3030 if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) {
3031 ret = pstate;
3032 goto err;
3033 } else if (pstate > 0) {
3034 ret = dev_pm_genpd_set_performance_state(dev, pstate);
3035 if (ret)
3036 goto err;
3037 dev_gpd_data(dev)->default_pstate = pstate;
3038 }
3039
3040 return 0;
3041 err:
3042 dev_err(dev, "failed to set required performance state for power-domain %s: %d\n",
3043 dev_to_genpd(dev)->name, ret);
3044 return ret;
3045 }
3046
/*
 * __genpd_dev_pm_attach - Look up and attach a PM domain for a device.
 * @dev: The device to attach (may be a virtual genpd-bus device).
 * @base_dev: The original device carrying the "power-domains" DT property.
 * @index: Index into @base_dev's "power-domains" phandle list.
 * @num_domains: Total number of PM domains referenced by @base_dev.
 * @power_on: Whether to power on the domain before returning.
 *
 * Returns 1 on successful attach, -EPROBE_DEFER when the provider is not
 * ready or the domain could not be powered on, or another negative error
 * code. On any failure after genpd_add_device() the device is removed from
 * the domain again before returning.
 */
static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
				 unsigned int index, unsigned int num_domains,
				 bool power_on)
{
	struct of_phandle_args pd_args;
	struct generic_pm_domain *pd;
	int ret;

	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
				"#power-domain-cells", index, &pd_args);
	if (ret < 0)
		return ret;

	/* Provider lookup and device addition are serialized by gpd_list_lock. */
	mutex_lock(&gpd_list_lock);
	pd = genpd_get_from_provider(&pd_args);
	of_node_put(pd_args.np);
	if (IS_ERR(pd)) {
		mutex_unlock(&gpd_list_lock);
		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
			__func__, PTR_ERR(pd));
		/* The provider may register later: possibly defer the probe. */
		return driver_deferred_probe_check_state(base_dev);
	}

	dev_dbg(dev, "adding to PM domain %s\n", pd->name);

	ret = genpd_add_device(pd, dev, base_dev);
	mutex_unlock(&gpd_list_lock);

	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name);

	/* Hook up detach/sync so the genpd core is notified by the driver core. */
	dev->pm_domain->detach = genpd_dev_pm_detach;
	dev->pm_domain->sync = genpd_dev_pm_sync;

	/*
	 * For a single PM domain the index of the required OPP must be zero, so
	 * let's try to assign a required dev in that case. In the multiple PM
	 * domains case, we need platform code to specify the index.
	 */
	if (num_domains == 1) {
		ret = genpd_set_required_opp_dev(dev, base_dev);
		if (ret)
			goto err;
	}

	ret = genpd_set_required_opp(dev, index);
	if (ret)
		goto err;

	if (power_on) {
		genpd_lock(pd);
		ret = genpd_power_on(pd, 0);
		genpd_unlock(pd);
	}

	/* ret can only be non-zero here if genpd_power_on() above failed. */
	if (ret) {
		/* Drop the default performance state */
		if (dev_gpd_data(dev)->default_pstate) {
			dev_pm_genpd_set_performance_state(dev, 0);
			dev_gpd_data(dev)->default_pstate = 0;
		}

		genpd_remove_device(pd, dev);
		return -EPROBE_DEFER;
	}

	return 1;

err:
	genpd_remove_device(pd, dev);
	return ret;
}
3119
3120 /**
3121 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
3122 * @dev: Device to attach.
3123 *
3124 * Parse device's OF node to find a PM domain specifier. If such is found,
3125 * attaches the device to retrieved pm_domain ops.
3126 *
3127 * Returns 1 on successfully attached PM domain, 0 when the device don't need a
3128 * PM domain or when multiple power-domains exists for it, else a negative error
3129 * code. Note that if a power-domain exists for the device, but it cannot be
3130 * found or turned on, then return -EPROBE_DEFER to ensure that the device is
3131 * not probed and to re-try again later.
3132 */
genpd_dev_pm_attach(struct device * dev)3133 int genpd_dev_pm_attach(struct device *dev)
3134 {
3135 if (!dev->of_node)
3136 return 0;
3137
3138 /*
3139 * Devices with multiple PM domains must be attached separately, as we
3140 * can only attach one PM domain per device.
3141 */
3142 if (of_count_phandle_with_args(dev->of_node, "power-domains",
3143 "#power-domain-cells") != 1)
3144 return 0;
3145
3146 return __genpd_dev_pm_attach(dev, dev, 0, 1, true);
3147 }
3148 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
3149
/**
 * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
 * @dev: The device used to lookup the PM domain.
 * @index: The index of the PM domain.
 *
 * Parse device's OF node to find a PM domain specifier at the provided @index.
 * If such is found, creates a virtual device and attaches it to the retrieved
 * pm_domain ops. To deal with detaching of the virtual device, the ->detach()
 * callback in the struct dev_pm_domain are assigned to genpd_dev_pm_detach().
 *
 * Returns the created virtual device if successfully attached PM domain, NULL
 * when the device don't need a PM domain, else an ERR_PTR() in case of
 * failures. If a power-domain exists for the device, but cannot be found or
 * turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device
 * is not probed and to re-try again later.
 */
struct device *genpd_dev_pm_attach_by_id(struct device *dev,
					 unsigned int index)
{
	struct device *virt_dev;
	int num_domains;
	int ret;

	if (!dev->of_node)
		return NULL;

	/* Verify that the index is within a valid range. */
	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
						 "#power-domain-cells");
	if (num_domains < 0 || index >= num_domains)
		return NULL;

	/* Allocate and register device on the genpd bus. */
	virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
	if (!virt_dev)
		return ERR_PTR(-ENOMEM);

	dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
	virt_dev->bus = &genpd_bus_type;
	/* ->release frees virt_dev when its last reference is dropped. */
	virt_dev->release = genpd_release_dev;
	virt_dev->of_node = of_node_get(dev->of_node);

	ret = device_register(virt_dev);
	if (ret) {
		/* device_register() failed: drop the ref; release() frees it. */
		put_device(virt_dev);
		return ERR_PTR(ret);
	}

	/* Try to attach the device to the PM domain at the specified index. */
	ret = __genpd_dev_pm_attach(virt_dev, dev, index, num_domains, false);
	if (ret < 1) {
		/* ret == 0 means no PM domain was needed: map that to NULL. */
		device_unregister(virt_dev);
		return ret ? ERR_PTR(ret) : NULL;
	}

	/* Attached with power_on == false: let the domain power off for now. */
	pm_runtime_enable(virt_dev);
	genpd_queue_power_off_work(dev_to_genpd(virt_dev));

	return virt_dev;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
3211
3212 /**
3213 * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
3214 * @dev: The device used to lookup the PM domain.
3215 * @name: The name of the PM domain.
3216 *
3217 * Parse device's OF node to find a PM domain specifier using the
3218 * power-domain-names DT property. For further description see
3219 * genpd_dev_pm_attach_by_id().
3220 */
genpd_dev_pm_attach_by_name(struct device * dev,const char * name)3221 struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
3222 {
3223 int index;
3224
3225 if (!dev->of_node)
3226 return NULL;
3227
3228 index = of_property_match_string(dev->of_node, "power-domain-names",
3229 name);
3230 if (index < 0)
3231 return NULL;
3232
3233 return genpd_dev_pm_attach_by_id(dev, index);
3234 }
3235
/* DT compatible used to recognize domain idle state nodes. */
static const struct of_device_id idle_state_match[] = {
	{ .compatible = "domain-idle-state", },
	{ }
};
3240
genpd_parse_state(struct genpd_power_state * genpd_state,struct device_node * state_node)3241 static int genpd_parse_state(struct genpd_power_state *genpd_state,
3242 struct device_node *state_node)
3243 {
3244 int err;
3245 u32 residency;
3246 u32 entry_latency, exit_latency;
3247
3248 err = of_property_read_u32(state_node, "entry-latency-us",
3249 &entry_latency);
3250 if (err) {
3251 pr_debug(" * %pOF missing entry-latency-us property\n",
3252 state_node);
3253 return -EINVAL;
3254 }
3255
3256 err = of_property_read_u32(state_node, "exit-latency-us",
3257 &exit_latency);
3258 if (err) {
3259 pr_debug(" * %pOF missing exit-latency-us property\n",
3260 state_node);
3261 return -EINVAL;
3262 }
3263
3264 err = of_property_read_u32(state_node, "min-residency-us", &residency);
3265 if (!err)
3266 genpd_state->residency_ns = 1000LL * residency;
3267
3268 of_property_read_string(state_node, "idle-state-name", &genpd_state->name);
3269
3270 genpd_state->power_on_latency_ns = 1000LL * exit_latency;
3271 genpd_state->power_off_latency_ns = 1000LL * entry_latency;
3272 genpd_state->fwnode = &state_node->fwnode;
3273
3274 return 0;
3275 }
3276
/*
 * genpd_iterate_idle_states - Walk the "domain-idle-states" phandle list.
 * @dn: The genpd device node.
 * @states: Array to fill with parsed states, or NULL to only count them.
 *
 * Returns the number of compatible, available idle states found (which may
 * be 0), or a negative error code. A missing property counts as 0 states.
 */
static int genpd_iterate_idle_states(struct device_node *dn,
				     struct genpd_power_state *states)
{
	int ret;
	struct of_phandle_iterator it;
	struct device_node *np;
	int i = 0;

	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
	if (ret <= 0)
		return ret == -ENOENT ? 0 : ret;

	/* Loop over the phandles until all the requested entry is found */
	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
		np = it.node;
		/* Skip nodes that are not compatible idle states. */
		if (!of_match_node(idle_state_match, np))
			continue;

		if (!of_device_is_available(np))
			continue;

		if (states) {
			ret = genpd_parse_state(&states[i], np);
			if (ret) {
				pr_err("Parsing idle state node %pOF failed with err %d\n",
				       np, ret);
				/*
				 * Breaking out of the iterator early: drop the
				 * node reference the iterator handed to us.
				 */
				of_node_put(np);
				return ret;
			}
		}
		i++;
	}

	return i;
}
3312
3313 /**
3314 * of_genpd_parse_idle_states: Return array of idle states for the genpd.
3315 *
3316 * @dn: The genpd device node
3317 * @states: The pointer to which the state array will be saved.
3318 * @n: The count of elements in the array returned from this function.
3319 *
3320 * Returns the device states parsed from the OF node. The memory for the states
3321 * is allocated by this function and is the responsibility of the caller to
3322 * free the memory after use. If any or zero compatible domain idle states is
3323 * found it returns 0 and in case of errors, a negative error code is returned.
3324 */
of_genpd_parse_idle_states(struct device_node * dn,struct genpd_power_state ** states,int * n)3325 int of_genpd_parse_idle_states(struct device_node *dn,
3326 struct genpd_power_state **states, int *n)
3327 {
3328 struct genpd_power_state *st;
3329 int ret;
3330
3331 ret = genpd_iterate_idle_states(dn, NULL);
3332 if (ret < 0)
3333 return ret;
3334
3335 if (!ret) {
3336 *states = NULL;
3337 *n = 0;
3338 return 0;
3339 }
3340
3341 st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
3342 if (!st)
3343 return -ENOMEM;
3344
3345 ret = genpd_iterate_idle_states(dn, st);
3346 if (ret <= 0) {
3347 kfree(st);
3348 return ret < 0 ? ret : -EINVAL;
3349 }
3350
3351 *states = st;
3352 *n = ret;
3353
3354 return 0;
3355 }
3356 EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
3357
/* Register the genpd bus early so virtual genpd devices can live on it. */
static int __init genpd_bus_init(void)
{
	return bus_register(&genpd_bus_type);
}
core_initcall(genpd_bus_init);
3363
3364 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
3365
3366
3367 /*** debugfs support ***/
3368
3369 #ifdef CONFIG_DEBUG_FS
3370 /*
3371 * TODO: This function is a slightly modified version of rtpm_status_show
3372 * from sysfs.c, so generalize it.
3373 */
rtpm_status_str(struct seq_file * s,struct device * dev)3374 static void rtpm_status_str(struct seq_file *s, struct device *dev)
3375 {
3376 static const char * const status_lookup[] = {
3377 [RPM_ACTIVE] = "active",
3378 [RPM_RESUMING] = "resuming",
3379 [RPM_SUSPENDED] = "suspended",
3380 [RPM_SUSPENDING] = "suspending"
3381 };
3382 const char *p = "";
3383
3384 if (dev->power.runtime_error)
3385 p = "error";
3386 else if (dev->power.disable_depth)
3387 p = "unsupported";
3388 else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
3389 p = status_lookup[dev->power.runtime_status];
3390 else
3391 WARN_ON(1);
3392
3393 seq_printf(s, "%-26s ", p);
3394 }
3395
perf_status_str(struct seq_file * s,struct device * dev)3396 static void perf_status_str(struct seq_file *s, struct device *dev)
3397 {
3398 struct generic_pm_domain_data *gpd_data;
3399
3400 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
3401
3402 seq_printf(s, "%-10u ", gpd_data->performance_state);
3403 }
3404
mode_status_str(struct seq_file * s,struct device * dev)3405 static void mode_status_str(struct seq_file *s, struct device *dev)
3406 {
3407 struct generic_pm_domain_data *gpd_data;
3408
3409 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
3410
3411 seq_printf(s, "%2s", gpd_data->hw_mode ? "HW" : "SW");
3412 }
3413
/*
 * genpd_summary_one - Print one domain's summary line(s) to the seq_file.
 * @s: Output seq_file.
 * @genpd: The domain to describe.
 *
 * Prints the domain's name, state (with idle-state index when off),
 * performance state, child domains and attached devices. Returns 0, or
 * -ERESTARTSYS if the domain lock could not be taken.
 */
static int genpd_summary_one(struct seq_file *s,
			     struct generic_pm_domain *genpd)
{
	static const char * const status_lookup[] = {
		[GENPD_STATE_ON] = "on",
		[GENPD_STATE_OFF] = "off"
	};
	struct pm_domain_data *pm_data;
	struct gpd_link *link;
	char state[16];
	int ret;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;
	/* When off, append the idle-state index, e.g. "off-1". */
	if (!genpd_status_on(genpd))
		snprintf(state, sizeof(state), "%s-%u",
			 status_lookup[genpd->status], genpd->state_idx);
	else
		snprintf(state, sizeof(state), "%s",
			 status_lookup[genpd->status]);
	seq_printf(s, "%-30s  %-30s %u", dev_name(&genpd->dev), state, genpd->performance_state);

	/*
	 * Modifications on the list require holding locks on both
	 * parent and child, so we are safe.
	 * Also the device name is immutable.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		if (list_is_first(&link->parent_node, &genpd->parent_links))
			seq_printf(s, "\n%48s", " ");
		seq_printf(s, "%s", link->child->name);
		if (!list_is_last(&link->parent_node, &genpd->parent_links))
			seq_puts(s, ", ");
	}

	/* One line per attached device with its runtime PM status. */
	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		seq_printf(s, "\n    %-50s  ", dev_name(pm_data->dev));
		rtpm_status_str(s, pm_data->dev);
		perf_status_str(s, pm_data->dev);
		mode_status_str(s, pm_data->dev);
	}

	seq_puts(s, "\n");
exit:
	genpd_unlock(genpd);

	return 0;
}
3466
summary_show(struct seq_file * s,void * data)3467 static int summary_show(struct seq_file *s, void *data)
3468 {
3469 struct generic_pm_domain *genpd;
3470 int ret = 0;
3471
3472 seq_puts(s, "domain status children performance\n");
3473 seq_puts(s, " /device runtime status managed by\n");
3474 seq_puts(s, "------------------------------------------------------------------------------\n");
3475
3476 ret = mutex_lock_interruptible(&gpd_list_lock);
3477 if (ret)
3478 return -ERESTARTSYS;
3479
3480 list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
3481 ret = genpd_summary_one(s, genpd);
3482 if (ret)
3483 break;
3484 }
3485 mutex_unlock(&gpd_list_lock);
3486
3487 return ret;
3488 }
3489
status_show(struct seq_file * s,void * data)3490 static int status_show(struct seq_file *s, void *data)
3491 {
3492 static const char * const status_lookup[] = {
3493 [GENPD_STATE_ON] = "on",
3494 [GENPD_STATE_OFF] = "off"
3495 };
3496
3497 struct generic_pm_domain *genpd = s->private;
3498 int ret = 0;
3499
3500 ret = genpd_lock_interruptible(genpd);
3501 if (ret)
3502 return -ERESTARTSYS;
3503
3504 if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
3505 goto exit;
3506
3507 if (genpd->status == GENPD_STATE_OFF)
3508 seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
3509 genpd->state_idx);
3510 else
3511 seq_printf(s, "%s\n", status_lookup[genpd->status]);
3512 exit:
3513 genpd_unlock(genpd);
3514 return ret;
3515 }
3516
sub_domains_show(struct seq_file * s,void * data)3517 static int sub_domains_show(struct seq_file *s, void *data)
3518 {
3519 struct generic_pm_domain *genpd = s->private;
3520 struct gpd_link *link;
3521 int ret = 0;
3522
3523 ret = genpd_lock_interruptible(genpd);
3524 if (ret)
3525 return -ERESTARTSYS;
3526
3527 list_for_each_entry(link, &genpd->parent_links, parent_node)
3528 seq_printf(s, "%s\n", link->child->name);
3529
3530 genpd_unlock(genpd);
3531 return ret;
3532 }
3533
idle_states_show(struct seq_file * s,void * data)3534 static int idle_states_show(struct seq_file *s, void *data)
3535 {
3536 struct generic_pm_domain *genpd = s->private;
3537 u64 now, delta, idle_time = 0;
3538 unsigned int i;
3539 int ret = 0;
3540
3541 ret = genpd_lock_interruptible(genpd);
3542 if (ret)
3543 return -ERESTARTSYS;
3544
3545 seq_puts(s, "State Time Spent(ms) Usage Rejected Above Below\n");
3546
3547 for (i = 0; i < genpd->state_count; i++) {
3548 struct genpd_power_state *state = &genpd->states[i];
3549 char state_name[15];
3550
3551 idle_time += state->idle_time;
3552
3553 if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3554 now = ktime_get_mono_fast_ns();
3555 if (now > genpd->accounting_time) {
3556 delta = now - genpd->accounting_time;
3557 idle_time += delta;
3558 }
3559 }
3560
3561 if (!state->name)
3562 snprintf(state_name, ARRAY_SIZE(state_name), "S%-13d", i);
3563
3564 do_div(idle_time, NSEC_PER_MSEC);
3565 seq_printf(s, "%-14s %-14llu %-10llu %-10llu %-10llu %llu\n",
3566 state->name ?: state_name, idle_time,
3567 state->usage, state->rejected, state->above,
3568 state->below);
3569 }
3570
3571 genpd_unlock(genpd);
3572 return ret;
3573 }
3574
active_time_show(struct seq_file * s,void * data)3575 static int active_time_show(struct seq_file *s, void *data)
3576 {
3577 struct generic_pm_domain *genpd = s->private;
3578 u64 now, on_time, delta = 0;
3579 int ret = 0;
3580
3581 ret = genpd_lock_interruptible(genpd);
3582 if (ret)
3583 return -ERESTARTSYS;
3584
3585 if (genpd->status == GENPD_STATE_ON) {
3586 now = ktime_get_mono_fast_ns();
3587 if (now > genpd->accounting_time)
3588 delta = now - genpd->accounting_time;
3589 }
3590
3591 on_time = genpd->on_time + delta;
3592 do_div(on_time, NSEC_PER_MSEC);
3593 seq_printf(s, "%llu ms\n", on_time);
3594
3595 genpd_unlock(genpd);
3596 return ret;
3597 }
3598
total_idle_time_show(struct seq_file * s,void * data)3599 static int total_idle_time_show(struct seq_file *s, void *data)
3600 {
3601 struct generic_pm_domain *genpd = s->private;
3602 u64 now, delta, total = 0;
3603 unsigned int i;
3604 int ret = 0;
3605
3606 ret = genpd_lock_interruptible(genpd);
3607 if (ret)
3608 return -ERESTARTSYS;
3609
3610 for (i = 0; i < genpd->state_count; i++) {
3611 total += genpd->states[i].idle_time;
3612
3613 if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3614 now = ktime_get_mono_fast_ns();
3615 if (now > genpd->accounting_time) {
3616 delta = now - genpd->accounting_time;
3617 total += delta;
3618 }
3619 }
3620 }
3621
3622 do_div(total, NSEC_PER_MSEC);
3623 seq_printf(s, "%llu ms\n", total);
3624
3625 genpd_unlock(genpd);
3626 return ret;
3627 }
3628
3629
devices_show(struct seq_file * s,void * data)3630 static int devices_show(struct seq_file *s, void *data)
3631 {
3632 struct generic_pm_domain *genpd = s->private;
3633 struct pm_domain_data *pm_data;
3634 int ret = 0;
3635
3636 ret = genpd_lock_interruptible(genpd);
3637 if (ret)
3638 return -ERESTARTSYS;
3639
3640 list_for_each_entry(pm_data, &genpd->dev_list, list_node)
3641 seq_printf(s, "%s\n", dev_name(pm_data->dev));
3642
3643 genpd_unlock(genpd);
3644 return ret;
3645 }
3646
perf_state_show(struct seq_file * s,void * data)3647 static int perf_state_show(struct seq_file *s, void *data)
3648 {
3649 struct generic_pm_domain *genpd = s->private;
3650
3651 if (genpd_lock_interruptible(genpd))
3652 return -ERESTARTSYS;
3653
3654 seq_printf(s, "%u\n", genpd->performance_state);
3655
3656 genpd_unlock(genpd);
3657 return 0;
3658 }
3659
/* Generate the <name>_fops file operations for the show functions above. */
DEFINE_SHOW_ATTRIBUTE(summary);
DEFINE_SHOW_ATTRIBUTE(status);
DEFINE_SHOW_ATTRIBUTE(sub_domains);
DEFINE_SHOW_ATTRIBUTE(idle_states);
DEFINE_SHOW_ATTRIBUTE(active_time);
DEFINE_SHOW_ATTRIBUTE(total_idle_time);
DEFINE_SHOW_ATTRIBUTE(devices);
DEFINE_SHOW_ATTRIBUTE(perf_state);
3668
genpd_debug_add(struct generic_pm_domain * genpd)3669 static void genpd_debug_add(struct generic_pm_domain *genpd)
3670 {
3671 struct dentry *d;
3672
3673 if (!genpd_debugfs_dir)
3674 return;
3675
3676 d = debugfs_create_dir(dev_name(&genpd->dev), genpd_debugfs_dir);
3677
3678 debugfs_create_file("current_state", 0444,
3679 d, genpd, &status_fops);
3680 debugfs_create_file("sub_domains", 0444,
3681 d, genpd, &sub_domains_fops);
3682 debugfs_create_file("idle_states", 0444,
3683 d, genpd, &idle_states_fops);
3684 debugfs_create_file("active_time", 0444,
3685 d, genpd, &active_time_fops);
3686 debugfs_create_file("total_idle_time", 0444,
3687 d, genpd, &total_idle_time_fops);
3688 debugfs_create_file("devices", 0444,
3689 d, genpd, &devices_fops);
3690 if (genpd->set_performance_state)
3691 debugfs_create_file("perf_state", 0444,
3692 d, genpd, &perf_state_fops);
3693 }
3694
/* Create the pm_genpd debugfs tree and populate already-registered domains. */
static int __init genpd_debug_init(void)
{
	struct generic_pm_domain *genpd;

	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);

	debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
			    NULL, &summary_fops);

	/* Add entries for domains created before this late initcall ran. */
	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_debug_add(genpd);

	return 0;
}
late_initcall(genpd_debug_init);
3710
/* Remove the whole pm_genpd debugfs tree, including per-domain entries. */
static void __exit genpd_debug_exit(void)
{
	debugfs_remove_recursive(genpd_debugfs_dir);
}
__exitcall(genpd_debug_exit);
3716 #endif /* CONFIG_DEBUG_FS */
3717