// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 */
#define pr_fmt(fmt) "PM: " fmt

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

#define GENPD_RETRY_MAX_MS	250			/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	}							\
	__ret;							\
})

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

struct genpd_lock_ops {
	void (*lock)(struct generic_pm_domain *genpd);
	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
	int (*lock_interruptible)(struct generic_pm_domain *genpd);
	void (*unlock)(struct generic_pm_domain *genpd);
};

static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
	mutex_lock(&genpd->mlock);
}

static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
					int depth)
{
	mutex_lock_nested(&genpd->mlock, depth);
}

static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
	return mutex_lock_interruptible(&genpd->mlock);
}

static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->mlock);
}

static const struct genpd_lock_ops genpd_mtx_ops = {
	.lock = genpd_lock_mtx,
	.lock_nested = genpd_lock_nested_mtx,
	.lock_interruptible = genpd_lock_interruptible_mtx,
	.unlock = genpd_unlock_mtx,
};

static void genpd_lock_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
}

static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
					int depth)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
	genpd->lock_flags = flags;
}

static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
	return 0;
}

static void genpd_unlock_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->slock)
{
	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}

static const struct genpd_lock_ops genpd_spin_ops = {
	.lock = genpd_lock_spin,
	.lock_nested = genpd_lock_nested_spin,
	.lock_interruptible = genpd_lock_interruptible_spin,
	.unlock = genpd_unlock_spin,
};

#define genpd_lock(p)			p->lock_ops->lock(p)
#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
#define genpd_unlock(p)			p->lock_ops->unlock(p)

#define genpd_status_on(genpd)		(genpd->status == GENPD_STATE_ON)
#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
#define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
#define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
#define genpd_is_opp_table_fw(genpd)	(genpd->flags & GENPD_FLAG_OPP_TABLE_FW)

static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
		const struct generic_pm_domain *genpd)
{
	bool ret;

	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);

	/*
	 * Warn once if an IRQ safe device is attached to a domain whose
	 * callbacks are allowed to sleep. This indicates a suboptimal
	 * configuration for PM, but it doesn't matter for an always on domain.
	 */
	if (genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd))
		return ret;

	if (ret)
		dev_warn_once(dev, "PM domain %s will not be powered off\n",
				genpd->name);

	return ret;
}

static int genpd_runtime_suspend(struct device *dev);

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	/* A genpd always has its ->runtime_suspend() callback assigned. */
	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
		return pd_to_genpd(dev->pm_domain);

	return NULL;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

struct device *dev_to_genpd_dev(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);

	if (IS_ERR(genpd))
		return ERR_CAST(genpd);

	return &genpd->dev;
}

static int genpd_stop_dev(const struct generic_pm_domain *genpd,
				struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(const struct generic_pm_domain *genpd,
				struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

#ifdef CONFIG_DEBUG_FS
static struct dentry *genpd_debugfs_dir;

static void genpd_debug_add(struct generic_pm_domain *genpd);

static void genpd_debug_remove(struct generic_pm_domain *genpd)
{
	if (!genpd_debugfs_dir)
		return;

	debugfs_lookup_and_remove(genpd->name, genpd_debugfs_dir);
}

static void genpd_update_accounting(struct generic_pm_domain *genpd)
{
	u64 delta, now;

	now = ktime_get_mono_fast_ns();
	if (now <= genpd->accounting_time)
		return;

	delta = now - genpd->accounting_time;

	/*
	 * If genpd->status is active, it means we are just
	 * out of off and so update the idle time and vice
	 * versa.
	 */
	if (genpd->status == GENPD_STATE_ON)
		genpd->states[genpd->state_idx].idle_time += delta;
	else
		genpd->on_time += delta;

	genpd->accounting_time = now;
}
#else
static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif

static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
					   unsigned int state)
{
	struct generic_pm_domain_data *pd_data;
	struct pm_domain_data *pdd;
	struct gpd_link *link;

	/* New requested state is same as Max requested state */
	if (state == genpd->performance_state)
		return state;

	/* New requested state is higher than Max requested state */
	if (state > genpd->performance_state)
		return state;

	/* Traverse all devices within the domain */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		pd_data = to_gpd_data(pdd);

		if (pd_data->performance_state > state)
			state = pd_data->performance_state;
	}

	/*
	 * Traverse all sub-domains within the domain. This can be
	 * done without any additional locking as the link->performance_state
	 * field is protected by the parent genpd->lock, which is already taken.
	 *
	 * Also note that link->performance_state (subdomain's performance state
	 * requirement to parent domain) is different from
	 * link->child->performance_state (current performance state requirement
	 * of the devices/sub-domains of the subdomain) and so can have a
	 * different value.
	 *
	 * Note that we also take vote from powered-off sub-domains into account
	 * as the same is done for devices right now.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		if (link->performance_state > state)
			state = link->performance_state;
	}

	return state;
}

static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
					 struct generic_pm_domain *parent,
					 unsigned int pstate)
{
	if (!parent->set_performance_state)
		return pstate;

	return dev_pm_opp_xlate_performance_state(genpd->opp_table,
						  parent->opp_table,
						  pstate);
}

static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state, int depth);

static void _genpd_rollback_parent_state(struct gpd_link *link, int depth)
{
	struct generic_pm_domain *parent = link->parent;
	int parent_state;

	genpd_lock_nested(parent, depth + 1);

	parent_state = link->prev_performance_state;
	link->performance_state = parent_state;

	parent_state = _genpd_reeval_performance_state(parent, parent_state);
	if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
		pr_err("%s: Failed to roll back to %d performance state\n",
		       parent->name, parent_state);
	}

	genpd_unlock(parent);
}

static int _genpd_set_parent_state(struct generic_pm_domain *genpd,
				   struct gpd_link *link,
				   unsigned int state, int depth)
{
	struct generic_pm_domain *parent = link->parent;
	int parent_state, ret;

	/* Find parent's performance state */
	ret = genpd_xlate_performance_state(genpd, parent, state);
	if (unlikely(ret < 0))
		return ret;

	parent_state = ret;

	genpd_lock_nested(parent, depth + 1);

	link->prev_performance_state = link->performance_state;
	link->performance_state = parent_state;

	parent_state = _genpd_reeval_performance_state(parent, parent_state);
	ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
	if (ret)
		link->performance_state = link->prev_performance_state;

	genpd_unlock(parent);

	return ret;
}

static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state, int depth)
{
	struct gpd_link *link = NULL;
	int ret;

	if (state == genpd->performance_state)
		return 0;

	/* When scaling up, propagate to parents first in normal order */
	if (state > genpd->performance_state) {
		list_for_each_entry(link, &genpd->child_links, child_node) {
			ret = _genpd_set_parent_state(genpd, link, state, depth);
			if (ret)
				goto rollback_parents_up;
		}
	}

	if (genpd->set_performance_state) {
		ret = genpd->set_performance_state(genpd, state);
		if (ret) {
			if (link)
				goto rollback_parents_up;
			return ret;
		}
	}

	/* When scaling down, propagate to parents last in reverse order */
	if (state < genpd->performance_state) {
		list_for_each_entry_reverse(link, &genpd->child_links, child_node) {
			ret = _genpd_set_parent_state(genpd, link, state, depth);
			if (ret)
				goto rollback_parents_down;
		}
	}

	genpd->performance_state = state;
	return 0;

rollback_parents_up:
	list_for_each_entry_continue_reverse(link, &genpd->child_links, child_node)
		_genpd_rollback_parent_state(link, depth);
	return ret;
rollback_parents_down:
	list_for_each_entry_continue(link, &genpd->child_links, child_node)
		_genpd_rollback_parent_state(link, depth);
	return ret;
}

static int genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	unsigned int prev_state;
	int ret;

	prev_state = gpd_data->performance_state;
	if (prev_state == state)
		return 0;

	gpd_data->performance_state = state;
	state = _genpd_reeval_performance_state(genpd, state);

	ret = _genpd_set_performance_state(genpd, state, 0);
	if (ret)
		gpd_data->performance_state = prev_state;

	return ret;
}

static int genpd_drop_performance_state(struct device *dev)
{
	unsigned int prev_state = dev_gpd_data(dev)->performance_state;

	if (!genpd_set_performance_state(dev, 0))
		return prev_state;

	return 0;
}

static void genpd_restore_performance_state(struct device *dev,
					    unsigned int state)
{
	if (state)
		genpd_set_performance_state(dev, state);
}

static int genpd_dev_pm_set_performance_state(struct device *dev,
					      unsigned int state)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);
	int ret = 0;

	genpd_lock(genpd);
	if (pm_runtime_suspended(dev)) {
		dev_gpd_data(dev)->rpm_pstate = state;
	} else {
		ret = genpd_set_performance_state(dev, state);
		if (!ret)
			dev_gpd_data(dev)->rpm_pstate = 0;
	}
	genpd_unlock(genpd);

	return ret;
}

/**
 * dev_pm_genpd_set_performance_state - Set performance state of device's power
 * domain.
 *
 * @dev: Device for which the performance-state needs to be set.
 * @state: Target performance state of the device. This can be set as 0 when the
 *	   device doesn't have any performance state constraints left (and the
 *	   device then no longer participates in determining the target
 *	   performance state of the genpd).
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
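 *
 * Illustrative usage (not part of this file): a consumer driver may vote for
 * a level from its resume path, e.g. dev_pm_genpd_set_performance_state(dev, 4),
 * and drop the vote again with dev_pm_genpd_set_performance_state(dev, 0) once
 * the constraint is no longer needed.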
 */
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data))
		return -EINVAL;

	return genpd_dev_pm_set_performance_state(dev, state);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);

/**
 * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
 *
 * @dev: Device to handle
 * @next: impending interrupt/wakeup for the device
 *
 * Allow devices to inform the PM framework of the next wakeup. It's assumed
 * that the users guarantee that the genpd wouldn't be detached while this
 * routine is getting called. Additionally, it's also assumed that @dev isn't
 * runtime suspended (RPM_SUSPENDED).
 * Although devices are expected to update the next_wakeup after the end of
 * their usecase as well, it is possible the devices themselves may not know
 * about that, so stale @next will be ignored when powering off the domain.
 */
void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	td = to_gpd_data(dev->power.subsys_data->domain_data)->td;
	if (td)
		td->next_wakeup = next;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);

/**
 * dev_pm_genpd_get_next_hrtimer - Return the next_hrtimer for the genpd
 * @dev: A device that is attached to the genpd.
 *
 * This routine should typically be called for a device, at the point of when a
 * GENPD_NOTIFY_PRE_OFF notification has been sent for it.
 *
 * Returns the aggregated value of the genpd's next hrtimer or KTIME_MAX if no
 * valid value has been set.
 */
ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return KTIME_MAX;

	if (genpd->gd)
		return genpd->gd->next_hrtimer;

	return KTIME_MAX;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_get_next_hrtimer);

/**
 * dev_pm_genpd_synced_poweroff - Next power off should be synchronous
 *
 * @dev: A device that is attached to the genpd.
 *
 * Allows a consumer of the genpd to notify the provider that the next power off
 * should be synchronous.
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 */
void dev_pm_genpd_synced_poweroff(struct device *dev)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	genpd_lock(genpd);
	genpd->synced_poweroff = true;
	genpd_unlock(genpd);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_synced_poweroff);

/**
 * dev_pm_genpd_set_hwmode() - Set the HW mode for the device and its PM domain.
 *
 * @dev: Device for which the HW-mode should be changed.
 * @enable: Value to set or unset the HW-mode.
 *
 * Some PM domains can rely on HW signals to control the power for a device. To
 * allow a consumer driver to switch the behaviour for its device at runtime,
 * which may be beneficial from a latency or energy point of view, this function
 * may be called.
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Return: Returns 0 on success and negative error values on failures.
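 *
 * Illustrative usage (not part of this file): a consumer driver whose PM
 * domain implements ->set_hwmode_dev() might call
 * dev_pm_genpd_set_hwmode(dev, true) to hand power control over to the HW,
 * and dev_pm_genpd_set_hwmode(dev, false) to return to SW control.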
 */
int dev_pm_genpd_set_hwmode(struct device *dev, bool enable)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (!genpd->set_hwmode_dev)
		return -EOPNOTSUPP;

	genpd_lock(genpd);

	if (dev_gpd_data(dev)->hw_mode == enable)
		goto out;

	ret = genpd->set_hwmode_dev(genpd, dev, enable);
	if (!ret)
		dev_gpd_data(dev)->hw_mode = enable;

out:
	genpd_unlock(genpd);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_hwmode);

/**
 * dev_pm_genpd_get_hwmode() - Get the HW mode setting for the device.
 *
 * @dev: Device for which the current HW-mode setting should be fetched.
 *
 * This helper function allows consumer drivers to fetch the current HW mode
 * setting of the device.
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Return: Returns the HW mode setting of the device from the SW cached hw_mode.
 */
bool dev_pm_genpd_get_hwmode(struct device *dev)
{
	return dev_gpd_data(dev)->hw_mode;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_get_hwmode);

static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power on. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_ON,
					     GENPD_NOTIFY_OFF, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_on)
		goto out;

	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
	if (!timed) {
		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		goto err;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		goto out;

	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->gd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	genpd->synced_poweroff = false;
	return 0;
err:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return ret;
}

static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power off. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_OFF,
					     GENPD_NOTIFY_ON, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_off)
		goto out;

	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
	if (!timed) {
		ret = genpd->power_off(genpd);
		if (ret)
			goto busy;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret)
		goto busy;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		goto out;

	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->gd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return 0;
busy:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	return ret;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_power_off() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * genpd_power_off - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
 *	RPM status of the related device is in an intermediate state, not yet
 *	turned into RPM_SUSPENDED. This means genpd_power_off() must allow one
 *	device to not be RPM_SUSPENDED, while it tries to power off the PM domain.
 * @depth: nesting count for lockdep.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
			   unsigned int depth)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;
	int ret;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
		return 0;

	/*
	 * Abort power off for the PM domain in the following situations:
	 * (1) The domain is configured as always on.
	 * (2) When the domain has a subdomain being powered on.
	 */
	if (genpd_is_always_on(genpd) ||
	    genpd_is_rpm_always_on(genpd) ||
	    atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	/*
	 * The children must be in their deepest (powered-off) states to allow
	 * the parent to be powered off. Note that there's no need for
	 * additional locking, as powering on a child requires the parent's
	 * lock to be acquired first.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct generic_pm_domain *child = link->child;
		if (child->state_idx < child->state_count - 1)
			return -EBUSY;
	}

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		/*
		 * Do not allow PM domain to be powered off, when an IRQ safe
		 * device is part of a non-IRQ safe domain.
		 */
		if (!pm_runtime_suspended(pdd->dev) ||
		    irq_safe_dev_in_sleep_domain(pdd->dev, genpd))
			not_suspended++;
	}

	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	/* Default to shallowest state. */
	if (!genpd->gov)
		genpd->state_idx = 0;

	/* Don't power off, if a child domain is waiting to power on. */
	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	ret = _genpd_power_off(genpd, true);
	if (ret) {
		genpd->states[genpd->state_idx].rejected++;
		return ret;
	}

	genpd->status = GENPD_STATE_OFF;
	genpd_update_accounting(genpd);
	genpd->states[genpd->state_idx].usage++;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return 0;
}

/**
 * genpd_power_on - Restore power to a given PM domain and its parents.
 * @genpd: PM domain to power up.
 * @depth: nesting count for lockdep.
 *
 * Restore power to @genpd and all of its parents so that it is possible to
 * resume a device belonging to it.
 */
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd_status_on(genpd))
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the parents' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_sd_counter_inc(parent);

		genpd_lock_nested(parent, depth + 1);
		ret = genpd_power_on(parent, depth + 1);
		genpd_unlock(parent);

		if (ret) {
			genpd_sd_counter_dec(parent);
			goto err;
		}
	}

	ret = _genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GENPD_STATE_ON;
	genpd_update_accounting(genpd);

	return 0;

err:
	list_for_each_entry_continue_reverse(link,
					     &genpd->child_links,
					     child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return ret;
}

static int genpd_dev_pm_start(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);

	return genpd_start_dev(genpd, dev);
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd = ERR_PTR(-ENODATA);
		struct pm_domain_data *pdd;
		struct gpd_timing_data *td;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
			dev->power.subsys_data->domain_data : NULL;
		if (pdd) {
			td = to_gpd_data(pdd)->td;
			if (td) {
				td->constraint_changed = true;
				genpd = dev_to_genpd(dev);
			}
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			genpd_lock(genpd);
			genpd->gd->max_off_time_changed = true;
			genpd_unlock(genpd);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_lock(genpd);
	genpd_power_off(genpd, false, 0);
	genpd_unlock(genpd);
}

/**
 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 * @dev: Device to handle.
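 *
 * The callback is looked up in the same order as in __genpd_runtime_suspend():
 * device type, then class, then bus, and finally the driver's own PM ops.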
 */
static int __genpd_runtime_resume(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*suspend_ok)(struct device *__dev);
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	struct gpd_timing_data *td = gpd_data->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start = 0;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for other purposes than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
	if (runtime_pm && suspend_ok && !suspend_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	if (td && runtime_pm)
		time_start = ktime_get();

	ret = __genpd_runtime_suspend(dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		__genpd_runtime_resume(dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (td && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->gd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine may be run with
	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
	 */
	if (irq_safe_dev_in_sleep_domain(dev, genpd))
		return 0;

	genpd_lock(genpd);
	genpd_power_off(genpd, true, 0);
	gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	struct gpd_timing_data *td = gpd_data->td;
	bool timed = td && pm_runtime_enabled(dev);
	ktime_t time_start = 0;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * As we don't power off a non IRQ safe domain that holds an IRQ safe
	 * device, we don't need to restore power to it.
	 */
	if (irq_safe_dev_in_sleep_domain(dev, genpd))
		goto out;

	genpd_lock(genpd);
	genpd_restore_performance_state(dev, gpd_data->rpm_pstate);
	ret = genpd_power_on(genpd, 0);
	genpd_unlock(genpd);

	if (ret)
		return ret;

out:
	/* Measure resume latency. */
	if (timed)
		time_start = ktime_get();

	ret = genpd_start_dev(genpd, dev);
	if (ret)
		goto err_poweroff;

	ret = __genpd_runtime_resume(dev);
	if (ret)
		goto err_stop;

	/* Update resume latency value if the measured time exceeds it. */
	if (timed) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->gd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;

err_stop:
	genpd_stop_dev(genpd, dev);
err_poweroff:
	if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
		genpd_lock(genpd);
		genpd_power_off(genpd, true, 0);
		gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
		genpd_unlock(genpd);
	}

	return ret;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);

/**
 * genpd_power_off_unused - Power off all PM domains with no devices in use.
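 *
 * This runs as a late initcall. Passing "pd_ignore_unused" on the kernel
 * command line (see pd_ignore_unused_setup() above) skips it, leaving unused
 * domains powered on.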
1195d6948c13SUlf Hansson */ 1196d6948c13SUlf Hansson static int __init genpd_power_off_unused(void) 1197d6948c13SUlf Hansson { 1198d6948c13SUlf Hansson struct generic_pm_domain *genpd; 1199d6948c13SUlf Hansson 1200d6948c13SUlf Hansson if (pd_ignore_unused) { 1201d6948c13SUlf Hansson pr_warn("genpd: Not disabling unused power domains\n"); 1202d6948c13SUlf Hansson return 0; 1203d6948c13SUlf Hansson } 1204d6948c13SUlf Hansson 1205745fe55bSKonrad Dybcio pr_info("genpd: Disabling unused power domains\n"); 1206d6948c13SUlf Hansson mutex_lock(&gpd_list_lock); 1207d6948c13SUlf Hansson 1208d6948c13SUlf Hansson list_for_each_entry(genpd, &gpd_list, gpd_list_node) 1209d6948c13SUlf Hansson genpd_queue_power_off_work(genpd); 1210d6948c13SUlf Hansson 1211d6948c13SUlf Hansson mutex_unlock(&gpd_list_lock); 1212d6948c13SUlf Hansson 1213d6948c13SUlf Hansson return 0; 1214d6948c13SUlf Hansson } 1215741ba013SKonrad Dybcio late_initcall_sync(genpd_power_off_unused); 1216d6948c13SUlf Hansson 1217d6948c13SUlf Hansson #ifdef CONFIG_PM_SLEEP 1218d6948c13SUlf Hansson 1219d6948c13SUlf Hansson /** 1220d6948c13SUlf Hansson * genpd_sync_power_off - Synchronously power off a PM domain and its parents. 1221d6948c13SUlf Hansson * @genpd: PM domain to power off, if possible. 1222d6948c13SUlf Hansson * @use_lock: use the lock. 1223d6948c13SUlf Hansson * @depth: nesting count for lockdep. 1224d6948c13SUlf Hansson * 1225d6948c13SUlf Hansson * Check if the given PM domain can be powered off (during system suspend or 1226d6948c13SUlf Hansson * hibernation) and do that if so. Also, in that case propagate to its parents. 1227d6948c13SUlf Hansson * 1228d6948c13SUlf Hansson * This function is only called in "noirq" and "syscore" stages of system power 1229d6948c13SUlf Hansson * transitions. The "noirq" callbacks may be executed asynchronously, thus in 1230d6948c13SUlf Hansson * these cases the lock must be held. 1231d6948c13SUlf Hansson */ 1232d6948c13SUlf Hansson static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock, 1233d6948c13SUlf Hansson unsigned int depth) 1234d6948c13SUlf Hansson { 1235d6948c13SUlf Hansson struct gpd_link *link; 1236d6948c13SUlf Hansson 1237d6948c13SUlf Hansson if (!genpd_status_on(genpd) || genpd_is_always_on(genpd)) 1238d6948c13SUlf Hansson return; 1239d6948c13SUlf Hansson 1240d6948c13SUlf Hansson if (genpd->suspended_count != genpd->device_count 1241d6948c13SUlf Hansson || atomic_read(&genpd->sd_count) > 0) 1242d6948c13SUlf Hansson return; 1243d6948c13SUlf Hansson 1244d6948c13SUlf Hansson /* Check that the children are in their deepest (powered-off) state. 
*/ 1245d6948c13SUlf Hansson list_for_each_entry(link, &genpd->parent_links, parent_node) { 1246d6948c13SUlf Hansson struct generic_pm_domain *child = link->child; 1247d6948c13SUlf Hansson if (child->state_idx < child->state_count - 1) 1248d6948c13SUlf Hansson return; 1249d6948c13SUlf Hansson } 1250d6948c13SUlf Hansson 1251d6948c13SUlf Hansson /* Choose the deepest state when suspending */ 1252d6948c13SUlf Hansson genpd->state_idx = genpd->state_count - 1; 12535af7f593SUlf Hansson if (_genpd_power_off(genpd, false)) { 12545af7f593SUlf Hansson genpd->states[genpd->state_idx].rejected++; 1255d6948c13SUlf Hansson return; 12565af7f593SUlf Hansson } else { 12575af7f593SUlf Hansson genpd->states[genpd->state_idx].usage++; 12585af7f593SUlf Hansson } 1259d6948c13SUlf Hansson 1260d6948c13SUlf Hansson genpd->status = GENPD_STATE_OFF; 1261d6948c13SUlf Hansson 1262d6948c13SUlf Hansson list_for_each_entry(link, &genpd->child_links, child_node) { 1263d6948c13SUlf Hansson genpd_sd_counter_dec(link->parent); 1264d6948c13SUlf Hansson 1265d6948c13SUlf Hansson if (use_lock) 1266d6948c13SUlf Hansson genpd_lock_nested(link->parent, depth + 1); 1267d6948c13SUlf Hansson 1268d6948c13SUlf Hansson genpd_sync_power_off(link->parent, use_lock, depth + 1); 1269d6948c13SUlf Hansson 1270d6948c13SUlf Hansson if (use_lock) 1271d6948c13SUlf Hansson genpd_unlock(link->parent); 1272d6948c13SUlf Hansson } 1273d6948c13SUlf Hansson } 1274d6948c13SUlf Hansson 1275d6948c13SUlf Hansson /** 1276d6948c13SUlf Hansson * genpd_sync_power_on - Synchronously power on a PM domain and its parents. 1277d6948c13SUlf Hansson * @genpd: PM domain to power on. 1278d6948c13SUlf Hansson * @use_lock: use the lock. 1279d6948c13SUlf Hansson * @depth: nesting count for lockdep. 1280d6948c13SUlf Hansson * 1281d6948c13SUlf Hansson * This function is only called in "noirq" and "syscore" stages of system power 1282d6948c13SUlf Hansson * transitions. The "noirq" callbacks may be executed asynchronously, thus in 1283d6948c13SUlf Hansson * these cases the lock must be held. 1284d6948c13SUlf Hansson */ 1285d6948c13SUlf Hansson static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock, 1286d6948c13SUlf Hansson unsigned int depth) 1287d6948c13SUlf Hansson { 1288d6948c13SUlf Hansson struct gpd_link *link; 1289d6948c13SUlf Hansson 1290d6948c13SUlf Hansson if (genpd_status_on(genpd)) 1291d6948c13SUlf Hansson return; 1292d6948c13SUlf Hansson 1293d6948c13SUlf Hansson list_for_each_entry(link, &genpd->child_links, child_node) { 1294d6948c13SUlf Hansson genpd_sd_counter_inc(link->parent); 1295d6948c13SUlf Hansson 1296d6948c13SUlf Hansson if (use_lock) 1297d6948c13SUlf Hansson genpd_lock_nested(link->parent, depth + 1); 1298d6948c13SUlf Hansson 1299d6948c13SUlf Hansson genpd_sync_power_on(link->parent, use_lock, depth + 1); 1300d6948c13SUlf Hansson 1301d6948c13SUlf Hansson if (use_lock) 1302d6948c13SUlf Hansson genpd_unlock(link->parent); 1303d6948c13SUlf Hansson } 1304d6948c13SUlf Hansson 1305d6948c13SUlf Hansson _genpd_power_on(genpd, false); 1306d6948c13SUlf Hansson genpd->status = GENPD_STATE_ON; 1307d6948c13SUlf Hansson } 1308d6948c13SUlf Hansson 1309d6948c13SUlf Hansson /** 1310d6948c13SUlf Hansson * genpd_prepare - Start power transition of a device in a PM domain. 1311d6948c13SUlf Hansson * @dev: Device to start the transition of. 
1312d6948c13SUlf Hansson * 1313d6948c13SUlf Hansson * Start a power transition of a device (during a system-wide power transition) 1314d6948c13SUlf Hansson * under the assumption that its pm_domain field points to the domain member of 1315d6948c13SUlf Hansson * an object of type struct generic_pm_domain representing a PM domain 1316d6948c13SUlf Hansson * consisting of I/O devices. 1317d6948c13SUlf Hansson */ 1318d6948c13SUlf Hansson static int genpd_prepare(struct device *dev) 1319d6948c13SUlf Hansson { 1320d6948c13SUlf Hansson struct generic_pm_domain *genpd; 1321d6948c13SUlf Hansson int ret; 1322d6948c13SUlf Hansson 1323d6948c13SUlf Hansson dev_dbg(dev, "%s()\n", __func__); 1324d6948c13SUlf Hansson 1325d6948c13SUlf Hansson genpd = dev_to_genpd(dev); 1326d6948c13SUlf Hansson if (IS_ERR(genpd)) 1327d6948c13SUlf Hansson return -EINVAL; 1328d6948c13SUlf Hansson 1329d6948c13SUlf Hansson genpd_lock(genpd); 13300cebf7cbSUlf Hansson genpd->prepared_count++; 1331d6948c13SUlf Hansson genpd_unlock(genpd); 1332d6948c13SUlf Hansson 1333d6948c13SUlf Hansson ret = pm_generic_prepare(dev); 1334d6948c13SUlf Hansson if (ret < 0) { 1335d6948c13SUlf Hansson genpd_lock(genpd); 1336d6948c13SUlf Hansson 1337d6948c13SUlf Hansson genpd->prepared_count--; 1338d6948c13SUlf Hansson 1339d6948c13SUlf Hansson genpd_unlock(genpd); 1340d6948c13SUlf Hansson } 1341d6948c13SUlf Hansson 1342d6948c13SUlf Hansson /* Never return 1, as genpd don't cope with the direct_complete path. */ 1343d6948c13SUlf Hansson return ret >= 0 ? 0 : ret; 1344d6948c13SUlf Hansson } 1345d6948c13SUlf Hansson 1346d6948c13SUlf Hansson /** 1347d6948c13SUlf Hansson * genpd_finish_suspend - Completion of suspend or hibernation of device in an 1348d6948c13SUlf Hansson * I/O pm domain. 1349d6948c13SUlf Hansson * @dev: Device to suspend. 1350d6948c13SUlf Hansson * @suspend_noirq: Generic suspend_noirq callback. 1351d6948c13SUlf Hansson * @resume_noirq: Generic resume_noirq callback. 1352d6948c13SUlf Hansson * 1353d6948c13SUlf Hansson * Stop the device and remove power from the domain if all devices in it have 1354d6948c13SUlf Hansson * been stopped. 
1355d6948c13SUlf Hansson */ 1356d6948c13SUlf Hansson static int genpd_finish_suspend(struct device *dev, 1357d6948c13SUlf Hansson int (*suspend_noirq)(struct device *dev), 1358d6948c13SUlf Hansson int (*resume_noirq)(struct device *dev)) 1359d6948c13SUlf Hansson { 1360d6948c13SUlf Hansson struct generic_pm_domain *genpd; 1361d6948c13SUlf Hansson int ret = 0; 1362d6948c13SUlf Hansson 1363d6948c13SUlf Hansson genpd = dev_to_genpd(dev); 1364d6948c13SUlf Hansson if (IS_ERR(genpd)) 1365d6948c13SUlf Hansson return -EINVAL; 1366d6948c13SUlf Hansson 1367d6948c13SUlf Hansson ret = suspend_noirq(dev); 1368d6948c13SUlf Hansson if (ret) 1369d6948c13SUlf Hansson return ret; 1370d6948c13SUlf Hansson 1371d6948c13SUlf Hansson if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd)) 1372d6948c13SUlf Hansson return 0; 1373d6948c13SUlf Hansson 1374d6948c13SUlf Hansson if (genpd->dev_ops.stop && genpd->dev_ops.start && 1375d6948c13SUlf Hansson !pm_runtime_status_suspended(dev)) { 1376d6948c13SUlf Hansson ret = genpd_stop_dev(genpd, dev); 1377d6948c13SUlf Hansson if (ret) { 1378d6948c13SUlf Hansson resume_noirq(dev); 1379d6948c13SUlf Hansson return ret; 1380d6948c13SUlf Hansson } 1381d6948c13SUlf Hansson } 1382d6948c13SUlf Hansson 1383d6948c13SUlf Hansson genpd_lock(genpd); 1384d6948c13SUlf Hansson genpd->suspended_count++; 1385d6948c13SUlf Hansson genpd_sync_power_off(genpd, true, 0); 1386d6948c13SUlf Hansson genpd_unlock(genpd); 1387d6948c13SUlf Hansson 1388d6948c13SUlf Hansson return 0; 1389d6948c13SUlf Hansson } 1390d6948c13SUlf Hansson 1391d6948c13SUlf Hansson /** 1392d6948c13SUlf Hansson * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain. 1393d6948c13SUlf Hansson * @dev: Device to suspend. 1394d6948c13SUlf Hansson * 1395d6948c13SUlf Hansson * Stop the device and remove power from the domain if all devices in it have 1396d6948c13SUlf Hansson * been stopped. 1397d6948c13SUlf Hansson */ 1398d6948c13SUlf Hansson static int genpd_suspend_noirq(struct device *dev) 1399d6948c13SUlf Hansson { 1400d6948c13SUlf Hansson dev_dbg(dev, "%s()\n", __func__); 1401d6948c13SUlf Hansson 1402d6948c13SUlf Hansson return genpd_finish_suspend(dev, 1403d6948c13SUlf Hansson pm_generic_suspend_noirq, 1404d6948c13SUlf Hansson pm_generic_resume_noirq); 1405d6948c13SUlf Hansson } 1406d6948c13SUlf Hansson 1407d6948c13SUlf Hansson /** 1408d6948c13SUlf Hansson * genpd_finish_resume - Completion of resume of device in an I/O PM domain. 1409d6948c13SUlf Hansson * @dev: Device to resume. 1410d6948c13SUlf Hansson * @resume_noirq: Generic resume_noirq callback. 1411d6948c13SUlf Hansson * 1412d6948c13SUlf Hansson * Restore power to the device's PM domain, if necessary, and start the device. 
1413d6948c13SUlf Hansson */ 1414d6948c13SUlf Hansson static int genpd_finish_resume(struct device *dev, 1415d6948c13SUlf Hansson int (*resume_noirq)(struct device *dev)) 1416d6948c13SUlf Hansson { 1417d6948c13SUlf Hansson struct generic_pm_domain *genpd; 1418d6948c13SUlf Hansson int ret; 1419d6948c13SUlf Hansson 1420d6948c13SUlf Hansson dev_dbg(dev, "%s()\n", __func__); 1421d6948c13SUlf Hansson 1422d6948c13SUlf Hansson genpd = dev_to_genpd(dev); 1423d6948c13SUlf Hansson if (IS_ERR(genpd)) 1424d6948c13SUlf Hansson return -EINVAL; 1425d6948c13SUlf Hansson 1426d6948c13SUlf Hansson if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd)) 1427d6948c13SUlf Hansson return resume_noirq(dev); 1428d6948c13SUlf Hansson 1429d6948c13SUlf Hansson genpd_lock(genpd); 1430d6948c13SUlf Hansson genpd_sync_power_on(genpd, true, 0); 1431d6948c13SUlf Hansson genpd->suspended_count--; 1432d6948c13SUlf Hansson genpd_unlock(genpd); 1433d6948c13SUlf Hansson 1434d6948c13SUlf Hansson if (genpd->dev_ops.stop && genpd->dev_ops.start && 1435d6948c13SUlf Hansson !pm_runtime_status_suspended(dev)) { 1436d6948c13SUlf Hansson ret = genpd_start_dev(genpd, dev); 1437d6948c13SUlf Hansson if (ret) 1438d6948c13SUlf Hansson return ret; 1439d6948c13SUlf Hansson } 1440d6948c13SUlf Hansson 1441d6948c13SUlf Hansson return pm_generic_resume_noirq(dev); 1442d6948c13SUlf Hansson } 1443d6948c13SUlf Hansson 1444d6948c13SUlf Hansson /** 1445d6948c13SUlf Hansson * genpd_resume_noirq - Start of resume of device in an I/O PM domain. 1446d6948c13SUlf Hansson * @dev: Device to resume. 1447d6948c13SUlf Hansson * 1448d6948c13SUlf Hansson * Restore power to the device's PM domain, if necessary, and start the device. 1449d6948c13SUlf Hansson */ 1450d6948c13SUlf Hansson static int genpd_resume_noirq(struct device *dev) 1451d6948c13SUlf Hansson { 1452d6948c13SUlf Hansson dev_dbg(dev, "%s()\n", __func__); 1453d6948c13SUlf Hansson 1454d6948c13SUlf Hansson return genpd_finish_resume(dev, pm_generic_resume_noirq); 1455d6948c13SUlf Hansson } 1456d6948c13SUlf Hansson 1457d6948c13SUlf Hansson /** 1458d6948c13SUlf Hansson * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain. 1459d6948c13SUlf Hansson * @dev: Device to freeze. 1460d6948c13SUlf Hansson * 1461d6948c13SUlf Hansson * Carry out a late freeze of a device under the assumption that its 1462d6948c13SUlf Hansson * pm_domain field points to the domain member of an object of type 1463d6948c13SUlf Hansson * struct generic_pm_domain representing a power domain consisting of I/O 1464d6948c13SUlf Hansson * devices. 1465d6948c13SUlf Hansson */ 1466d6948c13SUlf Hansson static int genpd_freeze_noirq(struct device *dev) 1467d6948c13SUlf Hansson { 1468d6948c13SUlf Hansson dev_dbg(dev, "%s()\n", __func__); 1469d6948c13SUlf Hansson 1470d6948c13SUlf Hansson return genpd_finish_suspend(dev, 1471d6948c13SUlf Hansson pm_generic_freeze_noirq, 1472d6948c13SUlf Hansson pm_generic_thaw_noirq); 1473d6948c13SUlf Hansson } 1474d6948c13SUlf Hansson 1475d6948c13SUlf Hansson /** 1476d6948c13SUlf Hansson * genpd_thaw_noirq - Early thaw of device in an I/O PM domain. 1477d6948c13SUlf Hansson * @dev: Device to thaw. 1478d6948c13SUlf Hansson * 1479d6948c13SUlf Hansson * Start the device, unless power has been removed from the domain already 1480d6948c13SUlf Hansson * before the system transition. 
1481d6948c13SUlf Hansson */ 1482d6948c13SUlf Hansson static int genpd_thaw_noirq(struct device *dev) 1483d6948c13SUlf Hansson { 1484d6948c13SUlf Hansson dev_dbg(dev, "%s()\n", __func__); 1485d6948c13SUlf Hansson 1486d6948c13SUlf Hansson return genpd_finish_resume(dev, pm_generic_thaw_noirq); 1487d6948c13SUlf Hansson } 1488d6948c13SUlf Hansson 1489d6948c13SUlf Hansson /** 1490d6948c13SUlf Hansson * genpd_poweroff_noirq - Completion of hibernation of device in an 1491d6948c13SUlf Hansson * I/O PM domain. 1492d6948c13SUlf Hansson * @dev: Device to poweroff. 1493d6948c13SUlf Hansson * 1494d6948c13SUlf Hansson * Stop the device and remove power from the domain if all devices in it have 1495d6948c13SUlf Hansson * been stopped. 1496d6948c13SUlf Hansson */ 1497d6948c13SUlf Hansson static int genpd_poweroff_noirq(struct device *dev) 1498d6948c13SUlf Hansson { 1499d6948c13SUlf Hansson dev_dbg(dev, "%s()\n", __func__); 1500d6948c13SUlf Hansson 1501d6948c13SUlf Hansson return genpd_finish_suspend(dev, 1502d6948c13SUlf Hansson pm_generic_poweroff_noirq, 1503d6948c13SUlf Hansson pm_generic_restore_noirq); 1504d6948c13SUlf Hansson } 1505d6948c13SUlf Hansson 1506d6948c13SUlf Hansson /** 1507d6948c13SUlf Hansson * genpd_restore_noirq - Start of restore of device in an I/O PM domain. 1508d6948c13SUlf Hansson * @dev: Device to resume. 1509d6948c13SUlf Hansson * 1510d6948c13SUlf Hansson * Make sure the domain will be in the same power state as before the 1511d6948c13SUlf Hansson * hibernation the system is resuming from and start the device if necessary. 1512d6948c13SUlf Hansson */ 1513d6948c13SUlf Hansson static int genpd_restore_noirq(struct device *dev) 1514d6948c13SUlf Hansson { 1515d6948c13SUlf Hansson dev_dbg(dev, "%s()\n", __func__); 1516d6948c13SUlf Hansson 1517d6948c13SUlf Hansson return genpd_finish_resume(dev, pm_generic_restore_noirq); 1518d6948c13SUlf Hansson } 1519d6948c13SUlf Hansson 1520d6948c13SUlf Hansson /** 1521d6948c13SUlf Hansson * genpd_complete - Complete power transition of a device in a power domain. 1522d6948c13SUlf Hansson * @dev: Device to complete the transition of. 1523d6948c13SUlf Hansson * 1524d6948c13SUlf Hansson * Complete a power transition of a device (during a system-wide power 1525d6948c13SUlf Hansson * transition) under the assumption that its pm_domain field points to the 1526d6948c13SUlf Hansson * domain member of an object of type struct generic_pm_domain representing 1527d6948c13SUlf Hansson * a power domain consisting of I/O devices. 
1528d6948c13SUlf Hansson */ 1529d6948c13SUlf Hansson static void genpd_complete(struct device *dev) 1530d6948c13SUlf Hansson { 1531d6948c13SUlf Hansson struct generic_pm_domain *genpd; 1532d6948c13SUlf Hansson 1533d6948c13SUlf Hansson dev_dbg(dev, "%s()\n", __func__); 1534d6948c13SUlf Hansson 1535d6948c13SUlf Hansson genpd = dev_to_genpd(dev); 1536d6948c13SUlf Hansson if (IS_ERR(genpd)) 1537d6948c13SUlf Hansson return; 1538d6948c13SUlf Hansson 1539d6948c13SUlf Hansson pm_generic_complete(dev); 1540d6948c13SUlf Hansson 1541d6948c13SUlf Hansson genpd_lock(genpd); 1542d6948c13SUlf Hansson 1543d6948c13SUlf Hansson genpd->prepared_count--; 1544d6948c13SUlf Hansson if (!genpd->prepared_count) 1545d6948c13SUlf Hansson genpd_queue_power_off_work(genpd); 1546d6948c13SUlf Hansson 1547d6948c13SUlf Hansson genpd_unlock(genpd); 1548d6948c13SUlf Hansson } 1549d6948c13SUlf Hansson 1550d6948c13SUlf Hansson static void genpd_switch_state(struct device *dev, bool suspend) 1551d6948c13SUlf Hansson { 1552d6948c13SUlf Hansson struct generic_pm_domain *genpd; 1553d6948c13SUlf Hansson bool use_lock; 1554d6948c13SUlf Hansson 1555d6948c13SUlf Hansson genpd = dev_to_genpd_safe(dev); 1556d6948c13SUlf Hansson if (!genpd) 1557d6948c13SUlf Hansson return; 1558d6948c13SUlf Hansson 1559d6948c13SUlf Hansson use_lock = genpd_is_irq_safe(genpd); 1560d6948c13SUlf Hansson 1561d6948c13SUlf Hansson if (use_lock) 1562d6948c13SUlf Hansson genpd_lock(genpd); 1563d6948c13SUlf Hansson 1564d6948c13SUlf Hansson if (suspend) { 1565d6948c13SUlf Hansson genpd->suspended_count++; 1566d6948c13SUlf Hansson genpd_sync_power_off(genpd, use_lock, 0); 1567d6948c13SUlf Hansson } else { 1568d6948c13SUlf Hansson genpd_sync_power_on(genpd, use_lock, 0); 1569d6948c13SUlf Hansson genpd->suspended_count--; 1570d6948c13SUlf Hansson } 1571d6948c13SUlf Hansson 1572d6948c13SUlf Hansson if (use_lock) 1573d6948c13SUlf Hansson genpd_unlock(genpd); 1574d6948c13SUlf Hansson } 1575d6948c13SUlf Hansson 1576d6948c13SUlf Hansson /** 1577d6948c13SUlf Hansson * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev 1578d6948c13SUlf Hansson * @dev: The device that is attached to the genpd, that can be suspended. 1579d6948c13SUlf Hansson * 1580d6948c13SUlf Hansson * This routine should typically be called for a device that needs to be 1581d6948c13SUlf Hansson * suspended during the syscore suspend phase. It may also be called during 1582d6948c13SUlf Hansson * suspend-to-idle to suspend a corresponding CPU device that is attached to a 1583d6948c13SUlf Hansson * genpd. 1584d6948c13SUlf Hansson */ 1585d6948c13SUlf Hansson void dev_pm_genpd_suspend(struct device *dev) 1586d6948c13SUlf Hansson { 1587d6948c13SUlf Hansson genpd_switch_state(dev, true); 1588d6948c13SUlf Hansson } 1589d6948c13SUlf Hansson EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend); 1590d6948c13SUlf Hansson 1591d6948c13SUlf Hansson /** 1592d6948c13SUlf Hansson * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev 1593d6948c13SUlf Hansson * @dev: The device that is attached to the genpd, which needs to be resumed. 1594d6948c13SUlf Hansson * 1595d6948c13SUlf Hansson * This routine should typically be called for a device that needs to be resumed 1596d6948c13SUlf Hansson * during the syscore resume phase. It may also be called during suspend-to-idle 1597d6948c13SUlf Hansson * to resume a corresponding CPU device that is attached to a genpd. 
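 *
 * Illustrative sketch of a syscore pairing (assumed driver code; foo_dev and
 * the foo_* helpers are hypothetical):
 *
 *	static int foo_syscore_suspend(void)
 *	{
 *		dev_pm_genpd_suspend(foo_dev);
 *		return 0;
 *	}
 *
 *	static void foo_syscore_resume(void)
 *	{
 *		dev_pm_genpd_resume(foo_dev);
 *	}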
1598d6948c13SUlf Hansson */ 1599d6948c13SUlf Hansson void dev_pm_genpd_resume(struct device *dev) 1600d6948c13SUlf Hansson { 1601d6948c13SUlf Hansson genpd_switch_state(dev, false); 1602d6948c13SUlf Hansson } 1603d6948c13SUlf Hansson EXPORT_SYMBOL_GPL(dev_pm_genpd_resume); 1604d6948c13SUlf Hansson 1605d6948c13SUlf Hansson #else /* !CONFIG_PM_SLEEP */ 1606d6948c13SUlf Hansson 1607d6948c13SUlf Hansson #define genpd_prepare NULL 1608d6948c13SUlf Hansson #define genpd_suspend_noirq NULL 1609d6948c13SUlf Hansson #define genpd_resume_noirq NULL 1610d6948c13SUlf Hansson #define genpd_freeze_noirq NULL 1611d6948c13SUlf Hansson #define genpd_thaw_noirq NULL 1612d6948c13SUlf Hansson #define genpd_poweroff_noirq NULL 1613d6948c13SUlf Hansson #define genpd_restore_noirq NULL 1614d6948c13SUlf Hansson #define genpd_complete NULL 1615d6948c13SUlf Hansson 1616d6948c13SUlf Hansson #endif /* CONFIG_PM_SLEEP */ 1617d6948c13SUlf Hansson 1618d6948c13SUlf Hansson static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev, 1619d6948c13SUlf Hansson bool has_governor) 1620d6948c13SUlf Hansson { 1621d6948c13SUlf Hansson struct generic_pm_domain_data *gpd_data; 1622d6948c13SUlf Hansson struct gpd_timing_data *td; 1623d6948c13SUlf Hansson int ret; 1624d6948c13SUlf Hansson 1625d6948c13SUlf Hansson ret = dev_pm_get_subsys_data(dev); 1626d6948c13SUlf Hansson if (ret) 1627d6948c13SUlf Hansson return ERR_PTR(ret); 1628d6948c13SUlf Hansson 1629d6948c13SUlf Hansson gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL); 1630d6948c13SUlf Hansson if (!gpd_data) { 1631d6948c13SUlf Hansson ret = -ENOMEM; 1632d6948c13SUlf Hansson goto err_put; 1633d6948c13SUlf Hansson } 1634d6948c13SUlf Hansson 1635d6948c13SUlf Hansson gpd_data->base.dev = dev; 1636d6948c13SUlf Hansson gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier; 1637d6948c13SUlf Hansson 1638d6948c13SUlf Hansson /* Allocate data used by a governor. 
*/ 1639d6948c13SUlf Hansson if (has_governor) { 1640d6948c13SUlf Hansson td = kzalloc(sizeof(*td), GFP_KERNEL); 1641d6948c13SUlf Hansson if (!td) { 1642d6948c13SUlf Hansson ret = -ENOMEM; 1643d6948c13SUlf Hansson goto err_free; 1644d6948c13SUlf Hansson } 1645d6948c13SUlf Hansson 1646d6948c13SUlf Hansson td->constraint_changed = true; 1647d6948c13SUlf Hansson td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS; 1648d6948c13SUlf Hansson td->next_wakeup = KTIME_MAX; 1649d6948c13SUlf Hansson gpd_data->td = td; 1650d6948c13SUlf Hansson } 1651d6948c13SUlf Hansson 1652d6948c13SUlf Hansson spin_lock_irq(&dev->power.lock); 1653d6948c13SUlf Hansson 1654d6948c13SUlf Hansson if (dev->power.subsys_data->domain_data) 1655d6948c13SUlf Hansson ret = -EINVAL; 1656d6948c13SUlf Hansson else 1657d6948c13SUlf Hansson dev->power.subsys_data->domain_data = &gpd_data->base; 1658d6948c13SUlf Hansson 1659d6948c13SUlf Hansson spin_unlock_irq(&dev->power.lock); 1660d6948c13SUlf Hansson 1661d6948c13SUlf Hansson if (ret) 1662d6948c13SUlf Hansson goto err_free; 1663d6948c13SUlf Hansson 1664d6948c13SUlf Hansson return gpd_data; 1665d6948c13SUlf Hansson 1666d6948c13SUlf Hansson err_free: 1667d6948c13SUlf Hansson kfree(gpd_data->td); 1668d6948c13SUlf Hansson kfree(gpd_data); 1669d6948c13SUlf Hansson err_put: 1670d6948c13SUlf Hansson dev_pm_put_subsys_data(dev); 1671d6948c13SUlf Hansson return ERR_PTR(ret); 1672d6948c13SUlf Hansson } 1673d6948c13SUlf Hansson 1674d6948c13SUlf Hansson static void genpd_free_dev_data(struct device *dev, 1675d6948c13SUlf Hansson struct generic_pm_domain_data *gpd_data) 1676d6948c13SUlf Hansson { 1677d6948c13SUlf Hansson spin_lock_irq(&dev->power.lock); 1678d6948c13SUlf Hansson 1679d6948c13SUlf Hansson dev->power.subsys_data->domain_data = NULL; 1680d6948c13SUlf Hansson 1681d6948c13SUlf Hansson spin_unlock_irq(&dev->power.lock); 1682d6948c13SUlf Hansson 1683d6948c13SUlf Hansson kfree(gpd_data->td); 1684d6948c13SUlf Hansson kfree(gpd_data); 1685d6948c13SUlf Hansson dev_pm_put_subsys_data(dev); 1686d6948c13SUlf Hansson } 1687d6948c13SUlf Hansson 1688d6948c13SUlf Hansson static void genpd_update_cpumask(struct generic_pm_domain *genpd, 1689d6948c13SUlf Hansson int cpu, bool set, unsigned int depth) 1690d6948c13SUlf Hansson { 1691d6948c13SUlf Hansson struct gpd_link *link; 1692d6948c13SUlf Hansson 1693d6948c13SUlf Hansson if (!genpd_is_cpu_domain(genpd)) 1694d6948c13SUlf Hansson return; 1695d6948c13SUlf Hansson 1696d6948c13SUlf Hansson list_for_each_entry(link, &genpd->child_links, child_node) { 1697d6948c13SUlf Hansson struct generic_pm_domain *parent = link->parent; 1698d6948c13SUlf Hansson 1699d6948c13SUlf Hansson genpd_lock_nested(parent, depth + 1); 1700d6948c13SUlf Hansson genpd_update_cpumask(parent, cpu, set, depth + 1); 1701d6948c13SUlf Hansson genpd_unlock(parent); 1702d6948c13SUlf Hansson } 1703d6948c13SUlf Hansson 1704d6948c13SUlf Hansson if (set) 1705d6948c13SUlf Hansson cpumask_set_cpu(cpu, genpd->cpus); 1706d6948c13SUlf Hansson else 1707d6948c13SUlf Hansson cpumask_clear_cpu(cpu, genpd->cpus); 1708d6948c13SUlf Hansson } 1709d6948c13SUlf Hansson 1710d6948c13SUlf Hansson static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu) 1711d6948c13SUlf Hansson { 1712d6948c13SUlf Hansson if (cpu >= 0) 1713d6948c13SUlf Hansson genpd_update_cpumask(genpd, cpu, true, 0); 1714d6948c13SUlf Hansson } 1715d6948c13SUlf Hansson 1716d6948c13SUlf Hansson static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu) 1717d6948c13SUlf Hansson { 1718d6948c13SUlf 
Hansson if (cpu >= 0) 1719d6948c13SUlf Hansson genpd_update_cpumask(genpd, cpu, false, 0); 1720d6948c13SUlf Hansson } 1721d6948c13SUlf Hansson 1722d6948c13SUlf Hansson static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev) 1723d6948c13SUlf Hansson { 1724d6948c13SUlf Hansson int cpu; 1725d6948c13SUlf Hansson 1726d6948c13SUlf Hansson if (!genpd_is_cpu_domain(genpd)) 1727d6948c13SUlf Hansson return -1; 1728d6948c13SUlf Hansson 1729d6948c13SUlf Hansson for_each_possible_cpu(cpu) { 1730d6948c13SUlf Hansson if (get_cpu_device(cpu) == dev) 1731d6948c13SUlf Hansson return cpu; 1732d6948c13SUlf Hansson } 1733d6948c13SUlf Hansson 1734d6948c13SUlf Hansson return -1; 1735d6948c13SUlf Hansson } 1736d6948c13SUlf Hansson 1737d6948c13SUlf Hansson static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, 1738d6948c13SUlf Hansson struct device *base_dev) 1739d6948c13SUlf Hansson { 1740d6948c13SUlf Hansson struct genpd_governor_data *gd = genpd->gd; 1741d6948c13SUlf Hansson struct generic_pm_domain_data *gpd_data; 1742d6948c13SUlf Hansson int ret; 1743d6948c13SUlf Hansson 1744d6948c13SUlf Hansson dev_dbg(dev, "%s()\n", __func__); 1745d6948c13SUlf Hansson 1746d6948c13SUlf Hansson gpd_data = genpd_alloc_dev_data(dev, gd); 1747d6948c13SUlf Hansson if (IS_ERR(gpd_data)) 1748d6948c13SUlf Hansson return PTR_ERR(gpd_data); 1749d6948c13SUlf Hansson 1750d6948c13SUlf Hansson gpd_data->cpu = genpd_get_cpu(genpd, base_dev); 1751d6948c13SUlf Hansson 1752*95f6454dSUlf Hansson gpd_data->hw_mode = genpd->get_hwmode_dev ? genpd->get_hwmode_dev(genpd, dev) : false; 1753*95f6454dSUlf Hansson 1754d6948c13SUlf Hansson ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0; 1755d6948c13SUlf Hansson if (ret) 1756d6948c13SUlf Hansson goto out; 1757d6948c13SUlf Hansson 1758d6948c13SUlf Hansson genpd_lock(genpd); 1759d6948c13SUlf Hansson 1760d6948c13SUlf Hansson genpd_set_cpumask(genpd, gpd_data->cpu); 1761d6948c13SUlf Hansson dev_pm_domain_set(dev, &genpd->domain); 1762d6948c13SUlf Hansson 1763d6948c13SUlf Hansson genpd->device_count++; 1764d6948c13SUlf Hansson if (gd) 1765d6948c13SUlf Hansson gd->max_off_time_changed = true; 1766d6948c13SUlf Hansson 1767d6948c13SUlf Hansson list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); 1768d6948c13SUlf Hansson 1769d6948c13SUlf Hansson genpd_unlock(genpd); 1770d6948c13SUlf Hansson out: 1771d6948c13SUlf Hansson if (ret) 1772d6948c13SUlf Hansson genpd_free_dev_data(dev, gpd_data); 1773d6948c13SUlf Hansson else 1774d6948c13SUlf Hansson dev_pm_qos_add_notifier(dev, &gpd_data->nb, 1775d6948c13SUlf Hansson DEV_PM_QOS_RESUME_LATENCY); 1776d6948c13SUlf Hansson 1777d6948c13SUlf Hansson return ret; 1778d6948c13SUlf Hansson } 1779d6948c13SUlf Hansson 1780d6948c13SUlf Hansson /** 1781d6948c13SUlf Hansson * pm_genpd_add_device - Add a device to an I/O PM domain. 1782d6948c13SUlf Hansson * @genpd: PM domain to add the device to. 1783d6948c13SUlf Hansson * @dev: Device to be added. 
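 *
 * Illustrative sketch (assumed caller code; my_genpd is hypothetical):
 *
 *	ret = pm_genpd_add_device(&my_genpd, &pdev->dev);
 *	if (ret)
 *		dev_err(&pdev->dev, "failed to add device to PM domain: %d\n", ret);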
1784d6948c13SUlf Hansson */ 1785d6948c13SUlf Hansson int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) 1786d6948c13SUlf Hansson { 1787d6948c13SUlf Hansson int ret; 1788d6948c13SUlf Hansson 1789d6948c13SUlf Hansson if (!genpd || !dev) 1790d6948c13SUlf Hansson return -EINVAL; 1791d6948c13SUlf Hansson 1792d6948c13SUlf Hansson mutex_lock(&gpd_list_lock); 1793d6948c13SUlf Hansson ret = genpd_add_device(genpd, dev, dev); 1794d6948c13SUlf Hansson mutex_unlock(&gpd_list_lock); 1795d6948c13SUlf Hansson 1796d6948c13SUlf Hansson return ret; 1797d6948c13SUlf Hansson } 1798d6948c13SUlf Hansson EXPORT_SYMBOL_GPL(pm_genpd_add_device); 1799d6948c13SUlf Hansson 1800d6948c13SUlf Hansson static int genpd_remove_device(struct generic_pm_domain *genpd, 1801d6948c13SUlf Hansson struct device *dev) 1802d6948c13SUlf Hansson { 1803d6948c13SUlf Hansson struct generic_pm_domain_data *gpd_data; 1804d6948c13SUlf Hansson struct pm_domain_data *pdd; 1805d6948c13SUlf Hansson int ret = 0; 1806d6948c13SUlf Hansson 1807d6948c13SUlf Hansson dev_dbg(dev, "%s()\n", __func__); 1808d6948c13SUlf Hansson 1809d6948c13SUlf Hansson pdd = dev->power.subsys_data->domain_data; 1810d6948c13SUlf Hansson gpd_data = to_gpd_data(pdd); 1811d6948c13SUlf Hansson dev_pm_qos_remove_notifier(dev, &gpd_data->nb, 1812d6948c13SUlf Hansson DEV_PM_QOS_RESUME_LATENCY); 1813d6948c13SUlf Hansson 1814d6948c13SUlf Hansson genpd_lock(genpd); 1815d6948c13SUlf Hansson 1816d6948c13SUlf Hansson if (genpd->prepared_count > 0) { 1817d6948c13SUlf Hansson ret = -EAGAIN; 1818d6948c13SUlf Hansson goto out; 1819d6948c13SUlf Hansson } 1820d6948c13SUlf Hansson 1821d6948c13SUlf Hansson genpd->device_count--; 1822d6948c13SUlf Hansson if (genpd->gd) 1823d6948c13SUlf Hansson genpd->gd->max_off_time_changed = true; 1824d6948c13SUlf Hansson 1825d6948c13SUlf Hansson genpd_clear_cpumask(genpd, gpd_data->cpu); 1826d6948c13SUlf Hansson dev_pm_domain_set(dev, NULL); 1827d6948c13SUlf Hansson 1828d6948c13SUlf Hansson list_del_init(&pdd->list_node); 1829d6948c13SUlf Hansson 1830d6948c13SUlf Hansson genpd_unlock(genpd); 1831d6948c13SUlf Hansson 1832d6948c13SUlf Hansson if (genpd->detach_dev) 1833d6948c13SUlf Hansson genpd->detach_dev(genpd, dev); 1834d6948c13SUlf Hansson 1835d6948c13SUlf Hansson genpd_free_dev_data(dev, gpd_data); 1836d6948c13SUlf Hansson 1837d6948c13SUlf Hansson return 0; 1838d6948c13SUlf Hansson 1839d6948c13SUlf Hansson out: 1840d6948c13SUlf Hansson genpd_unlock(genpd); 1841d6948c13SUlf Hansson dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY); 1842d6948c13SUlf Hansson 1843d6948c13SUlf Hansson return ret; 1844d6948c13SUlf Hansson } 1845d6948c13SUlf Hansson 1846d6948c13SUlf Hansson /** 1847d6948c13SUlf Hansson * pm_genpd_remove_device - Remove a device from an I/O PM domain. 1848d6948c13SUlf Hansson * @dev: Device to be removed. 
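 *
 * Illustrative sketch (assumed caller code): the device must currently be
 * attached to a genpd, and removal is refused with -EAGAIN while the domain
 * still has prepared devices:
 *
 *	ret = pm_genpd_remove_device(&pdev->dev);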
1849d6948c13SUlf Hansson */ 1850d6948c13SUlf Hansson int pm_genpd_remove_device(struct device *dev) 1851d6948c13SUlf Hansson { 1852d6948c13SUlf Hansson struct generic_pm_domain *genpd = dev_to_genpd_safe(dev); 1853d6948c13SUlf Hansson 1854d6948c13SUlf Hansson if (!genpd) 1855d6948c13SUlf Hansson return -EINVAL; 1856d6948c13SUlf Hansson 1857d6948c13SUlf Hansson return genpd_remove_device(genpd, dev); 1858d6948c13SUlf Hansson } 1859d6948c13SUlf Hansson EXPORT_SYMBOL_GPL(pm_genpd_remove_device); 1860d6948c13SUlf Hansson 1861d6948c13SUlf Hansson /** 1862d6948c13SUlf Hansson * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev 1863d6948c13SUlf Hansson * 1864d6948c13SUlf Hansson * @dev: Device that should be associated with the notifier 1865d6948c13SUlf Hansson * @nb: The notifier block to register 1866d6948c13SUlf Hansson * 1867d6948c13SUlf Hansson * Users may call this function to add a genpd power on/off notifier for an 1868d6948c13SUlf Hansson * attached @dev. Only one notifier per device is allowed. The notifier is 1869d6948c13SUlf Hansson * sent when genpd is powering on/off the PM domain. 1870d6948c13SUlf Hansson * 1871d6948c13SUlf Hansson * It is assumed that the user guarantee that the genpd wouldn't be detached 1872d6948c13SUlf Hansson * while this routine is getting called. 1873d6948c13SUlf Hansson * 1874d6948c13SUlf Hansson * Returns 0 on success and negative error values on failures. 1875d6948c13SUlf Hansson */ 1876d6948c13SUlf Hansson int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb) 1877d6948c13SUlf Hansson { 1878d6948c13SUlf Hansson struct generic_pm_domain *genpd; 1879d6948c13SUlf Hansson struct generic_pm_domain_data *gpd_data; 1880d6948c13SUlf Hansson int ret; 1881d6948c13SUlf Hansson 1882d6948c13SUlf Hansson genpd = dev_to_genpd_safe(dev); 1883d6948c13SUlf Hansson if (!genpd) 1884d6948c13SUlf Hansson return -ENODEV; 1885d6948c13SUlf Hansson 1886d6948c13SUlf Hansson if (WARN_ON(!dev->power.subsys_data || 1887d6948c13SUlf Hansson !dev->power.subsys_data->domain_data)) 1888d6948c13SUlf Hansson return -EINVAL; 1889d6948c13SUlf Hansson 1890d6948c13SUlf Hansson gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); 1891d6948c13SUlf Hansson if (gpd_data->power_nb) 1892d6948c13SUlf Hansson return -EEXIST; 1893d6948c13SUlf Hansson 1894d6948c13SUlf Hansson genpd_lock(genpd); 1895d6948c13SUlf Hansson ret = raw_notifier_chain_register(&genpd->power_notifiers, nb); 1896d6948c13SUlf Hansson genpd_unlock(genpd); 1897d6948c13SUlf Hansson 1898d6948c13SUlf Hansson if (ret) { 1899d6948c13SUlf Hansson dev_warn(dev, "failed to add notifier for PM domain %s\n", 1900d6948c13SUlf Hansson genpd->name); 1901d6948c13SUlf Hansson return ret; 1902d6948c13SUlf Hansson } 1903d6948c13SUlf Hansson 1904d6948c13SUlf Hansson gpd_data->power_nb = nb; 1905d6948c13SUlf Hansson return 0; 1906d6948c13SUlf Hansson } 1907d6948c13SUlf Hansson EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier); 1908d6948c13SUlf Hansson 1909d6948c13SUlf Hansson /** 1910d6948c13SUlf Hansson * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev 1911d6948c13SUlf Hansson * 1912d6948c13SUlf Hansson * @dev: Device that is associated with the notifier 1913d6948c13SUlf Hansson * 1914d6948c13SUlf Hansson * Users may call this function to remove a genpd power on/off notifier for an 1915d6948c13SUlf Hansson * attached @dev. 
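 *
 * Illustrative sketch of a notifier pair (assumed driver code; the foo_*
 * names are hypothetical):
 *
 *	static int foo_genpd_notifier(struct notifier_block *nb,
 *				      unsigned long action, void *data)
 *	{
 *		if (action == GENPD_NOTIFY_PRE_OFF)
 *			foo_save_context();
 *		else if (action == GENPD_NOTIFY_ON)
 *			foo_restore_context();
 *		return NOTIFY_OK;
 *	}
 *
 *	dev_pm_genpd_add_notifier(dev, &foo_nb);
 *	...
 *	dev_pm_genpd_remove_notifier(dev);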
1916d6948c13SUlf Hansson * 1917d6948c13SUlf Hansson * It is assumed that the user guarantee that the genpd wouldn't be detached 1918d6948c13SUlf Hansson * while this routine is getting called. 1919d6948c13SUlf Hansson * 1920d6948c13SUlf Hansson * Returns 0 on success and negative error values on failures. 1921d6948c13SUlf Hansson */ 1922d6948c13SUlf Hansson int dev_pm_genpd_remove_notifier(struct device *dev) 1923d6948c13SUlf Hansson { 1924d6948c13SUlf Hansson struct generic_pm_domain *genpd; 1925d6948c13SUlf Hansson struct generic_pm_domain_data *gpd_data; 1926d6948c13SUlf Hansson int ret; 1927d6948c13SUlf Hansson 1928d6948c13SUlf Hansson genpd = dev_to_genpd_safe(dev); 1929d6948c13SUlf Hansson if (!genpd) 1930d6948c13SUlf Hansson return -ENODEV; 1931d6948c13SUlf Hansson 1932d6948c13SUlf Hansson if (WARN_ON(!dev->power.subsys_data || 1933d6948c13SUlf Hansson !dev->power.subsys_data->domain_data)) 1934d6948c13SUlf Hansson return -EINVAL; 1935d6948c13SUlf Hansson 1936d6948c13SUlf Hansson gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); 1937d6948c13SUlf Hansson if (!gpd_data->power_nb) 1938d6948c13SUlf Hansson return -ENODEV; 1939d6948c13SUlf Hansson 1940d6948c13SUlf Hansson genpd_lock(genpd); 1941d6948c13SUlf Hansson ret = raw_notifier_chain_unregister(&genpd->power_notifiers, 1942d6948c13SUlf Hansson gpd_data->power_nb); 1943d6948c13SUlf Hansson genpd_unlock(genpd); 1944d6948c13SUlf Hansson 1945d6948c13SUlf Hansson if (ret) { 1946d6948c13SUlf Hansson dev_warn(dev, "failed to remove notifier for PM domain %s\n", 1947d6948c13SUlf Hansson genpd->name); 1948d6948c13SUlf Hansson return ret; 1949d6948c13SUlf Hansson } 1950d6948c13SUlf Hansson 1951d6948c13SUlf Hansson gpd_data->power_nb = NULL; 1952d6948c13SUlf Hansson return 0; 1953d6948c13SUlf Hansson } 1954d6948c13SUlf Hansson EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier); 1955d6948c13SUlf Hansson 1956d6948c13SUlf Hansson static int genpd_add_subdomain(struct generic_pm_domain *genpd, 1957d6948c13SUlf Hansson struct generic_pm_domain *subdomain) 1958d6948c13SUlf Hansson { 1959d6948c13SUlf Hansson struct gpd_link *link, *itr; 1960d6948c13SUlf Hansson int ret = 0; 1961d6948c13SUlf Hansson 1962d6948c13SUlf Hansson if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain) 1963d6948c13SUlf Hansson || genpd == subdomain) 1964d6948c13SUlf Hansson return -EINVAL; 1965d6948c13SUlf Hansson 1966d6948c13SUlf Hansson /* 1967d6948c13SUlf Hansson * If the domain can be powered on/off in an IRQ safe 1968d6948c13SUlf Hansson * context, ensure that the subdomain can also be 1969d6948c13SUlf Hansson * powered on/off in that context. 
1970d6948c13SUlf Hansson */ 1971d6948c13SUlf Hansson if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) { 1972d6948c13SUlf Hansson WARN(1, "Parent %s of subdomain %s must be IRQ safe\n", 1973d6948c13SUlf Hansson genpd->name, subdomain->name); 1974d6948c13SUlf Hansson return -EINVAL; 1975d6948c13SUlf Hansson } 1976d6948c13SUlf Hansson 1977d6948c13SUlf Hansson link = kzalloc(sizeof(*link), GFP_KERNEL); 1978d6948c13SUlf Hansson if (!link) 1979d6948c13SUlf Hansson return -ENOMEM; 1980d6948c13SUlf Hansson 1981d6948c13SUlf Hansson genpd_lock(subdomain); 1982d6948c13SUlf Hansson genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING); 1983d6948c13SUlf Hansson 1984d6948c13SUlf Hansson if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) { 1985d6948c13SUlf Hansson ret = -EINVAL; 1986d6948c13SUlf Hansson goto out; 1987d6948c13SUlf Hansson } 1988d6948c13SUlf Hansson 1989d6948c13SUlf Hansson list_for_each_entry(itr, &genpd->parent_links, parent_node) { 1990d6948c13SUlf Hansson if (itr->child == subdomain && itr->parent == genpd) { 1991d6948c13SUlf Hansson ret = -EINVAL; 1992d6948c13SUlf Hansson goto out; 1993d6948c13SUlf Hansson } 1994d6948c13SUlf Hansson } 1995d6948c13SUlf Hansson 1996d6948c13SUlf Hansson link->parent = genpd; 1997d6948c13SUlf Hansson list_add_tail(&link->parent_node, &genpd->parent_links); 1998d6948c13SUlf Hansson link->child = subdomain; 1999d6948c13SUlf Hansson list_add_tail(&link->child_node, &subdomain->child_links); 2000d6948c13SUlf Hansson if (genpd_status_on(subdomain)) 2001d6948c13SUlf Hansson genpd_sd_counter_inc(genpd); 2002d6948c13SUlf Hansson 2003d6948c13SUlf Hansson out: 2004d6948c13SUlf Hansson genpd_unlock(genpd); 2005d6948c13SUlf Hansson genpd_unlock(subdomain); 2006d6948c13SUlf Hansson if (ret) 2007d6948c13SUlf Hansson kfree(link); 2008d6948c13SUlf Hansson return ret; 2009d6948c13SUlf Hansson } 2010d6948c13SUlf Hansson 2011d6948c13SUlf Hansson /** 2012d6948c13SUlf Hansson * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. 2013d6948c13SUlf Hansson * @genpd: Leader PM domain to add the subdomain to. 2014d6948c13SUlf Hansson * @subdomain: Subdomain to be added. 2015d6948c13SUlf Hansson */ 2016d6948c13SUlf Hansson int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, 2017d6948c13SUlf Hansson struct generic_pm_domain *subdomain) 2018d6948c13SUlf Hansson { 2019d6948c13SUlf Hansson int ret; 2020d6948c13SUlf Hansson 2021d6948c13SUlf Hansson mutex_lock(&gpd_list_lock); 2022d6948c13SUlf Hansson ret = genpd_add_subdomain(genpd, subdomain); 2023d6948c13SUlf Hansson mutex_unlock(&gpd_list_lock); 2024d6948c13SUlf Hansson 2025d6948c13SUlf Hansson return ret; 2026d6948c13SUlf Hansson } 2027d6948c13SUlf Hansson EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain); 2028d6948c13SUlf Hansson 2029d6948c13SUlf Hansson /** 2030d6948c13SUlf Hansson * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. 2031d6948c13SUlf Hansson * @genpd: Leader PM domain to remove the subdomain from. 2032d6948c13SUlf Hansson * @subdomain: Subdomain to be removed. 
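 *
 * Illustrative sketch (assumed caller code; soc_pd and gpu_pd are
 * hypothetical domains):
 *
 *	ret = pm_genpd_add_subdomain(&soc_pd, &gpu_pd);
 *	...
 *	ret = pm_genpd_remove_subdomain(&soc_pd, &gpu_pd);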
2033d6948c13SUlf Hansson */ 2034d6948c13SUlf Hansson int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, 2035d6948c13SUlf Hansson struct generic_pm_domain *subdomain) 2036d6948c13SUlf Hansson { 2037d6948c13SUlf Hansson struct gpd_link *l, *link; 2038d6948c13SUlf Hansson int ret = -EINVAL; 2039d6948c13SUlf Hansson 2040d6948c13SUlf Hansson if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) 2041d6948c13SUlf Hansson return -EINVAL; 2042d6948c13SUlf Hansson 2043d6948c13SUlf Hansson genpd_lock(subdomain); 2044d6948c13SUlf Hansson genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING); 2045d6948c13SUlf Hansson 2046d6948c13SUlf Hansson if (!list_empty(&subdomain->parent_links) || subdomain->device_count) { 2047d6948c13SUlf Hansson pr_warn("%s: unable to remove subdomain %s\n", 2048d6948c13SUlf Hansson genpd->name, subdomain->name); 2049d6948c13SUlf Hansson ret = -EBUSY; 2050d6948c13SUlf Hansson goto out; 2051d6948c13SUlf Hansson } 2052d6948c13SUlf Hansson 2053d6948c13SUlf Hansson list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) { 2054d6948c13SUlf Hansson if (link->child != subdomain) 2055d6948c13SUlf Hansson continue; 2056d6948c13SUlf Hansson 2057d6948c13SUlf Hansson list_del(&link->parent_node); 2058d6948c13SUlf Hansson list_del(&link->child_node); 2059d6948c13SUlf Hansson kfree(link); 2060d6948c13SUlf Hansson if (genpd_status_on(subdomain)) 2061d6948c13SUlf Hansson genpd_sd_counter_dec(genpd); 2062d6948c13SUlf Hansson 2063d6948c13SUlf Hansson ret = 0; 2064d6948c13SUlf Hansson break; 2065d6948c13SUlf Hansson } 2066d6948c13SUlf Hansson 2067d6948c13SUlf Hansson out: 2068d6948c13SUlf Hansson genpd_unlock(genpd); 2069d6948c13SUlf Hansson genpd_unlock(subdomain); 2070d6948c13SUlf Hansson 2071d6948c13SUlf Hansson return ret; 2072d6948c13SUlf Hansson } 2073d6948c13SUlf Hansson EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain); 2074d6948c13SUlf Hansson 2075d6948c13SUlf Hansson static void genpd_free_default_power_state(struct genpd_power_state *states, 2076d6948c13SUlf Hansson unsigned int state_count) 2077d6948c13SUlf Hansson { 2078d6948c13SUlf Hansson kfree(states); 2079d6948c13SUlf Hansson } 2080d6948c13SUlf Hansson 2081d6948c13SUlf Hansson static int genpd_set_default_power_state(struct generic_pm_domain *genpd) 2082d6948c13SUlf Hansson { 2083d6948c13SUlf Hansson struct genpd_power_state *state; 2084d6948c13SUlf Hansson 2085d6948c13SUlf Hansson state = kzalloc(sizeof(*state), GFP_KERNEL); 2086d6948c13SUlf Hansson if (!state) 2087d6948c13SUlf Hansson return -ENOMEM; 2088d6948c13SUlf Hansson 2089d6948c13SUlf Hansson genpd->states = state; 2090d6948c13SUlf Hansson genpd->state_count = 1; 2091d6948c13SUlf Hansson genpd->free_states = genpd_free_default_power_state; 2092d6948c13SUlf Hansson 2093d6948c13SUlf Hansson return 0; 2094d6948c13SUlf Hansson } 2095d6948c13SUlf Hansson 2096d6948c13SUlf Hansson static int genpd_alloc_data(struct generic_pm_domain *genpd) 2097d6948c13SUlf Hansson { 2098d6948c13SUlf Hansson struct genpd_governor_data *gd = NULL; 2099d6948c13SUlf Hansson int ret; 2100d6948c13SUlf Hansson 2101d6948c13SUlf Hansson if (genpd_is_cpu_domain(genpd) && 2102d6948c13SUlf Hansson !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL)) 2103d6948c13SUlf Hansson return -ENOMEM; 2104d6948c13SUlf Hansson 2105d6948c13SUlf Hansson if (genpd->gov) { 2106d6948c13SUlf Hansson gd = kzalloc(sizeof(*gd), GFP_KERNEL); 2107d6948c13SUlf Hansson if (!gd) { 2108d6948c13SUlf Hansson ret = -ENOMEM; 2109d6948c13SUlf Hansson goto free; 2110d6948c13SUlf Hansson } 2111d6948c13SUlf Hansson 2112d6948c13SUlf 
Hansson gd->max_off_time_ns = -1; 2113d6948c13SUlf Hansson gd->max_off_time_changed = true; 2114d6948c13SUlf Hansson gd->next_wakeup = KTIME_MAX; 2115d6948c13SUlf Hansson gd->next_hrtimer = KTIME_MAX; 2116d6948c13SUlf Hansson } 2117d6948c13SUlf Hansson 2118d6948c13SUlf Hansson /* Use only one "off" state if there were no states declared */ 2119d6948c13SUlf Hansson if (genpd->state_count == 0) { 2120d6948c13SUlf Hansson ret = genpd_set_default_power_state(genpd); 2121d6948c13SUlf Hansson if (ret) 2122d6948c13SUlf Hansson goto free; 2123d6948c13SUlf Hansson } 2124d6948c13SUlf Hansson 2125d6948c13SUlf Hansson genpd->gd = gd; 2126d6948c13SUlf Hansson return 0; 2127d6948c13SUlf Hansson 2128d6948c13SUlf Hansson free: 2129d6948c13SUlf Hansson if (genpd_is_cpu_domain(genpd)) 2130d6948c13SUlf Hansson free_cpumask_var(genpd->cpus); 2131d6948c13SUlf Hansson kfree(gd); 2132d6948c13SUlf Hansson return ret; 2133d6948c13SUlf Hansson } 2134d6948c13SUlf Hansson 2135d6948c13SUlf Hansson static void genpd_free_data(struct generic_pm_domain *genpd) 2136d6948c13SUlf Hansson { 2137d6948c13SUlf Hansson if (genpd_is_cpu_domain(genpd)) 2138d6948c13SUlf Hansson free_cpumask_var(genpd->cpus); 2139d6948c13SUlf Hansson if (genpd->free_states) 2140d6948c13SUlf Hansson genpd->free_states(genpd->states, genpd->state_count); 2141d6948c13SUlf Hansson kfree(genpd->gd); 2142d6948c13SUlf Hansson } 2143d6948c13SUlf Hansson 2144d6948c13SUlf Hansson static void genpd_lock_init(struct generic_pm_domain *genpd) 2145d6948c13SUlf Hansson { 21460ddaf2c1SGeert Uytterhoeven if (genpd_is_irq_safe(genpd)) { 2147d6948c13SUlf Hansson spin_lock_init(&genpd->slock); 2148d6948c13SUlf Hansson genpd->lock_ops = &genpd_spin_ops; 2149d6948c13SUlf Hansson } else { 2150d6948c13SUlf Hansson mutex_init(&genpd->mlock); 2151d6948c13SUlf Hansson genpd->lock_ops = &genpd_mtx_ops; 2152d6948c13SUlf Hansson } 2153d6948c13SUlf Hansson } 2154d6948c13SUlf Hansson 2155d6948c13SUlf Hansson /** 2156d6948c13SUlf Hansson * pm_genpd_init - Initialize a generic I/O PM domain object. 2157d6948c13SUlf Hansson * @genpd: PM domain object to initialize. 2158d6948c13SUlf Hansson * @gov: PM domain governor to associate with the domain (may be NULL). 2159d6948c13SUlf Hansson * @is_off: Initial value of the domain's power_is_off field. 2160d6948c13SUlf Hansson * 2161d6948c13SUlf Hansson * Returns 0 on successful initialization, else a negative error code. 2162d6948c13SUlf Hansson */ 2163d6948c13SUlf Hansson int pm_genpd_init(struct generic_pm_domain *genpd, 2164d6948c13SUlf Hansson struct dev_power_governor *gov, bool is_off) 2165d6948c13SUlf Hansson { 2166d6948c13SUlf Hansson int ret; 2167d6948c13SUlf Hansson 2168d6948c13SUlf Hansson if (IS_ERR_OR_NULL(genpd)) 2169d6948c13SUlf Hansson return -EINVAL; 2170d6948c13SUlf Hansson 2171d6948c13SUlf Hansson INIT_LIST_HEAD(&genpd->parent_links); 2172d6948c13SUlf Hansson INIT_LIST_HEAD(&genpd->child_links); 2173d6948c13SUlf Hansson INIT_LIST_HEAD(&genpd->dev_list); 2174d6948c13SUlf Hansson RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers); 2175d6948c13SUlf Hansson genpd_lock_init(genpd); 2176d6948c13SUlf Hansson genpd->gov = gov; 2177d6948c13SUlf Hansson INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); 2178d6948c13SUlf Hansson atomic_set(&genpd->sd_count, 0); 2179d6948c13SUlf Hansson genpd->status = is_off ? 
GENPD_STATE_OFF : GENPD_STATE_ON; 2180d6948c13SUlf Hansson genpd->device_count = 0; 2181d6948c13SUlf Hansson genpd->provider = NULL; 2182d6948c13SUlf Hansson genpd->has_provider = false; 2183d6948c13SUlf Hansson genpd->accounting_time = ktime_get_mono_fast_ns(); 2184d6948c13SUlf Hansson genpd->domain.ops.runtime_suspend = genpd_runtime_suspend; 2185d6948c13SUlf Hansson genpd->domain.ops.runtime_resume = genpd_runtime_resume; 2186d6948c13SUlf Hansson genpd->domain.ops.prepare = genpd_prepare; 2187d6948c13SUlf Hansson genpd->domain.ops.suspend_noirq = genpd_suspend_noirq; 2188d6948c13SUlf Hansson genpd->domain.ops.resume_noirq = genpd_resume_noirq; 2189d6948c13SUlf Hansson genpd->domain.ops.freeze_noirq = genpd_freeze_noirq; 2190d6948c13SUlf Hansson genpd->domain.ops.thaw_noirq = genpd_thaw_noirq; 2191d6948c13SUlf Hansson genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq; 2192d6948c13SUlf Hansson genpd->domain.ops.restore_noirq = genpd_restore_noirq; 2193d6948c13SUlf Hansson genpd->domain.ops.complete = genpd_complete; 2194d6948c13SUlf Hansson genpd->domain.start = genpd_dev_pm_start; 2195d6948c13SUlf Hansson genpd->domain.set_performance_state = genpd_dev_pm_set_performance_state; 2196d6948c13SUlf Hansson 2197d6948c13SUlf Hansson if (genpd->flags & GENPD_FLAG_PM_CLK) { 2198d6948c13SUlf Hansson genpd->dev_ops.stop = pm_clk_suspend; 2199d6948c13SUlf Hansson genpd->dev_ops.start = pm_clk_resume; 2200d6948c13SUlf Hansson } 2201d6948c13SUlf Hansson 2202d6948c13SUlf Hansson /* The always-on governor works better with the corresponding flag. */ 2203d6948c13SUlf Hansson if (gov == &pm_domain_always_on_gov) 2204d6948c13SUlf Hansson genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON; 2205d6948c13SUlf Hansson 2206d6948c13SUlf Hansson /* Always-on domains must be powered on at initialization. */ 2207d6948c13SUlf Hansson if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) && 2208d6948c13SUlf Hansson !genpd_status_on(genpd)) { 2209d6948c13SUlf Hansson pr_err("always-on PM domain %s is not on\n", genpd->name); 2210d6948c13SUlf Hansson return -EINVAL; 2211d6948c13SUlf Hansson } 2212d6948c13SUlf Hansson 2213d6948c13SUlf Hansson /* Multiple states but no governor doesn't make sense. 
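	 *
	 * For illustration (assumed provider code; foo_states is hypothetical):
	 * a domain declaring several idle states would typically also pass a
	 * governor, e.g.
	 *
	 *	genpd->states = foo_states;
	 *	genpd->state_count = ARRAY_SIZE(foo_states);
	 *	ret = pm_genpd_init(genpd, &simple_qos_governor, false);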
*/ 2214d6948c13SUlf Hansson if (!gov && genpd->state_count > 1) 2215d6948c13SUlf Hansson pr_warn("%s: no governor for states\n", genpd->name); 2216d6948c13SUlf Hansson 2217d6948c13SUlf Hansson ret = genpd_alloc_data(genpd); 2218d6948c13SUlf Hansson if (ret) 2219d6948c13SUlf Hansson return ret; 2220d6948c13SUlf Hansson 2221d6948c13SUlf Hansson device_initialize(&genpd->dev); 2222d6948c13SUlf Hansson dev_set_name(&genpd->dev, "%s", genpd->name); 2223d6948c13SUlf Hansson 2224d6948c13SUlf Hansson mutex_lock(&gpd_list_lock); 2225d6948c13SUlf Hansson list_add(&genpd->gpd_list_node, &gpd_list); 2226d6948c13SUlf Hansson mutex_unlock(&gpd_list_lock); 2227d6948c13SUlf Hansson genpd_debug_add(genpd); 2228d6948c13SUlf Hansson 2229d6948c13SUlf Hansson return 0; 2230d6948c13SUlf Hansson } 2231d6948c13SUlf Hansson EXPORT_SYMBOL_GPL(pm_genpd_init); 2232d6948c13SUlf Hansson 2233d6948c13SUlf Hansson static int genpd_remove(struct generic_pm_domain *genpd) 2234d6948c13SUlf Hansson { 2235d6948c13SUlf Hansson struct gpd_link *l, *link; 2236d6948c13SUlf Hansson 2237d6948c13SUlf Hansson if (IS_ERR_OR_NULL(genpd)) 2238d6948c13SUlf Hansson return -EINVAL; 2239d6948c13SUlf Hansson 2240d6948c13SUlf Hansson genpd_lock(genpd); 2241d6948c13SUlf Hansson 2242d6948c13SUlf Hansson if (genpd->has_provider) { 2243d6948c13SUlf Hansson genpd_unlock(genpd); 2244d6948c13SUlf Hansson pr_err("Provider present, unable to remove %s\n", genpd->name); 2245d6948c13SUlf Hansson return -EBUSY; 2246d6948c13SUlf Hansson } 2247d6948c13SUlf Hansson 2248d6948c13SUlf Hansson if (!list_empty(&genpd->parent_links) || genpd->device_count) { 2249d6948c13SUlf Hansson genpd_unlock(genpd); 2250d6948c13SUlf Hansson pr_err("%s: unable to remove %s\n", __func__, genpd->name); 2251d6948c13SUlf Hansson return -EBUSY; 2252d6948c13SUlf Hansson } 2253d6948c13SUlf Hansson 2254d6948c13SUlf Hansson list_for_each_entry_safe(link, l, &genpd->child_links, child_node) { 2255d6948c13SUlf Hansson list_del(&link->parent_node); 2256d6948c13SUlf Hansson list_del(&link->child_node); 2257d6948c13SUlf Hansson kfree(link); 2258d6948c13SUlf Hansson } 2259d6948c13SUlf Hansson 2260d6948c13SUlf Hansson list_del(&genpd->gpd_list_node); 2261d6948c13SUlf Hansson genpd_unlock(genpd); 2262d6948c13SUlf Hansson genpd_debug_remove(genpd); 2263d6948c13SUlf Hansson cancel_work_sync(&genpd->power_off_work); 2264d6948c13SUlf Hansson genpd_free_data(genpd); 2265d6948c13SUlf Hansson 2266d6948c13SUlf Hansson pr_debug("%s: removed %s\n", __func__, genpd->name); 2267d6948c13SUlf Hansson 2268d6948c13SUlf Hansson return 0; 2269d6948c13SUlf Hansson } 2270d6948c13SUlf Hansson 2271d6948c13SUlf Hansson /** 2272d6948c13SUlf Hansson * pm_genpd_remove - Remove a generic I/O PM domain 2273d6948c13SUlf Hansson * @genpd: Pointer to PM domain that is to be removed. 2274d6948c13SUlf Hansson * 2275d6948c13SUlf Hansson * To remove the PM domain, this function: 2276d6948c13SUlf Hansson * - Removes the PM domain as a subdomain to any parent domains, 2277d6948c13SUlf Hansson * if it was added. 2278d6948c13SUlf Hansson * - Removes the PM domain from the list of registered PM domains. 2279d6948c13SUlf Hansson * 2280d6948c13SUlf Hansson * The PM domain will only be removed, if the associated provider has 2281d6948c13SUlf Hansson * been removed, it is not a parent to any other PM domain and has no 2282d6948c13SUlf Hansson * devices associated with it. 
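 *
 * Illustrative sketch of a provider's init/teardown (assumed driver code;
 * the foo_* names are hypothetical):
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "foo",
 *		.power_on = foo_power_on,
 *		.power_off = foo_power_off,
 *	};
 *
 *	ret = pm_genpd_init(&foo_pd, NULL, true);
 *	...
 *	ret = pm_genpd_remove(&foo_pd);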
2283d6948c13SUlf Hansson */ 2284d6948c13SUlf Hansson int pm_genpd_remove(struct generic_pm_domain *genpd) 2285d6948c13SUlf Hansson { 2286d6948c13SUlf Hansson int ret; 2287d6948c13SUlf Hansson 2288d6948c13SUlf Hansson mutex_lock(&gpd_list_lock); 2289d6948c13SUlf Hansson ret = genpd_remove(genpd); 2290d6948c13SUlf Hansson mutex_unlock(&gpd_list_lock); 2291d6948c13SUlf Hansson 2292d6948c13SUlf Hansson return ret; 2293d6948c13SUlf Hansson } 2294d6948c13SUlf Hansson EXPORT_SYMBOL_GPL(pm_genpd_remove); 2295d6948c13SUlf Hansson 2296d6948c13SUlf Hansson #ifdef CONFIG_PM_GENERIC_DOMAINS_OF 2297d6948c13SUlf Hansson 2298d6948c13SUlf Hansson /* 2299d6948c13SUlf Hansson * Device Tree based PM domain providers. 2300d6948c13SUlf Hansson * 2301d6948c13SUlf Hansson * The code below implements generic device tree based PM domain providers that 2302d6948c13SUlf Hansson * bind device tree nodes with generic PM domains registered in the system. 2303d6948c13SUlf Hansson * 2304d6948c13SUlf Hansson * Any driver that registers generic PM domains and needs to support binding of 2305d6948c13SUlf Hansson * devices to these domains is supposed to register a PM domain provider, which 2306d6948c13SUlf Hansson * maps a PM domain specifier retrieved from the device tree to a PM domain. 2307d6948c13SUlf Hansson * 2308d6948c13SUlf Hansson * Two simple mapping functions have been provided for convenience: 2309d6948c13SUlf Hansson * - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping. 2310d6948c13SUlf Hansson * - genpd_xlate_onecell() for mapping of multiple PM domains per node by 2311d6948c13SUlf Hansson * index. 2312d6948c13SUlf Hansson */ 2313d6948c13SUlf Hansson 2314d6948c13SUlf Hansson /** 2315d6948c13SUlf Hansson * struct of_genpd_provider - PM domain provider registration structure 2316d6948c13SUlf Hansson * @link: Entry in global list of PM domain providers 2317d6948c13SUlf Hansson * @node: Pointer to device tree node of PM domain provider 2318d6948c13SUlf Hansson * @xlate: Provider-specific xlate callback mapping a set of specifier cells 2319d6948c13SUlf Hansson * into a PM domain. 2320d6948c13SUlf Hansson * @data: context pointer to be passed into @xlate callback 2321d6948c13SUlf Hansson */ 2322d6948c13SUlf Hansson struct of_genpd_provider { 2323d6948c13SUlf Hansson struct list_head link; 2324d6948c13SUlf Hansson struct device_node *node; 2325d6948c13SUlf Hansson genpd_xlate_t xlate; 2326d6948c13SUlf Hansson void *data; 2327d6948c13SUlf Hansson }; 2328d6948c13SUlf Hansson 2329d6948c13SUlf Hansson /* List of registered PM domain providers. */ 2330d6948c13SUlf Hansson static LIST_HEAD(of_genpd_providers); 2331d6948c13SUlf Hansson /* Mutex to protect the list above. */ 2332d6948c13SUlf Hansson static DEFINE_MUTEX(of_genpd_mutex); 2333d6948c13SUlf Hansson 2334d6948c13SUlf Hansson /** 2335d6948c13SUlf Hansson * genpd_xlate_simple() - Xlate function for direct node-domain mapping 2336d6948c13SUlf Hansson * @genpdspec: OF phandle args to map into a PM domain 2337d6948c13SUlf Hansson * @data: xlate function private data - pointer to struct generic_pm_domain 2338d6948c13SUlf Hansson * 2339d6948c13SUlf Hansson * This is a generic xlate function that can be used to model PM domains that 2340d6948c13SUlf Hansson * have their own device tree nodes. The private data of xlate function needs 2341d6948c13SUlf Hansson * to be a valid pointer to struct generic_pm_domain. 
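 *
 * Typically paired with registering a provider for a node such as
 * (illustrative devicetree snippet, assumed binding):
 *
 *	power: power-controller@12340000 {
 *		compatible = "foo,power-controller";
 *		#power-domain-cells = <0>;
 *	};
 *
 * followed by of_genpd_add_provider_simple(np, &foo_pd) in the driver, where
 * foo_pd is the hypothetical domain backing that node.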
2342d6948c13SUlf Hansson */ 2343d6948c13SUlf Hansson static struct generic_pm_domain *genpd_xlate_simple( 23444d082460SKrzysztof Kozlowski const struct of_phandle_args *genpdspec, 2345d6948c13SUlf Hansson void *data) 2346d6948c13SUlf Hansson { 2347d6948c13SUlf Hansson return data; 2348d6948c13SUlf Hansson } 2349d6948c13SUlf Hansson 2350d6948c13SUlf Hansson /** 2351d6948c13SUlf Hansson * genpd_xlate_onecell() - Xlate function using a single index. 2352d6948c13SUlf Hansson * @genpdspec: OF phandle args to map into a PM domain 2353d6948c13SUlf Hansson * @data: xlate function private data - pointer to struct genpd_onecell_data 2354d6948c13SUlf Hansson * 2355d6948c13SUlf Hansson * This is a generic xlate function that can be used to model simple PM domain 2356d6948c13SUlf Hansson * controllers that have one device tree node and provide multiple PM domains. 2357d6948c13SUlf Hansson * A single cell is used as an index into an array of PM domains specified in 2358d6948c13SUlf Hansson * the genpd_onecell_data struct when registering the provider. 2359d6948c13SUlf Hansson */ 2360d6948c13SUlf Hansson static struct generic_pm_domain *genpd_xlate_onecell( 23614d082460SKrzysztof Kozlowski const struct of_phandle_args *genpdspec, 2362d6948c13SUlf Hansson void *data) 2363d6948c13SUlf Hansson { 2364d6948c13SUlf Hansson struct genpd_onecell_data *genpd_data = data; 2365d6948c13SUlf Hansson unsigned int idx = genpdspec->args[0]; 2366d6948c13SUlf Hansson 2367d6948c13SUlf Hansson if (genpdspec->args_count != 1) 2368d6948c13SUlf Hansson return ERR_PTR(-EINVAL); 2369d6948c13SUlf Hansson 2370d6948c13SUlf Hansson if (idx >= genpd_data->num_domains) { 2371d6948c13SUlf Hansson pr_err("%s: invalid domain index %u\n", __func__, idx); 2372d6948c13SUlf Hansson return ERR_PTR(-EINVAL); 2373d6948c13SUlf Hansson } 2374d6948c13SUlf Hansson 2375d6948c13SUlf Hansson if (!genpd_data->domains[idx]) 2376d6948c13SUlf Hansson return ERR_PTR(-ENOENT); 2377d6948c13SUlf Hansson 2378d6948c13SUlf Hansson return genpd_data->domains[idx]; 2379d6948c13SUlf Hansson } 2380d6948c13SUlf Hansson 2381d6948c13SUlf Hansson /** 2382d6948c13SUlf Hansson * genpd_add_provider() - Register a PM domain provider for a node 2383d6948c13SUlf Hansson * @np: Device node pointer associated with the PM domain provider. 2384d6948c13SUlf Hansson * @xlate: Callback for decoding PM domain from phandle arguments. 2385d6948c13SUlf Hansson * @data: Context pointer for @xlate callback. 
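 *
 * Illustrative sketch of the onecell variant (assumed provider code; the
 * foo_* names are hypothetical):
 *
 *	static struct generic_pm_domain *foo_domains[] = { &foo_pd0, &foo_pd1 };
 *	static struct genpd_onecell_data foo_onecell_data = {
 *		.domains = foo_domains,
 *		.num_domains = ARRAY_SIZE(foo_domains),
 *	};
 *
 *	ret = of_genpd_add_provider_onecell(np, &foo_onecell_data);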
2386d6948c13SUlf Hansson */ 2387d6948c13SUlf Hansson static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate, 2388d6948c13SUlf Hansson void *data) 2389d6948c13SUlf Hansson { 2390d6948c13SUlf Hansson struct of_genpd_provider *cp; 2391d6948c13SUlf Hansson 2392d6948c13SUlf Hansson cp = kzalloc(sizeof(*cp), GFP_KERNEL); 2393d6948c13SUlf Hansson if (!cp) 2394d6948c13SUlf Hansson return -ENOMEM; 2395d6948c13SUlf Hansson 2396d6948c13SUlf Hansson cp->node = of_node_get(np); 2397d6948c13SUlf Hansson cp->data = data; 2398d6948c13SUlf Hansson cp->xlate = xlate; 2399d6948c13SUlf Hansson fwnode_dev_initialized(&np->fwnode, true); 2400d6948c13SUlf Hansson 2401d6948c13SUlf Hansson mutex_lock(&of_genpd_mutex); 2402d6948c13SUlf Hansson list_add(&cp->link, &of_genpd_providers); 2403d6948c13SUlf Hansson mutex_unlock(&of_genpd_mutex); 2404d6948c13SUlf Hansson pr_debug("Added domain provider from %pOF\n", np); 2405d6948c13SUlf Hansson 2406d6948c13SUlf Hansson return 0; 2407d6948c13SUlf Hansson } 2408d6948c13SUlf Hansson 2409d6948c13SUlf Hansson static bool genpd_present(const struct generic_pm_domain *genpd) 2410d6948c13SUlf Hansson { 2411d6948c13SUlf Hansson bool ret = false; 2412d6948c13SUlf Hansson const struct generic_pm_domain *gpd; 2413d6948c13SUlf Hansson 2414d6948c13SUlf Hansson mutex_lock(&gpd_list_lock); 2415d6948c13SUlf Hansson list_for_each_entry(gpd, &gpd_list, gpd_list_node) { 2416d6948c13SUlf Hansson if (gpd == genpd) { 2417d6948c13SUlf Hansson ret = true; 2418d6948c13SUlf Hansson break; 2419d6948c13SUlf Hansson } 2420d6948c13SUlf Hansson } 2421d6948c13SUlf Hansson mutex_unlock(&gpd_list_lock); 2422d6948c13SUlf Hansson 2423d6948c13SUlf Hansson return ret; 2424d6948c13SUlf Hansson } 2425d6948c13SUlf Hansson 2426d6948c13SUlf Hansson /** 2427d6948c13SUlf Hansson * of_genpd_add_provider_simple() - Register a simple PM domain provider 2428d6948c13SUlf Hansson * @np: Device node pointer associated with the PM domain provider. 2429d6948c13SUlf Hansson * @genpd: Pointer to PM domain associated with the PM domain provider. 2430d6948c13SUlf Hansson */ 2431d6948c13SUlf Hansson int of_genpd_add_provider_simple(struct device_node *np, 2432d6948c13SUlf Hansson struct generic_pm_domain *genpd) 2433d6948c13SUlf Hansson { 2434d6948c13SUlf Hansson int ret; 2435d6948c13SUlf Hansson 2436d6948c13SUlf Hansson if (!np || !genpd) 2437d6948c13SUlf Hansson return -EINVAL; 2438d6948c13SUlf Hansson 2439d6948c13SUlf Hansson if (!genpd_present(genpd)) 2440d6948c13SUlf Hansson return -EINVAL; 2441d6948c13SUlf Hansson 2442d6948c13SUlf Hansson genpd->dev.of_node = np; 2443d6948c13SUlf Hansson 2444d6948c13SUlf Hansson /* Parse genpd OPP table */ 2445d6948c13SUlf Hansson if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) { 2446d6948c13SUlf Hansson ret = dev_pm_opp_of_add_table(&genpd->dev); 2447d6948c13SUlf Hansson if (ret) 2448d6948c13SUlf Hansson return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n"); 2449d6948c13SUlf Hansson 2450d6948c13SUlf Hansson /* 2451d6948c13SUlf Hansson * Save table for faster processing while setting performance 2452d6948c13SUlf Hansson * state. 
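 * The reference obtained below must be dropped with
 * dev_pm_opp_put_opp_table(), which happens on the error path of this
 * function and in of_genpd_del_provider().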
2453d6948c13SUlf Hansson */ 2454d6948c13SUlf Hansson genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev); 2455d6948c13SUlf Hansson WARN_ON(IS_ERR(genpd->opp_table)); 2456d6948c13SUlf Hansson } 2457d6948c13SUlf Hansson 2458d6948c13SUlf Hansson ret = genpd_add_provider(np, genpd_xlate_simple, genpd); 2459d6948c13SUlf Hansson if (ret) { 2460d6948c13SUlf Hansson if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) { 2461d6948c13SUlf Hansson dev_pm_opp_put_opp_table(genpd->opp_table); 2462d6948c13SUlf Hansson dev_pm_opp_of_remove_table(&genpd->dev); 2463d6948c13SUlf Hansson } 2464d6948c13SUlf Hansson 2465d6948c13SUlf Hansson return ret; 2466d6948c13SUlf Hansson } 2467d6948c13SUlf Hansson 2468d6948c13SUlf Hansson genpd->provider = &np->fwnode; 2469d6948c13SUlf Hansson genpd->has_provider = true; 2470d6948c13SUlf Hansson 2471d6948c13SUlf Hansson return 0; 2472d6948c13SUlf Hansson } 2473d6948c13SUlf Hansson EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple); 2474d6948c13SUlf Hansson 2475d6948c13SUlf Hansson /** 2476d6948c13SUlf Hansson * of_genpd_add_provider_onecell() - Register a onecell PM domain provider 2477d6948c13SUlf Hansson * @np: Device node pointer associated with the PM domain provider. 2478d6948c13SUlf Hansson * @data: Pointer to the data associated with the PM domain provider. 2479d6948c13SUlf Hansson */ 2480d6948c13SUlf Hansson int of_genpd_add_provider_onecell(struct device_node *np, 2481d6948c13SUlf Hansson struct genpd_onecell_data *data) 2482d6948c13SUlf Hansson { 2483d6948c13SUlf Hansson struct generic_pm_domain *genpd; 2484d6948c13SUlf Hansson unsigned int i; 2485d6948c13SUlf Hansson int ret = -EINVAL; 2486d6948c13SUlf Hansson 2487d6948c13SUlf Hansson if (!np || !data) 2488d6948c13SUlf Hansson return -EINVAL; 2489d6948c13SUlf Hansson 2490d6948c13SUlf Hansson if (!data->xlate) 2491d6948c13SUlf Hansson data->xlate = genpd_xlate_onecell; 2492d6948c13SUlf Hansson 2493d6948c13SUlf Hansson for (i = 0; i < data->num_domains; i++) { 2494d6948c13SUlf Hansson genpd = data->domains[i]; 2495d6948c13SUlf Hansson 2496d6948c13SUlf Hansson if (!genpd) 2497d6948c13SUlf Hansson continue; 2498d6948c13SUlf Hansson if (!genpd_present(genpd)) 2499d6948c13SUlf Hansson goto error; 2500d6948c13SUlf Hansson 2501d6948c13SUlf Hansson genpd->dev.of_node = np; 2502d6948c13SUlf Hansson 2503d6948c13SUlf Hansson /* Parse genpd OPP table */ 2504d6948c13SUlf Hansson if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) { 2505d6948c13SUlf Hansson ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i); 2506d6948c13SUlf Hansson if (ret) { 2507d6948c13SUlf Hansson dev_err_probe(&genpd->dev, ret, 2508d6948c13SUlf Hansson "Failed to add OPP table for index %d\n", i); 2509d6948c13SUlf Hansson goto error; 2510d6948c13SUlf Hansson } 2511d6948c13SUlf Hansson 2512d6948c13SUlf Hansson /* 2513d6948c13SUlf Hansson * Save table for faster processing while setting 2514d6948c13SUlf Hansson * performance state. 
2515d6948c13SUlf Hansson */ 2516d6948c13SUlf Hansson genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev); 2517d6948c13SUlf Hansson WARN_ON(IS_ERR(genpd->opp_table)); 2518d6948c13SUlf Hansson } 2519d6948c13SUlf Hansson 2520d6948c13SUlf Hansson genpd->provider = &np->fwnode; 2521d6948c13SUlf Hansson genpd->has_provider = true; 2522d6948c13SUlf Hansson } 2523d6948c13SUlf Hansson 2524d6948c13SUlf Hansson ret = genpd_add_provider(np, data->xlate, data); 2525d6948c13SUlf Hansson if (ret < 0) 2526d6948c13SUlf Hansson goto error; 2527d6948c13SUlf Hansson 2528d6948c13SUlf Hansson return 0; 2529d6948c13SUlf Hansson 2530d6948c13SUlf Hansson error: 2531d6948c13SUlf Hansson while (i--) { 2532d6948c13SUlf Hansson genpd = data->domains[i]; 2533d6948c13SUlf Hansson 2534d6948c13SUlf Hansson if (!genpd) 2535d6948c13SUlf Hansson continue; 2536d6948c13SUlf Hansson 2537d6948c13SUlf Hansson genpd->provider = NULL; 2538d6948c13SUlf Hansson genpd->has_provider = false; 2539d6948c13SUlf Hansson 2540d6948c13SUlf Hansson if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) { 2541d6948c13SUlf Hansson dev_pm_opp_put_opp_table(genpd->opp_table); 2542d6948c13SUlf Hansson dev_pm_opp_of_remove_table(&genpd->dev); 2543d6948c13SUlf Hansson } 2544d6948c13SUlf Hansson } 2545d6948c13SUlf Hansson 2546d6948c13SUlf Hansson return ret; 2547d6948c13SUlf Hansson } 2548d6948c13SUlf Hansson EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell); 2549d6948c13SUlf Hansson 2550d6948c13SUlf Hansson /** 2551d6948c13SUlf Hansson * of_genpd_del_provider() - Remove a previously registered PM domain provider 2552d6948c13SUlf Hansson * @np: Device node pointer associated with the PM domain provider 2553d6948c13SUlf Hansson */ 2554d6948c13SUlf Hansson void of_genpd_del_provider(struct device_node *np) 2555d6948c13SUlf Hansson { 2556d6948c13SUlf Hansson struct of_genpd_provider *cp, *tmp; 2557d6948c13SUlf Hansson struct generic_pm_domain *gpd; 2558d6948c13SUlf Hansson 2559d6948c13SUlf Hansson mutex_lock(&gpd_list_lock); 2560d6948c13SUlf Hansson mutex_lock(&of_genpd_mutex); 2561d6948c13SUlf Hansson list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) { 2562d6948c13SUlf Hansson if (cp->node == np) { 2563d6948c13SUlf Hansson /* 2564d6948c13SUlf Hansson * For each PM domain associated with the 2565d6948c13SUlf Hansson * provider, set the 'has_provider' to false 2566d6948c13SUlf Hansson * so that the PM domain can be safely removed. 
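 * (genpd_remove() will not tear down a domain while it still has a
 * provider.)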
2567d6948c13SUlf Hansson */ 2568d6948c13SUlf Hansson list_for_each_entry(gpd, &gpd_list, gpd_list_node) { 2569d6948c13SUlf Hansson if (gpd->provider == &np->fwnode) { 2570d6948c13SUlf Hansson gpd->has_provider = false; 2571d6948c13SUlf Hansson 2572d6948c13SUlf Hansson if (genpd_is_opp_table_fw(gpd) || !gpd->set_performance_state) 2573d6948c13SUlf Hansson continue; 2574d6948c13SUlf Hansson 2575d6948c13SUlf Hansson dev_pm_opp_put_opp_table(gpd->opp_table); 2576d6948c13SUlf Hansson dev_pm_opp_of_remove_table(&gpd->dev); 2577d6948c13SUlf Hansson } 2578d6948c13SUlf Hansson } 2579d6948c13SUlf Hansson 2580d6948c13SUlf Hansson fwnode_dev_initialized(&cp->node->fwnode, false); 2581d6948c13SUlf Hansson list_del(&cp->link); 2582d6948c13SUlf Hansson of_node_put(cp->node); 2583d6948c13SUlf Hansson kfree(cp); 2584d6948c13SUlf Hansson break; 2585d6948c13SUlf Hansson } 2586d6948c13SUlf Hansson } 2587d6948c13SUlf Hansson mutex_unlock(&of_genpd_mutex); 2588d6948c13SUlf Hansson mutex_unlock(&gpd_list_lock); 2589d6948c13SUlf Hansson } 2590d6948c13SUlf Hansson EXPORT_SYMBOL_GPL(of_genpd_del_provider); 2591d6948c13SUlf Hansson 2592d6948c13SUlf Hansson /** 2593d6948c13SUlf Hansson * genpd_get_from_provider() - Look-up PM domain 2594d6948c13SUlf Hansson * @genpdspec: OF phandle args to use for look-up 2595d6948c13SUlf Hansson * 2596d6948c13SUlf Hansson * Looks for a PM domain provider under the node specified by @genpdspec and if 2597d6948c13SUlf Hansson * found, uses xlate function of the provider to map phandle args to a PM 2598d6948c13SUlf Hansson * domain. 2599d6948c13SUlf Hansson * 2600d6948c13SUlf Hansson * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR() 2601d6948c13SUlf Hansson * on failure. 2602d6948c13SUlf Hansson */ 2603d6948c13SUlf Hansson static struct generic_pm_domain *genpd_get_from_provider( 26044af6bc16SKrzysztof Kozlowski const struct of_phandle_args *genpdspec) 2605d6948c13SUlf Hansson { 2606d6948c13SUlf Hansson struct generic_pm_domain *genpd = ERR_PTR(-ENOENT); 2607d6948c13SUlf Hansson struct of_genpd_provider *provider; 2608d6948c13SUlf Hansson 2609d6948c13SUlf Hansson if (!genpdspec) 2610d6948c13SUlf Hansson return ERR_PTR(-EINVAL); 2611d6948c13SUlf Hansson 2612d6948c13SUlf Hansson mutex_lock(&of_genpd_mutex); 2613d6948c13SUlf Hansson 2614d6948c13SUlf Hansson /* Check if we have such a provider in our array */ 2615d6948c13SUlf Hansson list_for_each_entry(provider, &of_genpd_providers, link) { 2616d6948c13SUlf Hansson if (provider->node == genpdspec->np) 2617d6948c13SUlf Hansson genpd = provider->xlate(genpdspec, provider->data); 2618d6948c13SUlf Hansson if (!IS_ERR(genpd)) 2619d6948c13SUlf Hansson break; 2620d6948c13SUlf Hansson } 2621d6948c13SUlf Hansson 2622d6948c13SUlf Hansson mutex_unlock(&of_genpd_mutex); 2623d6948c13SUlf Hansson 2624d6948c13SUlf Hansson return genpd; 2625d6948c13SUlf Hansson } 2626d6948c13SUlf Hansson 2627d6948c13SUlf Hansson /** 2628d6948c13SUlf Hansson * of_genpd_add_device() - Add a device to an I/O PM domain 2629d6948c13SUlf Hansson * @genpdspec: OF phandle args to use for look-up PM domain 2630d6948c13SUlf Hansson * @dev: Device to be added. 2631d6948c13SUlf Hansson * 2632d6948c13SUlf Hansson * Looks-up an I/O PM domain based upon phandle args provided and adds 2633d6948c13SUlf Hansson * the device to the PM domain. Returns a negative error code on failure. 
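 *
 * A hedged usage sketch (illustrative; not taken from this file). The
 * specifier is typically parsed from the consumer's "power-domains"
 * property first:
 *
 *	struct of_phandle_args pd_args;
 *	int ret;
 *
 *	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
 *					 "#power-domain-cells", 0, &pd_args);
 *	if (!ret) {
 *		ret = of_genpd_add_device(&pd_args, dev);
 *		of_node_put(pd_args.np);
 *	}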
2634d6948c13SUlf Hansson */ 26354af6bc16SKrzysztof Kozlowski int of_genpd_add_device(const struct of_phandle_args *genpdspec, struct device *dev) 2636d6948c13SUlf Hansson { 2637d6948c13SUlf Hansson struct generic_pm_domain *genpd; 2638d6948c13SUlf Hansson int ret; 2639d6948c13SUlf Hansson 2640d6948c13SUlf Hansson if (!dev) 2641d6948c13SUlf Hansson return -EINVAL; 2642d6948c13SUlf Hansson 2643d6948c13SUlf Hansson mutex_lock(&gpd_list_lock); 2644d6948c13SUlf Hansson 2645d6948c13SUlf Hansson genpd = genpd_get_from_provider(genpdspec); 2646d6948c13SUlf Hansson if (IS_ERR(genpd)) { 2647d6948c13SUlf Hansson ret = PTR_ERR(genpd); 2648d6948c13SUlf Hansson goto out; 2649d6948c13SUlf Hansson } 2650d6948c13SUlf Hansson 2651d6948c13SUlf Hansson ret = genpd_add_device(genpd, dev, dev); 2652d6948c13SUlf Hansson 2653d6948c13SUlf Hansson out: 2654d6948c13SUlf Hansson mutex_unlock(&gpd_list_lock); 2655d6948c13SUlf Hansson 2656d6948c13SUlf Hansson return ret; 2657d6948c13SUlf Hansson } 2658d6948c13SUlf Hansson EXPORT_SYMBOL_GPL(of_genpd_add_device); 2659d6948c13SUlf Hansson 2660d6948c13SUlf Hansson /** 2661d6948c13SUlf Hansson * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain. 2662d6948c13SUlf Hansson * @parent_spec: OF phandle args to use for parent PM domain look-up 2663d6948c13SUlf Hansson * @subdomain_spec: OF phandle args to use for subdomain look-up 2664d6948c13SUlf Hansson * 2665d6948c13SUlf Hansson * Looks-up a parent PM domain and subdomain based upon phandle args 2666d6948c13SUlf Hansson * provided and adds the subdomain to the parent PM domain. Returns a 2667d6948c13SUlf Hansson * negative error code on failure. 2668d6948c13SUlf Hansson */ 26694af6bc16SKrzysztof Kozlowski int of_genpd_add_subdomain(const struct of_phandle_args *parent_spec, 26704af6bc16SKrzysztof Kozlowski const struct of_phandle_args *subdomain_spec) 2671d6948c13SUlf Hansson { 2672d6948c13SUlf Hansson struct generic_pm_domain *parent, *subdomain; 2673d6948c13SUlf Hansson int ret; 2674d6948c13SUlf Hansson 2675d6948c13SUlf Hansson mutex_lock(&gpd_list_lock); 2676d6948c13SUlf Hansson 2677d6948c13SUlf Hansson parent = genpd_get_from_provider(parent_spec); 2678d6948c13SUlf Hansson if (IS_ERR(parent)) { 2679d6948c13SUlf Hansson ret = PTR_ERR(parent); 2680d6948c13SUlf Hansson goto out; 2681d6948c13SUlf Hansson } 2682d6948c13SUlf Hansson 2683d6948c13SUlf Hansson subdomain = genpd_get_from_provider(subdomain_spec); 2684d6948c13SUlf Hansson if (IS_ERR(subdomain)) { 2685d6948c13SUlf Hansson ret = PTR_ERR(subdomain); 2686d6948c13SUlf Hansson goto out; 2687d6948c13SUlf Hansson } 2688d6948c13SUlf Hansson 2689d6948c13SUlf Hansson ret = genpd_add_subdomain(parent, subdomain); 2690d6948c13SUlf Hansson 2691d6948c13SUlf Hansson out: 2692d6948c13SUlf Hansson mutex_unlock(&gpd_list_lock); 2693d6948c13SUlf Hansson 2694d6948c13SUlf Hansson return ret == -ENOENT ? -EPROBE_DEFER : ret; 2695d6948c13SUlf Hansson } 2696d6948c13SUlf Hansson EXPORT_SYMBOL_GPL(of_genpd_add_subdomain); 2697d6948c13SUlf Hansson 2698d6948c13SUlf Hansson /** 2699d6948c13SUlf Hansson * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. 2700d6948c13SUlf Hansson * @parent_spec: OF phandle args to use for parent PM domain look-up 2701d6948c13SUlf Hansson * @subdomain_spec: OF phandle args to use for subdomain look-up 2702d6948c13SUlf Hansson * 2703d6948c13SUlf Hansson * Looks-up a parent PM domain and subdomain based upon phandle args 2704d6948c13SUlf Hansson * provided and removes the subdomain from the parent PM domain. 
Returns a 2705d6948c13SUlf Hansson * negative error code on failure. 2706d6948c13SUlf Hansson */ 27074af6bc16SKrzysztof Kozlowski int of_genpd_remove_subdomain(const struct of_phandle_args *parent_spec, 27084af6bc16SKrzysztof Kozlowski const struct of_phandle_args *subdomain_spec) 2709d6948c13SUlf Hansson { 2710d6948c13SUlf Hansson struct generic_pm_domain *parent, *subdomain; 2711d6948c13SUlf Hansson int ret; 2712d6948c13SUlf Hansson 2713d6948c13SUlf Hansson mutex_lock(&gpd_list_lock); 2714d6948c13SUlf Hansson 2715d6948c13SUlf Hansson parent = genpd_get_from_provider(parent_spec); 2716d6948c13SUlf Hansson if (IS_ERR(parent)) { 2717d6948c13SUlf Hansson ret = PTR_ERR(parent); 2718d6948c13SUlf Hansson goto out; 2719d6948c13SUlf Hansson } 2720d6948c13SUlf Hansson 2721d6948c13SUlf Hansson subdomain = genpd_get_from_provider(subdomain_spec); 2722d6948c13SUlf Hansson if (IS_ERR(subdomain)) { 2723d6948c13SUlf Hansson ret = PTR_ERR(subdomain); 2724d6948c13SUlf Hansson goto out; 2725d6948c13SUlf Hansson } 2726d6948c13SUlf Hansson 2727d6948c13SUlf Hansson ret = pm_genpd_remove_subdomain(parent, subdomain); 2728d6948c13SUlf Hansson 2729d6948c13SUlf Hansson out: 2730d6948c13SUlf Hansson mutex_unlock(&gpd_list_lock); 2731d6948c13SUlf Hansson 2732d6948c13SUlf Hansson return ret; 2733d6948c13SUlf Hansson } 2734d6948c13SUlf Hansson EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain); 2735d6948c13SUlf Hansson 2736d6948c13SUlf Hansson /** 2737d6948c13SUlf Hansson * of_genpd_remove_last - Remove the last PM domain registered for a provider 2738d6948c13SUlf Hansson * @np: Pointer to device node associated with provider 2739d6948c13SUlf Hansson * 2740d6948c13SUlf Hansson * Find the last PM domain that was added by a particular provider and 2741d6948c13SUlf Hansson * remove this PM domain from the list of PM domains. The provider is 2742d6948c13SUlf Hansson * identified by the 'provider' device structure that is passed. The PM 2743d6948c13SUlf Hansson * domain will only be removed, if the provider associated with domain 2744d6948c13SUlf Hansson * has been removed. 2745d6948c13SUlf Hansson * 2746d6948c13SUlf Hansson * Returns a valid pointer to struct generic_pm_domain on success or 2747d6948c13SUlf Hansson * ERR_PTR() on failure. 2748d6948c13SUlf Hansson */ 2749d6948c13SUlf Hansson struct generic_pm_domain *of_genpd_remove_last(struct device_node *np) 2750d6948c13SUlf Hansson { 2751d6948c13SUlf Hansson struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT); 2752d6948c13SUlf Hansson int ret; 2753d6948c13SUlf Hansson 2754d6948c13SUlf Hansson if (IS_ERR_OR_NULL(np)) 2755d6948c13SUlf Hansson return ERR_PTR(-EINVAL); 2756d6948c13SUlf Hansson 2757d6948c13SUlf Hansson mutex_lock(&gpd_list_lock); 2758d6948c13SUlf Hansson list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) { 2759d6948c13SUlf Hansson if (gpd->provider == &np->fwnode) { 2760d6948c13SUlf Hansson ret = genpd_remove(gpd); 2761d6948c13SUlf Hansson genpd = ret ? 
ERR_PTR(ret) : gpd; 2762d6948c13SUlf Hansson break; 2763d6948c13SUlf Hansson } 2764d6948c13SUlf Hansson } 2765d6948c13SUlf Hansson mutex_unlock(&gpd_list_lock); 2766d6948c13SUlf Hansson 2767d6948c13SUlf Hansson return genpd; 2768d6948c13SUlf Hansson } 2769d6948c13SUlf Hansson EXPORT_SYMBOL_GPL(of_genpd_remove_last); 2770d6948c13SUlf Hansson 2771d6948c13SUlf Hansson static void genpd_release_dev(struct device *dev) 2772d6948c13SUlf Hansson { 2773d6948c13SUlf Hansson of_node_put(dev->of_node); 2774d6948c13SUlf Hansson kfree(dev); 2775d6948c13SUlf Hansson } 2776d6948c13SUlf Hansson 277780955ae9SLinus Torvalds static const struct bus_type genpd_bus_type = { 2778d6948c13SUlf Hansson .name = "genpd", 2779d6948c13SUlf Hansson }; 2780d6948c13SUlf Hansson 2781d6948c13SUlf Hansson /** 2782d6948c13SUlf Hansson * genpd_dev_pm_detach - Detach a device from its PM domain. 2783d6948c13SUlf Hansson * @dev: Device to detach. 2784d6948c13SUlf Hansson * @power_off: Currently not used 2785d6948c13SUlf Hansson * 2786d6948c13SUlf Hansson * Try to locate a corresponding generic PM domain, which the device was 2787d6948c13SUlf Hansson * attached to previously. If such is found, the device is detached from it. 2788d6948c13SUlf Hansson */ 2789d6948c13SUlf Hansson static void genpd_dev_pm_detach(struct device *dev, bool power_off) 2790d6948c13SUlf Hansson { 2791d6948c13SUlf Hansson struct generic_pm_domain *pd; 2792d6948c13SUlf Hansson unsigned int i; 2793d6948c13SUlf Hansson int ret = 0; 2794d6948c13SUlf Hansson 2795d6948c13SUlf Hansson pd = dev_to_genpd(dev); 2796d6948c13SUlf Hansson if (IS_ERR(pd)) 2797d6948c13SUlf Hansson return; 2798d6948c13SUlf Hansson 2799d6948c13SUlf Hansson dev_dbg(dev, "removing from PM domain %s\n", pd->name); 2800d6948c13SUlf Hansson 2801d6948c13SUlf Hansson /* Drop the default performance state */ 2802d6948c13SUlf Hansson if (dev_gpd_data(dev)->default_pstate) { 2803d6948c13SUlf Hansson dev_pm_genpd_set_performance_state(dev, 0); 2804d6948c13SUlf Hansson dev_gpd_data(dev)->default_pstate = 0; 2805d6948c13SUlf Hansson } 2806d6948c13SUlf Hansson 2807d6948c13SUlf Hansson for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) { 2808d6948c13SUlf Hansson ret = genpd_remove_device(pd, dev); 2809d6948c13SUlf Hansson if (ret != -EAGAIN) 2810d6948c13SUlf Hansson break; 2811d6948c13SUlf Hansson 2812d6948c13SUlf Hansson mdelay(i); 2813d6948c13SUlf Hansson cond_resched(); 2814d6948c13SUlf Hansson } 2815d6948c13SUlf Hansson 2816d6948c13SUlf Hansson if (ret < 0) { 2817d6948c13SUlf Hansson dev_err(dev, "failed to remove from PM domain %s: %d", 2818d6948c13SUlf Hansson pd->name, ret); 2819d6948c13SUlf Hansson return; 2820d6948c13SUlf Hansson } 2821d6948c13SUlf Hansson 2822d6948c13SUlf Hansson /* Check if PM domain can be powered off after removing this device. */ 2823d6948c13SUlf Hansson genpd_queue_power_off_work(pd); 2824d6948c13SUlf Hansson 2825d6948c13SUlf Hansson /* Unregister the device if it was created by genpd. 
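	   (i.e. the virtual device registered by genpd_dev_pm_attach_by_id()).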
*/ 2826d6948c13SUlf Hansson if (dev->bus == &genpd_bus_type) 2827d6948c13SUlf Hansson device_unregister(dev); 2828d6948c13SUlf Hansson } 2829d6948c13SUlf Hansson 2830d6948c13SUlf Hansson static void genpd_dev_pm_sync(struct device *dev) 2831d6948c13SUlf Hansson { 2832d6948c13SUlf Hansson struct generic_pm_domain *pd; 2833d6948c13SUlf Hansson 2834d6948c13SUlf Hansson pd = dev_to_genpd(dev); 2835d6948c13SUlf Hansson if (IS_ERR(pd)) 2836d6948c13SUlf Hansson return; 2837d6948c13SUlf Hansson 2838d6948c13SUlf Hansson genpd_queue_power_off_work(pd); 2839d6948c13SUlf Hansson } 2840d6948c13SUlf Hansson 2841d6948c13SUlf Hansson static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev, 2842d6948c13SUlf Hansson unsigned int index, bool power_on) 2843d6948c13SUlf Hansson { 2844d6948c13SUlf Hansson struct of_phandle_args pd_args; 2845d6948c13SUlf Hansson struct generic_pm_domain *pd; 2846d6948c13SUlf Hansson int pstate; 2847d6948c13SUlf Hansson int ret; 2848d6948c13SUlf Hansson 2849d6948c13SUlf Hansson ret = of_parse_phandle_with_args(dev->of_node, "power-domains", 2850d6948c13SUlf Hansson "#power-domain-cells", index, &pd_args); 2851d6948c13SUlf Hansson if (ret < 0) 2852d6948c13SUlf Hansson return ret; 2853d6948c13SUlf Hansson 2854d6948c13SUlf Hansson mutex_lock(&gpd_list_lock); 2855d6948c13SUlf Hansson pd = genpd_get_from_provider(&pd_args); 2856d6948c13SUlf Hansson of_node_put(pd_args.np); 2857d6948c13SUlf Hansson if (IS_ERR(pd)) { 2858d6948c13SUlf Hansson mutex_unlock(&gpd_list_lock); 2859d6948c13SUlf Hansson dev_dbg(dev, "%s() failed to find PM domain: %ld\n", 2860d6948c13SUlf Hansson __func__, PTR_ERR(pd)); 2861d6948c13SUlf Hansson return driver_deferred_probe_check_state(base_dev); 2862d6948c13SUlf Hansson } 2863d6948c13SUlf Hansson 2864d6948c13SUlf Hansson dev_dbg(dev, "adding to PM domain %s\n", pd->name); 2865d6948c13SUlf Hansson 2866d6948c13SUlf Hansson ret = genpd_add_device(pd, dev, base_dev); 2867d6948c13SUlf Hansson mutex_unlock(&gpd_list_lock); 2868d6948c13SUlf Hansson 2869d6948c13SUlf Hansson if (ret < 0) 2870d6948c13SUlf Hansson return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name); 2871d6948c13SUlf Hansson 2872d6948c13SUlf Hansson dev->pm_domain->detach = genpd_dev_pm_detach; 2873d6948c13SUlf Hansson dev->pm_domain->sync = genpd_dev_pm_sync; 2874d6948c13SUlf Hansson 2875d6948c13SUlf Hansson /* Set the default performance state */ 2876d6948c13SUlf Hansson pstate = of_get_required_opp_performance_state(dev->of_node, index); 2877d6948c13SUlf Hansson if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) { 2878d6948c13SUlf Hansson ret = pstate; 2879d6948c13SUlf Hansson goto err; 2880d6948c13SUlf Hansson } else if (pstate > 0) { 2881d6948c13SUlf Hansson ret = dev_pm_genpd_set_performance_state(dev, pstate); 2882d6948c13SUlf Hansson if (ret) 2883d6948c13SUlf Hansson goto err; 2884d6948c13SUlf Hansson dev_gpd_data(dev)->default_pstate = pstate; 2885d6948c13SUlf Hansson } 2886d6948c13SUlf Hansson 2887d6948c13SUlf Hansson if (power_on) { 2888d6948c13SUlf Hansson genpd_lock(pd); 2889d6948c13SUlf Hansson ret = genpd_power_on(pd, 0); 2890d6948c13SUlf Hansson genpd_unlock(pd); 2891d6948c13SUlf Hansson } 2892d6948c13SUlf Hansson 2893d6948c13SUlf Hansson if (ret) { 2894d6948c13SUlf Hansson /* Drop the default performance state */ 2895d6948c13SUlf Hansson if (dev_gpd_data(dev)->default_pstate) { 2896d6948c13SUlf Hansson dev_pm_genpd_set_performance_state(dev, 0); 2897d6948c13SUlf Hansson dev_gpd_data(dev)->default_pstate = 0; 2898d6948c13SUlf Hansson } 
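		/* Powering on the domain failed: undo the attach and ask the caller to retry probing later. */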
2899d6948c13SUlf Hansson 2900d6948c13SUlf Hansson genpd_remove_device(pd, dev); 2901d6948c13SUlf Hansson return -EPROBE_DEFER; 2902d6948c13SUlf Hansson } 2903d6948c13SUlf Hansson 2904d6948c13SUlf Hansson return 1; 2905d6948c13SUlf Hansson 2906d6948c13SUlf Hansson err: 2907d6948c13SUlf Hansson dev_err(dev, "failed to set required performance state for power-domain %s: %d\n", 2908d6948c13SUlf Hansson pd->name, ret); 2909d6948c13SUlf Hansson genpd_remove_device(pd, dev); 2910d6948c13SUlf Hansson return ret; 2911d6948c13SUlf Hansson } 2912d6948c13SUlf Hansson 2913d6948c13SUlf Hansson /** 2914d6948c13SUlf Hansson * genpd_dev_pm_attach - Attach a device to its PM domain using DT. 2915d6948c13SUlf Hansson * @dev: Device to attach. 2916d6948c13SUlf Hansson * 2917d6948c13SUlf Hansson * Parse device's OF node to find a PM domain specifier. If such is found, 2918d6948c13SUlf Hansson * attaches the device to retrieved pm_domain ops. 2919d6948c13SUlf Hansson * 2920d6948c13SUlf Hansson * Returns 1 on successfully attached PM domain, 0 when the device don't need a 2921d6948c13SUlf Hansson * PM domain or when multiple power-domains exists for it, else a negative error 2922d6948c13SUlf Hansson * code. Note that if a power-domain exists for the device, but it cannot be 2923d6948c13SUlf Hansson * found or turned on, then return -EPROBE_DEFER to ensure that the device is 2924d6948c13SUlf Hansson * not probed and to re-try again later. 2925d6948c13SUlf Hansson */ 2926d6948c13SUlf Hansson int genpd_dev_pm_attach(struct device *dev) 2927d6948c13SUlf Hansson { 2928d6948c13SUlf Hansson if (!dev->of_node) 2929d6948c13SUlf Hansson return 0; 2930d6948c13SUlf Hansson 2931d6948c13SUlf Hansson /* 2932d6948c13SUlf Hansson * Devices with multiple PM domains must be attached separately, as we 2933d6948c13SUlf Hansson * can only attach one PM domain per device. 2934d6948c13SUlf Hansson */ 2935d6948c13SUlf Hansson if (of_count_phandle_with_args(dev->of_node, "power-domains", 2936d6948c13SUlf Hansson "#power-domain-cells") != 1) 2937d6948c13SUlf Hansson return 0; 2938d6948c13SUlf Hansson 2939d6948c13SUlf Hansson return __genpd_dev_pm_attach(dev, dev, 0, true); 2940d6948c13SUlf Hansson } 2941d6948c13SUlf Hansson EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); 2942d6948c13SUlf Hansson 2943d6948c13SUlf Hansson /** 2944d6948c13SUlf Hansson * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains. 2945d6948c13SUlf Hansson * @dev: The device used to lookup the PM domain. 2946d6948c13SUlf Hansson * @index: The index of the PM domain. 2947d6948c13SUlf Hansson * 2948d6948c13SUlf Hansson * Parse device's OF node to find a PM domain specifier at the provided @index. 2949d6948c13SUlf Hansson * If such is found, creates a virtual device and attaches it to the retrieved 2950d6948c13SUlf Hansson * pm_domain ops. To deal with detaching of the virtual device, the ->detach() 2951d6948c13SUlf Hansson * callback in the struct dev_pm_domain are assigned to genpd_dev_pm_detach(). 2952d6948c13SUlf Hansson * 2953d6948c13SUlf Hansson * Returns the created virtual device if successfully attached PM domain, NULL 2954d6948c13SUlf Hansson * when the device don't need a PM domain, else an ERR_PTR() in case of 2955d6948c13SUlf Hansson * failures. If a power-domain exists for the device, but cannot be found or 2956d6948c13SUlf Hansson * turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device 2957d6948c13SUlf Hansson * is not probed and to re-try again later. 
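 *
 * A hedged usage sketch (illustrative; the consumer device and the index are
 * assumptions, not taken from this file). Consumers typically reach this via
 * the dev_pm_domain_attach_by_id() wrapper:
 *
 *	struct device *pd_dev = genpd_dev_pm_attach_by_id(dev, 1);
 *
 *	if (IS_ERR(pd_dev))
 *		return PTR_ERR(pd_dev);
 *	if (pd_dev)
 *		device_link_add(dev, pd_dev,
 *				DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);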
2958d6948c13SUlf Hansson */ 2959d6948c13SUlf Hansson struct device *genpd_dev_pm_attach_by_id(struct device *dev, 2960d6948c13SUlf Hansson unsigned int index) 2961d6948c13SUlf Hansson { 2962d6948c13SUlf Hansson struct device *virt_dev; 2963d6948c13SUlf Hansson int num_domains; 2964d6948c13SUlf Hansson int ret; 2965d6948c13SUlf Hansson 2966d6948c13SUlf Hansson if (!dev->of_node) 2967d6948c13SUlf Hansson return NULL; 2968d6948c13SUlf Hansson 2969d6948c13SUlf Hansson /* Verify that the index is within a valid range. */ 2970d6948c13SUlf Hansson num_domains = of_count_phandle_with_args(dev->of_node, "power-domains", 2971d6948c13SUlf Hansson "#power-domain-cells"); 2972d6948c13SUlf Hansson if (index >= num_domains) 2973d6948c13SUlf Hansson return NULL; 2974d6948c13SUlf Hansson 2975d6948c13SUlf Hansson /* Allocate and register device on the genpd bus. */ 2976d6948c13SUlf Hansson virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL); 2977d6948c13SUlf Hansson if (!virt_dev) 2978d6948c13SUlf Hansson return ERR_PTR(-ENOMEM); 2979d6948c13SUlf Hansson 2980d6948c13SUlf Hansson dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev)); 2981d6948c13SUlf Hansson virt_dev->bus = &genpd_bus_type; 2982d6948c13SUlf Hansson virt_dev->release = genpd_release_dev; 2983d6948c13SUlf Hansson virt_dev->of_node = of_node_get(dev->of_node); 2984d6948c13SUlf Hansson 2985d6948c13SUlf Hansson ret = device_register(virt_dev); 2986d6948c13SUlf Hansson if (ret) { 2987d6948c13SUlf Hansson put_device(virt_dev); 2988d6948c13SUlf Hansson return ERR_PTR(ret); 2989d6948c13SUlf Hansson } 2990d6948c13SUlf Hansson 2991d6948c13SUlf Hansson /* Try to attach the device to the PM domain at the specified index. */ 2992d6948c13SUlf Hansson ret = __genpd_dev_pm_attach(virt_dev, dev, index, false); 2993d6948c13SUlf Hansson if (ret < 1) { 2994d6948c13SUlf Hansson device_unregister(virt_dev); 2995d6948c13SUlf Hansson return ret ? ERR_PTR(ret) : NULL; 2996d6948c13SUlf Hansson } 2997d6948c13SUlf Hansson 2998d6948c13SUlf Hansson pm_runtime_enable(virt_dev); 2999d6948c13SUlf Hansson genpd_queue_power_off_work(dev_to_genpd(virt_dev)); 3000d6948c13SUlf Hansson 3001d6948c13SUlf Hansson return virt_dev; 3002d6948c13SUlf Hansson } 3003d6948c13SUlf Hansson EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id); 3004d6948c13SUlf Hansson 3005d6948c13SUlf Hansson /** 3006d6948c13SUlf Hansson * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains. 3007d6948c13SUlf Hansson * @dev: The device used to lookup the PM domain. 3008d6948c13SUlf Hansson * @name: The name of the PM domain. 3009d6948c13SUlf Hansson * 3010d6948c13SUlf Hansson * Parse device's OF node to find a PM domain specifier using the 3011d6948c13SUlf Hansson * power-domain-names DT property. For further description see 3012d6948c13SUlf Hansson * genpd_dev_pm_attach_by_id(). 
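 *
 * For example (illustrative DT snippet, not taken from this file), with
 *
 *	power-domains = <&pd_perf>, <&pd_mem>;
 *	power-domain-names = "perf", "mem";
 *
 * a call to genpd_dev_pm_attach_by_name(dev, "mem") resolves to index 1 and
 * behaves like genpd_dev_pm_attach_by_id(dev, 1).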
3013d6948c13SUlf Hansson */ 3014d6948c13SUlf Hansson struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name) 3015d6948c13SUlf Hansson { 3016d6948c13SUlf Hansson int index; 3017d6948c13SUlf Hansson 3018d6948c13SUlf Hansson if (!dev->of_node) 3019d6948c13SUlf Hansson return NULL; 3020d6948c13SUlf Hansson 3021d6948c13SUlf Hansson index = of_property_match_string(dev->of_node, "power-domain-names", 3022d6948c13SUlf Hansson name); 3023d6948c13SUlf Hansson if (index < 0) 3024d6948c13SUlf Hansson return NULL; 3025d6948c13SUlf Hansson 3026d6948c13SUlf Hansson return genpd_dev_pm_attach_by_id(dev, index); 3027d6948c13SUlf Hansson } 3028d6948c13SUlf Hansson 3029d6948c13SUlf Hansson static const struct of_device_id idle_state_match[] = { 3030d6948c13SUlf Hansson { .compatible = "domain-idle-state", }, 3031d6948c13SUlf Hansson { } 3032d6948c13SUlf Hansson }; 3033d6948c13SUlf Hansson 3034d6948c13SUlf Hansson static int genpd_parse_state(struct genpd_power_state *genpd_state, 3035d6948c13SUlf Hansson struct device_node *state_node) 3036d6948c13SUlf Hansson { 3037d6948c13SUlf Hansson int err; 3038d6948c13SUlf Hansson u32 residency; 3039d6948c13SUlf Hansson u32 entry_latency, exit_latency; 3040d6948c13SUlf Hansson 3041d6948c13SUlf Hansson err = of_property_read_u32(state_node, "entry-latency-us", 3042d6948c13SUlf Hansson &entry_latency); 3043d6948c13SUlf Hansson if (err) { 3044d6948c13SUlf Hansson pr_debug(" * %pOF missing entry-latency-us property\n", 3045d6948c13SUlf Hansson state_node); 3046d6948c13SUlf Hansson return -EINVAL; 3047d6948c13SUlf Hansson } 3048d6948c13SUlf Hansson 3049d6948c13SUlf Hansson err = of_property_read_u32(state_node, "exit-latency-us", 3050d6948c13SUlf Hansson &exit_latency); 3051d6948c13SUlf Hansson if (err) { 3052d6948c13SUlf Hansson pr_debug(" * %pOF missing exit-latency-us property\n", 3053d6948c13SUlf Hansson state_node); 3054d6948c13SUlf Hansson return -EINVAL; 3055d6948c13SUlf Hansson } 3056d6948c13SUlf Hansson 3057d6948c13SUlf Hansson err = of_property_read_u32(state_node, "min-residency-us", &residency); 3058d6948c13SUlf Hansson if (!err) 3059d6948c13SUlf Hansson genpd_state->residency_ns = 1000LL * residency; 3060d6948c13SUlf Hansson 3061d6948c13SUlf Hansson genpd_state->power_on_latency_ns = 1000LL * exit_latency; 3062d6948c13SUlf Hansson genpd_state->power_off_latency_ns = 1000LL * entry_latency; 3063d6948c13SUlf Hansson genpd_state->fwnode = &state_node->fwnode; 3064d6948c13SUlf Hansson 3065d6948c13SUlf Hansson return 0; 3066d6948c13SUlf Hansson } 3067d6948c13SUlf Hansson 3068d6948c13SUlf Hansson static int genpd_iterate_idle_states(struct device_node *dn, 3069d6948c13SUlf Hansson struct genpd_power_state *states) 3070d6948c13SUlf Hansson { 3071d6948c13SUlf Hansson int ret; 3072d6948c13SUlf Hansson struct of_phandle_iterator it; 3073d6948c13SUlf Hansson struct device_node *np; 3074d6948c13SUlf Hansson int i = 0; 3075d6948c13SUlf Hansson 3076d6948c13SUlf Hansson ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL); 3077d6948c13SUlf Hansson if (ret <= 0) 3078d6948c13SUlf Hansson return ret == -ENOENT ? 
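		/* a missing domain-idle-states property simply means zero states */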
0 : ret; 3079d6948c13SUlf Hansson 3080d6948c13SUlf Hansson /* Loop over the phandles until all the requested entry is found */ 3081d6948c13SUlf Hansson of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) { 3082d6948c13SUlf Hansson np = it.node; 3083d6948c13SUlf Hansson if (!of_match_node(idle_state_match, np)) 3084d6948c13SUlf Hansson continue; 3085d6948c13SUlf Hansson 3086d6948c13SUlf Hansson if (!of_device_is_available(np)) 3087d6948c13SUlf Hansson continue; 3088d6948c13SUlf Hansson 3089d6948c13SUlf Hansson if (states) { 3090d6948c13SUlf Hansson ret = genpd_parse_state(&states[i], np); 3091d6948c13SUlf Hansson if (ret) { 3092d6948c13SUlf Hansson pr_err("Parsing idle state node %pOF failed with err %d\n", 3093d6948c13SUlf Hansson np, ret); 3094d6948c13SUlf Hansson of_node_put(np); 3095d6948c13SUlf Hansson return ret; 3096d6948c13SUlf Hansson } 3097d6948c13SUlf Hansson } 3098d6948c13SUlf Hansson i++; 3099d6948c13SUlf Hansson } 3100d6948c13SUlf Hansson 3101d6948c13SUlf Hansson return i; 3102d6948c13SUlf Hansson } 3103d6948c13SUlf Hansson 3104d6948c13SUlf Hansson /** 3105d6948c13SUlf Hansson * of_genpd_parse_idle_states: Return array of idle states for the genpd. 3106d6948c13SUlf Hansson * 3107d6948c13SUlf Hansson * @dn: The genpd device node 3108d6948c13SUlf Hansson * @states: The pointer to which the state array will be saved. 3109d6948c13SUlf Hansson * @n: The count of elements in the array returned from this function. 3110d6948c13SUlf Hansson * 3111d6948c13SUlf Hansson * Returns the device states parsed from the OF node. The memory for the states 3112d6948c13SUlf Hansson * is allocated by this function and is the responsibility of the caller to 3113d6948c13SUlf Hansson * free the memory after use. If any or zero compatible domain idle states is 3114d6948c13SUlf Hansson * found it returns 0 and in case of errors, a negative error code is returned. 3115d6948c13SUlf Hansson */ 3116d6948c13SUlf Hansson int of_genpd_parse_idle_states(struct device_node *dn, 3117d6948c13SUlf Hansson struct genpd_power_state **states, int *n) 3118d6948c13SUlf Hansson { 3119d6948c13SUlf Hansson struct genpd_power_state *st; 3120d6948c13SUlf Hansson int ret; 3121d6948c13SUlf Hansson 3122d6948c13SUlf Hansson ret = genpd_iterate_idle_states(dn, NULL); 3123d6948c13SUlf Hansson if (ret < 0) 3124d6948c13SUlf Hansson return ret; 3125d6948c13SUlf Hansson 3126d6948c13SUlf Hansson if (!ret) { 3127d6948c13SUlf Hansson *states = NULL; 3128d6948c13SUlf Hansson *n = 0; 3129d6948c13SUlf Hansson return 0; 3130d6948c13SUlf Hansson } 3131d6948c13SUlf Hansson 3132d6948c13SUlf Hansson st = kcalloc(ret, sizeof(*st), GFP_KERNEL); 3133d6948c13SUlf Hansson if (!st) 3134d6948c13SUlf Hansson return -ENOMEM; 3135d6948c13SUlf Hansson 3136d6948c13SUlf Hansson ret = genpd_iterate_idle_states(dn, st); 3137d6948c13SUlf Hansson if (ret <= 0) { 3138d6948c13SUlf Hansson kfree(st); 3139d6948c13SUlf Hansson return ret < 0 ? 
ret : -EINVAL; 3140d6948c13SUlf Hansson } 3141d6948c13SUlf Hansson 3142d6948c13SUlf Hansson *states = st; 3143d6948c13SUlf Hansson *n = ret; 3144d6948c13SUlf Hansson 3145d6948c13SUlf Hansson return 0; 3146d6948c13SUlf Hansson } 3147d6948c13SUlf Hansson EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states); 3148d6948c13SUlf Hansson 3149d6948c13SUlf Hansson static int __init genpd_bus_init(void) 3150d6948c13SUlf Hansson { 3151d6948c13SUlf Hansson return bus_register(&genpd_bus_type); 3152d6948c13SUlf Hansson } 3153d6948c13SUlf Hansson core_initcall(genpd_bus_init); 3154d6948c13SUlf Hansson 3155d6948c13SUlf Hansson #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */ 3156d6948c13SUlf Hansson 3157d6948c13SUlf Hansson 3158d6948c13SUlf Hansson /*** debugfs support ***/ 3159d6948c13SUlf Hansson 3160d6948c13SUlf Hansson #ifdef CONFIG_DEBUG_FS 3161d6948c13SUlf Hansson /* 3162d6948c13SUlf Hansson * TODO: This function is a slightly modified version of rtpm_status_show 3163d6948c13SUlf Hansson * from sysfs.c, so generalize it. 3164d6948c13SUlf Hansson */ 3165d6948c13SUlf Hansson static void rtpm_status_str(struct seq_file *s, struct device *dev) 3166d6948c13SUlf Hansson { 3167d6948c13SUlf Hansson static const char * const status_lookup[] = { 3168d6948c13SUlf Hansson [RPM_ACTIVE] = "active", 3169d6948c13SUlf Hansson [RPM_RESUMING] = "resuming", 3170d6948c13SUlf Hansson [RPM_SUSPENDED] = "suspended", 3171d6948c13SUlf Hansson [RPM_SUSPENDING] = "suspending" 3172d6948c13SUlf Hansson }; 3173d6948c13SUlf Hansson const char *p = ""; 3174d6948c13SUlf Hansson 3175d6948c13SUlf Hansson if (dev->power.runtime_error) 3176d6948c13SUlf Hansson p = "error"; 3177d6948c13SUlf Hansson else if (dev->power.disable_depth) 3178d6948c13SUlf Hansson p = "unsupported"; 3179d6948c13SUlf Hansson else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup)) 3180d6948c13SUlf Hansson p = status_lookup[dev->power.runtime_status]; 3181d6948c13SUlf Hansson else 3182d6948c13SUlf Hansson WARN_ON(1); 3183d6948c13SUlf Hansson 3184d6948c13SUlf Hansson seq_printf(s, "%-25s ", p); 3185d6948c13SUlf Hansson } 3186d6948c13SUlf Hansson 3187d6948c13SUlf Hansson static void perf_status_str(struct seq_file *s, struct device *dev) 3188d6948c13SUlf Hansson { 3189d6948c13SUlf Hansson struct generic_pm_domain_data *gpd_data; 3190d6948c13SUlf Hansson 3191d6948c13SUlf Hansson gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); 3192d6948c13SUlf Hansson seq_put_decimal_ull(s, "", gpd_data->performance_state); 3193d6948c13SUlf Hansson } 3194d6948c13SUlf Hansson 3195d6948c13SUlf Hansson static int genpd_summary_one(struct seq_file *s, 3196d6948c13SUlf Hansson struct generic_pm_domain *genpd) 3197d6948c13SUlf Hansson { 3198d6948c13SUlf Hansson static const char * const status_lookup[] = { 3199d6948c13SUlf Hansson [GENPD_STATE_ON] = "on", 3200d6948c13SUlf Hansson [GENPD_STATE_OFF] = "off" 3201d6948c13SUlf Hansson }; 3202d6948c13SUlf Hansson struct pm_domain_data *pm_data; 3203d6948c13SUlf Hansson const char *kobj_path; 3204d6948c13SUlf Hansson struct gpd_link *link; 3205d6948c13SUlf Hansson char state[16]; 3206d6948c13SUlf Hansson int ret; 3207d6948c13SUlf Hansson 3208d6948c13SUlf Hansson ret = genpd_lock_interruptible(genpd); 3209d6948c13SUlf Hansson if (ret) 3210d6948c13SUlf Hansson return -ERESTARTSYS; 3211d6948c13SUlf Hansson 3212d6948c13SUlf Hansson if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup))) 3213d6948c13SUlf Hansson goto exit; 3214d6948c13SUlf Hansson if (!genpd_status_on(genpd)) 3215d6948c13SUlf Hansson snprintf(state, sizeof(state), "%s-%u", 
3216d6948c13SUlf Hansson status_lookup[genpd->status], genpd->state_idx); 3217d6948c13SUlf Hansson else 3218d6948c13SUlf Hansson snprintf(state, sizeof(state), "%s", 3219d6948c13SUlf Hansson status_lookup[genpd->status]); 3220d6948c13SUlf Hansson seq_printf(s, "%-30s %-50s %u", genpd->name, state, genpd->performance_state); 3221d6948c13SUlf Hansson 3222d6948c13SUlf Hansson /* 3223d6948c13SUlf Hansson * Modifications on the list require holding locks on both 3224d6948c13SUlf Hansson * parent and child, so we are safe. 3225d6948c13SUlf Hansson * Also genpd->name is immutable. 3226d6948c13SUlf Hansson */ 3227d6948c13SUlf Hansson list_for_each_entry(link, &genpd->parent_links, parent_node) { 3228d6948c13SUlf Hansson if (list_is_first(&link->parent_node, &genpd->parent_links)) 3229d6948c13SUlf Hansson seq_printf(s, "\n%48s", " "); 3230d6948c13SUlf Hansson seq_printf(s, "%s", link->child->name); 3231d6948c13SUlf Hansson if (!list_is_last(&link->parent_node, &genpd->parent_links)) 3232d6948c13SUlf Hansson seq_puts(s, ", "); 3233d6948c13SUlf Hansson } 3234d6948c13SUlf Hansson 3235d6948c13SUlf Hansson list_for_each_entry(pm_data, &genpd->dev_list, list_node) { 3236d6948c13SUlf Hansson kobj_path = kobject_get_path(&pm_data->dev->kobj, 3237d6948c13SUlf Hansson genpd_is_irq_safe(genpd) ? 3238d6948c13SUlf Hansson GFP_ATOMIC : GFP_KERNEL); 3239d6948c13SUlf Hansson if (kobj_path == NULL) 3240d6948c13SUlf Hansson continue; 3241d6948c13SUlf Hansson 3242d6948c13SUlf Hansson seq_printf(s, "\n %-50s ", kobj_path); 3243d6948c13SUlf Hansson rtpm_status_str(s, pm_data->dev); 3244d6948c13SUlf Hansson perf_status_str(s, pm_data->dev); 3245d6948c13SUlf Hansson kfree(kobj_path); 3246d6948c13SUlf Hansson } 3247d6948c13SUlf Hansson 3248d6948c13SUlf Hansson seq_puts(s, "\n"); 3249d6948c13SUlf Hansson exit: 3250d6948c13SUlf Hansson genpd_unlock(genpd); 3251d6948c13SUlf Hansson 3252d6948c13SUlf Hansson return 0; 3253d6948c13SUlf Hansson } 3254d6948c13SUlf Hansson 3255d6948c13SUlf Hansson static int summary_show(struct seq_file *s, void *data) 3256d6948c13SUlf Hansson { 3257d6948c13SUlf Hansson struct generic_pm_domain *genpd; 3258d6948c13SUlf Hansson int ret = 0; 3259d6948c13SUlf Hansson 3260d6948c13SUlf Hansson seq_puts(s, "domain status children performance\n"); 3261d6948c13SUlf Hansson seq_puts(s, " /device runtime status\n"); 3262d6948c13SUlf Hansson seq_puts(s, "----------------------------------------------------------------------------------------------\n"); 3263d6948c13SUlf Hansson 3264d6948c13SUlf Hansson ret = mutex_lock_interruptible(&gpd_list_lock); 3265d6948c13SUlf Hansson if (ret) 3266d6948c13SUlf Hansson return -ERESTARTSYS; 3267d6948c13SUlf Hansson 3268d6948c13SUlf Hansson list_for_each_entry(genpd, &gpd_list, gpd_list_node) { 3269d6948c13SUlf Hansson ret = genpd_summary_one(s, genpd); 3270d6948c13SUlf Hansson if (ret) 3271d6948c13SUlf Hansson break; 3272d6948c13SUlf Hansson } 3273d6948c13SUlf Hansson mutex_unlock(&gpd_list_lock); 3274d6948c13SUlf Hansson 3275d6948c13SUlf Hansson return ret; 3276d6948c13SUlf Hansson } 3277d6948c13SUlf Hansson 3278d6948c13SUlf Hansson static int status_show(struct seq_file *s, void *data) 3279d6948c13SUlf Hansson { 3280d6948c13SUlf Hansson static const char * const status_lookup[] = { 3281d6948c13SUlf Hansson [GENPD_STATE_ON] = "on", 3282d6948c13SUlf Hansson [GENPD_STATE_OFF] = "off" 3283d6948c13SUlf Hansson }; 3284d6948c13SUlf Hansson 3285d6948c13SUlf Hansson struct generic_pm_domain *genpd = s->private; 3286d6948c13SUlf Hansson int ret = 0; 3287d6948c13SUlf Hansson 
3288d6948c13SUlf Hansson ret = genpd_lock_interruptible(genpd); 3289d6948c13SUlf Hansson if (ret) 3290d6948c13SUlf Hansson return -ERESTARTSYS; 3291d6948c13SUlf Hansson 3292d6948c13SUlf Hansson if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup))) 3293d6948c13SUlf Hansson goto exit; 3294d6948c13SUlf Hansson 3295d6948c13SUlf Hansson if (genpd->status == GENPD_STATE_OFF) 3296d6948c13SUlf Hansson seq_printf(s, "%s-%u\n", status_lookup[genpd->status], 3297d6948c13SUlf Hansson genpd->state_idx); 3298d6948c13SUlf Hansson else 3299d6948c13SUlf Hansson seq_printf(s, "%s\n", status_lookup[genpd->status]); 3300d6948c13SUlf Hansson exit: 3301d6948c13SUlf Hansson genpd_unlock(genpd); 3302d6948c13SUlf Hansson return ret; 3303d6948c13SUlf Hansson } 3304d6948c13SUlf Hansson 3305d6948c13SUlf Hansson static int sub_domains_show(struct seq_file *s, void *data) 3306d6948c13SUlf Hansson { 3307d6948c13SUlf Hansson struct generic_pm_domain *genpd = s->private; 3308d6948c13SUlf Hansson struct gpd_link *link; 3309d6948c13SUlf Hansson int ret = 0; 3310d6948c13SUlf Hansson 3311d6948c13SUlf Hansson ret = genpd_lock_interruptible(genpd); 3312d6948c13SUlf Hansson if (ret) 3313d6948c13SUlf Hansson return -ERESTARTSYS; 3314d6948c13SUlf Hansson 3315d6948c13SUlf Hansson list_for_each_entry(link, &genpd->parent_links, parent_node) 3316d6948c13SUlf Hansson seq_printf(s, "%s\n", link->child->name); 3317d6948c13SUlf Hansson 3318d6948c13SUlf Hansson genpd_unlock(genpd); 3319d6948c13SUlf Hansson return ret; 3320d6948c13SUlf Hansson } 3321d6948c13SUlf Hansson 3322d6948c13SUlf Hansson static int idle_states_show(struct seq_file *s, void *data) 3323d6948c13SUlf Hansson { 3324d6948c13SUlf Hansson struct generic_pm_domain *genpd = s->private; 3325d6948c13SUlf Hansson u64 now, delta, idle_time = 0; 3326d6948c13SUlf Hansson unsigned int i; 3327d6948c13SUlf Hansson int ret = 0; 3328d6948c13SUlf Hansson 3329d6948c13SUlf Hansson ret = genpd_lock_interruptible(genpd); 3330d6948c13SUlf Hansson if (ret) 3331d6948c13SUlf Hansson return -ERESTARTSYS; 3332d6948c13SUlf Hansson 3333d6948c13SUlf Hansson seq_puts(s, "State Time Spent(ms) Usage Rejected\n"); 3334d6948c13SUlf Hansson 3335d6948c13SUlf Hansson for (i = 0; i < genpd->state_count; i++) { 3336d6948c13SUlf Hansson idle_time += genpd->states[i].idle_time; 3337d6948c13SUlf Hansson 3338d6948c13SUlf Hansson if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) { 3339d6948c13SUlf Hansson now = ktime_get_mono_fast_ns(); 3340d6948c13SUlf Hansson if (now > genpd->accounting_time) { 3341d6948c13SUlf Hansson delta = now - genpd->accounting_time; 3342d6948c13SUlf Hansson idle_time += delta; 3343d6948c13SUlf Hansson } 3344d6948c13SUlf Hansson } 3345d6948c13SUlf Hansson 3346d6948c13SUlf Hansson do_div(idle_time, NSEC_PER_MSEC); 3347d6948c13SUlf Hansson seq_printf(s, "S%-13i %-14llu %-14llu %llu\n", i, idle_time, 3348d6948c13SUlf Hansson genpd->states[i].usage, genpd->states[i].rejected); 3349d6948c13SUlf Hansson } 3350d6948c13SUlf Hansson 3351d6948c13SUlf Hansson genpd_unlock(genpd); 3352d6948c13SUlf Hansson return ret; 3353d6948c13SUlf Hansson } 3354d6948c13SUlf Hansson 3355d6948c13SUlf Hansson static int active_time_show(struct seq_file *s, void *data) 3356d6948c13SUlf Hansson { 3357d6948c13SUlf Hansson struct generic_pm_domain *genpd = s->private; 3358d6948c13SUlf Hansson u64 now, on_time, delta = 0; 3359d6948c13SUlf Hansson int ret = 0; 3360d6948c13SUlf Hansson 3361d6948c13SUlf Hansson ret = genpd_lock_interruptible(genpd); 3362d6948c13SUlf Hansson if (ret) 3363d6948c13SUlf 
Hansson return -ERESTARTSYS; 3364d6948c13SUlf Hansson 3365d6948c13SUlf Hansson if (genpd->status == GENPD_STATE_ON) { 3366d6948c13SUlf Hansson now = ktime_get_mono_fast_ns(); 3367d6948c13SUlf Hansson if (now > genpd->accounting_time) 3368d6948c13SUlf Hansson delta = now - genpd->accounting_time; 3369d6948c13SUlf Hansson } 3370d6948c13SUlf Hansson 3371d6948c13SUlf Hansson on_time = genpd->on_time + delta; 3372d6948c13SUlf Hansson do_div(on_time, NSEC_PER_MSEC); 3373d6948c13SUlf Hansson seq_printf(s, "%llu ms\n", on_time); 3374d6948c13SUlf Hansson 3375d6948c13SUlf Hansson genpd_unlock(genpd); 3376d6948c13SUlf Hansson return ret; 3377d6948c13SUlf Hansson } 3378d6948c13SUlf Hansson 3379d6948c13SUlf Hansson static int total_idle_time_show(struct seq_file *s, void *data) 3380d6948c13SUlf Hansson { 3381d6948c13SUlf Hansson struct generic_pm_domain *genpd = s->private; 3382d6948c13SUlf Hansson u64 now, delta, total = 0; 3383d6948c13SUlf Hansson unsigned int i; 3384d6948c13SUlf Hansson int ret = 0; 3385d6948c13SUlf Hansson 3386d6948c13SUlf Hansson ret = genpd_lock_interruptible(genpd); 3387d6948c13SUlf Hansson if (ret) 3388d6948c13SUlf Hansson return -ERESTARTSYS; 3389d6948c13SUlf Hansson 3390d6948c13SUlf Hansson for (i = 0; i < genpd->state_count; i++) { 3391d6948c13SUlf Hansson total += genpd->states[i].idle_time; 3392d6948c13SUlf Hansson 3393d6948c13SUlf Hansson if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) { 3394d6948c13SUlf Hansson now = ktime_get_mono_fast_ns(); 3395d6948c13SUlf Hansson if (now > genpd->accounting_time) { 3396d6948c13SUlf Hansson delta = now - genpd->accounting_time; 3397d6948c13SUlf Hansson total += delta; 3398d6948c13SUlf Hansson } 3399d6948c13SUlf Hansson } 3400d6948c13SUlf Hansson } 3401d6948c13SUlf Hansson 3402d6948c13SUlf Hansson do_div(total, NSEC_PER_MSEC); 3403d6948c13SUlf Hansson seq_printf(s, "%llu ms\n", total); 3404d6948c13SUlf Hansson 3405d6948c13SUlf Hansson genpd_unlock(genpd); 3406d6948c13SUlf Hansson return ret; 3407d6948c13SUlf Hansson } 3408d6948c13SUlf Hansson 3409d6948c13SUlf Hansson 3410d6948c13SUlf Hansson static int devices_show(struct seq_file *s, void *data) 3411d6948c13SUlf Hansson { 3412d6948c13SUlf Hansson struct generic_pm_domain *genpd = s->private; 3413d6948c13SUlf Hansson struct pm_domain_data *pm_data; 3414d6948c13SUlf Hansson const char *kobj_path; 3415d6948c13SUlf Hansson int ret = 0; 3416d6948c13SUlf Hansson 3417d6948c13SUlf Hansson ret = genpd_lock_interruptible(genpd); 3418d6948c13SUlf Hansson if (ret) 3419d6948c13SUlf Hansson return -ERESTARTSYS; 3420d6948c13SUlf Hansson 3421d6948c13SUlf Hansson list_for_each_entry(pm_data, &genpd->dev_list, list_node) { 3422d6948c13SUlf Hansson kobj_path = kobject_get_path(&pm_data->dev->kobj, 3423d6948c13SUlf Hansson genpd_is_irq_safe(genpd) ? 
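					/* the genpd lock is a spinlock for IRQ-safe domains, so the allocation must not sleep */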
3424d6948c13SUlf Hansson GFP_ATOMIC : GFP_KERNEL); 3425d6948c13SUlf Hansson if (kobj_path == NULL) 3426d6948c13SUlf Hansson continue; 3427d6948c13SUlf Hansson 3428d6948c13SUlf Hansson seq_printf(s, "%s\n", kobj_path); 3429d6948c13SUlf Hansson kfree(kobj_path); 3430d6948c13SUlf Hansson } 3431d6948c13SUlf Hansson 3432d6948c13SUlf Hansson genpd_unlock(genpd); 3433d6948c13SUlf Hansson return ret; 3434d6948c13SUlf Hansson } 3435d6948c13SUlf Hansson 3436d6948c13SUlf Hansson static int perf_state_show(struct seq_file *s, void *data) 3437d6948c13SUlf Hansson { 3438d6948c13SUlf Hansson struct generic_pm_domain *genpd = s->private; 3439d6948c13SUlf Hansson 3440d6948c13SUlf Hansson if (genpd_lock_interruptible(genpd)) 3441d6948c13SUlf Hansson return -ERESTARTSYS; 3442d6948c13SUlf Hansson 3443d6948c13SUlf Hansson seq_printf(s, "%u\n", genpd->performance_state); 3444d6948c13SUlf Hansson 3445d6948c13SUlf Hansson genpd_unlock(genpd); 3446d6948c13SUlf Hansson return 0; 3447d6948c13SUlf Hansson } 3448d6948c13SUlf Hansson 3449d6948c13SUlf Hansson DEFINE_SHOW_ATTRIBUTE(summary); 3450d6948c13SUlf Hansson DEFINE_SHOW_ATTRIBUTE(status); 3451d6948c13SUlf Hansson DEFINE_SHOW_ATTRIBUTE(sub_domains); 3452d6948c13SUlf Hansson DEFINE_SHOW_ATTRIBUTE(idle_states); 3453d6948c13SUlf Hansson DEFINE_SHOW_ATTRIBUTE(active_time); 3454d6948c13SUlf Hansson DEFINE_SHOW_ATTRIBUTE(total_idle_time); 3455d6948c13SUlf Hansson DEFINE_SHOW_ATTRIBUTE(devices); 3456d6948c13SUlf Hansson DEFINE_SHOW_ATTRIBUTE(perf_state); 3457d6948c13SUlf Hansson 3458d6948c13SUlf Hansson static void genpd_debug_add(struct generic_pm_domain *genpd) 3459d6948c13SUlf Hansson { 3460d6948c13SUlf Hansson struct dentry *d; 3461d6948c13SUlf Hansson 3462d6948c13SUlf Hansson if (!genpd_debugfs_dir) 3463d6948c13SUlf Hansson return; 3464d6948c13SUlf Hansson 3465d6948c13SUlf Hansson d = debugfs_create_dir(genpd->name, genpd_debugfs_dir); 3466d6948c13SUlf Hansson 3467d6948c13SUlf Hansson debugfs_create_file("current_state", 0444, 3468d6948c13SUlf Hansson d, genpd, &status_fops); 3469d6948c13SUlf Hansson debugfs_create_file("sub_domains", 0444, 3470d6948c13SUlf Hansson d, genpd, &sub_domains_fops); 3471d6948c13SUlf Hansson debugfs_create_file("idle_states", 0444, 3472d6948c13SUlf Hansson d, genpd, &idle_states_fops); 3473d6948c13SUlf Hansson debugfs_create_file("active_time", 0444, 3474d6948c13SUlf Hansson d, genpd, &active_time_fops); 3475d6948c13SUlf Hansson debugfs_create_file("total_idle_time", 0444, 3476d6948c13SUlf Hansson d, genpd, &total_idle_time_fops); 3477d6948c13SUlf Hansson debugfs_create_file("devices", 0444, 3478d6948c13SUlf Hansson d, genpd, &devices_fops); 3479d6948c13SUlf Hansson if (genpd->set_performance_state) 3480d6948c13SUlf Hansson debugfs_create_file("perf_state", 0444, 3481d6948c13SUlf Hansson d, genpd, &perf_state_fops); 3482d6948c13SUlf Hansson } 3483d6948c13SUlf Hansson 3484d6948c13SUlf Hansson static int __init genpd_debug_init(void) 3485d6948c13SUlf Hansson { 3486d6948c13SUlf Hansson struct generic_pm_domain *genpd; 3487d6948c13SUlf Hansson 3488d6948c13SUlf Hansson genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL); 3489d6948c13SUlf Hansson 3490d6948c13SUlf Hansson debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir, 3491d6948c13SUlf Hansson NULL, &summary_fops); 3492d6948c13SUlf Hansson 3493d6948c13SUlf Hansson list_for_each_entry(genpd, &gpd_list, gpd_list_node) 3494d6948c13SUlf Hansson genpd_debug_add(genpd); 3495d6948c13SUlf Hansson 3496d6948c13SUlf Hansson return 0; 3497d6948c13SUlf Hansson } 3498d6948c13SUlf 
Hansson late_initcall(genpd_debug_init);
3499d6948c13SUlf Hansson
3500d6948c13SUlf Hansson static void __exit genpd_debug_exit(void)
3501d6948c13SUlf Hansson {
3502d6948c13SUlf Hansson 	debugfs_remove_recursive(genpd_debugfs_dir);
3503d6948c13SUlf Hansson }
3504d6948c13SUlf Hansson __exitcall(genpd_debug_exit);
3505d6948c13SUlf Hansson #endif /* CONFIG_DEBUG_FS */
3506
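/*
 * Resulting debugfs layout (a sketch, assuming debugfs is mounted at
 * /sys/kernel/debug; derived from genpd_debug_init() and genpd_debug_add()
 * above):
 *
 *	/sys/kernel/debug/pm_genpd/
 *		pm_genpd_summary
 *		<domain name>/
 *			current_state
 *			sub_domains
 *			idle_states
 *			active_time
 *			total_idle_time
 *			devices
 *			perf_state	(only if ->set_performance_state is set)
 */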