xref: /linux/arch/arm/mach-omap2/pm44xx.c (revision b43ab901d671e3e3cad425ea5e9a3c74e266dcdd)
/*
 * OMAP4 Power Management Routines
 *
 * Copyright (C) 2010-2011 Texas Instruments, Inc.
 * Rajendra Nayak <rnayak@ti.com>
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/slab.h>

#include "common.h"
#include "clockdomain.h"
#include "powerdomain.h"
#include "pm.h"

struct power_state {
	struct powerdomain *pwrdm;
	u32 next_state;
#ifdef CONFIG_SUSPEND
	u32 saved_state;
	u32 saved_logic_state;
#endif
	struct list_head node;
};

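/* Powerdomains whose states are programmed for suspend; filled by pwrdms_setup() */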
static LIST_HEAD(pwrst_list);

#ifdef CONFIG_SUSPEND
static int omap4_pm_suspend(void)
{
	struct power_state *pwrst;
	int state, ret = 0;
	u32 cpu_id = smp_processor_id();

	/* Save the current powerdomain next-state and logic state */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);
		pwrst->saved_logic_state = pwrdm_read_logic_retst(pwrst->pwrdm);
	}

	/* Program the power domain states targeted for suspend */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
		pwrdm_set_logic_retst(pwrst->pwrdm, PWRDM_POWER_OFF);
	}

	/*
	 * For MPUSS to hit power domain retention (CSWR or OSWR), the
	 * CPU0 and CPU1 power domains need to be in OFF or DORMANT state,
	 * since CPU power domain CSWR is not supported by hardware on
	 * OMAP4. Only the master CPU follows the suspend path; all other
	 * CPUs follow the CPU hotplug path in system-wide suspend.
	 * More details can be found in OMAP4430 TRM section 4.3.4.2.
	 */
	omap4_enter_lowpower(cpu_id, PWRDM_POWER_OFF);
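	/* Execution resumes here once the master CPU wakes from low power */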

	/* Restore the saved powerdomain and logic states */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
		if (state > pwrst->next_state) {
			pr_info("Powerdomain (%s) didn't enter target state %d\n",
				pwrst->pwrdm->name, pwrst->next_state);
			ret = -1;
		}
		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
		pwrdm_set_logic_retst(pwrst->pwrdm, pwrst->saved_logic_state);
	}
	if (ret)
		pr_crit("Could not enter target state in pm_suspend\n");
	else
		pr_info("Successfully put all powerdomains to target state\n");

	return 0;
}

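/*
 * omap4_pm_enter - PM core .enter hook: both standby and mem suspend
 * states are handled by the same omap4_pm_suspend() path.
 */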
static int omap4_pm_enter(suspend_state_t suspend_state)
{
	int ret = 0;

	switch (suspend_state) {
	case PM_SUSPEND_STANDBY:
	case PM_SUSPEND_MEM:
		ret = omap4_pm_suspend();
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

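/*
 * Keep the idle loop from entering low-power states while a system
 * suspend transition is in progress; re-enabled in omap4_pm_end().
 */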
static int omap4_pm_begin(suspend_state_t state)
{
	disable_hlt();
	return 0;
}

static void omap4_pm_end(void)
{
	enable_hlt();
	return;
}

static const struct platform_suspend_ops omap_pm_ops = {
	.begin		= omap4_pm_begin,
	.end		= omap4_pm_end,
	.enter		= omap4_pm_enter,
	.valid		= suspend_valid_only_mem,
};
#endif /* CONFIG_SUSPEND */

/*
 * Enable hardware supervised mode for all clockdomains that support it.
 * Initiate a sleep transition for the remaining clockdomains if they
 * are unused.
 */
static int __init clkdms_setup(struct clockdomain *clkdm, void *unused)
{
	if (clkdm->flags & CLKDM_CAN_ENABLE_AUTO)
		clkdm_allow_idle(clkdm);
	else if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP &&
			atomic_read(&clkdm->usecount) == 0)
		clkdm_sleep(clkdm);
	return 0;
}
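/*
 * pwrdms_setup - pwrdm_for_each() callback: add the powerdomain to
 * pwrst_list and program its suspend target state to retention.
 */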
static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
{
	struct power_state *pwrst;

	if (!pwrdm->pwrsts)
		return 0;

	/*
	 * Skip the CPU0 and CPU1 power domains. CPU1 is programmed
	 * through the hotplug path and CPU0 is explicitly programmed
	 * further down in the code path.
	 */
	if (!strncmp(pwrdm->name, "cpu", 3))
		return 0;

	/*
	 * FIXME: Remove this check when core retention is supported.
	 * Until then only the MPUSS power domain is added to the list.
	 */
	if (strcmp(pwrdm->name, "mpu_pwrdm"))
		return 0;

	pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC);
	if (!pwrst)
		return -ENOMEM;

	pwrst->pwrdm = pwrdm;
	pwrst->next_state = PWRDM_POWER_RET;
	list_add(&pwrst->node, &pwrst_list);

	return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
}

/**
 * omap_default_idle - OMAP4 default idle routine
 *
 * Implements the OMAP4 memory and IO ordering requirements which can't be
 * addressed with the default arch_idle() hook. Used by all CPUs with
 * !CONFIG_CPUIDLE and by the secondary CPU with CONFIG_CPUIDLE.
 */
static void omap_default_idle(void)
{
	local_irq_disable();
	local_fiq_disable();

	omap_do_wfi();

	local_fiq_enable();
	local_irq_enable();
}

/**
 * omap4_pm_init - Init routine for OMAP4 PM
 *
 * Initializes all powerdomain and clockdomain target states
 * and all PRCM settings.
 */
static int __init omap4_pm_init(void)
{
	int ret;
	struct clockdomain *emif_clkdm, *mpuss_clkdm, *l3_1_clkdm;
	struct clockdomain *ducati_clkdm, *l3_2_clkdm, *l4_per_clkdm;

	if (!cpu_is_omap44xx())
		return -ENODEV;

	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
		return -ENODEV;
	}

	pr_info("Power Management for TI OMAP4.\n");

	ret = pwrdm_for_each(pwrdms_setup, NULL);
	if (ret) {
		pr_err("Failed to setup powerdomains\n");
		goto err2;
	}

	/*
	 * The dynamic dependencies between MPUSS -> MEMIF,
	 * MPUSS -> L4_PER/L3_* and DUCATI -> L3_* don't work as
	 * expected. The hardware recommendation is to enable static
	 * dependencies for these to avoid system lockups or random crashes.
	 */
	mpuss_clkdm = clkdm_lookup("mpuss_clkdm");
	emif_clkdm = clkdm_lookup("l3_emif_clkdm");
	l3_1_clkdm = clkdm_lookup("l3_1_clkdm");
	l3_2_clkdm = clkdm_lookup("l3_2_clkdm");
	l4_per_clkdm = clkdm_lookup("l4_per_clkdm");
	ducati_clkdm = clkdm_lookup("ducati_clkdm");
	if (!mpuss_clkdm || !emif_clkdm || !l3_1_clkdm ||
	    !l3_2_clkdm || !ducati_clkdm || !l4_per_clkdm) {
		ret = -ENODEV;
		goto err2;
	}

	ret = clkdm_add_wkdep(mpuss_clkdm, emif_clkdm);
	ret |= clkdm_add_wkdep(mpuss_clkdm, l3_1_clkdm);
	ret |= clkdm_add_wkdep(mpuss_clkdm, l3_2_clkdm);
	ret |= clkdm_add_wkdep(mpuss_clkdm, l4_per_clkdm);
	ret |= clkdm_add_wkdep(ducati_clkdm, l3_1_clkdm);
	ret |= clkdm_add_wkdep(ducati_clkdm, l3_2_clkdm);
	if (ret) {
		pr_err("Failed to add MPUSS -> L3/EMIF/L4PER, DUCATI -> L3 wakeup dependency\n");
		goto err2;
	}

	ret = omap4_mpuss_init();
	if (ret) {
		pr_err("Failed to initialise OMAP4 MPUSS\n");
		goto err2;
	}

	(void) clkdm_for_each(clkdms_setup, NULL);

#ifdef CONFIG_SUSPEND
	suspend_set_ops(&omap_pm_ops);
#endif /* CONFIG_SUSPEND */

	/* Overwrite the default arch_idle() */
	pm_idle = omap_default_idle;

	omap4_idle_init();

err2:
	return ret;
}
late_initcall(omap4_pm_init);