xref: /linux/arch/arm/mach-omap2/pm33xx-core.c (revision 336b78c655c84ce9ce47219185171b3912109c0a)
// SPDX-License-Identifier: GPL-2.0
/*
 * AM33XX Arch Power Management Routines
 *
 * Copyright (C) 2016-2018 Texas Instruments Incorporated - https://www.ti.com/
 *	Dave Gerlach
 */

#include <linux/cpuidle.h>
#include <linux/platform_data/pm33xx.h>
#include <linux/suspend.h>
#include <asm/cpuidle.h>
#include <asm/smp_scu.h>
#include <asm/suspend.h>
#include <linux/errno.h>
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/platform_data/gpio-omap.h>
#include <linux/wkup_m3_ipc.h>
#include <linux/of.h>
#include <linux/rtc.h>

#include "cm33xx.h"
#include "common.h"
#include "control.h"
#include "clockdomain.h"
#include "iomap.h"
#include "pm.h"
#include "powerdomain.h"
#include "prm33xx.h"
#include "soc.h"
#include "sram.h"
#include "omap-secure.h"

static struct powerdomain *cefuse_pwrdm, *gfx_pwrdm, *per_pwrdm, *mpu_pwrdm;
static struct clockdomain *gfx_l4ls_clkdm;
static void __iomem *scu_base;

static int (*idle_fn)(u32 wfi_flags);

struct amx3_idle_state {
	int wfi_flags;
};

static struct amx3_idle_state *idle_states;

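/* Map the Cortex-A9 SCU so its power mode can be switched around suspend and idle */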
static int am43xx_map_scu(void)
{
	scu_base = ioremap(scu_a9_get_base(), SZ_256);

	if (!scu_base)
		return -ENOMEM;

	return 0;
}

static int am33xx_check_off_mode_enable(void)
{
	if (enable_off_mode)
		pr_warn("WARNING: This platform does not support off-mode, entering DeepSleep suspend.\n");

	/* off mode not supported on am335x so return 0 always */
	return 0;
}

static int am43xx_check_off_mode_enable(void)
{
	/*
	 * Check for am437x-gp-evm which has the right hardware design to
	 * support this mode reliably.
	 */
	if (of_machine_is_compatible("ti,am437x-gp-evm") && enable_off_mode)
		return enable_off_mode;
	else if (enable_off_mode)
		pr_warn("WARNING: This platform does not support off-mode, entering DeepSleep suspend.\n");

	return 0;
}

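/*
 * Common am335x/am437x init: look up the GFX, PER and MPU power domains,
 * program default clockdomain states, power off the CEFUSE domain on GP
 * devices and record the SoC-specific idle callback.
 */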
static int amx3_common_init(int (*idle)(u32 wfi_flags))
{
	gfx_pwrdm = pwrdm_lookup("gfx_pwrdm");
	per_pwrdm = pwrdm_lookup("per_pwrdm");
	mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");

	if ((!gfx_pwrdm) || (!per_pwrdm) || (!mpu_pwrdm))
		return -ENODEV;

	(void)clkdm_for_each(omap_pm_clkdms_setup, NULL);

	/* CEFUSE domain can be turned off post bootup */
	cefuse_pwrdm = pwrdm_lookup("cefuse_pwrdm");
	if (!cefuse_pwrdm)
		pr_err("PM: Failed to get cefuse_pwrdm\n");
	else if (omap_type() != OMAP2_DEVICE_TYPE_GP)
		pr_info("PM: Leaving EFUSE power domain active\n");
	else
		omap_set_pwrdm_state(cefuse_pwrdm, PWRDM_POWER_OFF);

	idle_fn = idle;

	return 0;
}

static int am33xx_suspend_init(int (*idle)(u32 wfi_flags))
{
	int ret;

	gfx_l4ls_clkdm = clkdm_lookup("gfx_l4ls_gfx_clkdm");

	if (!gfx_l4ls_clkdm) {
		pr_err("PM: Cannot lookup gfx_l4ls_clkdm clockdomain\n");
		return -ENODEV;
	}

	ret = amx3_common_init(idle);

	return ret;
}

static int am43xx_suspend_init(int (*idle)(u32 wfi_flags))
{
	int ret = 0;

	ret = am43xx_map_scu();
	if (ret) {
		pr_err("PM: Could not ioremap SCU\n");
		return ret;
	}

	ret = amx3_common_init(idle);

	return ret;
}

static int amx3_suspend_deinit(void)
{
	idle_fn = NULL;
	return 0;
}

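/* Power the GFX domain off before suspend; its state is checked again on resume */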
static void amx3_pre_suspend_common(void)
{
	omap_set_pwrdm_state(gfx_pwrdm, PWRDM_POWER_OFF);
}

static void amx3_post_suspend_common(void)
{
	int status;
	/*
	 * Because gfx_pwrdm is the only one under MPU control,
	 * report its transition status
	 */
	status = pwrdm_read_pwrst(gfx_pwrdm);
	if (status != PWRDM_POWER_OFF)
		pr_err("PM: GFX domain did not transition: %x\n", status);
}

static int am33xx_suspend(unsigned int state, int (*fn)(unsigned long),
			  unsigned long args)
{
	int ret = 0;

	amx3_pre_suspend_common();
	ret = cpu_suspend(args, fn);
	amx3_post_suspend_common();

	/*
	 * BUG: GFX_L4LS clock domain needs to be woken up to
	 * ensure that the L4LS clock domain does not get stuck in
	 * transition. If that happens, the L3 module does not get
	 * disabled, thereby leading to the PER power domain
	 * transition failing.
	 */

	clkdm_wakeup(gfx_l4ls_clkdm);
	clkdm_sleep(gfx_l4ls_clkdm);

	return ret;
}

static int am43xx_suspend(unsigned int state, int (*fn)(unsigned long),
			  unsigned long args)
{
	int ret = 0;

	/* Suspend secure side on HS devices */
	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
		if (optee_available)
			omap_smccc_smc(AM43xx_PPA_SVC_PM_SUSPEND, 0);
		else
			omap_secure_dispatcher(AM43xx_PPA_SVC_PM_SUSPEND,
					       FLAG_START_CRITICAL,
					       0, 0, 0, 0, 0);
	}

	amx3_pre_suspend_common();
	scu_power_mode(scu_base, SCU_PM_POWEROFF);
	ret = cpu_suspend(args, fn);
	scu_power_mode(scu_base, SCU_PM_NORMAL);

	if (!am43xx_check_off_mode_enable())
		amx3_post_suspend_common();

	/*
	 * Resume secure side on HS devices.
	 *
	 * Note that even on systems with OP-TEE available this resume call is
	 * issued to the ROM. This is because upon waking from suspend the ROM
	 * is restored as the secure monitor. On systems with OP-TEE, the ROM
	 * will restore OP-TEE during this call.
	 */
	if (omap_type() != OMAP2_DEVICE_TYPE_GP)
		omap_secure_dispatcher(AM43xx_PPA_SVC_PM_RESUME,
				       FLAG_START_CRITICAL,
				       0, 0, 0, 0, 0);

	return ret;
}

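/* cpuidle path for am335x: skip the WFI if an interrupt or reschedule is already pending */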
static int am33xx_cpu_suspend(int (*fn)(unsigned long), unsigned long args)
{
	int ret = 0;

	if (omap_irq_pending() || need_resched())
		return ret;

	ret = cpu_suspend(args, fn);

	return ret;
}

static int am43xx_cpu_suspend(int (*fn)(unsigned long), unsigned long args)
{
	int ret = 0;

	if (!scu_base)
		return 0;

	scu_power_mode(scu_base, SCU_PM_DORMANT);
	ret = cpu_suspend(args, fn);
	scu_power_mode(scu_base, SCU_PM_NORMAL);

	return ret;
}

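/* Keep the idle loop polling while a system suspend is in progress */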
static void amx3_begin_suspend(void)
{
	cpu_idle_poll_ctrl(true);
}

static void amx3_finish_suspend(void)
{
	cpu_idle_poll_ctrl(false);
}

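/* Return the SoC-specific SRAM code/data addresses handed to the pm33xx driver */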
static struct am33xx_pm_sram_addr *amx3_get_sram_addrs(void)
{
	if (soc_is_am33xx())
		return &am33xx_pm_sram;
	else if (soc_is_am437x())
		return &am43xx_pm_sram;
	else
		return NULL;
}

static void am43xx_save_context(void)
{
}

static void am33xx_save_context(void)
{
	omap_intc_save_context();
}

static void am33xx_restore_context(void)
{
	omap_intc_restore_context();
}

static void am43xx_restore_context(void)
{
	/*
	 * HACK: restore dpll_per_clkdcoldo register contents, to avoid
	 * breaking suspend-resume
	 */
	writel_relaxed(0x0, AM33XX_L4_WK_IO_ADDRESS(0x44df2e14));
}

static struct am33xx_pm_platform_data am33xx_ops = {
	.init = am33xx_suspend_init,
	.deinit = amx3_suspend_deinit,
	.soc_suspend = am33xx_suspend,
	.cpu_suspend = am33xx_cpu_suspend,
	.begin_suspend = amx3_begin_suspend,
	.finish_suspend = amx3_finish_suspend,
	.get_sram_addrs = amx3_get_sram_addrs,
	.save_context = am33xx_save_context,
	.restore_context = am33xx_restore_context,
	.check_off_mode_enable = am33xx_check_off_mode_enable,
};

static struct am33xx_pm_platform_data am43xx_ops = {
	.init = am43xx_suspend_init,
	.deinit = amx3_suspend_deinit,
	.soc_suspend = am43xx_suspend,
	.cpu_suspend = am43xx_cpu_suspend,
	.begin_suspend = amx3_begin_suspend,
	.finish_suspend = amx3_finish_suspend,
	.get_sram_addrs = amx3_get_sram_addrs,
	.save_context = am43xx_save_context,
	.restore_context = am43xx_restore_context,
	.check_off_mode_enable = am43xx_check_off_mode_enable,
};

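/* Select the SoC-specific ops passed to the pm33xx platform device */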
static struct am33xx_pm_platform_data *am33xx_pm_get_pdata(void)
{
	if (soc_is_am33xx())
		return &am33xx_ops;
	else if (soc_is_am437x())
		return &am43xx_ops;
	else
		return NULL;
}

#ifdef CONFIG_SUSPEND
/*
 * Block system suspend initially. Later on pm33xx sets up its own
 * platform_suspend_ops after probe. That also depends on wkup_m3_ipc
 * being loaded and am335x-pm-firmware.elf being booted.
 */
static int amx3_suspend_block(suspend_state_t state)
{
	pr_warn("PM not initialized for pm33xx, wkup_m3_ipc, or am335x-pm-firmware.elf\n");

	return -EINVAL;
}

static int amx3_pm_valid(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_STANDBY:
		return 1;
	default:
		return 0;
	}
}

static const struct platform_suspend_ops amx3_blocked_pm_ops = {
	.begin = amx3_suspend_block,
	.valid = amx3_pm_valid,
};

static void __init amx3_block_suspend(void)
{
	suspend_set_ops(&amx3_blocked_pm_ops);
}
#else
static inline void amx3_block_suspend(void)
{
}
#endif	/* CONFIG_SUSPEND */

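/* Register the pm33xx platform device and block suspend until its driver probes */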
int __init amx3_common_pm_init(void)
{
	struct am33xx_pm_platform_data *pdata;
	struct platform_device_info devinfo;

	pdata = am33xx_pm_get_pdata();

	memset(&devinfo, 0, sizeof(devinfo));
	devinfo.name = "pm33xx";
	devinfo.data = pdata;
	devinfo.size_data = sizeof(*pdata);
	devinfo.id = -1;
	platform_device_register_full(&devinfo);
	amx3_block_suspend();

	return 0;
}

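/*
 * Walk the cpu-idle-states phandles and record WFI flags for each state,
 * starting at index 1; states marked "ti,idle-wkup-m3" need the wkup_m3
 * woken and the caches flushed before WFI.
 */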
static int __init amx3_idle_init(struct device_node *cpu_node, int cpu)
{
	struct device_node *state_node;
	struct amx3_idle_state states[CPUIDLE_STATE_MAX];
	int i;
	int state_count = 1;

	for (i = 0; ; i++) {
		state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
		if (!state_node)
			break;

		if (!of_device_is_available(state_node))
			continue;

		if (i == CPUIDLE_STATE_MAX) {
			pr_warn("%s: cpuidle states reached max possible\n",
				__func__);
			break;
		}

		states[state_count].wfi_flags = 0;

		if (of_property_read_bool(state_node, "ti,idle-wkup-m3"))
			states[state_count].wfi_flags |= WFI_FLAG_WAKE_M3 |
							 WFI_FLAG_FLUSH_CACHE;

		state_count++;
	}

	idle_states = kcalloc(state_count, sizeof(*idle_states), GFP_KERNEL);
	if (!idle_states)
		return -ENOMEM;

	for (i = 1; i < state_count; i++)
		idle_states[i].wfi_flags = states[i].wfi_flags;

	return 0;
}

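/* cpuidle suspend hook: pass the state's WFI flags to the idle routine registered at init */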
static int amx3_idle_enter(unsigned long index)
{
	struct amx3_idle_state *idle_state = &idle_states[index];

	if (!idle_state)
		return -EINVAL;

	if (idle_fn)
		idle_fn(idle_state->wfi_flags);

	return 0;
}

static struct cpuidle_ops amx3_cpuidle_ops __initdata = {
	.init = amx3_idle_init,
	.suspend = amx3_idle_enter,
};

CPUIDLE_METHOD_OF_DECLARE(pm33xx_idle, "ti,am3352", &amx3_cpuidle_ops);
CPUIDLE_METHOD_OF_DECLARE(pm43xx_idle, "ti,am4372", &amx3_cpuidle_ops);