xref: /linux/arch/arm/mach-omap2/pm34xx.c (revision e5c86679d5e864947a52fb31e45a425dea3e7fa9)
/*
 * OMAP3 Power Management Routines
 *
 * Copyright (C) 2006-2008 Nokia Corporation
 * Tony Lindgren <tony@atomide.com>
 * Jouni Hogander
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 * Rajendra Nayak <rnayak@ti.com>
 *
 * Copyright (C) 2005 Texas Instruments, Inc.
 * Richard Woodruff <r-woodruff2@ti.com>
 *
 * Based on pm.c for omap1
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/omap-dma.h>
#include <linux/omap-gpmc.h>
#include <linux/platform_data/gpio-omap.h>

#include <trace/events/power.h>

#include <asm/fncpy.h>
#include <asm/suspend.h>
#include <asm/system_misc.h>

#include "clockdomain.h"
#include "powerdomain.h"
#include "soc.h"
#include "common.h"
#include "cm3xxx.h"
#include "cm-regbits-34xx.h"
#include "prm-regbits-34xx.h"
#include "prm3xxx.h"
#include "pm.h"
#include "sdrc.h"
#include "sram.h"
#include "control.h"
#include "vc.h"

/* pm34xx errata defined in pm.h */
u16 pm34xx_errata;

struct power_state {
	struct powerdomain *pwrdm;
	u32 next_state;
#ifdef CONFIG_SUSPEND
	u32 saved_state;
#endif
	struct list_head node;
};

static LIST_HEAD(pwrst_list);

static int (*_omap_save_secure_sram)(u32 *addr);
void (*omap3_do_wfi_sram)(void);

static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
static struct powerdomain *core_pwrdm, *per_pwrdm;

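/*
 * Save the CORE powerdomain context (padconf, INTC, GPMC, system control
 * module and DMA global registers) so it can be restored after a CORE
 * off transition.
 */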
static void omap3_core_save_context(void)
{
	omap3_ctrl_save_padconf();

	/*
	 * Force write last pad into memory, as this can fail in some
	 * cases according to errata 1.157, 1.185
	 */
	omap_ctrl_writel(omap_ctrl_readl(OMAP343X_PADCONF_ETK_D14),
		OMAP343X_CONTROL_MEM_WKUP + 0x2a0);

	/* Save the Interrupt controller context */
	omap_intc_save_context();
	/* Save the GPMC context */
	omap3_gpmc_save_context();
	/* Save the system control module context; padconf already saved above */
	omap3_control_save_context();
	omap_dma_global_context_save();
}

static void omap3_core_restore_context(void)
{
	/* Restore the control module context, padconf restored by h/w */
	omap3_control_restore_context();
	/* Restore the GPMC context */
	omap3_gpmc_restore_context();
	/* Restore the interrupt controller context */
	omap_intc_restore_context();
	omap_dma_global_context_restore();
}

/*
 * FIXME: This function should be called before entering off-mode after
 * OMAP3 secure services have been accessed. Currently it is only called
 * once during the boot sequence, but this works because we are not using
 * the secure services.
 */
static void omap3_save_secure_ram_context(void)
{
	u32 ret;
	int mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);

	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
		/*
		 * MPU next state must be set to POWER_ON temporarily,
		 * otherwise the WFI executed inside the ROM code
		 * will hang the system.
		 */
		pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
		ret = _omap_save_secure_sram((u32 *)(unsigned long)
				__pa(omap3_secure_ram_storage));
		pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state);
		/* The following is for error tracking; it should not happen */
		if (ret) {
			pr_err("save_secure_sram() returns %08x\n", ret);
			while (1)
				;
		}
	}
}

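/*
 * PRCM interrupt handler for I/O pad and I/O chain wakeup events: clears
 * the ST_IO and ST_IO_CHAIN status bits in the WKUP module.
 */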
static irqreturn_t _prcm_int_handle_io(int irq, void *unused)
{
	int c;

	c = omap_prm_clear_mod_irqs(WKUP_MOD, 1, OMAP3430_ST_IO_MASK |
				    OMAP3430_ST_IO_CHAIN_MASK);

	return c ? IRQ_HANDLED : IRQ_NONE;
}

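/*
 * PRCM interrupt handler for all remaining wakeup events: clears the
 * pending status bits of the WKUP, CORE, PER and, on revisions after
 * 3430 ES1.0, the USBHOST modules, except for the I/O events handled
 * by the handler above.
 */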
static irqreturn_t _prcm_int_handle_wakeup(int irq, void *unused)
{
	int c;

	/*
	 * Clear all except ST_IO and ST_IO_CHAIN for the wkup module;
	 * these are handled in a separate handler to avoid acking
	 * IO events before they are parsed in the mux code.
	 */
	c = omap_prm_clear_mod_irqs(WKUP_MOD, 1, ~(OMAP3430_ST_IO_MASK |
						   OMAP3430_ST_IO_CHAIN_MASK));
	c += omap_prm_clear_mod_irqs(CORE_MOD, 1, ~0);
	c += omap_prm_clear_mod_irqs(OMAP3430_PER_MOD, 1, ~0);
	if (omap_rev() > OMAP3430_REV_ES1_0) {
		c += omap_prm_clear_mod_irqs(CORE_MOD, 3, ~0);
		c += omap_prm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1, ~0);
	}

	return c ? IRQ_HANDLED : IRQ_NONE;
}

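/*
 * Save the ARM auxiliary control and L2 auxiliary control registers into
 * the context area pointed to by @save, so they can be restored when
 * resuming from off-mode.
 */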
static void omap34xx_save_context(u32 *save)
{
	u32 val;

	/* Read Auxiliary Control Register */
	asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (val));
	*save++ = 1;
	*save++ = val;

	/* Read L2 AUX ctrl register */
	asm("mrc p15, 1, %0, c9, c0, 2" : "=r" (val));
	*save++ = 1;
	*save++ = val;
}

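/* Finisher function passed to cpu_suspend(): enter the low-level idle code */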
static int omap34xx_do_sram_idle(unsigned long save_state)
{
	omap34xx_cpu_suspend(save_state);
	return 0;
}

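/*
 * Read the programmed next power states for the MPU, PER and CORE
 * powerdomains, save the context they will lose, and execute WFI from
 * SRAM. Called from both the idle path (omap3_pm_idle) and the suspend
 * path (omap3_pm_suspend).
 */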
void omap_sram_idle(void)
{
	/*
	 * save_state tells the low-level code what needs to be saved and
	 * restored in omap_sram_idle:
	 * save_state = 0 => Nothing to save and restore
	 * save_state = 1 => Only L1 and logic lost
	 * save_state = 2 => Only L2 lost
	 * save_state = 3 => L1, L2 and logic lost
	 */
	int save_state = 0;
	int mpu_next_state = PWRDM_POWER_ON;
	int per_next_state = PWRDM_POWER_ON;
	int core_next_state = PWRDM_POWER_ON;
	int per_going_off;
	u32 sdrc_pwr = 0;

	mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
	switch (mpu_next_state) {
	case PWRDM_POWER_ON:
	case PWRDM_POWER_RET:
		/* No need to save context */
		save_state = 0;
		break;
	case PWRDM_POWER_OFF:
		save_state = 3;
		break;
	default:
		/* Invalid state */
		pr_err("Invalid mpu state in sram_idle\n");
		return;
	}

	/* NEON control */
	if (pwrdm_read_pwrst(neon_pwrdm) == PWRDM_POWER_ON)
		pwrdm_set_next_pwrst(neon_pwrdm, mpu_next_state);

	/* Enable IO-PAD and IO-CHAIN wakeups */
	per_next_state = pwrdm_read_next_pwrst(per_pwrdm);
	core_next_state = pwrdm_read_next_pwrst(core_pwrdm);

	pwrdm_pre_transition(NULL);

	/* PER */
	if (per_next_state < PWRDM_POWER_ON) {
		per_going_off = (per_next_state == PWRDM_POWER_OFF) ? 1 : 0;
		omap2_gpio_prepare_for_idle(per_going_off);
	}

	/* CORE */
	if (core_next_state < PWRDM_POWER_ON) {
		if (core_next_state == PWRDM_POWER_OFF) {
			omap3_core_save_context();
			omap3_cm_save_context();
		}
	}

	/* Configure PMIC signaling for I2C4 or sys_off_mode */
	omap3_vc_set_pmic_signaling(core_next_state);

	omap3_intc_prepare_idle();

	/*
	 * On EMU/HS devices the ROM code restores an SDRC value from
	 * the scratchpad which has automatic self refresh on timeout
	 * of AUTO_CNT = 1 enabled. This takes care of erratum ID i443.
	 * Hence store/restore the SDRC_POWER register here.
	 */
	if (cpu_is_omap3430() && omap_rev() >= OMAP3430_REV_ES3_0 &&
	    (omap_type() == OMAP2_DEVICE_TYPE_EMU ||
	     omap_type() == OMAP2_DEVICE_TYPE_SEC) &&
	    core_next_state == PWRDM_POWER_OFF)
		sdrc_pwr = sdrc_read_reg(SDRC_POWER);

	/*
	 * omap3_arm_context is the location where some of the ARM context
	 * is saved. The rest is placed on the stack, and restored from
	 * there before resuming.
	 */
	if (save_state)
		omap34xx_save_context(omap3_arm_context);
	if (save_state == 1 || save_state == 3)
		cpu_suspend(save_state, omap34xx_do_sram_idle);
	else
		omap34xx_do_sram_idle(save_state);

	/* Restore normal SDRC POWER settings */
	if (cpu_is_omap3430() && omap_rev() >= OMAP3430_REV_ES3_0 &&
	    (omap_type() == OMAP2_DEVICE_TYPE_EMU ||
	     omap_type() == OMAP2_DEVICE_TYPE_SEC) &&
	    core_next_state == PWRDM_POWER_OFF)
		sdrc_write_reg(sdrc_pwr, SDRC_POWER);

	/* CORE */
	if (core_next_state < PWRDM_POWER_ON &&
	    pwrdm_read_prev_pwrst(core_pwrdm) == PWRDM_POWER_OFF) {
		omap3_core_restore_context();
		omap3_cm_restore_context();
		omap3_sram_restore_context();
		omap2_sms_restore_context();
	} else {
		/*
		 * In the off-mode resume path above,
		 * omap3_core_restore_context() also handles the INTC
		 * autoidle restore done here, so limit this to
		 * non-off-mode resume paths to avoid doing it twice.
		 */
		omap3_intc_resume_idle();
	}

	pwrdm_post_transition(NULL);

	/* PER */
	if (per_next_state < PWRDM_POWER_ON)
		omap2_gpio_resume_after_idle();
}

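/*
 * Default ARM idle handler (arm_pm_idle): enter the configured low-power
 * state via omap_sram_idle() unless an interrupt is already pending.
 */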
static void omap3_pm_idle(void)
{
	if (omap_irq_pending())
		return;

	trace_cpu_idle_rcuidle(1, smp_processor_id());

	omap_sram_idle();

	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}

#ifdef CONFIG_SUSPEND
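/*
 * System suspend callback: program each registered powerdomain to its
 * target suspend state, idle the system, then restore the previous
 * next-power-state settings and report any domain that missed its target.
 */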
static int omap3_pm_suspend(void)
{
	struct power_state *pwrst;
	int state, ret = 0;

	/* Read current next_pwrsts */
	list_for_each_entry(pwrst, &pwrst_list, node)
		pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);
	/* Set ones wanted by suspend */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state))
			goto restore;
		if (pwrdm_clear_all_prev_pwrst(pwrst->pwrdm))
			goto restore;
	}

	omap3_intc_suspend();

	omap_sram_idle();

restore:
	/* Restore next_pwrsts */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
		if (state > pwrst->next_state) {
			pr_info("Powerdomain (%s) didn't enter target state %d\n",
				pwrst->pwrdm->name, pwrst->next_state);
			ret = -1;
		}
		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
	}
	if (ret)
		pr_err("Could not enter target state in pm_suspend\n");
	else
		pr_info("Successfully put all powerdomains to target state\n");

	return ret;
}
#else
#define omap3_pm_suspend NULL
#endif /* CONFIG_SUSPEND */

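/*
 * One-time control module and PRM setup, done before hardware-supervised
 * powerdomain transitions are enabled.
 */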
static void __init prcm_setup_regs(void)
{
	omap3_ctrl_init();

	omap3_prm_init_pm(cpu_is_omap3630(), omap3_has_iva());
}

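/*
 * Set the target low-power state of all registered powerdomains to OFF
 * (enable != 0) or RET. CORE OFF is downgraded to RET when erratum i583
 * applies.
 */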
void omap3_pm_off_mode_enable(int enable)
{
	struct power_state *pwrst;
	u32 state;

	if (enable)
		state = PWRDM_POWER_OFF;
	else
		state = PWRDM_POWER_RET;

	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583) &&
				pwrst->pwrdm == core_pwrdm &&
				state == PWRDM_POWER_OFF) {
			pwrst->next_state = PWRDM_POWER_RET;
			pr_warn("%s: Core OFF disabled due to errata i583\n",
				__func__);
		} else {
			pwrst->next_state = state;
		}
		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
	}
}

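/* Return the target suspend state for @pwrdm, or -EINVAL if it is not tracked */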
int omap3_pm_get_suspend_state(struct powerdomain *pwrdm)
{
	struct power_state *pwrst;

	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (pwrst->pwrdm == pwrdm)
			return pwrst->next_state;
	}
	return -EINVAL;
}

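/* Set the target suspend state for @pwrdm; -EINVAL if it is not tracked */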
int omap3_pm_set_suspend_state(struct powerdomain *pwrdm, int state)
{
	struct power_state *pwrst;

	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (pwrst->pwrdm == pwrdm) {
			pwrst->next_state = state;
			return 0;
		}
	}
	return -EINVAL;
}

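/*
 * pwrdm_for_each() callback: track each powerdomain that supports power
 * states, default its next state to retention and enable hardware
 * save-and-restore where available.
 */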
static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
{
	struct power_state *pwrst;

	if (!pwrdm->pwrsts)
		return 0;

	pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC);
	if (!pwrst)
		return -ENOMEM;
	pwrst->pwrdm = pwrdm;
	pwrst->next_state = PWRDM_POWER_RET;
	list_add(&pwrst->node, &pwrst_list);

	if (pwrdm_has_hdwr_sar(pwrdm))
		pwrdm_enable_hdwr_sar(pwrdm);

	return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
}

/*
 * Push functions to SRAM
 *
 * The minimum set of functions is pushed to SRAM for execution:
 * - omap3_do_wfi for erratum i581 WA,
 * - save_secure_ram_context for security extensions.
 */
void omap_push_sram_idle(void)
{
	omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz);

	if (omap_type() != OMAP2_DEVICE_TYPE_GP)
		_omap_save_secure_sram = omap_sram_push(save_secure_ram_context,
				save_secure_ram_context_sz);
}

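/* Set the pm34xx_errata flags that apply to the running SoC revision */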
static void __init pm_errata_configure(void)
{
	if (cpu_is_omap3630()) {
		pm34xx_errata |= PM_RTA_ERRATUM_i608;
		/* Enable the l2 cache toggling in sleep logic */
		enable_omap3630_toggle_l2_on_restore();
		if (omap_rev() < OMAP3630_REV_ES1_2)
			pm34xx_errata |= (PM_SDRC_WAKEUP_ERRATUM_i583 |
					  PM_PER_MEMORIES_ERRATUM_i582);
	} else if (cpu_is_omap34xx()) {
		pm34xx_errata |= PM_PER_MEMORIES_ERRATUM_i582;
	}
}

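/*
 * Main initialization for OMAP3 power management: request the PRCM wakeup
 * and I/O interrupts, set up the powerdomain and clockdomain defaults,
 * register the suspend and idle hooks, and apply the erratum workarounds
 * configured above.
 */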
int __init omap3_pm_init(void)
{
	struct power_state *pwrst, *tmp;
	struct clockdomain *neon_clkdm, *mpu_clkdm, *per_clkdm, *wkup_clkdm;
	int ret;

	if (!omap3_has_io_chain_ctrl())
		pr_warn("PM: no software I/O chain control; some wakeups may be lost\n");

	pm_errata_configure();

	/* XXX prcm_setup_regs needs to be called before enabling
	 * hardware-supervised mode for powerdomains */
	prcm_setup_regs();

	ret = request_irq(omap_prcm_event_to_irq("wkup"),
		_prcm_int_handle_wakeup, IRQF_NO_SUSPEND, "pm_wkup", NULL);

	if (ret) {
		pr_err("pm: Failed to request pm_wkup irq\n");
		goto err1;
	}

	/* IO interrupt is shared with mux code */
	ret = request_irq(omap_prcm_event_to_irq("io"),
		_prcm_int_handle_io, IRQF_SHARED | IRQF_NO_SUSPEND, "pm_io",
		omap3_pm_init);
	enable_irq(omap_prcm_event_to_irq("io"));

	if (ret) {
		pr_err("pm: Failed to request pm_io irq\n");
		goto err2;
	}

	ret = pwrdm_for_each(pwrdms_setup, NULL);
	if (ret) {
		pr_err("Failed to setup powerdomains\n");
		goto err3;
	}

	(void) clkdm_for_each(omap_pm_clkdms_setup, NULL);

	mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");
	if (mpu_pwrdm == NULL) {
		pr_err("Failed to get mpu_pwrdm\n");
		ret = -EINVAL;
		goto err3;
	}

	neon_pwrdm = pwrdm_lookup("neon_pwrdm");
	per_pwrdm = pwrdm_lookup("per_pwrdm");
	core_pwrdm = pwrdm_lookup("core_pwrdm");

	neon_clkdm = clkdm_lookup("neon_clkdm");
	mpu_clkdm = clkdm_lookup("mpu_clkdm");
	per_clkdm = clkdm_lookup("per_clkdm");
	wkup_clkdm = clkdm_lookup("wkup_clkdm");

	omap_common_suspend_init(omap3_pm_suspend);

	arm_pm_idle = omap3_pm_idle;
	omap3_idle_init();

	/*
	 * RTA is disabled during initialization as per erratum i608.
	 * It is safer to disable RTA in the bootloader, but we would like
	 * to be doubly sure here and prevent any mishaps.
	 */
	if (IS_PM34XX_ERRATUM(PM_RTA_ERRATUM_i608))
		omap3630_ctrl_disable_rta();

	/*
	 * The UART3/4 FIFO and the sidetone memory in McBSP2/3 are
	 * not correctly reset when the PER powerdomain comes back
	 * from OFF or OSWR when the CORE powerdomain is kept active.
	 * See OMAP36xx Erratum i582 "PER Domain reset issue after
	 * Domain-OFF/OSWR Wakeup".  This wakeup dependency is not a
	 * complete workaround.  The kernel must also prevent the PER
	 * powerdomain from going to OSWR/OFF while the CORE
	 * powerdomain is not going to OSWR/OFF.  And if PER last
	 * power state was off while CORE last power state was ON, the
	 * UART3/4 and McBSP2/3 SIDETONE devices need to run a
	 * self-test using their loopback tests; if that fails, those
	 * devices are unusable until the PER/CORE can complete a transition
	 * from ON to OSWR/OFF and then back to ON.
	 *
	 * XXX Technically this workaround is only needed if off-mode
	 * or OSWR is enabled.
	 */
	if (IS_PM34XX_ERRATUM(PM_PER_MEMORIES_ERRATUM_i582))
		clkdm_add_wkdep(per_clkdm, wkup_clkdm);

	clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
		omap3_secure_ram_storage =
			kmalloc(0x803F, GFP_KERNEL);
		if (!omap3_secure_ram_storage)
			pr_err("Memory allocation failed when allocating for secure sram context\n");

		local_irq_disable();

		omap_dma_global_context_save();
		omap3_save_secure_ram_context();
		omap_dma_global_context_restore();

		local_irq_enable();
	}

	omap3_save_scratchpad_contents();
	return ret;

err3:
	list_for_each_entry_safe(pwrst, tmp, &pwrst_list, node) {
		list_del(&pwrst->node);
		kfree(pwrst);
	}
	free_irq(omap_prcm_event_to_irq("io"), omap3_pm_init);
err2:
	free_irq(omap_prcm_event_to_irq("wkup"), NULL);
err1:
	return ret;
}