/*
 * OMAP4 CPU idle Routines
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 * Rajendra Nayak <rnayak@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/clockchips.h>

#include <asm/proc-fns.h>

#include "common.h"
#include "pm.h"
#include "prm.h"
#include "clockdomain.h"

#ifdef CONFIG_CPU_IDLE

/* Machine specific information */
struct omap4_idle_statedata {
	u32 cpu_state;		/* target power state for the CPU power domain */
	u32 mpu_logic_state;	/* MPUSS logic state: retained or lost */
	u32 mpu_state;		/* target power state for the MPUSS power domain */
};

/*
 * Power-domain programming for each C-state, indexed by the cpuidle
 * state index.  Must stay in sync with omap4_idle_driver.states[]
 * (same order: C1 = everything ON, C2 = MPUSS CSWR, C3 = MPUSS OSWR).
 */
static struct omap4_idle_statedata omap4_idle_data[] = {
	{
		/* C1: CPU ON, MPUSS ON (logic retained) */
		.cpu_state = PWRDM_POWER_ON,
		.mpu_state = PWRDM_POWER_ON,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		/* C2: CPU OFF, MPUSS retention with logic retained (CSWR) */
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		/* C3: CPU OFF, MPUSS retention with logic lost (OSWR) */
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_OFF,
	},
};

/* Looked up once in omap4_idle_init(); used by the idle entry paths. */
static struct powerdomain *mpu_pd, *cpu_pd[NR_CPUS];
static struct clockdomain *cpu_clkdm[NR_CPUS];

/**
 * omap4_enter_idle_[simple/coupled] - OMAP4 cpuidle entry functions
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @index: the index of state to be entered
 *
 * Called from the CPUidle framework to program the device to the
 * specified low power state selected by the governor.
 * Returns the index of the low power state entered (idle-time
 * accounting is handled by the cpuidle core, see en_core_tk_irqen).
 */
static int omap4_enter_idle_simple(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	/* C1: just WFI on this CPU; no power-domain reprogramming needed */
	local_fiq_disable();
	omap_do_wfi();
	local_fiq_enable();

	return index;
}

static int omap4_enter_idle_coupled(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	struct omap4_idle_statedata *cx = &omap4_idle_data[index];
	int cpu_id = smp_processor_id();

	local_fiq_disable();

	/*
	 * CPU0 has to wait and stay ON until CPU1 is in OFF state.
	 * This is necessary to honour the hardware recommendation
	 * of triggering all the possible low power modes once CPU1 is
	 * out of coherency and in OFF mode.
	 */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
		while (pwrdm_read_pwrst(cpu_pd[1]) != PWRDM_POWER_OFF)
			cpu_relax();
	}

	/*
	 * Hand timekeeping over to the broadcast clockevent device while
	 * this CPU's local timer is unavailable in the low power state.
	 */
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);

	/*
	 * Call idle CPU PM enter notifier chain so that
	 * VFP and per CPU interrupt context is saved.
	 */
	cpu_pm_enter();

	/* Only CPU0 programs the cluster-wide (MPUSS) power domain. */
	if (dev->cpu == 0) {
		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
		omap_set_pwrdm_state(mpu_pd, cx->mpu_state);

		/*
		 * Call idle CPU cluster PM enter notifier chain
		 * to save GIC and wakeupgen context.
		 */
		if ((cx->mpu_state == PWRDM_POWER_RET) &&
			(cx->mpu_logic_state == PWRDM_POWER_OFF))
				cpu_cluster_pm_enter();
	}

	omap4_enter_lowpower(dev->cpu, cx->cpu_state);

	/* Wakeup CPU1 only if it is not offlined */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
		clkdm_wakeup(cpu_clkdm[1]);
		/* Re-arm hardware-supervised idle for CPU1's clockdomain */
		clkdm_allow_idle(cpu_clkdm[1]);
	}

	/*
	 * Call idle CPU PM exit notifier chain to restore
	 * VFP and per CPU IRQ context.
	 */
	cpu_pm_exit();

	/*
	 * Call idle CPU cluster PM exit notifier chain
	 * to restore GIC and wakeupgen context.
 */
	if (omap4_mpuss_read_prev_context_state())
		cpu_cluster_pm_exit();

	/* Switch timekeeping back to this CPU's local timer. */
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);

	local_fiq_enable();

	return index;
}

DEFINE_PER_CPU(struct cpuidle_device, omap4_idle_dev);

/*
 * C-state table; entries must stay in the same order as
 * omap4_idle_data[] above.  exit_latency and target_residency are in
 * microseconds (cpuidle core units).
 */
struct cpuidle_driver omap4_idle_driver = {
	.name				= "omap4_idle",
	.owner				= THIS_MODULE,
	/* Let the cpuidle core do the idle-time accounting with IRQs enabled */
	.en_core_tk_irqen		= 1,
	.states = {
		{
			/* C1 - CPU0 ON + CPU1 ON + MPU ON */
			.exit_latency = 2 + 2,
			.target_residency = 5,
			.flags = CPUIDLE_FLAG_TIME_VALID,
			.enter = omap4_enter_idle_simple,
			.name = "C1",
			.desc = "MPUSS ON"
		},
		{
			/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
			.exit_latency = 328 + 440,
			.target_residency = 960,
			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
			.enter = omap4_enter_idle_coupled,
			.name = "C2",
			.desc = "MPUSS CSWR",
		},
		{
			/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
			.exit_latency = 460 + 518,
			.target_residency = 1100,
			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
			.enter = omap4_enter_idle_coupled,
			.name = "C3",
			.desc = "MPUSS OSWR",
		},
	},
	.state_count = ARRAY_SIZE(omap4_idle_data),
	.safe_state_index = 0,
};

/**
 * omap4_idle_init - Init routine for OMAP4 idle
 *
 * Registers the OMAP4 specific cpuidle driver to the cpuidle
 * framework with the valid set of states.
190 */ 191 int __init omap4_idle_init(void) 192 { 193 struct cpuidle_device *dev; 194 unsigned int cpu_id = 0; 195 196 mpu_pd = pwrdm_lookup("mpu_pwrdm"); 197 cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm"); 198 cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm"); 199 if ((!mpu_pd) || (!cpu_pd[0]) || (!cpu_pd[1])) 200 return -ENODEV; 201 202 cpu_clkdm[0] = clkdm_lookup("mpu0_clkdm"); 203 cpu_clkdm[1] = clkdm_lookup("mpu1_clkdm"); 204 if (!cpu_clkdm[0] || !cpu_clkdm[1]) 205 return -ENODEV; 206 207 for_each_cpu(cpu_id, cpu_online_mask) { 208 dev = &per_cpu(omap4_idle_dev, cpu_id); 209 dev->cpu = cpu_id; 210 dev->coupled_cpus = *cpu_online_mask; 211 212 cpuidle_register_driver(&omap4_idle_driver); 213 214 if (cpuidle_register_device(dev)) { 215 pr_err("%s: CPUidle register failed\n", __func__); 216 return -EIO; 217 } 218 } 219 220 return 0; 221 } 222 #else 223 int __init omap4_idle_init(void) 224 { 225 return 0; 226 } 227 #endif /* CONFIG_CPU_IDLE */ 228