cpuidle44xx.c (37e2c2a775fc887acd1432908478dfd532f7f00f) cpuidle44xx.c (74ed7bdcb41d32c7628c3bd1478b076e5b1ad8a4)
1/*
2 * OMAP4+ CPU idle Routines
3 *
4 * Copyright (C) 2011-2013 Texas Instruments, Inc.
5 * Santosh Shilimkar <santosh.shilimkar@ti.com>
6 * Rajendra Nayak <rnayak@ti.com>
7 *
8 * This program is free software; you can redistribute it and/or modify

--- 66 unchanged lines hidden (view full) ---

75 return index;
76}
77
78static int omap_enter_idle_coupled(struct cpuidle_device *dev,
79 struct cpuidle_driver *drv,
80 int index)
81{
82 struct idle_statedata *cx = state_ptr + index;
1/*
2 * OMAP4+ CPU idle Routines
3 *
4 * Copyright (C) 2011-2013 Texas Instruments, Inc.
5 * Santosh Shilimkar <santosh.shilimkar@ti.com>
6 * Rajendra Nayak <rnayak@ti.com>
7 *
8 * This program is free software; you can redistribute it and/or modify

--- 66 unchanged lines hidden (view full) ---

75 return index;
76}
77
78static int omap_enter_idle_coupled(struct cpuidle_device *dev,
79 struct cpuidle_driver *drv,
80 int index)
81{
82 struct idle_statedata *cx = state_ptr + index;
83 u32 mpuss_can_lose_context = 0;
83
84 /*
85 * CPU0 has to wait and stay ON until CPU1 is OFF state.
 86 * This is necessary to honour hardware recommendation
 87 * of triggering all the possible low power modes once CPU1 is
88 * out of coherency and in OFF mode.
89 */
90 if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {

--- 8 unchanged lines hidden (view full) ---

99 * waiting for CPU1 off.
100 */
101 if (cpu_done[1])
102 goto fail;
103
104 }
105 }
106
84
85 /*
86 * CPU0 has to wait and stay ON until CPU1 is OFF state.
 87 * This is necessary to honour hardware recommendation
 88 * of triggering all the possible low power modes once CPU1 is
89 * out of coherency and in OFF mode.
90 */
91 if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {

--- 8 unchanged lines hidden (view full) ---

100 * waiting for CPU1 off.
101 */
102 if (cpu_done[1])
103 goto fail;
104
105 }
106 }
107
108 mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
109 (cx->mpu_logic_state == PWRDM_POWER_OFF);
110
107 /*
108 * Call idle CPU PM enter notifier chain so that
109 * VFP and per CPU interrupt context is saved.
110 */
111 cpu_pm_enter();
112
113 if (dev->cpu == 0) {
114 pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
115 omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
116
117 /*
118 * Call idle CPU cluster PM enter notifier chain
119 * to save GIC and wakeupgen context.
120 */
111 /*
112 * Call idle CPU PM enter notifier chain so that
113 * VFP and per CPU interrupt context is saved.
114 */
115 cpu_pm_enter();
116
117 if (dev->cpu == 0) {
118 pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
119 omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
120
121 /*
122 * Call idle CPU cluster PM enter notifier chain
123 * to save GIC and wakeupgen context.
124 */
121 if ((cx->mpu_state == PWRDM_POWER_RET) &&
122 (cx->mpu_logic_state == PWRDM_POWER_OFF))
123 cpu_cluster_pm_enter();
125 if (mpuss_can_lose_context)
126 cpu_cluster_pm_enter();
124 }
125
126 omap4_enter_lowpower(dev->cpu, cx->cpu_state);
127 cpu_done[dev->cpu] = true;
128
129 /* Wakeup CPU1 only if it is not offlined */
130 if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
127 }
128
129 omap4_enter_lowpower(dev->cpu, cx->cpu_state);
130 cpu_done[dev->cpu] = true;
131
132 /* Wakeup CPU1 only if it is not offlined */
133 if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
134
135 if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
136 mpuss_can_lose_context)
137 gic_dist_disable();
138
131 clkdm_wakeup(cpu_clkdm[1]);
132 omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON);
133 clkdm_allow_idle(cpu_clkdm[1]);
139 clkdm_wakeup(cpu_clkdm[1]);
140 omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON);
141 clkdm_allow_idle(cpu_clkdm[1]);
142
143 if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
144 mpuss_can_lose_context) {
145 while (gic_dist_disabled()) {
146 udelay(1);
147 cpu_relax();
148 }
149 gic_timer_retrigger();
150 }
134 }
135
136 /*
137 * Call idle CPU PM exit notifier chain to restore
138 * VFP and per CPU IRQ context.
139 */
140 cpu_pm_exit();
141
142 /*
143 * Call idle CPU cluster PM exit notifier chain
144 * to restore GIC and wakeupgen context.
145 */
151 }
152
153 /*
154 * Call idle CPU PM exit notifier chain to restore
155 * VFP and per CPU IRQ context.
156 */
157 cpu_pm_exit();
158
159 /*
160 * Call idle CPU cluster PM exit notifier chain
161 * to restore GIC and wakeupgen context.
162 */
146 if (dev->cpu == 0 && (cx->mpu_state == PWRDM_POWER_RET) &&
147 (cx->mpu_logic_state == PWRDM_POWER_OFF))
163 if (dev->cpu == 0 && mpuss_can_lose_context)
148 cpu_cluster_pm_exit();
149
150fail:
151 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
152 cpu_done[dev->cpu] = false;
153
154 return index;
155}

--- 62 unchanged lines hidden ---
164 cpu_cluster_pm_exit();
165
166fail:
167 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
168 cpu_done[dev->cpu] = false;
169
170 return index;
171}

--- 62 unchanged lines hidden ---