/*
 * Copyright (c) 2013 ARM/Linaro
 *
 * Authors: Daniel Lezcano <daniel.lezcano@linaro.org>
 *          Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 *          Nicolas Pitre <nicolas.pitre@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Maintainer: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 * Maintainer: Daniel Lezcano <daniel.lezcano@linaro.org>
 */
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpuidle.h>
#include <asm/mcpm.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>

#include "dt_idle_states.h"

static int bl_enter_powerdown(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int idx);

/*
 * NB: Owing to current menu governor behaviour, the big and LITTLE
 * index 1 states have to define exit_latency and target_residency for
 * the cluster state since, when all CPUs in a cluster hit it, the
 * cluster can be shut down. This means that when a single CPU enters
 * this state the exit_latency and target_residency values are somewhat
 * overkill. There is no notion of cluster states in the menu governor,
 * so CPUs have to define CPU states in which the cluster may possibly
 * be shut down, depending on the state of the other CPUs. Idle state
 * entry and exit happen at random times; however the cluster state
 * provides target_residency values as if all CPUs in a cluster entered
 * the state at once; this is somewhat optimistic and the behaviour
 * should be fixed either in the governor or in the MCPM back-ends.
 * To make this driver 100% generic, the number of states and the
 * exit_latency and target_residency values must be obtained from
 * device tree bindings; an illustrative node is sketched below.
 *
 * exit_latency: refers to the vexpress TC2 test chip and depends on the
 * current cluster operating point. It is the time it takes to get the
 * CPU up and running when the CPU is powered up on cluster wake-up from
 * shutdown. Current values for the big and LITTLE clusters are provided
 * for clusters running at their default operating points.
 *
 * target_residency: the minimum amount of time the cluster has to be
 * down to break even in terms of power consumption. Cluster shutdown
 * has inherent dynamic power costs (L2 writebacks to DRAM being the
 * main factor) that depend on the current operating points. The current
 * values for both clusters are provided for the case where half of the
 * L2 lines are dirty and require cleaning to DRAM, and take into
 * account the static leakage power of the vexpress TC2 test chip.
 */
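/*
 * Illustrative sketch only, not copied from any particular .dts: an
 * "arm,idle-state" node of the kind dt_init_idle_driver() consumes,
 * referenced from a CPU node's "cpu-idle-states" property. The node
 * name, label and latency/residency numbers below are placeholders,
 * not measured TC2 values.
 *
 *	cluster_sleep: cluster-sleep {
 *		compatible = "arm,idle-state";
 *		local-timer-stop;
 *		entry-latency-us = <100>;
 *		exit-latency-us = <700>;
 *		min-residency-us = <2500>;
 *	};
 *
 * Per the arm,idle-state binding, entry-latency-us plus exit-latency-us
 * (or wakeup-latency-us, if present) ends up in exit_latency,
 * min-residency-us in target_residency, and local-timer-stop sets
 * CPUIDLE_FLAG_TIMER_STOP, mirroring the static tables below.
 */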
static struct cpuidle_driver bl_idle_little_driver = {
	.name = "little_idle",
	.owner = THIS_MODULE,
	.states[0] = ARM_CPUIDLE_WFI_STATE,
	.states[1] = {
		.enter			= bl_enter_powerdown,
		.exit_latency		= 700,
		.target_residency	= 2500,
		.flags			= CPUIDLE_FLAG_TIMER_STOP,
		.name			= "C1",
		.desc			= "ARM little-cluster power down",
	},
	.state_count = 2,
};

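/*
 * For every DT idle-state node matching this table, dt_init_idle_driver()
 * installs the .data pointer as the state's ->enter() handler, so states
 * parsed from the DT power down through bl_enter_powerdown() just like
 * the statically defined index 1 states.
 */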
static const struct of_device_id bl_idle_state_match[] __initconst = {
	{ .compatible = "arm,idle-state",
	  .data = bl_enter_powerdown },
	{ },
};

static struct cpuidle_driver bl_idle_big_driver = {
	.name = "big_idle",
	.owner = THIS_MODULE,
	.states[0] = ARM_CPUIDLE_WFI_STATE,
	.states[1] = {
		.enter			= bl_enter_powerdown,
		.exit_latency		= 500,
		.target_residency	= 2000,
		.flags			= CPUIDLE_FLAG_TIMER_STOP,
		.name			= "C1",
		.desc			= "ARM big-cluster power down",
	},
	.state_count = 2,
};

/*
 * notrace prevents trace shims from getting inserted where they
 * should not. Global jumps and ldrex/strex must not be inserted
 * in power down sequences where caches and MMU may be turned off.
 */
static int notrace bl_powerdown_finisher(unsigned long arg)
{
	/* MCPM works with HW CPU identifiers */
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	mcpm_set_entry_vector(cpu, cluster, cpu_resume);
	mcpm_cpu_suspend();
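	/*
	 * mcpm_cpu_suspend() does not return when the power down succeeds:
	 * the CPU is reset and comes back through the MCPM entry point and
	 * the vector set above (cpu_resume). Reaching this point means the
	 * power down was denied or aborted by the MCPM back-end.
	 */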

	/* return value != 0 means failure */
	return 1;
}

/**
 * bl_enter_powerdown - Programs CPU to enter the specified state
 * @dev: cpuidle device
 * @drv: cpuidle driver containing the state
 * @idx: state index
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 */
static int bl_enter_powerdown(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int idx)
{
	cpu_pm_enter();

	cpu_suspend(0, bl_powerdown_finisher);

	/* signals the MCPM core that the CPU is out of low power state */
	mcpm_cpu_powered_up();

	cpu_pm_exit();

	return idx;
}

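/*
 * Build the driver cpumask from all possible CPUs whose MIDR part number
 * matches part_id; cpuidle_register() then creates cpuidle devices only
 * for the CPUs in that mask, giving each cluster type its own driver.
 */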
static int __init bl_idle_driver_init(struct cpuidle_driver *drv, int part_id)
{
	struct cpumask *cpumask;
	int cpu;

	cpumask = kzalloc(cpumask_size(), GFP_KERNEL);
	if (!cpumask)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		if (smp_cpuid_part(cpu) == part_id)
			cpumask_set_cpu(cpu, cpumask);

	drv->cpumask = cpumask;

	return 0;
}

static const struct of_device_id compatible_machine_match[] = {
	{ .compatible = "arm,vexpress,v2p-ca15_a7" },
	{ .compatible = "samsung,exynos5420" },
	{ .compatible = "samsung,exynos5800" },
	{},
};

static int __init bl_idle_init(void)
{
	int ret;
	struct device_node *root = of_find_node_by_path("/");
	const struct of_device_id *match_id;

	if (!root)
		return -ENODEV;

	/*
	 * Initialize the driver just for a compliant set of machines
	 */
	match_id = of_match_node(compatible_machine_match, root);

	of_node_put(root);

	if (!match_id)
		return -ENODEV;

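	/*
	 * The MCPM platform back-end must have been registered by platform
	 * code already, otherwise mcpm_cpu_suspend() has no power down
	 * method to call into.
	 */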
	if (!mcpm_is_available())
		return -EUNATCH;

	/*
	 * For now the differentiation between little and big cores
	 * is based on the part number. A7 cores are considered little
	 * cores, A15 are considered big cores. This distinction may
	 * evolve in the future with a more generic matching approach.
	 */
	ret = bl_idle_driver_init(&bl_idle_little_driver,
				  ARM_CPU_PART_CORTEX_A7);
	if (ret)
		return ret;

	ret = bl_idle_driver_init(&bl_idle_big_driver, ARM_CPU_PART_CORTEX_A15);
	if (ret)
		goto out_uninit_little;

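	/*
	 * dt_init_idle_driver() parses the "arm,idle-state" nodes referenced
	 * by the CPUs' "cpu-idle-states" properties and fills the driver
	 * state table from the given start index, overriding the static C1
	 * entry. It returns the number of DT states found: 0 keeps the
	 * built-in table, a negative value is an error.
	 */
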
	/* Start at index 1, index 0 standard WFI */
	ret = dt_init_idle_driver(&bl_idle_big_driver, bl_idle_state_match, 1);
	if (ret < 0)
		goto out_uninit_big;

	/* Start at index 1, index 0 standard WFI */
	ret = dt_init_idle_driver(&bl_idle_little_driver,
				  bl_idle_state_match, 1);
	if (ret < 0)
		goto out_uninit_big;

	ret = cpuidle_register(&bl_idle_little_driver, NULL);
	if (ret)
		goto out_uninit_big;

	ret = cpuidle_register(&bl_idle_big_driver, NULL);
	if (ret)
		goto out_unregister_little;

	return 0;

out_unregister_little:
	cpuidle_unregister(&bl_idle_little_driver);
out_uninit_big:
	kfree(bl_idle_big_driver.cpumask);
out_uninit_little:
	kfree(bl_idle_little_driver.cpumask);

	return ret;
}
device_initcall(bl_idle_init);