/*
 * arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver
 *
 * Created by:	Nicolas Pitre, March 2012
 * Copyright:	(C) 2012-2013  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cpu_pm.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/mcpm.h>
#include <asm/bL_switcher.h>


/*
 * Use our own MPIDR accessors as the generic ones in asm/cputype.h have
 * __attribute_const__ and we don't want the compiler to assume any
 * constness here as the value _does_ change along some code paths.
 */

static int read_mpidr(void)
{
	unsigned int id;
	asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (id));
	return id & MPIDR_HWID_BITMASK;
}

/*
 * bL switcher core code.
 */

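/*
 * MPIDR_AFFINITY_LEVEL(mpidr, 0) extracts the CPU number within its
 * cluster (bits [7:0]) and MPIDR_AFFINITY_LEVEL(mpidr, 1) the cluster
 * number (bits [15:8]).  For example, an MPIDR of 0x101 identifies
 * CPU 1 in cluster 1, while 0x001 identifies CPU 1 in cluster 0.
 */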
static void bL_do_switch(void *_unused)
{
	unsigned mpidr, cpuid, clusterid, ob_cluster, ib_cluster;

	pr_debug("%s\n", __func__);

	mpidr = read_mpidr();
	cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	ob_cluster = clusterid;
	ib_cluster = clusterid ^ 1;

	/*
	 * Our state has been saved at this point.  Let's release our
	 * inbound CPU.
	 */
	mcpm_set_entry_vector(cpuid, ib_cluster, cpu_resume);
	sev();

	/*
	 * From this point, we must assume that our counterpart CPU might
	 * have taken over in its parallel world already, as if execution
	 * just returned from cpu_suspend().  It is therefore important to
	 * be very careful not to make any change the other CPU is not
	 * expecting.  This is why we need stack isolation.
	 *
	 * Fancy undercover tasks could be performed here.  For now
	 * we have none.
	 */

	/* Let's put ourselves down. */
	mcpm_cpu_power_down();

	/* should never get here */
	BUG();
}

/*
 * Stack isolation.  To ensure 'current' remains valid, we just use another
 * piece of our thread's stack space which should be fairly lightly used.
 * The selected area starts just above the thread_info structure located
 * at the very bottom of the stack, aligned to a cache line, and indexed
 * with the cluster number.
 */
#define STACK_SIZE 512
extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
static int bL_switchpoint(unsigned long _arg)
{
	unsigned int mpidr = read_mpidr();
	unsigned int clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	void *stack = current_thread_info() + 1;
	stack = PTR_ALIGN(stack, L1_CACHE_BYTES);
	stack += clusterid * STACK_SIZE + STACK_SIZE;
	call_with_stack(bL_do_switch, (void *)_arg, stack);
	BUG();
}
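
/*
 * Worked example of the stack carving above, assuming L1_CACHE_BYTES is 64:
 * with thread_info at the bottom of the stack, 'stack' first points just
 * past it, rounded up to the next 64-byte boundary (call it 'base').  The
 * ARM stack grows downward, so the pointer handed to call_with_stack() is
 * the top of the selected window: base + 512 for cluster 0 (using bytes
 * [base, base + 512)) and base + 1024 for cluster 1 (using bytes
 * [base + 512, base + 1024)).  The two clusters therefore never share a
 * stack area during the handover.
 */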

/*
 * Generic switcher interface
 */

/*
 * bL_switch_to - Switch to a specific cluster for the current CPU
 * @new_cluster_id: the ID of the cluster to switch to.
 *
 * This function must be called on the CPU to be switched.
 * Returns 0 on success, else a negative status code.
 */
static int bL_switch_to(unsigned int new_cluster_id)
{
	unsigned int mpidr, cpuid, clusterid, ob_cluster, ib_cluster, this_cpu;
	struct tick_device *tdev;
	enum clock_event_mode tdev_mode;
	int ret;

	mpidr = read_mpidr();
	cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	ob_cluster = clusterid;
	ib_cluster = clusterid ^ 1;

	if (new_cluster_id == clusterid)
		return 0;

	pr_debug("before switch: CPU %d in cluster %d\n", cpuid, clusterid);

	/* Close the gate for our entry vectors */
	mcpm_set_entry_vector(cpuid, ob_cluster, NULL);
	mcpm_set_entry_vector(cpuid, ib_cluster, NULL);

	/*
	 * Let's wake up the inbound CPU now in case it requires some delay
	 * to come online, but leave it gated in our entry vector code.
	 */
	ret = mcpm_cpu_power_up(cpuid, ib_cluster);
	if (ret) {
		pr_err("%s: mcpm_cpu_power_up() returned %d\n", __func__, ret);
		return ret;
	}

	/*
	 * From this point we are entering the switch critical zone
	 * and can't take any interrupts anymore.
	 */
	local_irq_disable();
	local_fiq_disable();

	this_cpu = smp_processor_id();

	/* redirect GIC's SGIs to our counterpart */
	gic_migrate_target(cpuid + ib_cluster*4);
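
	/*
	 * The GIC CPU interface number computed above assumes interfaces
	 * are laid out as (cluster * 4 + cpu), i.e. at most 4 CPUs per
	 * cluster.  Once migrated, SGIs aimed at the outbound CPU are
	 * delivered on the inbound CPU's interface, which is what the
	 * wakeup IPI below relies on.
	 */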

	/*
	 * Raise an SGI on the inbound CPU to make sure it doesn't stall
	 * in a possible WFI, such as in mcpm_cpu_power_down().
	 */
	arch_send_wakeup_ipi_mask(cpumask_of(this_cpu));

	tdev = tick_get_device(this_cpu);
	if (tdev && !cpumask_equal(tdev->evtdev->cpumask, cpumask_of(this_cpu)))
		tdev = NULL;
	if (tdev) {
		tdev_mode = tdev->evtdev->mode;
		clockevents_set_mode(tdev->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
	}

	ret = cpu_pm_enter();

	/* we cannot tolerate errors at this point */
	if (ret)
		panic("%s: cpu_pm_enter() returned %d\n", __func__, ret);

	/* Flip the cluster in the CPU logical map for this CPU. */
	cpu_logical_map(this_cpu) ^= (1 << 8);
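
	/*
	 * Bit 8 is the least significant bit of MPIDR affinity level 1
	 * (the cluster field), so the XOR above rebinds this logical CPU
	 * to the same CPU number in the other cluster.
	 */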

	/* Let's do the actual CPU switch. */
	ret = cpu_suspend(0, bL_switchpoint);
	if (ret > 0)
		panic("%s: cpu_suspend() returned %d\n", __func__, ret);

	/* We are executing on the inbound CPU at this point */
	mpidr = read_mpidr();
	cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	pr_debug("after switch: CPU %d in cluster %d\n", cpuid, clusterid);
	BUG_ON(clusterid != ib_cluster);

	mcpm_cpu_powered_up();

	ret = cpu_pm_exit();

	if (tdev) {
		clockevents_set_mode(tdev->evtdev, tdev_mode);
		clockevents_program_event(tdev->evtdev,
					  tdev->evtdev->next_event, 1);
	}

	local_fiq_enable();
	local_irq_enable();

	if (ret)
		pr_err("%s exiting with error %d\n", __func__, ret);
	return ret;
}

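/*
 * One switcher thread is created per logical CPU.  A switch request stores
 * the desired cluster in @wanted_cluster and wakes the thread through @wq;
 * the value -1 means no switch is pending.
 */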
struct bL_thread {
	struct task_struct *task;
	wait_queue_head_t wq;
	int wanted_cluster;
};

static struct bL_thread bL_threads[NR_CPUS];

static int bL_switcher_thread(void *arg)
{
	struct bL_thread *t = arg;
	struct sched_param param = { .sched_priority = 1 };
	int cluster;

	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);

	do {
		if (signal_pending(current))
			flush_signals(current);
		wait_event_interruptible(t->wq,
				t->wanted_cluster != -1 ||
				kthread_should_stop());
		cluster = xchg(&t->wanted_cluster, -1);
		if (cluster != -1)
			bL_switch_to(cluster);
	} while (!kthread_should_stop());

	return 0;
}

static struct task_struct * __init bL_switcher_thread_create(int cpu, void *arg)
{
	struct task_struct *task;

	task = kthread_create_on_node(bL_switcher_thread, arg,
				      cpu_to_node(cpu), "kswitcher_%d", cpu);
	if (!IS_ERR(task)) {
		kthread_bind(task, cpu);
		wake_up_process(task);
	} else
		pr_err("%s failed for CPU %d\n", __func__, cpu);
	return task;
}

/*
 * bL_switch_request - Switch to a specific cluster for the given CPU
 *
 * @cpu: the CPU to switch
 * @new_cluster_id: the ID of the cluster to switch to.
 *
 * This function causes a cluster switch on the given CPU by waking up
 * the appropriate switcher thread.  This function may or may not return
 * before the switch has occurred.
 */
int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id)
{
	struct bL_thread *t;

	if (cpu >= ARRAY_SIZE(bL_threads)) {
		pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
		return -EINVAL;
	}

	t = &bL_threads[cpu];
	if (IS_ERR(t->task))
		return PTR_ERR(t->task);
	if (!t->task)
		return -ESRCH;

	t->wanted_cluster = new_cluster_id;
	wake_up(&t->wq);
	return 0;
}
EXPORT_SYMBOL_GPL(bL_switch_request);
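
/*
 * Example use (hypothetical caller, e.g. a cpufreq or thermal driver built
 * on top of this interface): to move logical CPU 0 onto cluster 1 and then
 * back onto cluster 0:
 *
 *	bL_switch_request(0, 1);
 *	...
 *	bL_switch_request(0, 0);
 *
 * Each call only queues the request with the corresponding switcher thread
 * and may return before the switch has completed.
 */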

/*
 * Activation and configuration code.
 */

static cpumask_t bL_switcher_removed_logical_cpus;

static void __init bL_switcher_restore_cpus(void)
{
	int i;

	for_each_cpu(i, &bL_switcher_removed_logical_cpus)
		cpu_up(i);
}

static int __init bL_switcher_halve_cpus(void)
{
	int cpu, cluster, i, ret;
	cpumask_t cluster_mask[2], common_mask;

	cpumask_clear(&bL_switcher_removed_logical_cpus);
	cpumask_clear(&cluster_mask[0]);
	cpumask_clear(&cluster_mask[1]);

	for_each_online_cpu(i) {
		cpu = cpu_logical_map(i) & 0xff;
		cluster = (cpu_logical_map(i) >> 8) & 0xff;
		if (cluster >= 2) {
			pr_err("%s: only dual cluster systems are supported\n", __func__);
			return -EINVAL;
		}
		cpumask_set_cpu(cpu, &cluster_mask[cluster]);
	}

	if (!cpumask_and(&common_mask, &cluster_mask[0], &cluster_mask[1])) {
		pr_err("%s: no common set of CPUs\n", __func__);
		return -EINVAL;
	}

	for_each_online_cpu(i) {
		cpu = cpu_logical_map(i) & 0xff;
		cluster = (cpu_logical_map(i) >> 8) & 0xff;

		if (cpumask_test_cpu(cpu, &common_mask)) {
			/*
			 * We keep only those logical CPUs whose number
			 * is equal to their physical CPU number. This is
			 * not perfect but good enough for now.
			 */
			if (cpu == i)
				continue;
		}

		ret = cpu_down(i);
		if (ret) {
			bL_switcher_restore_cpus();
			return ret;
		}
		cpumask_set_cpu(i, &bL_switcher_removed_logical_cpus);
	}

	return 0;
}
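
/*
 * Worked example for the halving above, assuming a hypothetical 2+2 system
 * where logical CPUs 0 and 1 are physical CPUs 0 and 1 of cluster 0 and
 * logical CPUs 2 and 3 are physical CPUs 0 and 1 of cluster 1: the common
 * mask is {0, 1}, logical CPUs 0 and 1 stay online (their logical number
 * matches their physical CPU number), and logical CPUs 2 and 3 are hot
 * unplugged so that each remaining logical CPU has a shadow counterpart
 * in the other cluster to switch to.
 */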

static int __init bL_switcher_init(void)
{
	int cpu, ret;

	pr_info("big.LITTLE switcher initializing\n");

	if (MAX_NR_CLUSTERS != 2) {
		pr_err("%s: only dual cluster systems are supported\n", __func__);
		return -EINVAL;
	}

	cpu_hotplug_driver_lock();
	ret = bL_switcher_halve_cpus();
	if (ret) {
		cpu_hotplug_driver_unlock();
		return ret;
	}

	for_each_online_cpu(cpu) {
		struct bL_thread *t = &bL_threads[cpu];
		init_waitqueue_head(&t->wq);
		t->wanted_cluster = -1;
		t->task = bL_switcher_thread_create(cpu, t);
	}
	cpu_hotplug_driver_unlock();

	pr_info("big.LITTLE switcher initialized\n");
	return 0;
}

late_initcall(bL_switcher_init);