xref: /linux/arch/arm/mach-exynos/platsmp.c (revision e0bf6c5ca2d3281f231c5f0c9bf145e9513644de)
1  /*
2  * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
3  *		http://www.samsung.com
4  *
5  * Cloned from linux/arch/arm/mach-vexpress/platsmp.c
6  *
7  *  Copyright (C) 2002 ARM Ltd.
8  *  All Rights Reserved
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License version 2 as
12  * published by the Free Software Foundation.
13 */
14 
15 #include <linux/init.h>
16 #include <linux/errno.h>
17 #include <linux/delay.h>
18 #include <linux/device.h>
19 #include <linux/jiffies.h>
20 #include <linux/smp.h>
21 #include <linux/io.h>
22 #include <linux/of_address.h>
23 
24 #include <asm/cacheflush.h>
25 #include <asm/cp15.h>
26 #include <asm/smp_plat.h>
27 #include <asm/smp_scu.h>
28 #include <asm/firmware.h>
29 
30 #include <mach/map.h>
31 
32 #include "common.h"
33 #include "regs-pmu.h"
34 
35 extern void exynos4_secondary_startup(void);
36 
37 /*
38  * Set or clear the USE_DELAYED_RESET_ASSERTION option, set on Exynos4 SoCs
39  * during hot-(un)plugging CPUx.
40  *
41  * The feature can be cleared safely during first boot of secondary CPU.
42  *
43  * Exynos4 SoCs require setting USE_DELAYED_RESET_ASSERTION during powering
44  * down a CPU so the CPU idle clock down feature could properly detect global
45  * idle state when CPUx is off.
46  */
47 static void exynos_set_delayed_reset_assertion(u32 core_id, bool enable)
48 {
49 	if (soc_is_exynos4()) {
50 		unsigned int tmp;
51 
52 		tmp = pmu_raw_readl(EXYNOS_ARM_CORE_OPTION(core_id));
53 		if (enable)
54 			tmp |= S5P_USE_DELAYED_RESET_ASSERTION;
55 		else
56 			tmp &= ~(S5P_USE_DELAYED_RESET_ASSERTION);
57 		pmu_raw_writel(tmp, EXYNOS_ARM_CORE_OPTION(core_id));
58 	}
59 }
60 
61 #ifdef CONFIG_HOTPLUG_CPU
/*
 * Bring a core that woke from the low-power pen back into coherency.
 *
 * Sets the C bit (data cache enable, CR_C) in SCTLR and bit 6 (0x40) of
 * the auxiliary control register — presumably the SMP coherency bit on
 * this CPU; confirm against the core's TRM.  Must mirror the disable
 * sequence performed before entering low power.
 */
static inline void cpu_leave_lowpower(u32 core_id)
{
	unsigned int v;

	asm volatile(
	"mrc	p15, 0, %0, c1, c0, 0\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	orr	%0, %0, %2\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	  : "=&r" (v)
	  : "Ir" (CR_C), "Ir" (0x40)
	  : "cc");

	/* Delayed reset assertion is only needed while the core is down */
	 exynos_set_delayed_reset_assertion(core_id, false);
}
79 
/*
 * Park the dying CPU in low power until a proper wakeup.
 *
 * Loops arming the power-down and executing WFI; a genuine wakeup is
 * signalled by the boot CPU writing this core's hardware ID into
 * pen_release (see exynos_boot_secondary()).  Any other wakeup is
 * counted in *spurious and the CPU is put back to sleep.
 */
static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
{
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	for (;;) {

		/* Turn the CPU off on next WFI instruction. */
		exynos_cpu_power_down(core_id);

		/*
		 * Exynos4 SoCs require setting
		 * USE_DELAYED_RESET_ASSERTION so the CPU idle
		 * clock down feature could properly detect
		 * global idle state when CPUx is off.
		 */
		exynos_set_delayed_reset_assertion(core_id, true);

		wfi();

		if (pen_release == core_id) {
			/*
			 * OK, proper wakeup, we're done
			 */
			break;
		}

		/*
		 * Getting here, means that we have come out of WFI without
		 * having been woken up - this shouldn't happen
		 *
		 * Just note it happening - when we're woken, we can report
		 * its occurrence.
		 */
		(*spurious)++;
	}
}
117 #endif /* CONFIG_HOTPLUG_CPU */
118 
/**
 * exynos_cpu_power_down : power down the specified cpu
 * @cpu : the cpu to power down
 *
 * Power down the specified cpu. The sequence must be finished by a
 * call to cpu_do_idle()
 *
 * On Exynos5420/5800, CPU0 power-down is bypassed while the system is
 * suspending (detected via the CPU0 SYS_PWR_REG local-power-enable bit).
 */
void exynos_cpu_power_down(int cpu)
{
	if (cpu == 0 && (soc_is_exynos5420() || soc_is_exynos5800())) {
		/*
		 * Bypass power down for CPU0 during suspend. Check for
		 * the SYS_PWR_REG value to decide if we are suspending
		 * the system.
		 */
		int val = pmu_raw_readl(EXYNOS5_ARM_CORE0_SYS_PWR_REG);

		if (!(val & S5P_CORE_LOCAL_PWR_EN))
			return;
	}
	pmu_raw_writel(0, EXYNOS_ARM_CORE_CONFIGURATION(cpu));
}
142 
/**
 * exynos_cpu_power_up : power up the specified cpu
 * @cpu : the cpu to power up
 *
 * Power up the specified cpu by setting the local-power-enable bit in
 * its PMU core configuration register.
 */
void exynos_cpu_power_up(int cpu)
{
	pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
			EXYNOS_ARM_CORE_CONFIGURATION(cpu));
}
154 
155 /**
156  * exynos_cpu_power_state : returns the power state of the cpu
157  * @cpu : the cpu to retrieve the power state from
158  *
159  */
160 int exynos_cpu_power_state(int cpu)
161 {
162 	return (pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(cpu)) &
163 			S5P_CORE_LOCAL_PWR_EN);
164 }
165 
/**
 * exynos_cluster_power_down : power down the specified cluster
 * @cluster : the cluster to power down
 *
 * Clears the cluster's PMU common configuration register.
 */
void exynos_cluster_power_down(int cluster)
{
	pmu_raw_writel(0, EXYNOS_COMMON_CONFIGURATION(cluster));
}
174 
/**
 * exynos_cluster_power_up : power up the specified cluster
 * @cluster : the cluster to power up
 *
 * Sets the local-power-enable bit in the cluster's PMU common
 * configuration register.
 */
void exynos_cluster_power_up(int cluster)
{
	pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
			EXYNOS_COMMON_CONFIGURATION(cluster));
}
184 
185 /**
186  * exynos_cluster_power_state : returns the power state of the cluster
187  * @cluster : the cluster to retrieve the power state from
188  *
189  */
190 int exynos_cluster_power_state(int cluster)
191 {
192 	return (pmu_raw_readl(EXYNOS_COMMON_STATUS(cluster)) &
193 		S5P_CORE_LOCAL_PWR_EN);
194 }
195 
196 void __iomem *cpu_boot_reg_base(void)
197 {
198 	if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_1_1)
199 		return pmu_base_addr + S5P_INFORM5;
200 	return sysram_base_addr;
201 }
202 
203 static inline void __iomem *cpu_boot_reg(int cpu)
204 {
205 	void __iomem *boot_reg;
206 
207 	boot_reg = cpu_boot_reg_base();
208 	if (!boot_reg)
209 		return ERR_PTR(-ENODEV);
210 	if (soc_is_exynos4412())
211 		boot_reg += 4*cpu;
212 	else if (soc_is_exynos5420() || soc_is_exynos5800())
213 		boot_reg += 4;
214 	return boot_reg;
215 }
216 
217 /*
218  * Set wake up by local power mode and execute software reset for given core.
219  *
220  * Currently this is needed only when booting secondary CPU on Exynos3250.
221  */
222 static void exynos_core_restart(u32 core_id)
223 {
224 	u32 val;
225 
226 	if (!of_machine_is_compatible("samsung,exynos3250"))
227 		return;
228 
229 	val = pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(core_id));
230 	val |= S5P_CORE_WAKEUP_FROM_LOCAL_CFG;
231 	pmu_raw_writel(val, EXYNOS_ARM_CORE_STATUS(core_id));
232 
233 	pr_info("CPU%u: Software reset\n", core_id);
234 	pmu_raw_writel(EXYNOS_CORE_PO_RESET(core_id), EXYNOS_SWRESET);
235 }
236 
/*
 * Write pen_release in a way that is guaranteed to be visible to all
 * observers, irrespective of whether they're taking part in coherency
 * or not.  This is necessary for the hotplug code to work reliably.
 *
 * The order matters: store, then barrier, then clean the cache line so
 * a non-coherent (parked) CPU sees the new value from memory.
 */
static void write_pen_release(int val)
{
	pen_release = val;
	smp_wmb();
	sync_cache_w(&pen_release);
}
248 
/* Virtual address of the Cortex-A9 Snoop Control Unit (static mapping) */
static void __iomem *scu_base_addr(void)
{
	return (void __iomem *)(S5P_VA_SCU);
}
253 
/* Serializes the boot CPU and an incoming secondary during bring-up */
static DEFINE_SPINLOCK(boot_lock);
255 
/*
 * Runs on the freshly booted secondary CPU: signal the boot CPU that
 * we are alive, then wait until it finishes the boot handshake.
 */
static void exynos_secondary_init(unsigned int cpu)
{
	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	write_pen_release(-1);

	/*
	 * Synchronise with the boot thread.
	 */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}
270 
271 static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
272 {
273 	unsigned long timeout;
274 	u32 mpidr = cpu_logical_map(cpu);
275 	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
276 	int ret = -ENOSYS;
277 
278 	/*
279 	 * Set synchronisation state between this boot processor
280 	 * and the secondary one
281 	 */
282 	spin_lock(&boot_lock);
283 
284 	/*
285 	 * The secondary processor is waiting to be released from
286 	 * the holding pen - release it, then wait for it to flag
287 	 * that it has been released by resetting pen_release.
288 	 *
289 	 * Note that "pen_release" is the hardware CPU core ID, whereas
290 	 * "cpu" is Linux's internal ID.
291 	 */
292 	write_pen_release(core_id);
293 
294 	if (!exynos_cpu_power_state(core_id)) {
295 		exynos_cpu_power_up(core_id);
296 		timeout = 10;
297 
298 		/* wait max 10 ms until cpu1 is on */
299 		while (exynos_cpu_power_state(core_id)
300 		       != S5P_CORE_LOCAL_PWR_EN) {
301 			if (timeout-- == 0)
302 				break;
303 
304 			mdelay(1);
305 		}
306 
307 		if (timeout == 0) {
308 			printk(KERN_ERR "cpu1 power enable failed");
309 			spin_unlock(&boot_lock);
310 			return -ETIMEDOUT;
311 		}
312 	}
313 
314 	exynos_core_restart(core_id);
315 
316 	/*
317 	 * Send the secondary CPU a soft interrupt, thereby causing
318 	 * the boot monitor to read the system wide flags register,
319 	 * and branch to the address found there.
320 	 */
321 
322 	timeout = jiffies + (1 * HZ);
323 	while (time_before(jiffies, timeout)) {
324 		unsigned long boot_addr;
325 
326 		smp_rmb();
327 
328 		boot_addr = virt_to_phys(exynos4_secondary_startup);
329 
330 		/*
331 		 * Try to set boot address using firmware first
332 		 * and fall back to boot register if it fails.
333 		 */
334 		ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
335 		if (ret && ret != -ENOSYS)
336 			goto fail;
337 		if (ret == -ENOSYS) {
338 			void __iomem *boot_reg = cpu_boot_reg(core_id);
339 
340 			if (IS_ERR(boot_reg)) {
341 				ret = PTR_ERR(boot_reg);
342 				goto fail;
343 			}
344 			__raw_writel(boot_addr, boot_reg);
345 		}
346 
347 		call_firmware_op(cpu_boot, core_id);
348 
349 		arch_send_wakeup_ipi_mask(cpumask_of(cpu));
350 
351 		if (pen_release == -1)
352 			break;
353 
354 		udelay(10);
355 	}
356 
357 	/* No harm if this is called during first boot of secondary CPU */
358 	exynos_set_delayed_reset_assertion(core_id, false);
359 
360 	/*
361 	 * now the secondary core is starting up let it run its
362 	 * calibrations, then wait for it to finish
363 	 */
364 fail:
365 	spin_unlock(&boot_lock);
366 
367 	return pen_release != -1 ? ret : 0;
368 }
369 
370 /*
371  * Initialise the CPU possible map early - this describes the CPUs
372  * which may be present or become present in the system.
373  */
374 
375 static void __init exynos_smp_init_cpus(void)
376 {
377 	void __iomem *scu_base = scu_base_addr();
378 	unsigned int i, ncores;
379 
380 	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
381 		ncores = scu_base ? scu_get_core_count(scu_base) : 1;
382 	else
383 		/*
384 		 * CPU Nodes are passed thru DT and set_cpu_possible
385 		 * is set by "arm_dt_init_cpu_maps".
386 		 */
387 		return;
388 
389 	/* sanity check */
390 	if (ncores > nr_cpu_ids) {
391 		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
392 			ncores, nr_cpu_ids);
393 		ncores = nr_cpu_ids;
394 	}
395 
396 	for (i = 0; i < ncores; i++)
397 		set_cpu_possible(i, true);
398 }
399 
/*
 * Prepare for SMP boot: map sysram, enable the SCU on Cortex-A9, and
 * pre-program each secondary's boot address so the boot monitor can
 * jump to exynos4_secondary_startup when kicked.
 */
static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	exynos_sysram_init();

	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
		scu_enable(scu_base_addr());

	/*
	 * Write the address of secondary startup into the
	 * system-wide flags register. The boot monitor waits
	 * until it receives a soft interrupt, and then the
	 * secondary CPU branches to this address.
	 *
	 * Try using firmware operation first and fall back to
	 * boot register if it fails.
	 */
	for (i = 1; i < max_cpus; ++i) {
		unsigned long boot_addr;
		u32 mpidr;
		u32 core_id;
		int ret;

		mpidr = cpu_logical_map(i);
		core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		boot_addr = virt_to_phys(exynos4_secondary_startup);

		ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
		/* A real firmware error (not just "unimplemented") aborts */
		if (ret && ret != -ENOSYS)
			break;
		if (ret == -ENOSYS) {
			void __iomem *boot_reg = cpu_boot_reg(core_id);

			if (IS_ERR(boot_reg))
				break;
			__raw_writel(boot_addr, boot_reg);
		}
	}
}
440 
441 #ifdef CONFIG_HOTPLUG_CPU
/*
 * platform-specific code to shutdown a CPU
 *
 * Called with IRQs disabled
 *
 * Sequence: flush and exit coherency, park in the low-power pen until
 * a genuine wakeup, then rejoin coherency.  Any spurious wakeups noted
 * by platform_do_lowpower() are reported once we are back.
 */
static void exynos_cpu_die(unsigned int cpu)
{
	int spurious = 0;
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	v7_exit_coherency_flush(louis);

	platform_do_lowpower(cpu, &spurious);

	/*
	 * bring this CPU back into the world of cache
	 * coherency, and then restore interrupts
	 */
	cpu_leave_lowpower(core_id);

	if (spurious)
		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}
466 #endif /* CONFIG_HOTPLUG_CPU */
467 
/*
 * SMP operations handed to the ARM core code; cpu_die is only provided
 * when CPU hotplug support is configured.
 */
struct smp_operations exynos_smp_ops __initdata = {
	.smp_init_cpus		= exynos_smp_init_cpus,
	.smp_prepare_cpus	= exynos_smp_prepare_cpus,
	.smp_secondary_init	= exynos_secondary_init,
	.smp_boot_secondary	= exynos_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= exynos_cpu_die,
#endif
};
477