xref: /linux/arch/arm/mach-zynq/platsmp.c (revision 31a7d26fbc51a39292bf9911f9e9efdf6d8e22b2)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains Xilinx specific SMP code, used to start up
 * the second processor.
 *
 * Copyright (C) 2011-2013 Xilinx
 *
 * based on linux/arch/arm/mach-realview/platsmp.c
 *
 * Copyright (C) 2002 ARM Ltd.
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/jiffies.h>
#include <asm/cacheflush.h>
#include <asm/smp_plat.h>
#include <asm/smp_scu.h>
#include "common.h"

/*
 * Store the number of cores in the system here: scu_get_core_count()
 * must be called from the __init section and therefore cannot be used
 * from zynq_cpun_start(), which is not __init.
 */
static int ncores;

int zynq_cpun_start(u32 address, int cpu)
{
	u32 trampoline_code_size = &zynq_secondary_trampoline_end -
						&zynq_secondary_trampoline;
	u32 phy_cpuid = cpu_logical_map(cpu);

	/* MS: Expectation that the SLCR is directly mapped and accessible */
	/* It is not possible to jump to a non-aligned address */
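	/*
	 * Only accept addresses that are word aligned and that are either
	 * zero or lie beyond the trampoline copied to address 0x0 below, so
	 * that installing the trampoline cannot overwrite the target code.
	 */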
	if (!(address & 3) && (!address || (address >= trampoline_code_size))) {
		/* Store pointer to ioremap area which points to address 0x0 */
		static u8 __iomem *zero;
		u32 trampoline_size = &zynq_secondary_trampoline_jump -
						&zynq_secondary_trampoline;

		zynq_slcr_cpu_stop(phy_cpuid);
		if (address) {
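			/*
			 * If lowmem does not start at physical address 0x0,
			 * the boot vectors at 0x0 are outside the linear
			 * mapping and must be ioremap()ed; otherwise
			 * PAGE_OFFSET already maps physical 0x0 directly.
			 */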
			if (__pa(PAGE_OFFSET)) {
				zero = ioremap(0, trampoline_code_size);
				if (!zero) {
					pr_warn("BOOTUP jump vectors not accessible\n");
					return -1;
				}
			} else {
				zero = (__force u8 __iomem *)PAGE_OFFSET;
			}

			/*
			 * This is an elegant way to jump to any address:
			 * 0x0: Load the address stored at 0x8 into r0
			 * 0x4: Jump to it
			 * 0x8: The target address itself
			 */
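			/*
			 * The trampoline itself lives in assembly (see
			 * zynq_secondary_trampoline in headsmp.S in this
			 * directory); conceptually it boils down to roughly:
			 *
			 *	ldr	r0, zynq_secondary_trampoline_jump
			 *	bx	r0
			 * zynq_secondary_trampoline_jump:
			 *	.word	0	@ patched by the writel() below
			 *
			 * This is a sketch only -- see headsmp.S for the
			 * exact code.
			 */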
			memcpy_toio(zero, &zynq_secondary_trampoline,
							trampoline_size);
			writel(address, zero + trampoline_size);

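			/*
			 * Make the trampoline and the patched jump address
			 * visible to the secondary core before it is released:
			 * flush the L1 caches, flush the outer (L2) cache for
			 * the trampoline range and order the writes before the
			 * SLCR start below.
			 */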
			flush_cache_all();
			outer_flush_range(0, trampoline_code_size);
			smp_wmb();

			if (__pa(PAGE_OFFSET))
				iounmap(zero);
		}
		zynq_slcr_cpu_start(phy_cpuid);

		return 0;
	}

	pr_warn("Can't start CPU%d: Wrong starting address %x\n", cpu, address);

	return -1;
}
EXPORT_SYMBOL(zynq_cpun_start);

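/**
 * zynq_boot_secondary - Boot a secondary CPU core
 * @cpu:	CPU to boot
 * @idle:	Idle thread for the booted CPU
 *
 * Hands the physical address of the generic ARM secondary entry point,
 * secondary_startup_arm, to zynq_cpun_start().
 *
 * Return: 0 on success, negative value otherwise.
 */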
static int zynq_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	return zynq_cpun_start(__pa_symbol(secondary_startup_arm), cpu);
}

/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
static void __init zynq_smp_init_cpus(void)
{
	int i;

	ncores = scu_get_core_count(zynq_scu_base);

	for (i = 0; i < ncores && i < CONFIG_NR_CPUS; i++)
		set_cpu_possible(i, true);
}

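/**
 * zynq_smp_prepare_cpus - Prepare the SMP bringup
 * @max_cpus:	Maximum number of CPUs to bring up
 *
 * Enable the Snoop Control Unit so the secondary cores take part in
 * cache coherency before they are brought online.
 */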
static void __init zynq_smp_prepare_cpus(unsigned int max_cpus)
{
	scu_enable(zynq_scu_base);
}

/**
 * zynq_secondary_init - Initialize secondary CPU cores
 * @cpu:	CPU that is initialized
 *
 * This function is in the hotplug path. Don't move it into the
 * init section!!
 */
static void zynq_secondary_init(unsigned int cpu)
{
	zynq_core_pm_init();
}

#ifdef CONFIG_HOTPLUG_CPU
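/**
 * zynq_cpu_kill - Take down a dying CPU core
 * @cpu:	Dying CPU
 *
 * Poll for up to 50 ms until the dying CPU has signalled through the
 * SLCR CPU state helpers (see zynq_cpu_die()) that it is ready to die,
 * then stop it via the SLCR.
 *
 * Return: 1 on success, 0 if the CPU did not report in time.
 */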
static int zynq_cpu_kill(unsigned int cpu)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(50);

	while (zynq_slcr_cpu_state_read(cpu))
		if (time_after(jiffies, timeout))
			return 0;

	zynq_slcr_cpu_stop(cpu);
	return 1;
}

/**
 * zynq_cpu_die - Let a CPU core die
 * @cpu:	Dying CPU
 *
 * Platform-specific code to shutdown a CPU.
 * Called with IRQs disabled on the dying CPU.
 */
static void zynq_cpu_die(unsigned int cpu)
{
	zynq_slcr_cpu_state_write(cpu, true);

	/*
	 * there is no power-control hardware on this platform, so all
	 * we can do is put the core into WFI; this is safe as the calling
	 * code will have already disabled interrupts
	 */
	for (;;)
		cpu_do_idle();
}
#endif
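
/*
 * These ops are not registered here; the usual mach- wiring (which
 * mach-zynq's common.c is expected to follow) hooks them into the DT
 * machine descriptor, roughly:
 *
 *	DT_MACHINE_START(XILINX_EP107, "Xilinx Zynq Platform")
 *		...
 *		.smp		= smp_ops(zynq_smp_ops),
 *		...
 *	MACHINE_END
 *
 * This is a sketch only -- see common.c for the actual descriptor.
 */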

const struct smp_operations zynq_smp_ops __initconst = {
	.smp_init_cpus		= zynq_smp_init_cpus,
	.smp_prepare_cpus	= zynq_smp_prepare_cpus,
	.smp_boot_secondary	= zynq_boot_secondary,
	.smp_secondary_init	= zynq_secondary_init,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= zynq_cpu_die,
	.cpu_kill		= zynq_cpu_kill,
#endif
};