/*-
 * Copyright (c) 2014 Ganbold Tsagaankhuu <ganbold@freebsd.org>
 * Copyright (c) 2016 Emmanuel Vadot <manu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpu.h>
#include <machine/smp.h>
#include <machine/fdt.h>
#include <machine/intr.h>
#include <machine/platformvar.h>

#include <arm/allwinner/aw_mp.h>
#include <arm/allwinner/aw_machdep.h>

/* Registers for all dual-core SoCs */
#define A20_CPUCFG_BASE 0x01c25c00
/* Registers for all quad-core SoCs */
#define CPUCFG_BASE 0x01f01c00
#define CPUCFG_SIZE 0x400
#define PRCM_BASE 0x01f01400
#define PRCM_SIZE 0x800
/* Registers for multi-cluster SoCs */
#define CPUXCFG_BASE 0x01700000
#define CPUXCFG_SIZE 0x400

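/*
 * Each secondary core has a 0x40-byte block of registers inside CPUCFG,
 * starting at (cpuid + 1) * CPU_OFFSET, holding its reset control,
 * control and status registers.
 */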
#define CPU_OFFSET 0x40
#define CPU_OFFSET_CTL 0x04
#define CPU_OFFSET_STATUS 0x08
#define CPU_RST_CTL(cpuid) ((cpuid + 1) * CPU_OFFSET)
#define CPU_CTL(cpuid) (((cpuid + 1) * CPU_OFFSET) + CPU_OFFSET_CTL)
#define CPU_STATUS(cpuid) (((cpuid + 1) * CPU_OFFSET) + CPU_OFFSET_STATUS)

#define CPU_RESET (1 << 0)
#define CPU_CORE_RESET (1 << 1)

#define CPUCFG_GENCTL 0x184
#define CPUCFG_P_REG0 0x1a4

#define A20_CPU1_PWR_CLAMP 0x1b0
#define CPU_PWR_CLAMP_REG 0x140
#define CPU_PWR_CLAMP(cpu) ((cpu * 4) + CPU_PWR_CLAMP_REG)
#define CPU_PWR_CLAMP_STEPS 8

#define A20_CPU1_PWROFF_REG 0x1b4
#define CPU_PWROFF 0x100

#define CPUCFG_DBGCTL0 0x1e0
#define CPUCFG_DBGCTL1 0x1e4

#define CPUS_CL_RST(cl) (0x30 + (cl) * 0x4)
#define CPUX_CL_CTRL0(cl) (0x0 + (cl) * 0x10)
#define CPUX_CL_CTRL1(cl) (0x4 + (cl) * 0x10)
#define CPUX_CL_CPU_STATUS(cl) (0x30 + (cl) * 0x4)
#define CPUX_CL_RST(cl) (0x80 + (cl) * 0x4)
#define PRCM_CL_PWROFF(cl) (0x100 + (cl) * 0x4)
#define PRCM_CL_PWR_CLAMP(cl, cpu) (0x140 + (cl) * 0x4 + (cpu) * 0x4)

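/*
 * Derive the number of cores from the L2 cache control register and
 * publish it to the MP code as mp_ncpus/mp_maxid.
 */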
void
aw_mp_setmaxid(platform_t plat)
{
	int ncpu;
	uint32_t reg;

	if (mp_ncpus != 0)
		return;

	reg = cp15_l2ctlr_get();
	ncpu = CPUV7_L2CTLR_NPROC(reg);

	mp_ncpus = ncpu;
	mp_maxid = ncpu - 1;
}

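/*
 * Bring up the application processors on the dual-core (A20/sun7i) and
 * quad-core SoCs handled here: publish the entry point, then walk each
 * secondary core through reset assertion, power clamp release, power-off
 * gating clear and reset de-assertion before waking it with SEV.
 */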
void
aw_mp_start_ap(platform_t plat)
{
	bus_space_handle_t cpucfg;
	bus_space_handle_t prcm;
	int i, j, soc_family;
	uint32_t val;

	soc_family = allwinner_soc_family();
	if (soc_family == ALLWINNERSOC_SUN7I) {
		if (bus_space_map(fdtbus_bs_tag, A20_CPUCFG_BASE, CPUCFG_SIZE,
		    0, &cpucfg) != 0)
			panic("Couldn't map the CPUCFG\n");
	} else {
		if (bus_space_map(fdtbus_bs_tag, CPUCFG_BASE, CPUCFG_SIZE,
		    0, &cpucfg) != 0)
			panic("Couldn't map the CPUCFG\n");
		if (bus_space_map(fdtbus_bs_tag, PRCM_BASE, PRCM_SIZE, 0,
		    &prcm) != 0)
			panic("Couldn't map the PRCM\n");
	}

	dcache_wbinv_poc_all();

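	/*
	 * Publish the physical address of mpentry; the secondary cores are
	 * expected to begin execution at the address stored in
	 * CPUCFG_P_REG0 once they are released from reset.
	 */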
	bus_space_write_4(fdtbus_bs_tag, cpucfg, CPUCFG_P_REG0,
	    pmap_kextract((vm_offset_t)mpentry));

	/*
	 * Assert nCOREPORESET low and set L1RSTDISABLE low.
	 * Ensure DBGPWRDUP is set to LOW to prevent any external
	 * debug access to the processor.
	 */
	for (i = 1; i < mp_ncpus; i++)
		bus_space_write_4(fdtbus_bs_tag, cpucfg, CPU_RST_CTL(i), 0);

	/* Set L1RSTDISABLE low */
	val = bus_space_read_4(fdtbus_bs_tag, cpucfg, CPUCFG_GENCTL);
	for (i = 1; i < mp_ncpus; i++)
		val &= ~(1 << i);
	bus_space_write_4(fdtbus_bs_tag, cpucfg, CPUCFG_GENCTL, val);

	/* Set DBGPWRDUP low */
	val = bus_space_read_4(fdtbus_bs_tag, cpucfg, CPUCFG_DBGCTL1);
	for (i = 1; i < mp_ncpus; i++)
		val &= ~(1 << i);
	bus_space_write_4(fdtbus_bs_tag, cpucfg, CPUCFG_DBGCTL1, val);

	/* Release power clamp */
	for (i = 1; i < mp_ncpus; i++)
		for (j = 0; j <= CPU_PWR_CLAMP_STEPS; j++) {
			if (soc_family != ALLWINNERSOC_SUN7I) {
				bus_space_write_4(fdtbus_bs_tag, prcm,
				    CPU_PWR_CLAMP(i), 0xff >> j);
			} else {
				bus_space_write_4(fdtbus_bs_tag,
				    cpucfg, A20_CPU1_PWR_CLAMP, 0xff >> j);
			}
		}
	DELAY(10000);

	/* Clear power-off gating */
	if (soc_family != ALLWINNERSOC_SUN7I) {
		val = bus_space_read_4(fdtbus_bs_tag, prcm, CPU_PWROFF);
		for (i = 0; i < mp_ncpus; i++)
			val &= ~(1 << i);
		bus_space_write_4(fdtbus_bs_tag, prcm, CPU_PWROFF, val);
	} else {
		val = bus_space_read_4(fdtbus_bs_tag,
		    cpucfg, A20_CPU1_PWROFF_REG);
		val &= ~(1 << 0);
		bus_space_write_4(fdtbus_bs_tag, cpucfg,
		    A20_CPU1_PWROFF_REG, val);
	}
	DELAY(1000);

	/* De-assert cpu core reset */
	for (i = 1; i < mp_ncpus; i++)
		bus_space_write_4(fdtbus_bs_tag, cpucfg, CPU_RST_CTL(i),
		    CPU_RESET | CPU_CORE_RESET);

	/* Assert DBGPWRDUP signal */
	val = bus_space_read_4(fdtbus_bs_tag, cpucfg, CPUCFG_DBGCTL1);
	for (i = 1; i < mp_ncpus; i++)
		val |= (1 << i);
	bus_space_write_4(fdtbus_bs_tag, cpucfg, CPUCFG_DBGCTL1, val);

	dsb();
	sev();
	bus_space_unmap(fdtbus_bs_tag, cpucfg, CPUCFG_SIZE);
	if (soc_family != ALLWINNERSOC_SUN7I)
		bus_space_unmap(fdtbus_bs_tag, prcm, PRCM_SIZE);
}

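/*
 * Power up a single secondary core on a multi-cluster SoC: hold it in
 * core and power-on reset, release its power clamp, clear power-off
 * gating, then release both resets.
 */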
static void
aw_mc_mp_start_cpu(bus_space_handle_t cpuscfg, bus_space_handle_t cpuxcfg,
    bus_space_handle_t prcm, int cluster, int cpu)
{
	uint32_t val;
	int i;

	/* Assert core reset */
	val = bus_space_read_4(fdtbus_bs_tag, cpuxcfg, CPUX_CL_RST(cluster));
	val &= ~(1 << cpu);
	bus_space_write_4(fdtbus_bs_tag, cpuxcfg, CPUX_CL_RST(cluster), val);

	/* Assert power-on reset */
	val = bus_space_read_4(fdtbus_bs_tag, cpuscfg, CPUS_CL_RST(cluster));
	val &= ~(1 << cpu);
	bus_space_write_4(fdtbus_bs_tag, cpuscfg, CPUS_CL_RST(cluster), val);

	/* Disable automatic L1 cache invalidate at reset */
	val = bus_space_read_4(fdtbus_bs_tag, cpuxcfg, CPUX_CL_CTRL0(cluster));
	val &= ~(1 << cpu);
	bus_space_write_4(fdtbus_bs_tag, cpuxcfg, CPUX_CL_CTRL0(cluster), val);

	/* Release power clamp */
	for (i = 0; i <= CPU_PWR_CLAMP_STEPS; i++)
		bus_space_write_4(fdtbus_bs_tag, prcm,
		    PRCM_CL_PWR_CLAMP(cluster, cpu), 0xff >> i);
	while (bus_space_read_4(fdtbus_bs_tag, prcm,
	    PRCM_CL_PWR_CLAMP(cluster, cpu)) != 0)
		;

	/* Clear power-off gating */
	val = bus_space_read_4(fdtbus_bs_tag, prcm, PRCM_CL_PWROFF(cluster));
	val &= ~(1 << cpu);
	bus_space_write_4(fdtbus_bs_tag, prcm, PRCM_CL_PWROFF(cluster), val);

	/* De-assert power-on reset */
	val = bus_space_read_4(fdtbus_bs_tag, cpuscfg, CPUS_CL_RST(cluster));
	val |= (1 << cpu);
	bus_space_write_4(fdtbus_bs_tag, cpuscfg, CPUS_CL_RST(cluster), val);

	/* De-assert core reset */
	val = bus_space_read_4(fdtbus_bs_tag, cpuxcfg, CPUX_CL_RST(cluster));
	val |= (1 << cpu);
	bus_space_write_4(fdtbus_bs_tag, cpuxcfg, CPUX_CL_RST(cluster), val);
}

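/*
 * Publish the entry point and start every secondary core in cluster 0.
 * Multiple clusters are not yet supported, which the KASSERT below
 * enforces.
 */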
static void
aw_mc_mp_start_ap(bus_space_handle_t cpuscfg, bus_space_handle_t cpuxcfg,
    bus_space_handle_t prcm)
{
	int cluster, cpu;

	KASSERT(mp_ncpus <= 4, ("multiple clusters not yet supported"));

	dcache_wbinv_poc_all();

	bus_space_write_4(fdtbus_bs_tag, cpuscfg, CPUCFG_P_REG0,
	    pmap_kextract((vm_offset_t)mpentry));

	cluster = 0;
	for (cpu = 1; cpu < mp_ncpus; cpu++)
		aw_mc_mp_start_cpu(cpuscfg, cpuxcfg, prcm, cluster, cpu);
}

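/*
 * MP start-up entry point for the A83T: map the CPUCFG, CPUXCFG and PRCM
 * register blocks, run the multi-cluster bring-up sequence, then wake the
 * secondary cores with SEV.
 */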
void
a83t_mp_start_ap(platform_t plat)
{
	bus_space_handle_t cpuscfg, cpuxcfg, prcm;

	if (bus_space_map(fdtbus_bs_tag, CPUCFG_BASE, CPUCFG_SIZE,
	    0, &cpuscfg) != 0)
		panic("Couldn't map the CPUCFG\n");
	if (bus_space_map(fdtbus_bs_tag, CPUXCFG_BASE, CPUXCFG_SIZE,
	    0, &cpuxcfg) != 0)
		panic("Couldn't map the CPUXCFG\n");
	if (bus_space_map(fdtbus_bs_tag, PRCM_BASE, PRCM_SIZE, 0,
	    &prcm) != 0)
		panic("Couldn't map the PRCM\n");

	aw_mc_mp_start_ap(cpuscfg, cpuxcfg, prcm);
	dsb();
	sev();
	bus_space_unmap(fdtbus_bs_tag, cpuxcfg, CPUXCFG_SIZE);
	bus_space_unmap(fdtbus_bs_tag, cpuscfg, CPUCFG_SIZE);
	bus_space_unmap(fdtbus_bs_tag, prcm, PRCM_SIZE);
}