/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 Semihalf.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/pcpu.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/ktr.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>

#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/debug_monitor.h>
#include <machine/smp.h>
#include <machine/pcb.h>
#include <machine/vmparam.h>
#ifdef VFP
#include <machine/vfp.h>
#endif
#ifdef CPU_MV_PJ4B
#include <arm/mv/mvwin.h>
#endif

/* Used to hold the APs until we are ready to release them. */
struct mtx ap_boot_mtx;

/* Number of application processors. */
volatile int mp_naps;

/* Set to 1 once we're ready to let the APs out of the pen. */
volatile int aps_ready = 0;

void set_stackptrs(int cpu);

/* Temporary variables for init_secondary(). */
void *dpcpu[MAXCPU - 1];

/* Determine whether we are running on an MP machine. */
int
cpu_mp_probe(void)
{

	KASSERT(mp_ncpus != 0, ("cpu_mp_probe: mp_ncpus is unset"));

	CPU_SETOF(0, &all_cpus);

	return (mp_ncpus > 1);
}

/*
 * Wait for the APs started by the platform-specific code to check in,
 * giving up after about two seconds.
 */
static int
check_ap(void)
{
	uint32_t ms;

	for (ms = 0; ms < 2000; ++ms) {
		if ((mp_naps + 1) == mp_ncpus)
			return (0);	/* success */
		else
			DELAY(1000);
	}

	return (-2);
}

/* Initialize and fire up non-boot processors. */
void
cpu_mp_start(void)
{
	int error, i;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* Reserve memory for application processors */
	for (i = 0; i < (mp_ncpus - 1); i++)
		dpcpu[i] = kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);

	dcache_wbinv_poc_all();

	/* Initialize boot code and start up processors */
	platform_mp_start_ap();

	/* Check whether the APs started properly */
	error = check_ap();
	if (error)
		printf("WARNING: Some APs failed to start\n");
	else
		for (i = 1; i < mp_ncpus; i++)
			CPU_SET(i, &all_cpus);
}

/* Introduce the rest of the cores to the world. */
void
cpu_mp_announce(void)
{

}

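/*
 * AP C-code entry point, reached from the platform bootstrap code with
 * a valid stack.  Bring up the MMU and caches, the per-mode stacks and
 * the per-CPU data, then park until release_aps() lets us into the
 * scheduler.
 */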
void
init_secondary(int cpu)
{
	struct pcpu *pc;
	uint32_t loop_counter;

	pmap_set_tex();
	cpuinfo_reinit_mmu(pmap_kern_ttb);
	cpu_setup();

	/* Provide stack pointers for other processor modes. */
	set_stackptrs(cpu);

	/* Unmask asynchronous aborts. */
	enable_interrupts(PSR_A);
	pc = &__pcpu[cpu];

	/*
	 * pcpu_init() updates the global pcpu list, so it must not be
	 * executed on several cores in parallel; wait until all
	 * lower-numbered APs have checked in.
	 */
	while (mp_naps < (cpu - 1))
		;

	pcpu_init(pc, cpu, sizeof(struct pcpu));
	pc->pc_mpidr = cp15_mpidr_get() & 0xFFFFFF;
	dpcpu_init(dpcpu[cpu - 1], cpu);
#if defined(DDB)
	dbg_monitor_init_secondary();
#endif
	/* Signal our startup to the BSP. */
	atomic_add_rel_32(&mp_naps, 1);

	/* Spin until the BSP releases the APs. */
	while (!atomic_load_acq_int(&aps_ready)) {
		__asm __volatile("wfe");
	}

	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	pc->pc_curthread = pc->pc_idlethread;
	pc->pc_curpcb = pc->pc_idlethread->td_pcb;
	set_curthread(pc->pc_idlethread);
	schedinit_ap();
#ifdef VFP
	vfp_init();
#endif

	/* Configure the interrupt controller */
	intr_pic_init_secondary();

	/* Apply possible BP hardening */
	cpuinfo_init_bp_hardening();

	mtx_lock_spin(&ap_boot_mtx);

	atomic_add_rel_32(&smp_cpus, 1);

	if (smp_cpus == mp_ncpus) {
		/* Enable IPIs, TLB shootdown, freezes, etc. */
		atomic_store_rel_int(&smp_started, 1);
	}

	mtx_unlock_spin(&ap_boot_mtx);

	loop_counter = 0;
	while (smp_started == 0) {
		DELAY(100);
		loop_counter++;
		if (loop_counter == 1000)
			CTR0(KTR_SMP, "AP still waiting for smp_started");
	}
	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	CTR0(KTR_SMP, "go into scheduler");

	/* Enter the scheduler */
	sched_ap_entry();

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}

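/*
 * IPI handlers.  These are hooked up to the interrupt controller by
 * release_aps() via intr_ipi_setup().
 */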
static void
ipi_rendezvous(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_RENDEZVOUS");
	smp_rendezvous_action();
}

static void
ipi_ast(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_AST");
}

static void
ipi_stop(void *dummy __unused)
{
	u_int cpu;

	/*
	 * IPI_STOP_HARD is mapped to IPI_STOP.
	 */
	CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD");

	cpu = PCPU_GET(cpuid);
	savectx(&stoppcbs[cpu]);

	/*
	 * CPUs are stopped when entering the debugger and at
	 * system shutdown, both events which can precede a
	 * panic dump.  For the dump to be correct, all caches
	 * must be flushed and invalidated, but on ARM there's
	 * no way to broadcast a wbinv_all to other cores.
	 * Instead, we have each core do the local wbinv_all as
	 * part of stopping the core.  The core requesting the
	 * stop will do the L2 cache flush after all other cores
	 * have done their L1 flushes and stopped.
	 */
	dcache_wbinv_poc_all();

	/* Indicate we are stopped */
	CPU_SET_ATOMIC(cpu, &stopped_cpus);

	/* Wait for restart */
	while (!CPU_ISSET(cpu, &started_cpus))
		cpu_spinwait();

	CPU_CLR_ATOMIC(cpu, &started_cpus);
	CPU_CLR_ATOMIC(cpu, &stopped_cpus);
#ifdef DDB
	dbg_resume_dbreg();
#endif
	CTR0(KTR_SMP, "IPI_STOP (restart)");
}

static void
ipi_preempt(void *arg)
{

	CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
	sched_preempt(curthread);
}

static void
ipi_hardclock(void *arg)
{

	CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
	hardclockintr();
}

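/*
 * SYSINIT hook: register the IPI handlers and release the APs spinning
 * in init_secondary(), then wait up to two seconds for smp_started.
 */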
static void
release_aps(void *dummy __unused)
{
	uint32_t loop_counter;

	if (mp_ncpus == 1)
		return;

	intr_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL);
	intr_ipi_setup(IPI_AST, "ast", ipi_ast, NULL);
	intr_ipi_setup(IPI_STOP, "stop", ipi_stop, NULL);
	intr_ipi_setup(IPI_PREEMPT, "preempt", ipi_preempt, NULL);
	intr_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL);

	atomic_store_rel_int(&aps_ready, 1);
	/* Wake the other CPUs up */
	dsb();
	sev();

	printf("Release APs\n");

	for (loop_counter = 0; loop_counter < 2000; loop_counter++) {
		if (smp_started)
			return;
		DELAY(1000);
	}
	printf("APs not started\n");
}

SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

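/*
 * Describe the CPU topology to the scheduler: a single level in which
 * all CPUs share an L2 cache.
 */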
struct cpu_group *
cpu_topo(void)
{

	return (smp_topo_1level(CG_SHARE_L2, mp_ncpus, 0));
}

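/* Let the platform-specific code enumerate the CPUs and set mp_maxid. */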
void
cpu_mp_setmaxid(void)
{

	platform_mp_setmaxid();
}

/* IPI-sending routines. */
void
ipi_all_but_self(u_int ipi)
{
	cpuset_t other_cpus;

	other_cpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &other_cpus);
	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(other_cpus, ipi);
}

void
ipi_cpu(int cpu, u_int ipi)
{
	cpuset_t cpus;

	CPU_ZERO(&cpus);
	CPU_SET(cpu, &cpus);

	CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi);
	intr_ipi_send(cpus, ipi);
}

void
ipi_selected(cpuset_t cpus, u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(cpus, ipi);
}