/*-
 * Copyright (c) 2015-2016 The FreeBSD Foundation
 *
 * This software was developed by Andrew Turner under
 * sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_acpi.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/csan.h>
#include <sys/domainset.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <machine/machdep.h>
#include <machine/cpu.h>
#include <machine/cpu_feat.h>
#include <machine/debug_monitor.h>
#include <machine/intr.h>
#include <machine/smp.h>
#ifdef VFP
#include <machine/vfp.h>
#endif

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#endif

#ifdef FDT
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/ofw_cpu.h>
#endif

#include <dev/psci/psci.h>

#define	MP_BOOTSTACK_SIZE	(kstack_pages * PAGE_SIZE)

#define	MP_QUIRK_CPULIST	0x01	/* The list of cpus may be wrong, */
					/* don't panic if one fails to start */
static uint32_t mp_quirks;

#ifdef FDT
static struct {
	const char *compat;
	uint32_t quirks;
} fdt_quirks[] = {
	{ "arm,foundation-aarch64",	MP_QUIRK_CPULIST },
	{ "arm,fvp-base",		MP_QUIRK_CPULIST },
	/* This is incorrect in some DTS files */
	{ "arm,vfp-base",		MP_QUIRK_CPULIST },
	{ NULL, 0 },
};
#endif

static void ipi_ast(void *);
static void ipi_hardclock(void *);
static void ipi_preempt(void *);
static void ipi_rendezvous(void *);
static void ipi_stop(void *);

#ifdef FDT
static u_int fdt_cpuid;
#endif

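/*
 * Low-level entry points for the APs, implemented in assembly; both
 * switch to the boot stack published in bootstack and end up in
 * init_secondary().  mpentry_psci() receives the CPU id as the PSCI
 * context argument, while mpentry_spintable() identifies itself via
 * ap_cpuid (see enable_cpu_spin() below).
 */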
void mpentry_psci(unsigned long cpuid);
void mpentry_spintable(void);
void init_secondary(uint64_t);

/* Synchronize AP startup. */
static struct mtx ap_boot_mtx;

/* Used to initialize the PCPU ahead of calling init_secondary(). */
void *bootpcpu;
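/*
 * MPIDR affinity bits of the AP being released via the spin-table
 * method; the AP clears this once it is running.
 */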
uint64_t ap_cpuid;

/* Stacks for AP initialization, discarded once idle threads are started. */
void *bootstack;
static void *bootstacks[MAXCPU];

/* Count of started APs, used to synchronize access to bootstack. */
static volatile int aps_started;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready;

/* Temporary variables for init_secondary() */
static void *dpcpu[MAXCPU - 1];

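/*
 * Check whether the given target MPIDR affinity bits identify the boot
 * CPU, which always has CPU id 0.
 */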
static bool
is_boot_cpu(uint64_t target_cpu)
{

	return (PCPU_GET_MPIDR(cpuid_to_pcpu[0]) == (target_cpu & CPU_AFF_MASK));
}

static void
release_aps(void *dummy __unused)
{
	int i, started;

	/* Only release CPUs if they exist */
	if (mp_ncpus == 1)
		return;

	intr_ipi_setup(IPI_AST, "ast", ipi_ast, NULL);
	intr_ipi_setup(IPI_PREEMPT, "preempt", ipi_preempt, NULL);
	intr_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL);
	intr_ipi_setup(IPI_STOP, "stop", ipi_stop, NULL);
	intr_ipi_setup(IPI_STOP_HARD, "stop hard", ipi_stop, NULL);
	intr_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL);

	atomic_store_rel_int(&aps_ready, 1);
	/* Wake up the other CPUs */
	__asm __volatile(
	    "dsb ishst	\n"
	    "sev	\n"
	    ::: "memory");

	printf("Release APs...");

	started = 0;
	for (i = 0; i < 2000; i++) {
		if (atomic_load_acq_int(&smp_started) != 0) {
			printf("done\n");
			return;
		}
		/*
		 * Don't time out while we are making progress. Some large
		 * systems can take a while to start all CPUs.
		 */
		if (smp_cpus > started) {
			i = 0;
			started = smp_cpus;
		}
		DELAY(1000);
	}

	printf("APs not started\n");
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

void
init_secondary(uint64_t cpu)
{
	struct pcpu *pcpup;
	pmap_t pmap0;
	uint64_t mpidr;

	ptrauth_mp_start(cpu);

	/*
	 * Verify that the value passed in the 'cpu' argument (aka
	 * context_id) is valid. Some older U-Boot based PSCI
	 * implementations are buggy and can pass a random value in it.
	 */
	mpidr = READ_SPECIALREG(mpidr_el1) & CPU_AFF_MASK;
	if (cpu >= MAXCPU || cpuid_to_pcpu[cpu] == NULL ||
	    PCPU_GET_MPIDR(cpuid_to_pcpu[cpu]) != mpidr) {
		for (cpu = 0; cpu < mp_maxid; cpu++)
			if (cpuid_to_pcpu[cpu] != NULL &&
			    PCPU_GET_MPIDR(cpuid_to_pcpu[cpu]) == mpidr)
				break;
		if (cpu >= MAXCPU)
			panic("MPIDR for this CPU is not in pcpu table");
	}

	/*
	 * Identify the current CPU. This is necessary to set up
	 * affinity registers and to provide support for
	 * runtime chip identification.
	 *
	 * We need this before signalling the CPU is ready to
	 * let the boot CPU use the results.
	 */
	pcpup = cpuid_to_pcpu[cpu];
	pcpup->pc_midr = get_midr();
	identify_cpu(cpu);

	/* Ensure the stores in identify_cpu have completed */
	atomic_thread_fence_acq_rel();

	/* Detect early CPU feature support */
	enable_cpu_feat(CPU_FEAT_EARLY_BOOT);

	/* Signal the BSP and spin until it has released all APs. */
	atomic_add_int(&aps_started, 1);
	while (!atomic_load_int(&aps_ready))
		__asm __volatile("wfe");

	/* Initialize curthread */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	pcpup->pc_curthread = pcpup->pc_idlethread;
	schedinit_ap();

	/* Initialize curpmap to match TTBR0's current setting. */
	pmap0 = vmspace_pmap(&vmspace0);
	KASSERT(pmap_to_ttbr0(pmap0) == READ_SPECIALREG(ttbr0_el1),
	    ("pmap0 doesn't match cpu %ld's ttbr0", cpu));
	pcpup->pc_curpmap = pmap0;

	install_cpu_errata();
	enable_cpu_feat(CPU_FEAT_AFTER_DEV);

	intr_pic_init_secondary();

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

#ifdef VFP
	vfp_init_secondary();
#endif

	dbg_init();

	mtx_lock_spin(&ap_boot_mtx);
	atomic_add_rel_32(&smp_cpus, 1);
	if (smp_cpus == mp_ncpus) {
		/* Enable IPIs, TLB shootdown, freezes, etc. */
		atomic_store_rel_int(&smp_started, 1);
	}
	mtx_unlock_spin(&ap_boot_mtx);

	kcsan_cpu_init(cpu);

	/* Enter the scheduler */
	sched_ap_entry();

	panic("scheduler returned us to init_secondary");
	/* NOTREACHED */
}

static void
smp_after_idle_runnable(void *arg __unused)
{
	int cpu;

	if (mp_ncpus == 1)
		return;

	KASSERT(smp_started != 0, ("%s: SMP not started yet", __func__));

	/*
	 * Wait for all APs to handle an interrupt.  After that, we know that
	 * the APs have entered the scheduler at least once, so the boot stacks
	 * are safe to free.
	 */
	smp_rendezvous(smp_no_rendezvous_barrier, NULL,
	    smp_no_rendezvous_barrier, NULL);

	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		if (bootstacks[cpu] != NULL)
			kmem_free(bootstacks[cpu], MP_BOOTSTACK_SIZE);
	}
}
SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY,
    smp_after_idle_runnable, NULL);

static void
ipi_ast(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_AST");
}

static void
ipi_hardclock(void *dummy __unused)
{

	CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
	hardclockintr();
}

static void
ipi_preempt(void *dummy __unused)
{
	CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
	sched_preempt(curthread);
}

static void
ipi_rendezvous(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_RENDEZVOUS");
	smp_rendezvous_action();
}

static void
ipi_stop(void *dummy __unused)
{
	u_int cpu;

	CTR0(KTR_SMP, "IPI_STOP");

	cpu = PCPU_GET(cpuid);
	savectx(&stoppcbs[cpu]);

	/* Indicate we are stopped */
	CPU_SET_ATOMIC(cpu, &stopped_cpus);

	/* Wait for restart */
	while (!CPU_ISSET(cpu, &started_cpus))
		cpu_spinwait();

#ifdef DDB
	dbg_register_sync(NULL);
#endif

	CPU_CLR_ATOMIC(cpu, &started_cpus);
	CPU_CLR_ATOMIC(cpu, &stopped_cpus);
	CTR0(KTR_SMP, "IPI_STOP (restart)");
}

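/*
 * Report the CPU topology to the scheduler: a root group spanning all
 * CPUs, with one child group per memory domain, each treated as
 * sharing an L3 cache.
 */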
struct cpu_group *
cpu_topo(void)
{
	struct cpu_group *dom, *root;
	int i;

	root = smp_topo_alloc(1);
	dom = smp_topo_alloc(vm_ndomains);

	root->cg_parent = NULL;
	root->cg_child = dom;
	CPU_COPY(&all_cpus, &root->cg_mask);
	root->cg_count = mp_ncpus;
	root->cg_children = vm_ndomains;
	root->cg_level = CG_SHARE_NONE;
	root->cg_flags = 0;

	/*
	 * Redundant layers will be collapsed by the caller so we don't need a
	 * special case for a single domain.
	 */
	for (i = 0; i < vm_ndomains; i++, dom++) {
		dom->cg_parent = root;
		dom->cg_child = NULL;
		CPU_COPY(&cpuset_domain[i], &dom->cg_mask);
		dom->cg_count = CPU_COUNT(&dom->cg_mask);
		dom->cg_children = 0;
		dom->cg_level = CG_SHARE_L3;
		dom->cg_flags = 0;
	}

	return (root);
}

/* Determine if we are running on an MP machine */
int
cpu_mp_probe(void)
{

	/* ARM64TODO: Read the U bit of mpidr_el1 to determine this */
	return (1);
}

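/*
 * Ask the PSCI firmware to power on the target CPU at the given
 * physical entry point; the FreeBSD CPU id is passed through as the
 * PSCI context argument, which the AP sees in init_secondary().
 */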
static int
enable_cpu_psci(uint64_t target_cpu, vm_paddr_t entry, u_int cpuid)
{
	int err;

	err = psci_cpu_on(target_cpu, entry, cpuid);
	if (err != PSCI_RETVAL_SUCCESS) {
		/*
		 * Panic here if INVARIANTS are enabled and PSCI failed to
		 * start the requested CPU.  psci_cpu_on() returns PSCI_MISSING
		 * to indicate we are unable to use it to start the given CPU.
		 */
		KASSERT(err == PSCI_MISSING ||
		    (mp_quirks & MP_QUIRK_CPULIST) == MP_QUIRK_CPULIST,
		    ("Failed to start CPU %u (%lx), error %d\n",
		    cpuid, target_cpu, err));
		return (EINVAL);
	}

	return (0);
}

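/*
 * Start an AP via the spin-table protocol: publish the physical entry
 * point at the CPU's release address, write it back out of the data
 * cache so the (not yet coherent) AP can see it, and wake the AP with
 * an SEV.  We then wait for the AP to clear ap_cpuid as its sign of
 * life.
 */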
static int
enable_cpu_spin(uint64_t cpu, vm_paddr_t entry, vm_paddr_t release_paddr)
{
	vm_paddr_t *release_addr;

	ap_cpuid = cpu & CPU_AFF_MASK;

	release_addr = pmap_mapdev_attr(release_paddr, sizeof(*release_addr),
	    VM_MEMATTR_DEFAULT);
	if (release_addr == NULL)
		return (ENOMEM);

	*release_addr = entry;
	cpu_dcache_wbinv_range(release_addr, sizeof(*release_addr));
	pmap_unmapdev(release_addr, sizeof(*release_addr));

	__asm __volatile(
	    "sev	\n"
	    ::: "memory");

	/* Wait for the target CPU to start */
	while (atomic_load_64(&ap_cpuid) != 0)
		__asm __volatile("wfe");

	return (0);
}

/*
 * Starts a given CPU. If the CPU is already running, i.e. it is the boot CPU,
 * do nothing. Returns true if the CPU is present and running.
 */
static bool
start_cpu(u_int cpuid, uint64_t target_cpu, int domain, vm_paddr_t release_addr)
{
	struct pcpu *pcpup;
	vm_size_t size;
	vm_paddr_t pa;
	int err, naps;

	/* Check we are able to start this cpu */
	if (cpuid > mp_maxid)
		return (false);

	/* Skip boot CPU */
	if (is_boot_cpu(target_cpu))
		return (true);

	KASSERT(cpuid < MAXCPU, ("Too many CPUs"));

	size = round_page(sizeof(*pcpup) + DPCPU_SIZE);
	pcpup = kmem_malloc_domainset(DOMAINSET_PREF(domain), size,
	    M_WAITOK | M_ZERO);
	pmap_disable_promotion((vm_offset_t)pcpup, size);
	pcpu_init(pcpup, cpuid, sizeof(struct pcpu));
	pcpup->pc_mpidr = target_cpu & CPU_AFF_MASK;
	bootpcpu = pcpup;

	dpcpu[cpuid - 1] = (void *)(pcpup + 1);
	dpcpu_init(dpcpu[cpuid - 1], cpuid);

	bootstacks[cpuid] = kmem_malloc_domainset(DOMAINSET_PREF(domain),
	    MP_BOOTSTACK_SIZE, M_WAITOK | M_ZERO);

	naps = atomic_load_int(&aps_started);
	bootstack = (char *)bootstacks[cpuid] + MP_BOOTSTACK_SIZE;

	printf("Starting CPU %u (%lx)\n", cpuid, target_cpu);

	/*
	 * Only a limited set of the hardware we support lacks EL3, and
	 * so can only use spin-tables.  Thus, we will usually take the
	 * PSCI branch here.
	 */
	MPASS(release_addr == 0 || !psci_present);
	if (release_addr != 0) {
		pa = pmap_extract(kernel_pmap, (vm_offset_t)mpentry_spintable);
		err = enable_cpu_spin(target_cpu, pa, release_addr);
	} else {
		pa = pmap_extract(kernel_pmap, (vm_offset_t)mpentry_psci);
		err = enable_cpu_psci(target_cpu, pa, cpuid);
	}

	if (err != 0) {
		pcpu_destroy(pcpup);
		dpcpu[cpuid - 1] = NULL;
		kmem_free(bootstacks[cpuid], MP_BOOTSTACK_SIZE);
		kmem_free(pcpup, size);
		bootstacks[cpuid] = NULL;
		mp_ncpus--;
		return (false);
	}

	/* Wait for the AP to switch to its boot stack. */
	while (atomic_load_int(&aps_started) < naps + 1)
		cpu_spinwait();
	CPU_SET(cpuid, &all_cpus);

	return (true);
}

#ifdef DEV_ACPI
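/*
 * Start the CPU described by a MADT Generic Interrupt (GICC) subtable;
 * the argument tracks the next CPU id to hand out.
 */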
static void
madt_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	ACPI_MADT_GENERIC_INTERRUPT *intr;
	u_int *cpuid;
	u_int id;
	int domain;

	switch(entry->Type) {
	case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
		intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry;
		cpuid = arg;

		if (is_boot_cpu(intr->ArmMpidr))
			id = 0;
		else
			id = *cpuid;

		domain = 0;
#ifdef NUMA
		if (vm_ndomains > 1)
			domain = acpi_pxm_get_cpu_locality(intr->Uid);
#endif
		if (start_cpu(id, intr->ArmMpidr, domain, 0)) {
			MPASS(cpuid_to_pcpu[id] != NULL);
			cpuid_to_pcpu[id]->pc_acpi_id = intr->Uid;
			/*
			 * Don't increment for the boot CPU, its CPU ID is
			 * reserved.
			 */
			if (!is_boot_cpu(intr->ArmMpidr))
				(*cpuid)++;
		}

		break;
	default:
		break;
	}
}

static void
cpu_init_acpi(void)
{
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;
	u_int cpuid;

	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return;

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		printf("Unable to map the MADT, not starting APs\n");
		return;
	}
	/* Boot CPU is always 0 */
	cpuid = 1;
	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    madt_handler, &cpuid);

	acpi_unmap_table(madt);

#if MAXMEMDOM > 1
	acpi_pxm_set_cpu_locality();
#endif
}
#endif

#ifdef FDT
/*
 * Read the 64-bit "cpu-release-addr" property (two 32-bit cells) used
 * by the spin-table method.  Failure is indicated by failing to
 * populate *release_addr.
 */
static void
populate_release_addr(phandle_t node, vm_paddr_t *release_addr)
{
	pcell_t buf[2];

	if (OF_getencprop(node, "cpu-release-addr", buf, sizeof(buf)) !=
	    sizeof(buf))
		return;

	*release_addr = (((uintptr_t)buf[0] << 32) | buf[1]);
}

static bool
start_cpu_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
{
	uint64_t target_cpu;
	vm_paddr_t release_addr;
	char *enable_method;
	int domain;
	int cpuid;

	target_cpu = reg[0];
	if (addr_size == 2) {
		target_cpu <<= 32;
		target_cpu |= reg[1];
	}

	if (is_boot_cpu(target_cpu))
		cpuid = 0;
	else
		cpuid = fdt_cpuid;

	/*
	 * If PSCI is present, we'll always use that -- the cpu_on method is
	 * mandated in both v0.1 and v0.2.  We'll check the enable-method if
	 * we don't have PSCI and use spin table if it's provided.
	 */
	release_addr = 0;
	if (!psci_present && cpuid != 0) {
		if (OF_getprop_alloc(node, "enable-method",
		    (void **)&enable_method) <= 0)
			return (false);

		if (strcmp(enable_method, "spin-table") != 0) {
			OF_prop_free(enable_method);
			return (false);
		}

		OF_prop_free(enable_method);
		populate_release_addr(node, &release_addr);
		if (release_addr == 0) {
			printf("Failed to fetch release address for CPU %u\n",
			    cpuid);
			return (false);
		}
	}

	if (!start_cpu(cpuid, target_cpu, 0, release_addr))
		return (false);

	/*
	 * Don't increment for the boot CPU, its CPU ID is reserved.
	 */
	if (!is_boot_cpu(target_cpu))
		fdt_cpuid++;

	/* Try to read the numa node of this cpu */
	if (vm_ndomains == 1 ||
	    OF_getencprop(node, "numa-node-id", &domain, sizeof(domain)) <= 0)
		domain = 0;
	cpuid_to_pcpu[cpuid]->pc_domain = domain;
	if (domain < MAXMEMDOM)
		CPU_SET(cpuid, &cpuset_domain[domain]);
	return (true);
}
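
/*
 * Apply any quirks keyed off the root node's compatible strings, then
 * walk the device tree starting every CPU it describes.
 */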
static void
cpu_init_fdt(void)
{
	phandle_t node;
	int i;

	node = OF_peer(0);
	for (i = 0; fdt_quirks[i].compat != NULL; i++) {
		if (ofw_bus_node_is_compatible(node,
		    fdt_quirks[i].compat) != 0) {
			mp_quirks = fdt_quirks[i].quirks;
		}
	}
	fdt_cpuid = 1;
	ofw_cpu_early_foreach(start_cpu_fdt, true);
}
#endif

/* Initialize and fire up non-boot processors */
void
cpu_mp_start(void)
{
	uint64_t mpidr;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* CPU 0 is always the boot CPU. */
	CPU_SET(0, &all_cpus);
	mpidr = READ_SPECIALREG(mpidr_el1) & CPU_AFF_MASK;
	cpuid_to_pcpu[0]->pc_mpidr = mpidr;

	cpu_desc_init();

	switch(arm64_bus_method) {
#ifdef DEV_ACPI
	case ARM64_BUS_ACPI:
		mp_quirks = MP_QUIRK_CPULIST;
		cpu_init_acpi();
		break;
#endif
#ifdef FDT
	case ARM64_BUS_FDT:
		cpu_init_fdt();
		break;
#endif
	default:
		break;
	}
}

/* Introduce the rest of the cores to the world */
void
cpu_mp_announce(void)
{
}

#ifdef DEV_ACPI
static void
cpu_count_acpi_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	u_int *cores = arg;

	switch(entry->Type) {
	case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
		(*cores)++;
		break;
	default:
		break;
	}
}

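/* Count the MADT's Generic Interrupt (GICC) subtables, one per CPU. */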
static u_int
cpu_count_acpi(void)
{
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;
	u_int cores;

	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return (0);

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		printf("Unable to map the MADT, not starting APs\n");
		return (0);
	}

	cores = 0;
	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    cpu_count_acpi_handler, &cores);

	acpi_unmap_table(madt);

	return (cores);
}
#endif

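/*
 * Size mp_ncpus and mp_maxid from the CPU count in the ACPI tables or
 * the device tree; the hw.ncpu tunable may lower, but never raise, the
 * result.
 */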
void
cpu_mp_setmaxid(void)
{
	int cores;

	mp_ncpus = 1;
	mp_maxid = 0;

	switch(arm64_bus_method) {
#ifdef DEV_ACPI
	case ARM64_BUS_ACPI:
		cores = cpu_count_acpi();
		if (cores > 0) {
			cores = MIN(cores, MAXCPU);
			if (bootverbose)
				printf("Found %d CPUs in the ACPI tables\n",
				    cores);
			mp_ncpus = cores;
			mp_maxid = cores - 1;
		}
		break;
#endif
#ifdef FDT
	case ARM64_BUS_FDT:
		cores = ofw_cpu_early_foreach(NULL, false);
		if (cores > 0) {
			cores = MIN(cores, MAXCPU);
			if (bootverbose)
				printf("Found %d CPUs in the device tree\n",
				    cores);
			mp_ncpus = cores;
			mp_maxid = cores - 1;
		}
		break;
#endif
	default:
		if (bootverbose)
			printf("No CPU data, limiting to 1 core\n");
		break;
	}

	if (TUNABLE_INT_FETCH("hw.ncpu", &cores)) {
		if (cores > 0 && cores < mp_ncpus) {
			mp_ncpus = cores;
			mp_maxid = cores - 1;
		}
	}
}

/* Sending IPI */
void
ipi_all_but_self(u_int ipi)
{
	cpuset_t cpus;

	cpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &cpus);
	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(cpus, ipi);
}

void
ipi_cpu(int cpu, u_int ipi)
{
	cpuset_t cpus;

	CPU_ZERO(&cpus);
	CPU_SET(cpu, &cpus);

	CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi);
	intr_ipi_send(cpus, ipi);
}

void
ipi_selected(cpuset_t cpus, u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(cpus, ipi);
}
851