xref: /freebsd/sys/kern/subr_smp.c (revision 120ca8d74b46caa260702485e30fe5f9f9984682)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2001, John Baldwin <jhb@FreeBSD.org>.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 /*
29  * This module holds the global variables and machine independent functions
30  * used for the kernel SMP support.
31  */
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/kernel.h>
36 #include <sys/ktr.h>
37 #include <sys/proc.h>
38 #include <sys/bus.h>
39 #include <sys/lock.h>
40 #include <sys/malloc.h>
41 #include <sys/mutex.h>
42 #include <sys/pcpu.h>
43 #include <sys/sched.h>
44 #include <sys/smp.h>
45 #include <sys/sysctl.h>
46 
47 #include <machine/cpu.h>
48 #include <machine/pcb.h>
49 #include <machine/smp.h>
50 
51 #include "opt_sched.h"
52 
53 MALLOC_DEFINE(M_TOPO, "toponodes", "SMP topology data");
54 
55 struct cpu_group *
56 smp_topo_alloc(u_int count)
57 {
58 	static struct cpu_group *group = NULL;
59 	static u_int index;
60 	u_int curr;
61 
62 	if (group == NULL) {
63 		group = mallocarray((mp_maxid + 1) * MAX_CACHE_LEVELS + 1,
64 		    sizeof(*group), M_DEVBUF, M_WAITOK | M_ZERO);
65 	}
66 	curr = index;
67 	index += count;
68 	return (&group[curr]);
69 }
70 
71 struct cpu_group *
72 smp_topo_none(void)
73 {
74 	struct cpu_group *top;
75 
76 	top = smp_topo_alloc(1);
77 	top->cg_parent = NULL;
78 	top->cg_child = NULL;
79 	top->cg_mask = all_cpus;
80 	top->cg_count = mp_ncpus;
81 	top->cg_children = 0;
82 	top->cg_level = CG_SHARE_NONE;
83 	top->cg_flags = 0;
84 
85 	return (top);
86 }
87 
88 #ifdef SMP
89 
90 volatile cpuset_t stopped_cpus;
91 volatile cpuset_t started_cpus;
92 volatile cpuset_t suspended_cpus;
93 cpuset_t hlt_cpus_mask;
94 cpuset_t logical_cpus_mask;
95 
96 void (*cpustop_restartfunc)(void);
97 #endif
98 
99 static int sysctl_kern_smp_active(SYSCTL_HANDLER_ARGS);
100 
101 /* This is used in modules that need to work in both SMP and UP. */
102 cpuset_t all_cpus;
103 
104 int mp_ncpus;
105 /* export this for libkvm consumers. */
106 int mp_maxcpus = MAXCPU;
107 
108 volatile int smp_started;
109 u_int mp_maxid;
110 
111 /* Array of CPU contexts saved during a panic. */
112 struct pcb *stoppcbs;
113 
114 static SYSCTL_NODE(_kern, OID_AUTO, smp,
115     CTLFLAG_RD | CTLFLAG_CAPRD | CTLFLAG_MPSAFE, NULL,
116     "Kernel SMP");
117 
118 SYSCTL_INT(_kern_smp, OID_AUTO, maxid, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxid, 0,
119     "Max CPU ID.");
120 
121 SYSCTL_INT(_kern_smp, OID_AUTO, maxcpus, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxcpus,
122     0, "Max number of CPUs that the system was compiled for.");
123 
124 SYSCTL_PROC(_kern_smp, OID_AUTO, active, CTLFLAG_RD|CTLTYPE_INT|CTLFLAG_MPSAFE,
125     NULL, 0, sysctl_kern_smp_active, "I",
126     "Indicates system is running in SMP mode");
127 
128 int smp_disabled = 0;	/* has smp been disabled? */
129 SYSCTL_INT(_kern_smp, OID_AUTO, disabled, CTLFLAG_RDTUN|CTLFLAG_CAPRD,
130     &smp_disabled, 0, "SMP has been disabled from the loader");
131 
132 int smp_cpus = 1;	/* how many CPUs are running */
133 SYSCTL_INT(_kern_smp, OID_AUTO, cpus, CTLFLAG_RD|CTLFLAG_CAPRD, &smp_cpus, 0,
134     "Number of CPUs online");
135 
136 int smp_threads_per_core = 1;	/* how many SMT threads are running per core */
137 SYSCTL_INT(_kern_smp, OID_AUTO, threads_per_core, CTLFLAG_RD|CTLFLAG_CAPRD,
138     &smp_threads_per_core, 0, "Number of SMT threads online per core");
139 
140 int mp_ncores = -1;	/* how many physical cores running */
141 SYSCTL_INT(_kern_smp, OID_AUTO, cores, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_ncores, 0,
142     "Number of physical cores online");
143 
144 int smp_topology = 0;	/* Which topology we're using. */
145 SYSCTL_INT(_kern_smp, OID_AUTO, topology, CTLFLAG_RDTUN, &smp_topology, 0,
146     "Topology override setting; 0 is default provided by hardware.");
147 
148 #ifdef SMP
149 /* Variables needed for SMP rendezvous. */
150 static volatile int smp_rv_ncpus;
151 static void (*volatile smp_rv_setup_func)(void *arg);
152 static void (*volatile smp_rv_action_func)(void *arg);
153 static void (*volatile smp_rv_teardown_func)(void *arg);
154 static void *volatile smp_rv_func_arg;
155 static volatile int smp_rv_waiters[4];
156 
157 /*
158  * Shared mutex to restrict busywaits between smp_rendezvous() and
159  * smp(_targeted)_tlb_shootdown().  A deadlock occurs if both of these
160  * functions trigger at once and cause multiple CPUs to busywait with
161  * interrupts disabled.
162  */
163 struct mtx smp_ipi_mtx;
164 
165 /*
166  * Let the MD SMP code initialize mp_maxid very early if it can.
167  */
168 static void
169 mp_setmaxid(void *dummy)
170 {
171 
172 	cpu_mp_setmaxid();
173 
174 	KASSERT(mp_ncpus >= 1, ("%s: CPU count < 1", __func__));
175 	KASSERT(mp_ncpus > 1 || mp_maxid == 0,
176 	    ("%s: one CPU but mp_maxid is not zero", __func__));
177 	KASSERT(mp_maxid >= mp_ncpus - 1,
178 	    ("%s: counters out of sync: max %d, count %d", __func__,
179 		mp_maxid, mp_ncpus));
180 
181 	cpusetsizemin = howmany(mp_maxid + 1, NBBY);
182 }
183 SYSINIT(cpu_mp_setmaxid, SI_SUB_TUNABLES, SI_ORDER_FIRST, mp_setmaxid, NULL);
184 
185 /*
186  * Call the MD SMP initialization code.
187  */
188 static void
189 mp_start(void *dummy)
190 {
191 
192 	mtx_init(&smp_ipi_mtx, "smp rendezvous", NULL, MTX_SPIN);
193 
194 	/* Probe for MP hardware. */
195 	if (smp_disabled != 0 || cpu_mp_probe() == 0) {
196 		mp_ncores = 1;
197 		mp_ncpus = 1;
198 		CPU_SETOF(PCPU_GET(cpuid), &all_cpus);
199 		return;
200 	}
201 
202 	cpu_mp_start();
203 	printf("FreeBSD/SMP: Multiprocessor System Detected: %d CPUs\n",
204 	    mp_ncpus);
205 
206 	/* Provide a default for most architectures that don't have SMT/HTT. */
207 	if (mp_ncores < 0)
208 		mp_ncores = mp_ncpus;
209 
210 	stoppcbs = mallocarray(mp_maxid + 1, sizeof(struct pcb), M_DEVBUF,
211 	    M_WAITOK | M_ZERO);
212 
213 	cpu_mp_announce();
214 }
215 SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_THIRD, mp_start, NULL);
216 
217 void
218 forward_signal(struct thread *td)
219 {
220 	int id;
221 
222 	/*
223 	 * signotify() has already set TDA_AST and TDA_SIG on td_ast for
224 	 * this thread, so all we need to do is poke it if it is currently
225 	 * executing so that it executes ast().
226 	 */
227 	THREAD_LOCK_ASSERT(td, MA_OWNED);
228 	KASSERT(TD_IS_RUNNING(td),
229 	    ("forward_signal: thread is not TDS_RUNNING"));
230 
231 	CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);
232 
233 	if (!smp_started || cold || KERNEL_PANICKED())
234 		return;
235 
236 	/* No need to IPI ourself. */
237 	if (td == curthread)
238 		return;
239 
240 	id = td->td_oncpu;
241 	if (id == NOCPU)
242 		return;
243 	ipi_cpu(id, IPI_AST);
244 }
245 
246 /*
247  * When called, the executing CPU sends an IPI to the other CPUs in the
248  * map, requesting that they halt execution.
249  *
250  * Usually (but not necessarily) called with 'other_cpus' as its arg.
251  *
252  *  - Signals all CPUs in map to stop.
253  *  - Waits for each to stop.
254  *
255  * Returns:
256  *  -1: error
257  *   0: NA
258  *   1: ok
259  *
260  */
261 #if defined(__amd64__) || defined(__i386__)
262 #define	X86	1
263 #else
264 #define	X86	0
265 #endif
266 static int
267 generic_stop_cpus(cpuset_t map, u_int type)
268 {
269 #ifdef KTR
270 	char cpusetbuf[CPUSETBUFSIZ];
271 #endif
272 	static volatile u_int stopping_cpu = NOCPU;
273 	int i;
274 	volatile cpuset_t *cpus;
275 
276 	KASSERT(
277 	    type == IPI_STOP || type == IPI_STOP_HARD
278 #if X86
279 	    || type == IPI_SUSPEND || type == IPI_OFF
280 #endif
281 	    , ("%s: invalid stop type", __func__));
282 
283 	if (!smp_started)
284 		return (0);
285 
286 	CTR2(KTR_SMP, "stop_cpus(%s) with %u type",
287 	    cpusetobj_strprint(cpusetbuf, &map), type);
288 
289 #if X86
290 	/*
291  * When suspending, ensure there are no IPIs in progress.
292 	 * IPIs that have been issued, but not yet delivered (e.g.
293 	 * not pending on a vCPU when running under virtualization)
294 	 * will be lost, violating FreeBSD's assumption of reliable
295 	 * IPI delivery.
296 	 */
297 	if (type == IPI_SUSPEND || type == IPI_OFF)
298 		mtx_lock_spin(&smp_ipi_mtx);
299 #endif
300 
301 #if X86
302 	if (!nmi_is_broadcast || nmi_kdb_lock == 0) {
303 #endif
304 	if (stopping_cpu != PCPU_GET(cpuid))
305 		while (atomic_cmpset_int(&stopping_cpu, NOCPU,
306 		    PCPU_GET(cpuid)) == 0)
307 			while (stopping_cpu != NOCPU)
308 				cpu_spinwait(); /* spin */
309 
310 	/* send the stop IPI to all CPUs in map */
311 	ipi_selected(map, type);
312 #if X86
313 	}
314 #endif
315 
316 #if X86
317 	if (type == IPI_SUSPEND || type == IPI_OFF)
318 		cpus = &suspended_cpus;
319 	else
320 #endif
321 		cpus = &stopped_cpus;
322 
323 	i = 0;
324 	while (!CPU_SUBSET(cpus, &map)) {
325 		/* spin */
326 		cpu_spinwait();
327 		i++;
328 		if (i == 100000000) {
329 			printf("timeout stopping cpus\n");
330 			break;
331 		}
332 	}
333 
334 #if X86
335 	if (type == IPI_SUSPEND || type == IPI_OFF)
336 		mtx_unlock_spin(&smp_ipi_mtx);
337 #endif
338 
339 	stopping_cpu = NOCPU;
340 	return (1);
341 }
342 
343 int
344 stop_cpus(cpuset_t map)
345 {
346 
347 	return (generic_stop_cpus(map, IPI_STOP));
348 }
349 
350 int
351 stop_cpus_hard(cpuset_t map)
352 {
353 
354 	return (generic_stop_cpus(map, IPI_STOP_HARD));
355 }
356 
357 #if X86
358 int
359 suspend_cpus(cpuset_t map)
360 {
361 
362 	return (generic_stop_cpus(map, IPI_SUSPEND));
363 }
364 
365 int
366 offline_cpus(cpuset_t map)
367 {
368 
369 	return (generic_stop_cpus(map, IPI_OFF));
370 }
371 #endif
372 
373 /*
374  * Called by a CPU to restart stopped CPUs.
375  *
376  * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
377  *
378  *  - Signals all CPUs in map to restart.
379  *  - Waits for each to restart.
380  *
381  * Returns:
382  *  -1: error
383  *   0: NA
384  *   1: ok
385  */
386 static int
387 generic_restart_cpus(cpuset_t map, u_int type)
388 {
389 #ifdef KTR
390 	char cpusetbuf[CPUSETBUFSIZ];
391 #endif
392 	volatile cpuset_t *cpus;
393 
394 #if X86
395 	KASSERT(type == IPI_STOP || type == IPI_STOP_HARD
396 	    || type == IPI_SUSPEND, ("%s: invalid stop type", __func__));
397 
398 	if (!smp_started)
399 		return (0);
400 
401 	CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));
402 
403 	if (type == IPI_SUSPEND)
404 		cpus = &resuming_cpus;
405 	else
406 		cpus = &stopped_cpus;
407 
408 	/* signal other cpus to restart */
409 	if (type == IPI_SUSPEND)
410 		CPU_COPY_STORE_REL(&map, &toresume_cpus);
411 	else
412 		CPU_COPY_STORE_REL(&map, &started_cpus);
413 
414 	/*
415 	 * Wake up any CPUs stopped with MWAIT.  From MI code we can't tell if
416 	 * MONITOR/MWAIT is enabled, but the potentially redundant writes are
417 	 * relatively inexpensive.
418 	 */
419 	if (type == IPI_STOP) {
420 		struct monitorbuf *mb;
421 		u_int id;
422 
423 		CPU_FOREACH(id) {
424 			if (!CPU_ISSET(id, &map))
425 				continue;
426 
427 			mb = &pcpu_find(id)->pc_monitorbuf;
428 			atomic_store_int(&mb->stop_state,
429 			    MONITOR_STOPSTATE_RUNNING);
430 		}
431 	}
432 
433 	if (!nmi_is_broadcast || nmi_kdb_lock == 0) {
434 		/* wait for each to clear its bit */
435 		while (CPU_OVERLAP(cpus, &map))
436 			cpu_spinwait();
437 	}
438 #else /* !X86 */
439 	KASSERT(type == IPI_STOP || type == IPI_STOP_HARD,
440 	    ("%s: invalid stop type", __func__));
441 
442 	if (!smp_started)
443 		return (0);
444 
445 	CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));
446 
447 	cpus = &stopped_cpus;
448 
449 	/* signal other cpus to restart */
450 	CPU_COPY_STORE_REL(&map, &started_cpus);
451 
452 	/* wait for each to clear its bit */
453 	while (CPU_OVERLAP(cpus, &map))
454 		cpu_spinwait();
455 #endif
456 	return (1);
457 }
458 
459 int
460 restart_cpus(cpuset_t map)
461 {
462 
463 	return (generic_restart_cpus(map, IPI_STOP));
464 }
465 
466 #if X86
467 int
468 resume_cpus(cpuset_t map)
469 {
470 
471 	return (generic_restart_cpus(map, IPI_SUSPEND));
472 }
473 #endif
474 #undef X86
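/*
 * Editorial usage sketch (not part of the original source): a typical
 * caller, such as a debugger or panic path, stops every CPU except the
 * current one and later restarts whatever actually stopped.  Only the APIs
 * defined in this file and the standard cpuset macros are assumed.
 *
 *	cpuset_t other_cpus;
 *
 *	other_cpus = all_cpus;
 *	CPU_CLR(PCPU_GET(cpuid), &other_cpus);
 *	stop_cpus_hard(other_cpus);
 *
 *	... inspect or modify global state while the other CPUs spin ...
 *
 *	restart_cpus(stopped_cpus);
 */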
475 
476 /*
477  * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
478  * (if specified), rendezvous, execute the action function (if specified),
479  * rendezvous again, execute the teardown function (if specified), and then
480  * resume.
481  *
482  * Note that the supplied external functions _must_ be reentrant and aware
483  * that they are running in parallel and in an unknown lock context.
484  */
485 void
486 smp_rendezvous_action(void)
487 {
488 	struct thread *td;
489 	void *local_func_arg;
490 	void (*local_setup_func)(void*);
491 	void (*local_action_func)(void*);
492 	void (*local_teardown_func)(void*);
493 #ifdef INVARIANTS
494 	int owepreempt;
495 #endif
496 
497 	/* Ensure we have up-to-date values. */
498 	atomic_add_acq_int(&smp_rv_waiters[0], 1);
499 	while (smp_rv_waiters[0] < smp_rv_ncpus)
500 		cpu_spinwait();
501 
502 	/* Fetch rendezvous parameters after acquire barrier. */
503 	local_func_arg = smp_rv_func_arg;
504 	local_setup_func = smp_rv_setup_func;
505 	local_action_func = smp_rv_action_func;
506 	local_teardown_func = smp_rv_teardown_func;
507 
508 	/*
509 	 * Use a nested critical section to prevent any preemptions
510 	 * from occurring during a rendezvous action routine.
511 	 * Specifically, if a rendezvous handler is invoked via an IPI
512 	 * and the interrupted thread was in the critical_exit()
513 	 * function after setting td_critnest to 0 but before
514 	 * performing a deferred preemption, this routine can be
515 	 * invoked with td_critnest set to 0 and td_owepreempt true.
516 	 * In that case, a critical_exit() during the rendezvous
517 	 * action would trigger a preemption which is not permitted in
518 	 * a rendezvous action.  To fix this, wrap all of the
519 	 * rendezvous action handlers in a critical section.  We
520 	 * cannot use a regular critical section however as having
521 	 * critical_exit() preempt from this routine would also be
522 	 * problematic (the preemption must not occur before the IPI
523 	 * has been acknowledged via an EOI).  Instead, we
524 	 * intentionally ignore td_owepreempt when leaving the
525 	 * critical section.  This should be harmless because we do
526 	 * not permit rendezvous action routines to schedule threads,
527 	 * and thus td_owepreempt should never transition from 0 to 1
528 	 * during this routine.
529 	 */
530 	td = curthread;
531 	td->td_critnest++;
532 #ifdef INVARIANTS
533 	owepreempt = td->td_owepreempt;
534 #endif
535 
536 	/*
537 	 * If requested, run a setup function before the main action
538 	 * function.  Ensure all CPUs have completed the setup
539 	 * function before moving on to the action function.
540 	 */
541 	if (local_setup_func != smp_no_rendezvous_barrier) {
542 		if (local_setup_func != NULL)
543 			local_setup_func(local_func_arg);
544 		atomic_add_int(&smp_rv_waiters[1], 1);
545 		while (smp_rv_waiters[1] < smp_rv_ncpus)
546 			cpu_spinwait();
547 	}
548 
549 	if (local_action_func != NULL)
550 		local_action_func(local_func_arg);
551 
552 	if (local_teardown_func != smp_no_rendezvous_barrier) {
553 		/*
554 		 * Signal that the main action has been completed.  If a
555 		 * full exit rendezvous is requested, then all CPUs will
556 		 * wait here until all CPUs have finished the main action.
557 		 */
558 		atomic_add_int(&smp_rv_waiters[2], 1);
559 		while (smp_rv_waiters[2] < smp_rv_ncpus)
560 			cpu_spinwait();
561 
562 		if (local_teardown_func != NULL)
563 			local_teardown_func(local_func_arg);
564 	}
565 
566 	/*
567 	 * Signal that the rendezvous is fully completed by this CPU.
568 	 * This means that no member of smp_rv_* pseudo-structure will be
569 	 * accessed by this target CPU after this point; in particular,
570 	 * memory pointed by smp_rv_func_arg.
571 	 *
572 	 * The release semantic ensures that all accesses performed by
573 	 * the current CPU are visible when smp_rendezvous_cpus()
574 	 * returns, by synchronizing with the
575 	 * atomic_load_acq_int(&smp_rv_waiters[3]).
576 	 */
577 	atomic_add_rel_int(&smp_rv_waiters[3], 1);
578 
579 	td->td_critnest--;
580 	KASSERT(owepreempt == td->td_owepreempt,
581 	    ("rendezvous action changed td_owepreempt"));
582 }
583 
584 void
585 smp_rendezvous_cpus(cpuset_t map,
586 	void (* setup_func)(void *),
587 	void (* action_func)(void *),
588 	void (* teardown_func)(void *),
589 	void *arg)
590 {
591 	int curcpumap, i, ncpus = 0;
592 
593 	/* See comments in the !SMP case. */
594 	if (!smp_started) {
595 		spinlock_enter();
596 		if (setup_func != NULL)
597 			setup_func(arg);
598 		if (action_func != NULL)
599 			action_func(arg);
600 		if (teardown_func != NULL)
601 			teardown_func(arg);
602 		spinlock_exit();
603 		return;
604 	}
605 
606 	/*
607 	 * Make sure we come here with interrupts enabled.  Otherwise we
608 	 * livelock if smp_ipi_mtx is owned by a thread which sent us an IPI.
609 	 */
610 	MPASS(curthread->td_md.md_spinlock_count == 0);
611 
612 	CPU_FOREACH(i) {
613 		if (CPU_ISSET(i, &map))
614 			ncpus++;
615 	}
616 	if (ncpus == 0)
617 		panic("ncpus is 0 with non-zero map");
618 
619 	mtx_lock_spin(&smp_ipi_mtx);
620 
621 	/* Pass rendezvous parameters via global variables. */
622 	smp_rv_ncpus = ncpus;
623 	smp_rv_setup_func = setup_func;
624 	smp_rv_action_func = action_func;
625 	smp_rv_teardown_func = teardown_func;
626 	smp_rv_func_arg = arg;
627 	smp_rv_waiters[1] = 0;
628 	smp_rv_waiters[2] = 0;
629 	smp_rv_waiters[3] = 0;
630 	atomic_store_rel_int(&smp_rv_waiters[0], 0);
631 
632 	/*
633 	 * Signal other processors, which will enter the IPI with
634 	 * interrupts off.
635 	 */
636 	curcpumap = CPU_ISSET(curcpu, &map);
637 	CPU_CLR(curcpu, &map);
638 	ipi_selected(map, IPI_RENDEZVOUS);
639 
640 	/* Check if the current CPU is in the map */
641 	if (curcpumap != 0)
642 		smp_rendezvous_action();
643 
644 	/*
645 	 * Ensure that the master CPU waits for all the other
646 	 * CPUs to finish the rendezvous, so that smp_rv_*
647 	 * pseudo-structure and the arg are guaranteed to not
648 	 * be in use.
649 	 *
650 	 * Load acquire synchronizes with the release add in
651 	 * smp_rendezvous_action(), which ensures that our caller sees
652 	 * all memory actions done by the called functions on other
653 	 * CPUs.
654 	 */
655 	while (atomic_load_acq_int(&smp_rv_waiters[3]) < ncpus)
656 		cpu_spinwait();
657 
658 	mtx_unlock_spin(&smp_ipi_mtx);
659 }
660 
661 void
662 smp_rendezvous_cpu(u_int cpuid,
663 	void (* setup_func)(void *),
664 	void (* action_func)(void *),
665 	void (* teardown_func)(void *),
666 	void *arg)
667 {
668 	cpuset_t set;
669 
670 	CPU_SETOF(cpuid, &set);
671 	smp_rendezvous_cpus(set, setup_func, action_func, teardown_func, arg);
672 }
673 
674 void
675 smp_rendezvous(void (* setup_func)(void *),
676 	       void (* action_func)(void *),
677 	       void (* teardown_func)(void *),
678 	       void *arg)
679 {
680 	smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func, arg);
681 }
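/*
 * Editorial usage sketch (not part of the original source): a caller runs
 * an action on every CPU, passing smp_no_rendezvous_barrier() to skip the
 * setup and teardown synchronization points it does not need.  The names
 * example_percpu and example_action are hypothetical.
 *
 *	static int example_percpu[MAXCPU];
 *
 *	static void
 *	example_action(void *arg)
 *	{
 *
 *		example_percpu[PCPU_GET(cpuid)] = *(int *)arg;
 *	}
 *
 *	int v = 1;
 *
 *	smp_rendezvous(smp_no_rendezvous_barrier, example_action,
 *	    smp_no_rendezvous_barrier, &v);
 */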
682 
683 static void
684 smp_topo_fill(struct cpu_group *cg)
685 {
686 	int c;
687 
688 	for (c = 0; c < cg->cg_children; c++)
689 		smp_topo_fill(&cg->cg_child[c]);
690 	cg->cg_first = CPU_FFS(&cg->cg_mask) - 1;
691 	cg->cg_last = CPU_FLS(&cg->cg_mask) - 1;
692 }
693 
694 struct cpu_group *
695 smp_topo(void)
696 {
697 	char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
698 	static struct cpu_group *top = NULL;
699 
700 	/*
701 	 * The first call to smp_topo() is guaranteed to occur
702 	 * during the kernel boot while we are still single-threaded.
703 	 */
704 	if (top != NULL)
705 		return (top);
706 
707 	/*
708 	 * Check for a fake topology request for debugging purposes.
709 	 */
710 	switch (smp_topology) {
711 	case 1:
712 		/* Dual core with no sharing.  */
713 		top = smp_topo_1level(CG_SHARE_NONE, 2, 0);
714 		break;
715 	case 2:
716 		/* No topology, all cpus are equal. */
717 		top = smp_topo_none();
718 		break;
719 	case 3:
720 		/* Dual core with shared L2.  */
721 		top = smp_topo_1level(CG_SHARE_L2, 2, 0);
722 		break;
723 	case 4:
724 		/* quad core, shared l3 among each package, private l2.  */
725 		top = smp_topo_1level(CG_SHARE_L3, 4, 0);
726 		break;
727 	case 5:
728 		/* quad core,  2 dualcore parts on each package share l2.  */
729 		top = smp_topo_2level(CG_SHARE_NONE, 2, CG_SHARE_L2, 2, 0);
730 		break;
731 	case 6:
732 		/* Single-core 2xHTT */
733 		top = smp_topo_1level(CG_SHARE_L1, 2, CG_FLAG_HTT);
734 		break;
735 	case 7:
736 		/* quad core with a shared l3, 8 threads sharing L2.  */
737 		top = smp_topo_2level(CG_SHARE_L3, 4, CG_SHARE_L2, 8,
738 		    CG_FLAG_SMT);
739 		break;
740 	default:
741 		/* Default, ask the system what it wants. */
742 		top = cpu_topo();
743 		break;
744 	}
745 	/*
746 	 * Verify the returned topology.
747 	 */
748 	if (top->cg_count != mp_ncpus)
749 		panic("Built bad topology at %p.  CPU count %d != %d",
750 		    top, top->cg_count, mp_ncpus);
751 	if (CPU_CMP(&top->cg_mask, &all_cpus))
752 		panic("Built bad topology at %p.  CPU mask (%s) != (%s)",
753 		    top, cpusetobj_strprint(cpusetbuf, &top->cg_mask),
754 		    cpusetobj_strprint(cpusetbuf2, &all_cpus));
755 
756 	/*
757 	 * Collapse nonsense levels that may be created out of convenience by
758 	 * the MD layers.  They cause extra work in the search functions.
759 	 */
760 	while (top->cg_children == 1) {
761 		top = &top->cg_child[0];
762 		top->cg_parent = NULL;
763 	}
764 	smp_topo_fill(top);
765 	return (top);
766 }
767 
768 static int
769 smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share,
770     int count, int flags, int start)
771 {
772 	char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
773 	cpuset_t mask;
774 	int i;
775 
776 	CPU_ZERO(&mask);
777 	for (i = 0; i < count; i++, start++)
778 		CPU_SET(start, &mask);
779 	child->cg_parent = parent;
780 	child->cg_child = NULL;
781 	child->cg_children = 0;
782 	child->cg_level = share;
783 	child->cg_count = count;
784 	child->cg_flags = flags;
785 	child->cg_mask = mask;
786 	parent->cg_children++;
787 	for (; parent != NULL; parent = parent->cg_parent) {
788 		if (CPU_OVERLAP(&parent->cg_mask, &child->cg_mask))
789 			panic("Duplicate children in %p.  mask (%s) child (%s)",
790 			    parent,
791 			    cpusetobj_strprint(cpusetbuf, &parent->cg_mask),
792 			    cpusetobj_strprint(cpusetbuf2, &child->cg_mask));
793 		CPU_OR(&parent->cg_mask, &parent->cg_mask, &child->cg_mask);
794 		parent->cg_count += child->cg_count;
795 	}
796 
797 	return (start);
798 }
799 
800 struct cpu_group *
801 smp_topo_1level(int share, int count, int flags)
802 {
803 	struct cpu_group *child;
804 	struct cpu_group *top;
805 	int packages;
806 	int cpu;
807 	int i;
808 
809 	cpu = 0;
810 	packages = mp_ncpus / count;
811 	top = smp_topo_alloc(1 + packages);
812 	top->cg_child = child = top + 1;
813 	top->cg_level = CG_SHARE_NONE;
814 	for (i = 0; i < packages; i++, child++)
815 		cpu = smp_topo_addleaf(top, child, share, count, flags, cpu);
816 	return (top);
817 }
818 
819 struct cpu_group *
820 smp_topo_2level(int l2share, int l2count, int l1share, int l1count,
821     int l1flags)
822 {
823 	struct cpu_group *top;
824 	struct cpu_group *l1g;
825 	struct cpu_group *l2g;
826 	int cpu;
827 	int i;
828 	int j;
829 
830 	cpu = 0;
831 	top = smp_topo_alloc(1 + mp_ncpus / (l2count * l1count) +
832 	    mp_ncpus / l1count);
833 	l2g = top + 1;
834 	top->cg_child = l2g;
835 	top->cg_level = CG_SHARE_NONE;
836 	top->cg_children = mp_ncpus / (l2count * l1count);
837 	l1g = l2g + top->cg_children;
838 	for (i = 0; i < top->cg_children; i++, l2g++) {
839 		l2g->cg_parent = top;
840 		l2g->cg_child = l1g;
841 		l2g->cg_level = l2share;
842 		for (j = 0; j < l2count; j++, l1g++)
843 			cpu = smp_topo_addleaf(l2g, l1g, l1share, l1count,
844 			    l1flags, cpu);
845 	}
846 	return (top);
847 }
848 
849 struct cpu_group *
850 smp_topo_find(struct cpu_group *top, int cpu)
851 {
852 	struct cpu_group *cg;
853 	cpuset_t mask;
854 	int children;
855 	int i;
856 
857 	CPU_SETOF(cpu, &mask);
858 	cg = top;
859 	for (;;) {
860 		if (!CPU_OVERLAP(&cg->cg_mask, &mask))
861 			return (NULL);
862 		if (cg->cg_children == 0)
863 			return (cg);
864 		children = cg->cg_children;
865 		for (i = 0, cg = cg->cg_child; i < children; cg++, i++)
866 			if (CPU_OVERLAP(&cg->cg_mask, &mask))
867 				break;
868 	}
869 	return (NULL);
870 }
871 #else /* !SMP */
872 
873 void
874 smp_rendezvous_cpus(cpuset_t map,
875 	void (*setup_func)(void *),
876 	void (*action_func)(void *),
877 	void (*teardown_func)(void *),
878 	void *arg)
879 {
880 	/*
881 	 * In the !SMP case we just need to ensure the same initial conditions
882 	 * as the SMP case.
883 	 */
884 	spinlock_enter();
885 	if (setup_func != NULL)
886 		setup_func(arg);
887 	if (action_func != NULL)
888 		action_func(arg);
889 	if (teardown_func != NULL)
890 		teardown_func(arg);
891 	spinlock_exit();
892 }
893 
894 void
895 smp_rendezvous(void (*setup_func)(void *),
896 	       void (*action_func)(void *),
897 	       void (*teardown_func)(void *),
898 	       void *arg)
899 {
900 
901 	smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func,
902 	    arg);
903 }
904 
905 struct cpu_group *
906 smp_topo(void)
907 {
908 	static struct cpu_group *top = NULL;
909 
910 	if (top != NULL)
911 		return (top);
912 
913 	top = smp_topo_none();
914 	return (top);
915 }
916 
917 /*
918  * Provide dummy SMP support for UP kernels.  Modules that need to use SMP
919  * APIs will still work using this dummy support.
920  */
921 static void
922 mp_setvariables_for_up(void *dummy)
923 {
924 	mp_ncpus = 1;
925 	mp_ncores = 1;
926 	mp_maxid = PCPU_GET(cpuid);
927 	CPU_SETOF(mp_maxid, &all_cpus);
928 	KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero"));
929 }
930 SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,
931     mp_setvariables_for_up, NULL);
932 #endif /* SMP */
933 
934 void
935 smp_no_rendezvous_barrier(void *dummy)
936 {
937 #ifdef SMP
938 	KASSERT(!smp_started, ("smp_no_rendezvous called and smp is started"));
939 #endif
940 }
941 
942 void
943 smp_rendezvous_cpus_retry(cpuset_t map,
944 	void (* setup_func)(void *),
945 	void (* action_func)(void *),
946 	void (* teardown_func)(void *),
947 	void (* wait_func)(void *, int),
948 	struct smp_rendezvous_cpus_retry_arg *arg)
949 {
950 	int cpu;
951 
952 	CPU_COPY(&map, &arg->cpus);
953 
954 	/*
955 	 * Only one CPU to execute on.
956 	 */
957 	if (!smp_started) {
958 		spinlock_enter();
959 		if (setup_func != NULL)
960 			setup_func(arg);
961 		if (action_func != NULL)
962 			action_func(arg);
963 		if (teardown_func != NULL)
964 			teardown_func(arg);
965 		spinlock_exit();
966 		return;
967 	}
968 
969 	/*
970 	 * Execute an action on all specified CPUs while retrying until they
971 	 * all acknowledge completion.
972 	 */
973 	for (;;) {
974 		smp_rendezvous_cpus(
975 		    arg->cpus,
976 		    setup_func,
977 		    action_func,
978 		    teardown_func,
979 		    arg);
980 
981 		if (CPU_EMPTY(&arg->cpus))
982 			break;
983 
984 		CPU_FOREACH(cpu) {
985 			if (!CPU_ISSET(cpu, &arg->cpus))
986 				continue;
987 			wait_func(arg, cpu);
988 		}
989 	}
990 }
991 
992 void
993 smp_rendezvous_cpus_done(struct smp_rendezvous_cpus_retry_arg *arg)
994 {
995 
996 	CPU_CLR_ATOMIC(curcpu, &arg->cpus);
997 }
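/*
 * Editorial usage sketch (not part of the original source): with the retry
 * variant, the action function acknowledges completion for its CPU by
 * calling smp_rendezvous_cpus_done(); any CPU that does not is waited on
 * via wait_func() and then rendezvoused with again.  The names
 * example_try_flush, example_can_flush and example_wait are hypothetical.
 *
 *	static struct smp_rendezvous_cpus_retry_arg example_arg;
 *
 *	static void
 *	example_try_flush(void *xarg)
 *	{
 *
 *		if (example_can_flush())
 *			smp_rendezvous_cpus_done(xarg);
 *	}
 *
 *	static void
 *	example_wait(void *xarg, int cpu)
 *	{
 *
 *		cpu_spinwait();
 *	}
 *
 *	smp_rendezvous_cpus_retry(all_cpus, smp_no_rendezvous_barrier,
 *	    example_try_flush, smp_no_rendezvous_barrier, example_wait,
 *	    &example_arg);
 */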
998 
999 /*
1000  * If (prio & PDROP) == 0:
1001  * Wait for specified idle threads to switch once.  This ensures that even
1002  * preempted threads have cycled through the switch function once,
1003  * exiting their codepaths.  This allows us to change global pointers
1004  * with no other synchronization.
1005  * If (prio & PDROP) != 0:
1006  * Force the specified CPUs to switch context at least once.
1007  */
1008 int
1009 quiesce_cpus(cpuset_t map, const char *wmesg, int prio)
1010 {
1011 	struct pcpu *pcpu;
1012 	u_int *gen;
1013 	int error;
1014 	int cpu;
1015 
1016 	error = 0;
1017 	if ((prio & PDROP) == 0) {
1018 		gen = mallocarray(sizeof(u_int), mp_maxid + 1, M_TEMP,
1019 		    M_WAITOK);
1020 		for (cpu = 0; cpu <= mp_maxid; cpu++) {
1021 			if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
1022 				continue;
1023 			pcpu = pcpu_find(cpu);
1024 			gen[cpu] = pcpu->pc_idlethread->td_generation;
1025 		}
1026 	}
1027 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
1028 		if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
1029 			continue;
1030 		pcpu = pcpu_find(cpu);
1031 		thread_lock(curthread);
1032 		sched_bind(curthread, cpu);
1033 		thread_unlock(curthread);
1034 		if ((prio & PDROP) != 0)
1035 			continue;
1036 		while (gen[cpu] == pcpu->pc_idlethread->td_generation) {
1037 			error = tsleep(quiesce_cpus, prio & ~PDROP, wmesg, 1);
1038 			if (error != EWOULDBLOCK)
1039 				goto out;
1040 			error = 0;
1041 		}
1042 	}
1043 out:
1044 	thread_lock(curthread);
1045 	sched_unbind(curthread);
1046 	thread_unlock(curthread);
1047 	if ((prio & PDROP) == 0)
1048 		free(gen, M_TEMP);
1049 
1050 	return (error);
1051 }
1052 
1053 int
1054 quiesce_all_cpus(const char *wmesg, int prio)
1055 {
1056 
1057 	return quiesce_cpus(all_cpus, wmesg, prio);
1058 }
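/*
 * Editorial usage sketch (not part of the original source): per the comment
 * above quiesce_cpus(), a caller can replace a global pointer and then wait
 * for every CPU to pass through the scheduler before freeing the old value.
 * example_global_ptr is a hypothetical name.
 *
 *	old = example_global_ptr;
 *	example_global_ptr = new;
 *	quiesce_all_cpus("exqsc", 0);
 *	free(old, M_TEMP);
 */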
1059 
1060 /*
1061  * Observe all CPUs not executing inside a critical section.
1062  * We are not in one ourselves, so the check for the current CPU is safe.
1063  * If the observed thread changes to something else, we know the section
1064  * was exited as well.
1065  */
1066 void
1067 quiesce_all_critical(void)
1068 {
1069 	struct thread *td, *newtd;
1070 	struct pcpu *pcpu;
1071 	int cpu;
1072 
1073 	MPASS(curthread->td_critnest == 0);
1074 
1075 	CPU_FOREACH(cpu) {
1076 		pcpu = cpuid_to_pcpu[cpu];
1077 		td = pcpu->pc_curthread;
1078 		for (;;) {
1079 			if (td->td_critnest == 0)
1080 				break;
1081 			cpu_spinwait();
1082 			newtd = (struct thread *)
1083 			    atomic_load_acq_ptr((void *)pcpu->pc_curthread);
1084 			if (td != newtd)
1085 				break;
1086 		}
1087 	}
1088 }
1089 
1090 static void
1091 cpus_fence_seq_cst_issue(void *arg __unused)
1092 {
1093 
1094 	atomic_thread_fence_seq_cst();
1095 }
1096 
1097 /*
1098  * Send an IPI forcing a sequentially consistent fence.
1099  *
1100  * Allows replacement of an explicit fence with a compiler barrier.
1101  * Trades a speedup during normal execution for a significant slowdown
1102  * when the barrier is needed.
1103  */
1104 void
1105 cpus_fence_seq_cst(void)
1106 {
1107 
1108 #ifdef SMP
1109 	smp_rendezvous(
1110 	    smp_no_rendezvous_barrier,
1111 	    cpus_fence_seq_cst_issue,
1112 	    smp_no_rendezvous_barrier,
1113 	    NULL
1114 	);
1115 #else
1116 	cpus_fence_seq_cst_issue(NULL);
1117 #endif
1118 }
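/*
 * Editorial usage sketch (not part of the original source): the usual
 * asymmetric-fence pattern keeps only a compiler barrier on the hot path
 * and lets the rare path force the ordering on every CPU.  example_flag
 * and example_gate are hypothetical, and __compiler_membar() is assumed to
 * be available as the MD compiler barrier.
 *
 *	Fast path (frequent):
 *		atomic_store_int(&example_flag, 1);
 *		__compiler_membar();
 *		gate = atomic_load_int(&example_gate);
 *
 *	Slow path (rare):
 *		atomic_store_int(&example_gate, 0);
 *		cpus_fence_seq_cst();
 *		flag = atomic_load_int(&example_flag);
 *
 * At least one side is guaranteed to observe the other's store, as if the
 * fast path had used a full fence instead of the compiler barrier.
 */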
1119 
1120 /* Extra care is taken with this sysctl because the data type is volatile */
1121 static int
1122 sysctl_kern_smp_active(SYSCTL_HANDLER_ARGS)
1123 {
1124 	int error, active;
1125 
1126 	active = smp_started;
1127 	error = SYSCTL_OUT(req, &active, sizeof(active));
1128 	return (error);
1129 }
1130 
1131 #ifdef SMP
1132 void
1133 topo_init_node(struct topo_node *node)
1134 {
1135 
1136 	bzero(node, sizeof(*node));
1137 	TAILQ_INIT(&node->children);
1138 }
1139 
1140 void
1141 topo_init_root(struct topo_node *root)
1142 {
1143 
1144 	topo_init_node(root);
1145 	root->type = TOPO_TYPE_SYSTEM;
1146 }
1147 
1148 /*
1149  * Add a child node with the given ID under the given parent.
1150  * If a child with that ID already exists, return it instead.
1151  */
1152 struct topo_node *
1153 topo_add_node_by_hwid(struct topo_node *parent, int hwid,
1154     topo_node_type type, uintptr_t subtype)
1155 {
1156 	struct topo_node *node;
1157 
1158 	TAILQ_FOREACH_REVERSE(node, &parent->children,
1159 	    topo_children, siblings) {
1160 		if (node->hwid == hwid
1161 		    && node->type == type && node->subtype == subtype) {
1162 			return (node);
1163 		}
1164 	}
1165 
1166 	node = malloc(sizeof(*node), M_TOPO, M_WAITOK);
1167 	topo_init_node(node);
1168 	node->parent = parent;
1169 	node->hwid = hwid;
1170 	node->type = type;
1171 	node->subtype = subtype;
1172 	TAILQ_INSERT_TAIL(&parent->children, node, siblings);
1173 	parent->nchildren++;
1174 
1175 	return (node);
1176 }
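/*
 * Editorial usage sketch (not part of the original source): machine
 * dependent enumeration code builds the tree top-down with these helpers.
 * The hardware IDs below and the name example_root are made up.
 *
 *	static struct topo_node example_root;
 *	struct topo_node *pkg, *core, *pu;
 *
 *	topo_init_root(&example_root);
 *	pkg = topo_add_node_by_hwid(&example_root, 0, TOPO_TYPE_PKG, 0);
 *	core = topo_add_node_by_hwid(pkg, 0, TOPO_TYPE_CORE, 0);
 *	pu = topo_add_node_by_hwid(core, 0, TOPO_TYPE_PU, 0);
 *	topo_set_pu_id(pu, 0);
 */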
1177 
1178 /*
1179  * Find a child node with the given ID under the given parent.
1180  */
1181 struct topo_node *
1182 topo_find_node_by_hwid(struct topo_node *parent, int hwid,
1183     topo_node_type type, uintptr_t subtype)
1184 {
1185 
1186 	struct topo_node *node;
1187 
1188 	TAILQ_FOREACH(node, &parent->children, siblings) {
1189 		if (node->hwid == hwid
1190 		    && node->type == type && node->subtype == subtype) {
1191 			return (node);
1192 		}
1193 	}
1194 
1195 	return (NULL);
1196 }
1197 
1198 /*
1199  * Given a node, change the order of its parent's child nodes such
1200  * that the node becomes the first child while preserving the cyclic
1201  * order of the children.  In other words, the given node is promoted
1202  * by rotation: promoting C in the child list A B C D yields C D A B.
1203  */
1204 void
1205 topo_promote_child(struct topo_node *child)
1206 {
1207 	struct topo_node *next;
1208 	struct topo_node *node;
1209 	struct topo_node *parent;
1210 
1211 	parent = child->parent;
1212 	next = TAILQ_NEXT(child, siblings);
1213 	TAILQ_REMOVE(&parent->children, child, siblings);
1214 	TAILQ_INSERT_HEAD(&parent->children, child, siblings);
1215 
1216 	while (next != NULL) {
1217 		node = next;
1218 		next = TAILQ_NEXT(node, siblings);
1219 		TAILQ_REMOVE(&parent->children, node, siblings);
1220 		TAILQ_INSERT_AFTER(&parent->children, child, node, siblings);
1221 		child = node;
1222 	}
1223 }
1224 
1225 /*
1226  * Iterate to the next node in the depth-first search (traversal) of
1227  * the topology tree.
1228  */
1229 struct topo_node *
1230 topo_next_node(struct topo_node *top, struct topo_node *node)
1231 {
1232 	struct topo_node *next;
1233 
1234 	if ((next = TAILQ_FIRST(&node->children)) != NULL)
1235 		return (next);
1236 
1237 	if ((next = TAILQ_NEXT(node, siblings)) != NULL)
1238 		return (next);
1239 
1240 	while (node != top && (node = node->parent) != top)
1241 		if ((next = TAILQ_NEXT(node, siblings)) != NULL)
1242 			return (next);
1243 
1244 	return (NULL);
1245 }
1246 
1247 /*
1248  * Iterate to the next node in the depth-first search of the topology tree,
1249  * but without descending below the current node.
1250  */
1251 struct topo_node *
1252 topo_next_nonchild_node(struct topo_node *top, struct topo_node *node)
1253 {
1254 	struct topo_node *next;
1255 
1256 	if ((next = TAILQ_NEXT(node, siblings)) != NULL)
1257 		return (next);
1258 
1259 	while (node != top && (node = node->parent) != top)
1260 		if ((next = TAILQ_NEXT(node, siblings)) != NULL)
1261 			return (next);
1262 
1263 	return (NULL);
1264 }
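/*
 * Editorial usage sketch (not part of the original source): a full
 * depth-first walk visits every node below the root, e.g. counting the
 * logical processors in a (hypothetical) example_root tree:
 *
 *	struct topo_node *node;
 *	int npus = 0;
 *
 *	for (node = topo_next_node(&example_root, &example_root);
 *	    node != NULL; node = topo_next_node(&example_root, node)) {
 *		if (node->type == TOPO_TYPE_PU)
 *			npus++;
 *	}
 *
 * topo_next_nonchild_node() is used instead when an entire subtree should
 * be skipped, as topo_analyze_table() does below.
 */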
1265 
1266 /*
1267  * Assign the given ID to the given topology node that represents a logical
1268  * processor.
1269  */
1270 void
1271 topo_set_pu_id(struct topo_node *node, cpuid_t id)
1272 {
1273 
1274 	KASSERT(node->type == TOPO_TYPE_PU,
1275 	    ("topo_set_pu_id: wrong node type: %u", node->type));
1276 	KASSERT(CPU_EMPTY(&node->cpuset) && node->cpu_count == 0,
1277 	    ("topo_set_pu_id: cpuset already non-empty"));
1278 	node->id = id;
1279 	CPU_SET(id, &node->cpuset);
1280 	node->cpu_count = 1;
1281 	node->subtype = 1;
1282 
1283 	while ((node = node->parent) != NULL) {
1284 		KASSERT(!CPU_ISSET(id, &node->cpuset),
1285 		    ("logical ID %u is already set in node %p", id, node));
1286 		CPU_SET(id, &node->cpuset);
1287 		node->cpu_count++;
1288 	}
1289 }
1290 
1291 static struct topology_spec {
1292 	topo_node_type	type;
1293 	bool		match_subtype;
1294 	uintptr_t	subtype;
1295 } topology_level_table[TOPO_LEVEL_COUNT] = {
1296 	[TOPO_LEVEL_PKG] = { .type = TOPO_TYPE_PKG, },
1297 	[TOPO_LEVEL_GROUP] = { .type = TOPO_TYPE_GROUP, },
1298 	[TOPO_LEVEL_CACHEGROUP] = {
1299 		.type = TOPO_TYPE_CACHE,
1300 		.match_subtype = true,
1301 		.subtype = CG_SHARE_L3,
1302 	},
1303 	[TOPO_LEVEL_CORE] = { .type = TOPO_TYPE_CORE, },
1304 	[TOPO_LEVEL_THREAD] = { .type = TOPO_TYPE_PU, },
1305 };
1306 
1307 static bool
1308 topo_analyze_table(struct topo_node *root, int all, enum topo_level level,
1309     struct topo_analysis *results)
1310 {
1311 	struct topology_spec *spec;
1312 	struct topo_node *node;
1313 	int count;
1314 
1315 	if (level >= TOPO_LEVEL_COUNT)
1316 		return (true);
1317 
1318 	spec = &topology_level_table[level];
1319 	count = 0;
1320 	node = topo_next_node(root, root);
1321 
1322 	while (node != NULL) {
1323 		if (node->type != spec->type ||
1324 		    (spec->match_subtype && node->subtype != spec->subtype)) {
1325 			node = topo_next_node(root, node);
1326 			continue;
1327 		}
1328 		if (!all && CPU_EMPTY(&node->cpuset)) {
1329 			node = topo_next_nonchild_node(root, node);
1330 			continue;
1331 		}
1332 
1333 		count++;
1334 
1335 		if (!topo_analyze_table(node, all, level + 1, results))
1336 			return (false);
1337 
1338 		node = topo_next_nonchild_node(root, node);
1339 	}
1340 
1341 	/* Having no explicit subgroups is essentially one subgroup. */
1342 	if (count == 0) {
1343 		count = 1;
1344 
1345 		if (!topo_analyze_table(root, all, level + 1, results))
1346 			return (false);
1347 	}
1348 
1349 	if (results->entities[level] == -1)
1350 		results->entities[level] = count;
1351 	else if (results->entities[level] != count)
1352 		return (false);
1353 
1354 	return (true);
1355 }
1356 
1357 /*
1358  * Check if the topology is uniform, that is, each package has the same number
1359  * of cores in it and each core has the same number of threads (logical
1360  * processors) in it.  If so, calculate the number of packages, the number of
1361  * groups per package, the number of cachegroups per group, and the number of
1362  * logical processors per cachegroup.  The 'all' parameter tells whether to
1363  * include administratively disabled logical processors in the analysis.
1364  */
1365 int
1366 topo_analyze(struct topo_node *topo_root, int all,
1367     struct topo_analysis *results)
1368 {
1369 
1370 	results->entities[TOPO_LEVEL_PKG] = -1;
1371 	results->entities[TOPO_LEVEL_CORE] = -1;
1372 	results->entities[TOPO_LEVEL_THREAD] = -1;
1373 	results->entities[TOPO_LEVEL_GROUP] = -1;
1374 	results->entities[TOPO_LEVEL_CACHEGROUP] = -1;
1375 
1376 	if (!topo_analyze_table(topo_root, all, TOPO_LEVEL_PKG, results))
1377 		return (0);
1378 
1379 	KASSERT(results->entities[TOPO_LEVEL_PKG] > 0,
1380 		("bug in topology or analysis"));
1381 
1382 	return (1);
1383 }
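/*
 * Editorial usage sketch (not part of the original source): a consumer
 * such as the MD topology code can check for a uniform topology and read
 * the per-level counts.  'root' stands for a previously built tree.
 *
 *	struct topo_analysis ana;
 *
 *	if (topo_analyze(root, 1, &ana))
 *		printf("%d package(s) x %d core(s) x %d thread(s)\n",
 *		    ana.entities[TOPO_LEVEL_PKG],
 *		    ana.entities[TOPO_LEVEL_CORE],
 *		    ana.entities[TOPO_LEVEL_THREAD]);
 */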
1384 
1385 #endif /* SMP */
1386