/*-
 * Copyright (c) 2001, John Baldwin <jhb@FreeBSD.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module holds the global variables and machine independent functions
 * used for the kernel SMP support.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>
#include <machine/smp.h>

#include "opt_sched.h"

#ifdef SMP
volatile cpuset_t stopped_cpus;
volatile cpuset_t started_cpus;
cpuset_t hlt_cpus_mask;
cpuset_t logical_cpus_mask;

void (*cpustop_restartfunc)(void);
#endif
/* This is used in modules that need to work in both SMP and UP. */
cpuset_t all_cpus;

int mp_ncpus;
/* export this for libkvm consumers. */
int mp_maxcpus = MAXCPU;

volatile int smp_started;
u_int mp_maxid;

SYSCTL_NODE(_kern, OID_AUTO, smp, CTLFLAG_RD|CTLFLAG_CAPRD, NULL, "Kernel SMP");

SYSCTL_INT(_kern_smp, OID_AUTO, maxid, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxid, 0,
    "Max CPU ID.");

SYSCTL_INT(_kern_smp, OID_AUTO, maxcpus, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxcpus,
    0, "Max number of CPUs that the system was compiled for.");

int smp_active = 0;	/* are the APs allowed to run? */
SYSCTL_INT(_kern_smp, OID_AUTO, active, CTLFLAG_RW, &smp_active, 0,
    "Number of Auxiliary Processors (APs) that were successfully started");

int smp_disabled = 0;	/* has smp been disabled? */
SYSCTL_INT(_kern_smp, OID_AUTO, disabled, CTLFLAG_RDTUN|CTLFLAG_CAPRD,
    &smp_disabled, 0, "SMP has been disabled from the loader");
TUNABLE_INT("kern.smp.disabled", &smp_disabled);

int smp_cpus = 1;	/* how many CPUs are running */
SYSCTL_INT(_kern_smp, OID_AUTO, cpus, CTLFLAG_RD|CTLFLAG_CAPRD, &smp_cpus, 0,
    "Number of CPUs online");

int smp_topology = 0;	/* Which topology we're using. */
SYSCTL_INT(_kern_smp, OID_AUTO, topology, CTLFLAG_RD, &smp_topology, 0,
    "Topology override setting; 0 is default provided by hardware.");
TUNABLE_INT("kern.smp.topology", &smp_topology);
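
/*
 * Example (illustrative, not part of this file): both tunables above are
 * normally set from the loader, e.g. in /boot/loader.conf:
 *
 *	kern.smp.disabled=1	# boot with a single CPU
 *	kern.smp.topology=2	# force a flat topology; see smp_topo()
 */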

#ifdef SMP
/* Enable forwarding of a signal to a process running on a different CPU */
static int forward_signal_enabled = 1;
SYSCTL_INT(_kern_smp, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
	   &forward_signal_enabled, 0,
	   "Forwarding of a signal to a process on a different CPU");

/* Variables needed for SMP rendezvous. */
static volatile int smp_rv_ncpus;
static void (*volatile smp_rv_setup_func)(void *arg);
static void (*volatile smp_rv_action_func)(void *arg);
static void (*volatile smp_rv_teardown_func)(void *arg);
static void *volatile smp_rv_func_arg;
static volatile int smp_rv_waiters[3];
static volatile int smp_rv_generation;

/*
 * Shared mutex to restrict busywaits between smp_rendezvous() and
 * smp(_targeted)_tlb_shootdown().  A deadlock occurs if both of these
 * functions trigger at once and cause multiple CPUs to busywait with
 * interrupts disabled.
 */
struct mtx smp_ipi_mtx;

/*
 * Let the MD SMP code initialize mp_maxid very early if it can.
 */
static void
mp_setmaxid(void *dummy)
{
	cpu_mp_setmaxid();
}
SYSINIT(cpu_mp_setmaxid, SI_SUB_TUNABLES, SI_ORDER_FIRST, mp_setmaxid, NULL);

/*
 * Call the MD SMP initialization code.
 */
static void
mp_start(void *dummy)
{

	mtx_init(&smp_ipi_mtx, "smp rendezvous", NULL, MTX_SPIN);

	/* Probe for MP hardware. */
	if (smp_disabled != 0 || cpu_mp_probe() == 0) {
		mp_ncpus = 1;
		CPU_SETOF(PCPU_GET(cpuid), &all_cpus);
		return;
	}

	cpu_mp_start();
	printf("FreeBSD/SMP: Multiprocessor System Detected: %d CPUs\n",
	    mp_ncpus);
	cpu_mp_announce();
}
SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_THIRD, mp_start, NULL);

void
forward_signal(struct thread *td)
{
	int id;

	/*
	 * signotify() has already set TDF_ASTPENDING and TDF_NEEDSIGCHK on
	 * this thread, so all we need to do is poke it if it is currently
	 * executing so that it executes ast().
	 */
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_RUNNING(td),
	    ("forward_signal: thread is not TDS_RUNNING"));

	CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);

	if (!smp_started || cold || panicstr)
		return;
	if (!forward_signal_enabled)
		return;

	/* No need to IPI ourself. */
	if (td == curthread)
		return;

	id = td->td_oncpu;
	if (id == NOCPU)
		return;
	ipi_cpu(id, IPI_AST);
}
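
/*
 * Usage sketch (a hypothetical caller, for illustration only): the signal
 * delivery code notifies the target thread and then, if that thread is
 * running on another CPU, pokes it, roughly:
 *
 *	thread_lock(td);
 *	signotify(td);
 *	if (TD_IS_RUNNING(td))
 *		forward_signal(td);
 *	thread_unlock(td);
 */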

/*
 * When called, the executing CPU sends an IPI to the CPUs in the map
 * requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to stop.
 *  - Waits for each to stop.
 *
 * Returns:
 *   0: SMP not started yet, nothing was done
 *   1: ok
 */
static int
generic_stop_cpus(cpuset_t map, u_int type)
{
#ifdef KTR
	char cpusetbuf[CPUSETBUFSIZ];
#endif
	static volatile u_int stopping_cpu = NOCPU;
	int i;

	KASSERT(
#if defined(__amd64__)
	    type == IPI_STOP || type == IPI_STOP_HARD || type == IPI_SUSPEND,
#else
	    type == IPI_STOP || type == IPI_STOP_HARD,
#endif
	    ("%s: invalid stop type", __func__));

	if (!smp_started)
		return (0);

	CTR2(KTR_SMP, "stop_cpus(%s) with %u type",
	    cpusetobj_strprint(cpusetbuf, &map), type);

	if (stopping_cpu != PCPU_GET(cpuid))
		while (atomic_cmpset_int(&stopping_cpu, NOCPU,
		    PCPU_GET(cpuid)) == 0)
			while (stopping_cpu != NOCPU)
				cpu_spinwait(); /* spin */

	/* send the stop IPI to all CPUs in map */
	ipi_selected(map, type);

	i = 0;
	while (!CPU_SUBSET(&stopped_cpus, &map)) {
		/* spin */
		cpu_spinwait();
		i++;
		if (i == 100000000) {
			printf("timeout stopping cpus\n");
			break;
		}
	}

	stopping_cpu = NOCPU;
	return (1);
}

int
stop_cpus(cpuset_t map)
{

	return (generic_stop_cpus(map, IPI_STOP));
}

int
stop_cpus_hard(cpuset_t map)
{

	return (generic_stop_cpus(map, IPI_STOP_HARD));
}

#if defined(__amd64__)
int
suspend_cpus(cpuset_t map)
{

	return (generic_stop_cpus(map, IPI_SUSPEND));
}
#endif

/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 *  - Signals all CPUs in map to restart.
 *  - Waits for each to restart.
 *
 * Returns:
 *   0: SMP not started yet, nothing was done
 *   1: ok
 */
int
restart_cpus(cpuset_t map)
{
#ifdef KTR
	char cpusetbuf[CPUSETBUFSIZ];
#endif

	if (!smp_started)
		return (0);

	CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));

	/* signal other cpus to restart */
	CPU_COPY_STORE_REL(&map, &started_cpus);

	/* wait for each to clear its bit */
	while (CPU_OVERLAP(&stopped_cpus, &map))
		cpu_spinwait();

	return (1);
}
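
/*
 * Illustrative pairing (a sketch, not taken from a real caller): a CPU
 * that must quiesce the rest of the system, e.g. a debugger or suspend
 * path, stops every other CPU, does its work, and then releases them:
 *
 *	cpuset_t map;
 *
 *	map = all_cpus;
 *	CPU_CLR(PCPU_GET(cpuid), &map);
 *	stop_cpus(map);
 *	... other CPUs now spin in their stop handlers ...
 *	restart_cpus(stopped_cpus);
 */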

/*
 * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
 * (if specified), rendezvous, execute the action function (if specified),
 * rendezvous again, execute the teardown function (if specified), and then
 * resume.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */
void
smp_rendezvous_action(void)
{
	struct thread *td;
	void *local_func_arg;
	void (*local_setup_func)(void*);
	void (*local_action_func)(void*);
	void (*local_teardown_func)(void*);
	int generation;
#ifdef INVARIANTS
	int owepreempt;
#endif

	/* Ensure we have up-to-date values. */
	atomic_add_acq_int(&smp_rv_waiters[0], 1);
	while (smp_rv_waiters[0] < smp_rv_ncpus)
		cpu_spinwait();

	/* Fetch rendezvous parameters after acquire barrier. */
	local_func_arg = smp_rv_func_arg;
	local_setup_func = smp_rv_setup_func;
	local_action_func = smp_rv_action_func;
	local_teardown_func = smp_rv_teardown_func;
	generation = smp_rv_generation;

	/*
	 * Use a nested critical section to prevent any preemptions
	 * from occurring during a rendezvous action routine.
	 * Specifically, if a rendezvous handler is invoked via an IPI
	 * and the interrupted thread was in the critical_exit()
	 * function after setting td_critnest to 0 but before
	 * performing a deferred preemption, this routine can be
	 * invoked with td_critnest set to 0 and td_owepreempt true.
	 * In that case, a critical_exit() during the rendezvous
	 * action would trigger a preemption which is not permitted in
	 * a rendezvous action.  To fix this, wrap all of the
	 * rendezvous action handlers in a critical section.  We
	 * cannot use a regular critical section however as having
	 * critical_exit() preempt from this routine would also be
	 * problematic (the preemption must not occur before the IPI
	 * has been acknowledged via an EOI).  Instead, we
	 * intentionally ignore td_owepreempt when leaving the
	 * critical section.  This should be harmless because we do
	 * not permit rendezvous action routines to schedule threads,
	 * and thus td_owepreempt should never transition from 0 to 1
	 * during this routine.
	 */
	td = curthread;
	td->td_critnest++;
#ifdef INVARIANTS
	owepreempt = td->td_owepreempt;
#endif

	/*
	 * If requested, run a setup function before the main action
	 * function.  Ensure all CPUs have completed the setup
	 * function before moving on to the action function.
	 */
	if (local_setup_func != smp_no_rendevous_barrier) {
		if (local_setup_func != NULL)
			local_setup_func(local_func_arg);
		atomic_add_int(&smp_rv_waiters[1], 1);
		while (smp_rv_waiters[1] < smp_rv_ncpus)
			cpu_spinwait();
	}

	if (local_action_func != NULL)
		local_action_func(local_func_arg);

	/*
	 * Signal that the main action has been completed.  If a
	 * full exit rendezvous is requested, then all CPUs will
	 * wait here until all CPUs have finished the main action.
	 *
	 * Note that the write by the last CPU to finish the action
	 * may become visible to different CPUs at different times.
	 * As a result, the CPU that initiated the rendezvous may
	 * exit the rendezvous and drop the lock allowing another
	 * rendezvous to be initiated on the same CPU or a different
	 * CPU.  In that case the exit sentinel may be cleared before
	 * all CPUs have noticed causing those CPUs to hang forever.
	 * Workaround this by using a generation count to notice when
	 * this race occurs and to exit the rendezvous in that case.
	 */
	MPASS(generation == smp_rv_generation);
	atomic_add_int(&smp_rv_waiters[2], 1);
	if (local_teardown_func != smp_no_rendevous_barrier) {
		while (smp_rv_waiters[2] < smp_rv_ncpus &&
		    generation == smp_rv_generation)
			cpu_spinwait();

		if (local_teardown_func != NULL)
			local_teardown_func(local_func_arg);
	}

	td->td_critnest--;
	KASSERT(owepreempt == td->td_owepreempt,
	    ("rendezvous action changed td_owepreempt"));
}

void
smp_rendezvous_cpus(cpuset_t map,
	void (* setup_func)(void *),
	void (* action_func)(void *),
	void (* teardown_func)(void *),
	void *arg)
{
	int curcpumap, i, ncpus = 0;

	if (!smp_started) {
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		return;
	}

	CPU_FOREACH(i) {
		if (CPU_ISSET(i, &map))
			ncpus++;
	}
	if (ncpus == 0)
		panic("ncpus is 0 with non-zero map");

	mtx_lock_spin(&smp_ipi_mtx);

	atomic_add_acq_int(&smp_rv_generation, 1);

	/* Pass rendezvous parameters via global variables. */
	smp_rv_ncpus = ncpus;
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[1] = 0;
	smp_rv_waiters[2] = 0;
	atomic_store_rel_int(&smp_rv_waiters[0], 0);

	/*
	 * Signal other processors, which will enter the IPI with
	 * interrupts off.
	 */
	curcpumap = CPU_ISSET(curcpu, &map);
	CPU_CLR(curcpu, &map);
	ipi_selected(map, IPI_RENDEZVOUS);

	/* Check if the current CPU is in the map */
	if (curcpumap != 0)
		smp_rendezvous_action();

	/*
	 * If the caller did not request an exit barrier to be enforced
	 * on each CPU, ensure that this CPU waits for all the other
	 * CPUs to finish the rendezvous.
	 */
	if (teardown_func == smp_no_rendevous_barrier)
		while (atomic_load_acq_int(&smp_rv_waiters[2]) < ncpus)
			cpu_spinwait();

	mtx_unlock_spin(&smp_ipi_mtx);
}

void
smp_rendezvous(void (* setup_func)(void *),
	       void (* action_func)(void *),
	       void (* teardown_func)(void *),
	       void *arg)
{
	smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func,
	    arg);
}
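
/*
 * Example (a minimal sketch; "count_action" is a hypothetical handler,
 * not part of this file): run an action on every CPU with no setup work
 * and no exit barrier.  Passing smp_no_rendevous_barrier skips the
 * corresponding barrier entirely, while passing NULL skips only the
 * call but still synchronizes:
 *
 *	static int count;
 *
 *	static void
 *	count_action(void *arg)
 *	{
 *		atomic_add_int((int *)arg, 1);
 *	}
 *
 *	smp_rendezvous(smp_no_rendevous_barrier, count_action,
 *	    smp_no_rendevous_barrier, &count);
 */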

static struct cpu_group group[MAXCPU];

struct cpu_group *
smp_topo(void)
{
	char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
	struct cpu_group *top;

	/*
	 * Check for a fake topology request for debugging purposes.
	 */
	switch (smp_topology) {
	case 1:
		/* Dual core with no sharing.  */
		top = smp_topo_1level(CG_SHARE_NONE, 2, 0);
		break;
	case 2:
		/* No topology, all cpus are equal. */
		top = smp_topo_none();
		break;
	case 3:
		/* Dual core with shared L2.  */
		top = smp_topo_1level(CG_SHARE_L2, 2, 0);
		break;
	case 4:
		/* quad core, shared l3 among each package, private l2.  */
		top = smp_topo_1level(CG_SHARE_L3, 4, 0);
		break;
	case 5:
		/* quad core, 2 dualcore parts on each package share l2.  */
		top = smp_topo_2level(CG_SHARE_NONE, 2, CG_SHARE_L2, 2, 0);
		break;
	case 6:
		/* Single-core 2xHTT */
		top = smp_topo_1level(CG_SHARE_L1, 2, CG_FLAG_HTT);
		break;
	case 7:
		/* quad core with a shared l3, 8 threads sharing L2.  */
		top = smp_topo_2level(CG_SHARE_L3, 4, CG_SHARE_L2, 8,
		    CG_FLAG_SMT);
		break;
	default:
		/* Default, ask the system what it wants. */
		top = cpu_topo();
		break;
	}
	/*
	 * Verify the returned topology.
	 */
	if (top->cg_count != mp_ncpus)
		panic("Built bad topology at %p.  CPU count %d != %d",
		    top, top->cg_count, mp_ncpus);
	if (CPU_CMP(&top->cg_mask, &all_cpus))
		panic("Built bad topology at %p.  CPU mask (%s) != (%s)",
		    top, cpusetobj_strprint(cpusetbuf, &top->cg_mask),
		    cpusetobj_strprint(cpusetbuf2, &all_cpus));
	return (top);
}

struct cpu_group *
smp_topo_none(void)
{
	struct cpu_group *top;

	top = &group[0];
	top->cg_parent = NULL;
	top->cg_child = NULL;
	top->cg_mask = all_cpus;
	top->cg_count = mp_ncpus;
	top->cg_children = 0;
	top->cg_level = CG_SHARE_NONE;
	top->cg_flags = 0;

	return (top);
}

static int
smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share,
    int count, int flags, int start)
{
	char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
	cpuset_t mask;
	int i;

	CPU_ZERO(&mask);
	for (i = 0; i < count; i++, start++)
		CPU_SET(start, &mask);
	child->cg_parent = parent;
	child->cg_child = NULL;
	child->cg_children = 0;
	child->cg_level = share;
	child->cg_count = count;
	child->cg_flags = flags;
	child->cg_mask = mask;
	parent->cg_children++;
	for (; parent != NULL; parent = parent->cg_parent) {
		if (CPU_OVERLAP(&parent->cg_mask, &child->cg_mask))
			panic("Duplicate children in %p.  mask (%s) child (%s)",
			    parent,
			    cpusetobj_strprint(cpusetbuf, &parent->cg_mask),
			    cpusetobj_strprint(cpusetbuf2, &child->cg_mask));
		CPU_OR(&parent->cg_mask, &child->cg_mask);
		parent->cg_count += child->cg_count;
	}

	return (start);
}

struct cpu_group *
smp_topo_1level(int share, int count, int flags)
{
	struct cpu_group *child;
	struct cpu_group *top;
	int packages;
	int cpu;
	int i;

	cpu = 0;
	top = &group[0];
	packages = mp_ncpus / count;
	top->cg_child = child = &group[1];
	top->cg_level = CG_SHARE_NONE;
	for (i = 0; i < packages; i++, child++)
		cpu = smp_topo_addleaf(top, child, share, count, flags, cpu);
	return (top);
}

struct cpu_group *
smp_topo_2level(int l2share, int l2count, int l1share, int l1count,
    int l1flags)
{
	struct cpu_group *top;
	struct cpu_group *l1g;
	struct cpu_group *l2g;
	int cpu;
	int i;
	int j;

	cpu = 0;
	top = &group[0];
	l2g = &group[1];
	top->cg_child = l2g;
	top->cg_level = CG_SHARE_NONE;
	top->cg_children = mp_ncpus / (l2count * l1count);
	l1g = l2g + top->cg_children;
	for (i = 0; i < top->cg_children; i++, l2g++) {
		l2g->cg_parent = top;
		l2g->cg_child = l1g;
		l2g->cg_level = l2share;
		for (j = 0; j < l2count; j++, l1g++)
			cpu = smp_topo_addleaf(l2g, l1g, l1share, l1count,
			    l1flags, cpu);
	}
	return (top);
}
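
/*
 * Worked example (assumes a hypothetical mp_ncpus of 32): the
 * kern.smp.topology=7 case above, smp_topo_2level(CG_SHARE_L3, 4,
 * CG_SHARE_L2, 8, CG_FLAG_SMT), lays out the static group[] array as:
 *
 *	group[0]	root, CG_SHARE_NONE, CPUs 0-31
 *	group[1]	package sharing L3, 4 children, CPUs 0-31
 *	group[2..5]	leaves sharing L2, 8 SMT threads each:
 *			CPUs 0-7, 8-15, 16-23, 24-31
 */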

struct cpu_group *
smp_topo_find(struct cpu_group *top, int cpu)
{
	struct cpu_group *cg;
	cpuset_t mask;
	int children;
	int i;

	CPU_SETOF(cpu, &mask);
	cg = top;
	for (;;) {
		if (!CPU_OVERLAP(&cg->cg_mask, &mask))
			return (NULL);
		if (cg->cg_children == 0)
			return (cg);
		children = cg->cg_children;
		for (i = 0, cg = cg->cg_child; i < children; cg++, i++)
			if (CPU_OVERLAP(&cg->cg_mask, &mask))
				break;
	}
	return (NULL);
}
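
/*
 * Usage sketch (illustrative only): find the leaf group that a given
 * CPU belongs to, e.g. to learn which CPUs share a cache level with it:
 *
 *	struct cpu_group *cg;
 *
 *	cg = smp_topo_find(smp_topo(), cpu);
 *	if (cg != NULL)
 *		... cg->cg_mask is the set of CPUs in cpu's leaf group ...
 */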
#else /* !SMP */

void
smp_rendezvous_cpus(cpuset_t map,
	void (*setup_func)(void *),
	void (*action_func)(void *),
	void (*teardown_func)(void *),
	void *arg)
{
	if (setup_func != NULL)
		setup_func(arg);
	if (action_func != NULL)
		action_func(arg);
	if (teardown_func != NULL)
		teardown_func(arg);
}

void
smp_rendezvous(void (*setup_func)(void *),
	       void (*action_func)(void *),
	       void (*teardown_func)(void *),
	       void *arg)
{

	if (setup_func != NULL)
		setup_func(arg);
	if (action_func != NULL)
		action_func(arg);
	if (teardown_func != NULL)
		teardown_func(arg);
}

/*
 * Provide dummy SMP support for UP kernels.  Modules that need to use SMP
 * APIs will still work using this dummy support.
 */
static void
mp_setvariables_for_up(void *dummy)
{
	mp_ncpus = 1;
	mp_maxid = PCPU_GET(cpuid);
	CPU_SETOF(mp_maxid, &all_cpus);
	KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero"));
}
SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,
    mp_setvariables_for_up, NULL);
#endif /* SMP */

void
smp_no_rendevous_barrier(void *dummy)
{
#ifdef SMP
	KASSERT(!smp_started,
	    ("smp_no_rendevous_barrier called and smp is started"));
#endif
}
723