xref: /freebsd/sys/kern/subr_smp.c (revision 8a6eceff3ce76a4bb9078f3fa710f51ab6671ca3)
/*-
 * Copyright (c) 2001, John Baldwin <jhb@FreeBSD.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module holds the global variables and machine independent functions
 * used for the kernel SMP support.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>
#include <machine/smp.h>

#include "opt_sched.h"
#ifdef SMP
MALLOC_DEFINE(M_TOPO, "toponodes", "SMP topology data");

volatile cpuset_t stopped_cpus;
volatile cpuset_t started_cpus;
volatile cpuset_t suspended_cpus;
cpuset_t hlt_cpus_mask;
cpuset_t logical_cpus_mask;

void (*cpustop_restartfunc)(void);
#endif

static int sysctl_kern_smp_active(SYSCTL_HANDLER_ARGS);

/* This is used in modules that need to work in both SMP and UP. */
cpuset_t all_cpus;

int mp_ncpus;
/* Export this for libkvm consumers. */
int mp_maxcpus = MAXCPU;

volatile int smp_started;
u_int mp_maxid;

static SYSCTL_NODE(_kern, OID_AUTO, smp, CTLFLAG_RD|CTLFLAG_CAPRD, NULL,
    "Kernel SMP");

SYSCTL_INT(_kern_smp, OID_AUTO, maxid, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxid, 0,
    "Max CPU ID.");

SYSCTL_INT(_kern_smp, OID_AUTO, maxcpus, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxcpus,
    0, "Max number of CPUs that the system was compiled for.");

SYSCTL_PROC(_kern_smp, OID_AUTO, active, CTLFLAG_RD | CTLTYPE_INT, NULL, 0,
    sysctl_kern_smp_active, "I", "Indicates system is running in SMP mode");

int smp_disabled = 0;	/* has SMP been disabled? */
SYSCTL_INT(_kern_smp, OID_AUTO, disabled, CTLFLAG_RDTUN|CTLFLAG_CAPRD,
    &smp_disabled, 0, "SMP has been disabled from the loader");
int smp_cpus = 1;	/* how many CPUs are running */
SYSCTL_INT(_kern_smp, OID_AUTO, cpus, CTLFLAG_RD|CTLFLAG_CAPRD, &smp_cpus, 0,
    "Number of CPUs online");

int smp_topology = 0;	/* Which topology we're using. */
SYSCTL_INT(_kern_smp, OID_AUTO, topology, CTLFLAG_RDTUN, &smp_topology, 0,
    "Topology override setting; 0 is default provided by hardware.");

#ifdef SMP
/* Enable forwarding of a signal to a process running on a different CPU. */
static int forward_signal_enabled = 1;
SYSCTL_INT(_kern_smp, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
	   &forward_signal_enabled, 0,
	   "Forwarding of a signal to a process on a different CPU");

/* Variables needed for SMP rendezvous. */
static volatile int smp_rv_ncpus;
static void (*volatile smp_rv_setup_func)(void *arg);
static void (*volatile smp_rv_action_func)(void *arg);
static void (*volatile smp_rv_teardown_func)(void *arg);
static void *volatile smp_rv_func_arg;
static volatile int smp_rv_waiters[4];
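
/*
 * The smp_rv_waiters[] counters track how many CPUs have passed each
 * rendezvous phase: [0] parameter fetch, [1] setup function complete,
 * [2] action function complete, [3] rendezvous fully complete (after
 * which smp_rv_func_arg may safely be reclaimed).  See
 * smp_rendezvous_action() below.
 */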

/*
 * Shared mutex to restrict busywaits between smp_rendezvous() and
 * smp(_targeted)_tlb_shootdown().  A deadlock occurs if both of these
 * functions trigger at once and cause multiple CPUs to busywait with
 * interrupts disabled.
 */
struct mtx smp_ipi_mtx;

/*
 * Let the MD SMP code initialize mp_maxid very early if it can.
 */
static void
mp_setmaxid(void *dummy)
{

	cpu_mp_setmaxid();

	KASSERT(mp_ncpus >= 1, ("%s: CPU count < 1", __func__));
	KASSERT(mp_ncpus > 1 || mp_maxid == 0,
	    ("%s: one CPU but mp_maxid is not zero", __func__));
	KASSERT(mp_maxid >= mp_ncpus - 1,
	    ("%s: counters out of sync: max %d, count %d", __func__,
	    mp_maxid, mp_ncpus));
}
SYSINIT(cpu_mp_setmaxid, SI_SUB_TUNABLES, SI_ORDER_FIRST, mp_setmaxid, NULL);

/*
 * Call the MD SMP initialization code.
 */
static void
mp_start(void *dummy)
{

	mtx_init(&smp_ipi_mtx, "smp rendezvous", NULL, MTX_SPIN);

	/* Probe for MP hardware. */
	if (smp_disabled != 0 || cpu_mp_probe() == 0) {
		mp_ncpus = 1;
		CPU_SETOF(PCPU_GET(cpuid), &all_cpus);
		return;
	}

	cpu_mp_start();
	printf("FreeBSD/SMP: Multiprocessor System Detected: %d CPUs\n",
	    mp_ncpus);
	cpu_mp_announce();
}
SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_THIRD, mp_start, NULL);

void
forward_signal(struct thread *td)
{
	int id;

	/*
	 * signotify() has already set TDF_ASTPENDING and TDF_NEEDSIGCHK on
	 * this thread, so all we need to do is poke it if it is currently
	 * executing so that it executes ast().
	 */
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_RUNNING(td),
	    ("forward_signal: thread is not TDS_RUNNING"));

	CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);

	if (!smp_started || cold || panicstr)
		return;
	if (!forward_signal_enabled)
		return;

	/* No need to IPI ourself. */
	if (td == curthread)
		return;

	id = td->td_oncpu;
	if (id == NOCPU)
		return;
	ipi_cpu(id, IPI_AST);
}

/*
 * When called, the executing CPU will send an IPI to all other CPUs
 * requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to stop.
 *  - Waits for each to stop.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
#if defined(__amd64__) || defined(__i386__)
#define	X86	1
#else
#define	X86	0
#endif
static int
generic_stop_cpus(cpuset_t map, u_int type)
{
#ifdef KTR
	char cpusetbuf[CPUSETBUFSIZ];
#endif
	static volatile u_int stopping_cpu = NOCPU;
	int i;
	volatile cpuset_t *cpus;

	KASSERT(
	    type == IPI_STOP || type == IPI_STOP_HARD
#if X86
	    || type == IPI_SUSPEND
#endif
	    , ("%s: invalid stop type", __func__));

	if (!smp_started)
		return (0);

	CTR2(KTR_SMP, "stop_cpus(%s) with %u type",
	    cpusetobj_strprint(cpusetbuf, &map), type);

#if X86
	/*
	 * When suspending, ensure there are no IPIs in progress.  IPIs
	 * that have been issued, but not yet delivered (e.g. not
	 * pending on a vCPU when running under virtualization) will be
	 * lost, violating FreeBSD's assumption of reliable IPI
	 * delivery.
	 */
	if (type == IPI_SUSPEND)
		mtx_lock_spin(&smp_ipi_mtx);
#endif

#if X86
	if (!nmi_is_broadcast || nmi_kdb_lock == 0) {
#endif
	if (stopping_cpu != PCPU_GET(cpuid))
		while (atomic_cmpset_int(&stopping_cpu, NOCPU,
		    PCPU_GET(cpuid)) == 0)
			while (stopping_cpu != NOCPU)
				cpu_spinwait(); /* spin */

	/* Send the stop IPI to all CPUs in map. */
	ipi_selected(map, type);
#if X86
	}
#endif

#if X86
	if (type == IPI_SUSPEND)
		cpus = &suspended_cpus;
	else
#endif
		cpus = &stopped_cpus;

	i = 0;
	while (!CPU_SUBSET(cpus, &map)) {
		/* spin */
		cpu_spinwait();
		i++;
		if (i == 100000000) {
			printf("timeout stopping cpus\n");
			break;
		}
	}

#if X86
	if (type == IPI_SUSPEND)
		mtx_unlock_spin(&smp_ipi_mtx);
#endif

	stopping_cpu = NOCPU;
	return (1);
}

int
stop_cpus(cpuset_t map)
{

	return (generic_stop_cpus(map, IPI_STOP));
}

int
stop_cpus_hard(cpuset_t map)
{

	return (generic_stop_cpus(map, IPI_STOP_HARD));
}

#if X86
int
suspend_cpus(cpuset_t map)
{

	return (generic_stop_cpus(map, IPI_SUSPEND));
}
#endif
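
/*
 * A sketch of typical use (the caller and the work shown here are
 * hypothetical): pause every other CPU, perform work that must not race
 * with them, then resume:
 *
 *	cpuset_t other_cpus;
 *
 *	other_cpus = all_cpus;
 *	CPU_CLR(PCPU_GET(cpuid), &other_cpus);
 *	if (stop_cpus(other_cpus) != 0) {
 *		... examine or modify global state ...
 *		restart_cpus(stopped_cpus);
 *	}
 */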

/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 *  - Signals all CPUs in map to restart.
 *  - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
static int
generic_restart_cpus(cpuset_t map, u_int type)
{
#ifdef KTR
	char cpusetbuf[CPUSETBUFSIZ];
#endif
	volatile cpuset_t *cpus;

	KASSERT(type == IPI_STOP || type == IPI_STOP_HARD
#if X86
	    || type == IPI_SUSPEND
#endif
	    , ("%s: invalid stop type", __func__));

	if (!smp_started)
		return (0);

	CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));

#if X86
	if (type == IPI_SUSPEND)
		cpus = &suspended_cpus;
	else
#endif
		cpus = &stopped_cpus;

	/* Signal other CPUs to restart. */
	CPU_COPY_STORE_REL(&map, &started_cpus);

#if X86
	if (!nmi_is_broadcast || nmi_kdb_lock == 0) {
#endif
	/* Wait for each to clear its bit. */
	while (CPU_OVERLAP(cpus, &map))
		cpu_spinwait();
#if X86
	}
#endif

	return (1);
}

int
restart_cpus(cpuset_t map)
{

	return (generic_restart_cpus(map, IPI_STOP));
}

#if X86
int
resume_cpus(cpuset_t map)
{

	return (generic_restart_cpus(map, IPI_SUSPEND));
}
#endif
#undef X86

/*
 * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
 * (if specified), rendezvous, execute the action function (if specified),
 * rendezvous again, execute the teardown function (if specified), and then
 * resume.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */
void
smp_rendezvous_action(void)
{
	struct thread *td;
	void *local_func_arg;
	void (*local_setup_func)(void*);
	void (*local_action_func)(void*);
	void (*local_teardown_func)(void*);
#ifdef INVARIANTS
	int owepreempt;
#endif

	/* Ensure we have up-to-date values. */
	atomic_add_acq_int(&smp_rv_waiters[0], 1);
	while (smp_rv_waiters[0] < smp_rv_ncpus)
		cpu_spinwait();

	/* Fetch rendezvous parameters after acquire barrier. */
	local_func_arg = smp_rv_func_arg;
	local_setup_func = smp_rv_setup_func;
	local_action_func = smp_rv_action_func;
	local_teardown_func = smp_rv_teardown_func;

	/*
	 * Use a nested critical section to prevent any preemptions
	 * from occurring during a rendezvous action routine.
	 * Specifically, if a rendezvous handler is invoked via an IPI
	 * and the interrupted thread was in the critical_exit()
	 * function after setting td_critnest to 0 but before
	 * performing a deferred preemption, this routine can be
	 * invoked with td_critnest set to 0 and td_owepreempt true.
	 * In that case, a critical_exit() during the rendezvous
	 * action would trigger a preemption which is not permitted in
	 * a rendezvous action.  To fix this, wrap all of the
	 * rendezvous action handlers in a critical section.  We
	 * cannot use a regular critical section however as having
	 * critical_exit() preempt from this routine would also be
	 * problematic (the preemption must not occur before the IPI
	 * has been acknowledged via an EOI).  Instead, we
	 * intentionally ignore td_owepreempt when leaving the
	 * critical section.  This should be harmless because we do
	 * not permit rendezvous action routines to schedule threads,
	 * and thus td_owepreempt should never transition from 0 to 1
	 * during this routine.
	 */
	td = curthread;
	td->td_critnest++;
#ifdef INVARIANTS
	owepreempt = td->td_owepreempt;
#endif

	/*
	 * If requested, run a setup function before the main action
	 * function.  Ensure all CPUs have completed the setup
	 * function before moving on to the action function.
	 */
	if (local_setup_func != smp_no_rendezvous_barrier) {
		if (local_setup_func != NULL)
			local_setup_func(local_func_arg);
		atomic_add_int(&smp_rv_waiters[1], 1);
		while (smp_rv_waiters[1] < smp_rv_ncpus)
			cpu_spinwait();
	}

	if (local_action_func != NULL)
		local_action_func(local_func_arg);

	if (local_teardown_func != smp_no_rendezvous_barrier) {
		/*
		 * Signal that the main action has been completed.  If a
		 * full exit rendezvous is requested, then all CPUs will
		 * wait here until all CPUs have finished the main action.
		 */
		atomic_add_int(&smp_rv_waiters[2], 1);
		while (smp_rv_waiters[2] < smp_rv_ncpus)
			cpu_spinwait();

		if (local_teardown_func != NULL)
			local_teardown_func(local_func_arg);
	}

	/*
	 * Signal that the rendezvous is fully completed by this CPU.
	 * This means that no member of the smp_rv_* pseudo-structure
	 * will be accessed by this target CPU after this point; in
	 * particular, the memory pointed to by smp_rv_func_arg.
	 *
	 * The release semantic ensures that all accesses performed by
	 * the current CPU are visible when smp_rendezvous_cpus()
	 * returns, by synchronizing with the
	 * atomic_load_acq_int(&smp_rv_waiters[3]).
	 */
	atomic_add_rel_int(&smp_rv_waiters[3], 1);

	td->td_critnest--;
	KASSERT(owepreempt == td->td_owepreempt,
	    ("rendezvous action changed td_owepreempt"));
}

void
smp_rendezvous_cpus(cpuset_t map,
	void (* setup_func)(void *),
	void (* action_func)(void *),
	void (* teardown_func)(void *),
	void *arg)
{
	int curcpumap, i, ncpus = 0;

	/* See the comments in the !SMP case. */
	if (!smp_started) {
		spinlock_enter();
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		spinlock_exit();
		return;
	}

	CPU_FOREACH(i) {
		if (CPU_ISSET(i, &map))
			ncpus++;
	}
	if (ncpus == 0)
		panic("ncpus is 0 with non-zero map");

	mtx_lock_spin(&smp_ipi_mtx);

	/* Pass rendezvous parameters via global variables. */
	smp_rv_ncpus = ncpus;
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[1] = 0;
	smp_rv_waiters[2] = 0;
	smp_rv_waiters[3] = 0;
	atomic_store_rel_int(&smp_rv_waiters[0], 0);

	/*
	 * Signal other processors, which will enter the IPI with
	 * interrupts off.
	 */
	curcpumap = CPU_ISSET(curcpu, &map);
	CPU_CLR(curcpu, &map);
	ipi_selected(map, IPI_RENDEZVOUS);

	/* Check if the current CPU is in the map. */
	if (curcpumap != 0)
		smp_rendezvous_action();

	/*
	 * Ensure that the master CPU waits for all the other
	 * CPUs to finish the rendezvous, so that the smp_rv_*
	 * pseudo-structure and the arg are guaranteed to not
	 * be in use.
	 *
	 * Load acquire synchronizes with the release add in
	 * smp_rendezvous_action(), which ensures that our caller sees
	 * all memory actions done by the called functions on other
	 * CPUs.
	 */
	while (atomic_load_acq_int(&smp_rv_waiters[3]) < ncpus)
		cpu_spinwait();

	mtx_unlock_spin(&smp_ipi_mtx);
}

void
smp_rendezvous(void (* setup_func)(void *),
	       void (* action_func)(void *),
	       void (* teardown_func)(void *),
	       void *arg)
{

	smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func,
	    arg);
}
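
/*
 * A minimal usage sketch (do_flush() is hypothetical): run a handler on
 * every CPU and return only once all CPUs have executed it.  Passing
 * smp_no_rendezvous_barrier as the setup/teardown function skips the
 * corresponding barriers in smp_rendezvous_action():
 *
 *	static void
 *	do_flush(void *arg __unused)
 *	{
 *		... per-CPU work, runs with interrupts disabled ...
 *	}
 *
 *	smp_rendezvous(smp_no_rendezvous_barrier, do_flush,
 *	    smp_no_rendezvous_barrier, NULL);
 */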

static struct cpu_group group[MAXCPU * MAX_CACHE_LEVELS + 1];

struct cpu_group *
smp_topo(void)
{
	char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
	struct cpu_group *top;

	/*
	 * Check for a fake topology request for debugging purposes.
	 */
	switch (smp_topology) {
	case 1:
		/* Dual core with no sharing. */
		top = smp_topo_1level(CG_SHARE_NONE, 2, 0);
		break;
	case 2:
		/* No topology, all cpus are equal. */
		top = smp_topo_none();
		break;
	case 3:
		/* Dual core with shared L2. */
		top = smp_topo_1level(CG_SHARE_L2, 2, 0);
		break;
	case 4:
		/* Quad core, shared L3 per package, private L2. */
		top = smp_topo_1level(CG_SHARE_L3, 4, 0);
		break;
	case 5:
		/* Quad core, two dual-core parts per package sharing L2. */
		top = smp_topo_2level(CG_SHARE_NONE, 2, CG_SHARE_L2, 2, 0);
		break;
	case 6:
		/* Single-core 2xHTT. */
		top = smp_topo_1level(CG_SHARE_L1, 2, CG_FLAG_HTT);
		break;
	case 7:
		/* Quad core with a shared L3, 8 threads sharing L2. */
		top = smp_topo_2level(CG_SHARE_L3, 4, CG_SHARE_L2, 8,
		    CG_FLAG_SMT);
		break;
	default:
		/* Default, ask the system what it wants. */
		top = cpu_topo();
		break;
	}
	/*
	 * Verify the returned topology.
	 */
	if (top->cg_count != mp_ncpus)
		panic("Built bad topology at %p.  CPU count %d != %d",
		    top, top->cg_count, mp_ncpus);
	if (CPU_CMP(&top->cg_mask, &all_cpus))
		panic("Built bad topology at %p.  CPU mask (%s) != (%s)",
		    top, cpusetobj_strprint(cpusetbuf, &top->cg_mask),
		    cpusetobj_strprint(cpusetbuf2, &all_cpus));
	return (top);
}

struct cpu_group *
smp_topo_alloc(u_int count)
{
	static u_int index;
	u_int curr;

	curr = index;
	index += count;
	return (&group[curr]);
}
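
/*
 * Note that smp_topo_alloc() is a simple bump allocator over the static
 * group[] array above: allocations are never freed, and callers are
 * trusted not to request more than MAXCPU * MAX_CACHE_LEVELS + 1 nodes
 * in total.
 */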

struct cpu_group *
smp_topo_none(void)
{
	struct cpu_group *top;

	top = &group[0];
	top->cg_parent = NULL;
	top->cg_child = NULL;
	top->cg_mask = all_cpus;
	top->cg_count = mp_ncpus;
	top->cg_children = 0;
	top->cg_level = CG_SHARE_NONE;
	top->cg_flags = 0;

	return (top);
}

static int
smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share,
    int count, int flags, int start)
{
	char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
	cpuset_t mask;
	int i;

	CPU_ZERO(&mask);
	for (i = 0; i < count; i++, start++)
		CPU_SET(start, &mask);
	child->cg_parent = parent;
	child->cg_child = NULL;
	child->cg_children = 0;
	child->cg_level = share;
	child->cg_count = count;
	child->cg_flags = flags;
	child->cg_mask = mask;
	parent->cg_children++;
	for (; parent != NULL; parent = parent->cg_parent) {
		if (CPU_OVERLAP(&parent->cg_mask, &child->cg_mask))
			panic("Duplicate children in %p.  mask (%s) child (%s)",
			    parent,
			    cpusetobj_strprint(cpusetbuf, &parent->cg_mask),
			    cpusetobj_strprint(cpusetbuf2, &child->cg_mask));
		CPU_OR(&parent->cg_mask, &child->cg_mask);
		parent->cg_count += child->cg_count;
	}

	return (start);
}

struct cpu_group *
smp_topo_1level(int share, int count, int flags)
{
	struct cpu_group *child;
	struct cpu_group *top;
	int packages;
	int cpu;
	int i;

	cpu = 0;
	top = &group[0];
	packages = mp_ncpus / count;
	top->cg_child = child = &group[1];
	top->cg_level = CG_SHARE_NONE;
	for (i = 0; i < packages; i++, child++)
		cpu = smp_topo_addleaf(top, child, share, count, flags, cpu);
	return (top);
}

struct cpu_group *
smp_topo_2level(int l2share, int l2count, int l1share, int l1count,
    int l1flags)
{
	struct cpu_group *top;
	struct cpu_group *l1g;
	struct cpu_group *l2g;
	int cpu;
	int i;
	int j;

	cpu = 0;
	top = &group[0];
	l2g = &group[1];
	top->cg_child = l2g;
	top->cg_level = CG_SHARE_NONE;
	top->cg_children = mp_ncpus / (l2count * l1count);
	l1g = l2g + top->cg_children;
	for (i = 0; i < top->cg_children; i++, l2g++) {
		l2g->cg_parent = top;
		l2g->cg_child = l1g;
		l2g->cg_level = l2share;
		for (j = 0; j < l2count; j++, l1g++)
			cpu = smp_topo_addleaf(l2g, l1g, l1share, l1count,
			    l1flags, cpu);
	}
	return (top);
}
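
/*
 * As an illustration, on an 8-CPU system the smp_topology=5 case above
 * calls smp_topo_2level(CG_SHARE_NONE, 2, CG_SHARE_L2, 2, 0), which
 * builds:
 *
 *	root (8 CPUs, CG_SHARE_NONE)
 *	    2 packages (4 CPUs each, CG_SHARE_NONE)
 *		2 groups per package (2 CPUs each, CG_SHARE_L2)
 */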

struct cpu_group *
smp_topo_find(struct cpu_group *top, int cpu)
{
	struct cpu_group *cg;
	cpuset_t mask;
	int children;
	int i;

	CPU_SETOF(cpu, &mask);
	cg = top;
	for (;;) {
		if (!CPU_OVERLAP(&cg->cg_mask, &mask))
			return (NULL);
		if (cg->cg_children == 0)
			return (cg);
		children = cg->cg_children;
		for (i = 0, cg = cg->cg_child; i < children; cg++, i++)
			if (CPU_OVERLAP(&cg->cg_mask, &mask))
				break;
	}
	return (NULL);
}
#else /* !SMP */

void
smp_rendezvous_cpus(cpuset_t map,
	void (*setup_func)(void *),
	void (*action_func)(void *),
	void (*teardown_func)(void *),
	void *arg)
{
	/*
	 * In the !SMP case we just need to ensure the same initial conditions
	 * as the SMP case.
	 */
	spinlock_enter();
	if (setup_func != NULL)
		setup_func(arg);
	if (action_func != NULL)
		action_func(arg);
	if (teardown_func != NULL)
		teardown_func(arg);
	spinlock_exit();
}

void
smp_rendezvous(void (*setup_func)(void *),
	       void (*action_func)(void *),
	       void (*teardown_func)(void *),
	       void *arg)
{

	smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func,
	    arg);
}

/*
 * Provide dummy SMP support for UP kernels.  Modules that need to use SMP
 * APIs will still work using this dummy support.
 */
static void
mp_setvariables_for_up(void *dummy)
{
	mp_ncpus = 1;
	mp_maxid = PCPU_GET(cpuid);
	CPU_SETOF(mp_maxid, &all_cpus);
	KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero"));
}
SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,
    mp_setvariables_for_up, NULL);
#endif /* SMP */

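/*
 * Dummy barrier function.  Rendezvous callers pass this in place of a
 * setup or teardown function when the corresponding barrier in
 * smp_rendezvous_action() should be skipped entirely.
 */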
void
smp_no_rendezvous_barrier(void *dummy)
{
#ifdef SMP
	KASSERT(!smp_started, ("smp_no_rendezvous called and smp is started"));
#endif
}

/*
 * Wait for the specified idle threads to switch once.  This ensures that
 * even preempted threads have cycled through the switch function once,
 * exiting their codepaths.  This allows us to change global pointers
 * with no other synchronization.
 */
int
quiesce_cpus(cpuset_t map, const char *wmesg, int prio)
{
	struct pcpu *pcpu;
	u_int gen[MAXCPU];
	int error;
	int cpu;

	error = 0;
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
			continue;
		pcpu = pcpu_find(cpu);
		gen[cpu] = pcpu->pc_idlethread->td_generation;
	}
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
			continue;
		pcpu = pcpu_find(cpu);
		thread_lock(curthread);
		sched_bind(curthread, cpu);
		thread_unlock(curthread);
		while (gen[cpu] == pcpu->pc_idlethread->td_generation) {
			error = tsleep(quiesce_cpus, prio, wmesg, 1);
			if (error != EWOULDBLOCK)
				goto out;
			error = 0;
		}
	}
out:
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	return (error);
}

int
quiesce_all_cpus(const char *wmesg, int prio)
{

	return (quiesce_cpus(all_cpus, wmesg, prio));
}
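
/*
 * A sketch of the intended pattern (my_global_ptr, new_ptr, M_MYMODULE
 * and "qsce" are hypothetical): swap a global pointer and wait until no
 * CPU can still be dereferencing the old value before reclaiming it:
 *
 *	old = my_global_ptr;
 *	my_global_ptr = new_ptr;
 *	quiesce_all_cpus("qsce", 0);
 *	free(old, M_MYMODULE);
 */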

/* Extra care is taken with this sysctl because the data type is volatile. */
static int
sysctl_kern_smp_active(SYSCTL_HANDLER_ARGS)
{
	int error, active;

	active = smp_started;
	error = SYSCTL_OUT(req, &active, sizeof(active));
	return (error);
}

#ifdef SMP
void
topo_init_node(struct topo_node *node)
{

	bzero(node, sizeof(*node));
	TAILQ_INIT(&node->children);
}

void
topo_init_root(struct topo_node *root)
{

	topo_init_node(root);
	root->type = TOPO_TYPE_SYSTEM;
}

/*
 * Add a child node with the given ID under the given parent.  If a child
 * with the same ID, type, and subtype already exists, return that
 * existing node instead of creating a new one.
 */
struct topo_node *
topo_add_node_by_hwid(struct topo_node *parent, int hwid,
    topo_node_type type, uintptr_t subtype)
{
	struct topo_node *node;

	TAILQ_FOREACH_REVERSE(node, &parent->children,
	    topo_children, siblings) {
		if (node->hwid == hwid &&
		    node->type == type && node->subtype == subtype) {
			return (node);
		}
	}

	node = malloc(sizeof(*node), M_TOPO, M_WAITOK);
	topo_init_node(node);
	node->parent = parent;
	node->hwid = hwid;
	node->type = type;
	node->subtype = subtype;
	TAILQ_INSERT_TAIL(&parent->children, node, siblings);
	parent->nchildren++;

	return (node);
}
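
/*
 * A sketch of how MD enumeration code might build the tree with this
 * function (the hwid variables here are hypothetical):
 *
 *	pkg = topo_add_node_by_hwid(root, pkg_hwid, TOPO_TYPE_PKG, 0);
 *	core = topo_add_node_by_hwid(pkg, core_hwid, TOPO_TYPE_CORE, 0);
 *	pu = topo_add_node_by_hwid(core, pu_hwid, TOPO_TYPE_PU, 0);
 */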

/*
 * Find a child node with the given ID under the given parent.
 */
struct topo_node *
topo_find_node_by_hwid(struct topo_node *parent, int hwid,
    topo_node_type type, uintptr_t subtype)
{
	struct topo_node *node;

	TAILQ_FOREACH(node, &parent->children, siblings) {
		if (node->hwid == hwid &&
		    node->type == type && node->subtype == subtype) {
			return (node);
		}
	}

	return (NULL);
}

/*
 * Given a node, change the order of its parent's child nodes such
 * that the node becomes the first child while preserving the cyclic
 * order of the children.  In other words, the given node is promoted
 * by rotation.  For example, promoting 'c' in the child list
 * (a, b, c, d) reorders it to (c, d, a, b).
 */
void
topo_promote_child(struct topo_node *child)
{
	struct topo_node *next;
	struct topo_node *node;
	struct topo_node *parent;

	parent = child->parent;
	next = TAILQ_NEXT(child, siblings);
	TAILQ_REMOVE(&parent->children, child, siblings);
	TAILQ_INSERT_HEAD(&parent->children, child, siblings);

	while (next != NULL) {
		node = next;
		next = TAILQ_NEXT(node, siblings);
		TAILQ_REMOVE(&parent->children, node, siblings);
		TAILQ_INSERT_AFTER(&parent->children, child, node, siblings);
		child = node;
	}
}

/*
 * Iterate to the next node in the depth-first search (traversal) of
 * the topology tree.
 */
struct topo_node *
topo_next_node(struct topo_node *top, struct topo_node *node)
{
	struct topo_node *next;

	if ((next = TAILQ_FIRST(&node->children)) != NULL)
		return (next);

	if ((next = TAILQ_NEXT(node, siblings)) != NULL)
		return (next);

	while ((node = node->parent) != top)
		if ((next = TAILQ_NEXT(node, siblings)) != NULL)
			return (next);

	return (NULL);
}
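
/*
 * A typical full traversal, as used by topo_analyze() below, looks
 * like:
 *
 *	node = topo_root;
 *	while (node != NULL) {
 *		... visit node ...
 *		node = topo_next_node(topo_root, node);
 *	}
 */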

/*
 * Iterate to the next node in the depth-first search of the topology tree,
 * but without descending below the current node.
 */
struct topo_node *
topo_next_nonchild_node(struct topo_node *top, struct topo_node *node)
{
	struct topo_node *next;

	if ((next = TAILQ_NEXT(node, siblings)) != NULL)
		return (next);

	while ((node = node->parent) != top)
		if ((next = TAILQ_NEXT(node, siblings)) != NULL)
			return (next);

	return (NULL);
}

/*
 * Assign the given ID to the given topology node that represents a logical
 * processor.
 */
void
topo_set_pu_id(struct topo_node *node, cpuid_t id)
{

	KASSERT(node->type == TOPO_TYPE_PU,
	    ("topo_set_pu_id: wrong node type: %u", node->type));
	KASSERT(CPU_EMPTY(&node->cpuset) && node->cpu_count == 0,
	    ("topo_set_pu_id: cpuset is not empty"));
	node->id = id;
	CPU_SET(id, &node->cpuset);
	node->cpu_count = 1;
	node->subtype = 1;

	while ((node = node->parent) != NULL) {
		KASSERT(!CPU_ISSET(id, &node->cpuset),
		    ("logical ID %u is already set in node %p", id, node));
		CPU_SET(id, &node->cpuset);
		node->cpu_count++;
	}
}

/*
 * Check if the topology is uniform, that is, each package has the same number
 * of cores in it and each core has the same number of threads (logical
 * processors) in it.  If so, calculate the number of packages, the number of
 * cores per package and the number of logical processors per core.
 * The 'all' parameter tells whether to include administratively disabled
 * logical processors in the analysis.
 */
int
topo_analyze(struct topo_node *topo_root, int all,
    int *pkg_count, int *cores_per_pkg, int *thrs_per_core)
{
	struct topo_node *pkg_node;
	struct topo_node *core_node;
	struct topo_node *pu_node;
	int thrs_per_pkg;
	int cpp_counter;
	int tpc_counter;
	int tpp_counter;

	*pkg_count = 0;
	*cores_per_pkg = -1;
	*thrs_per_core = -1;
	thrs_per_pkg = -1;
	pkg_node = topo_root;
	while (pkg_node != NULL) {
		if (pkg_node->type != TOPO_TYPE_PKG) {
			pkg_node = topo_next_node(topo_root, pkg_node);
			continue;
		}
		if (!all && CPU_EMPTY(&pkg_node->cpuset)) {
			pkg_node = topo_next_nonchild_node(topo_root, pkg_node);
			continue;
		}

		(*pkg_count)++;

		cpp_counter = 0;
		tpp_counter = 0;
		core_node = pkg_node;
		while (core_node != NULL) {
			if (core_node->type == TOPO_TYPE_CORE) {
				if (!all && CPU_EMPTY(&core_node->cpuset)) {
					core_node =
					    topo_next_nonchild_node(pkg_node,
					    core_node);
					continue;
				}

				cpp_counter++;

				tpc_counter = 0;
				pu_node = core_node;
				while (pu_node != NULL) {
					if (pu_node->type == TOPO_TYPE_PU &&
					    (all || !CPU_EMPTY(&pu_node->cpuset)))
						tpc_counter++;
					pu_node = topo_next_node(core_node,
					    pu_node);
				}

				if (*thrs_per_core == -1)
					*thrs_per_core = tpc_counter;
				else if (*thrs_per_core != tpc_counter)
					return (0);

				core_node = topo_next_nonchild_node(pkg_node,
				    core_node);
			} else {
				/* PU node directly under PKG. */
				if (core_node->type == TOPO_TYPE_PU &&
				    (all || !CPU_EMPTY(&core_node->cpuset)))
					tpp_counter++;
				core_node = topo_next_node(pkg_node,
				    core_node);
			}
		}

		if (*cores_per_pkg == -1)
			*cores_per_pkg = cpp_counter;
		else if (*cores_per_pkg != cpp_counter)
			return (0);
		if (thrs_per_pkg == -1)
			thrs_per_pkg = tpp_counter;
		else if (thrs_per_pkg != tpp_counter)
			return (0);

		pkg_node = topo_next_nonchild_node(topo_root, pkg_node);
	}

	KASSERT(*pkg_count > 0,
	    ("bug in topology or analysis"));
	if (*cores_per_pkg == 0) {
		KASSERT(*thrs_per_core == -1 && thrs_per_pkg > 0,
		    ("bug in topology or analysis"));
		*thrs_per_core = thrs_per_pkg;
	}

	return (1);
}
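
/*
 * For example, a uniform system with 2 packages, 4 cores per package and
 * 2 SMT threads per core makes topo_analyze() return 1 with
 * *pkg_count = 2, *cores_per_pkg = 4 and *thrs_per_core = 2; any
 * asymmetry between packages or cores makes it return 0 instead.
 */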
#endif /* SMP */