xref: /freebsd/sys/kern/subr_smp.c (revision f0adf7f5cdd241db2f2c817683191a6ef64a4e95)
/*
 * Copyright (c) 2001
 *	John Baldwin <jhb@FreeBSD.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BALDWIN AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL JOHN BALDWIN OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This module holds the global variables and machine independent functions
 * used for the kernel SMP support.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/smp.h>

#ifdef SMP
volatile cpumask_t stopped_cpus;
volatile cpumask_t started_cpus;

void (*cpustop_restartfunc)(void);
#endif

int mp_ncpus;
/* export this for libkvm consumers. */
int mp_maxcpus = MAXCPU;

struct cpu_top *smp_topology;
volatile int smp_started;
cpumask_t all_cpus;
u_int mp_maxid;

SYSCTL_NODE(_kern, OID_AUTO, smp, CTLFLAG_RD, NULL, "Kernel SMP");

SYSCTL_INT(_kern_smp, OID_AUTO, maxcpus, CTLFLAG_RD, &mp_maxcpus, 0,
    "Max number of CPUs that the system was compiled for.");

int smp_active = 0;	/* are the APs allowed to run? */
SYSCTL_INT(_kern_smp, OID_AUTO, active, CTLFLAG_RW, &smp_active, 0,
    "Number of Auxiliary Processors (APs) that were successfully started");

int smp_disabled = 0;	/* has smp been disabled? */
SYSCTL_INT(_kern_smp, OID_AUTO, disabled, CTLFLAG_RDTUN, &smp_disabled, 0,
    "SMP has been disabled from the loader");
TUNABLE_INT("kern.smp.disabled", &smp_disabled);

int smp_cpus = 1;	/* how many CPUs are running */
SYSCTL_INT(_kern_smp, OID_AUTO, cpus, CTLFLAG_RD, &smp_cpus, 0,
    "Number of CPUs online");

#ifdef SMP
/* Enable forwarding of a signal to a process running on a different CPU */
static int forward_signal_enabled = 1;
SYSCTL_INT(_kern_smp, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
	   &forward_signal_enabled, 0,
	   "Forwarding of a signal to a process on a different CPU");

/* Enable forwarding of roundrobin to all other cpus */
static int forward_roundrobin_enabled = 1;
SYSCTL_INT(_kern_smp, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
	   &forward_roundrobin_enabled, 0,
	   "Forwarding of roundrobin to all other CPUs");

/* Variables needed for SMP rendezvous. */
static void (*smp_rv_setup_func)(void *arg);
static void (*smp_rv_action_func)(void *arg);
static void (*smp_rv_teardown_func)(void *arg);
static void *smp_rv_func_arg;
static volatile int smp_rv_waiters[2];
static struct mtx smp_rv_mtx;

/*
 * Let the MD SMP code initialize mp_maxid very early if it can.
 */
static void
mp_setmaxid(void *dummy)
{
	cpu_mp_setmaxid();
}
SYSINIT(cpu_mp_setmaxid, SI_SUB_TUNABLES, SI_ORDER_FIRST, mp_setmaxid, NULL)

/*
 * Call the MD SMP initialization code.
 */
static void
mp_start(void *dummy)
{

	/* Probe for MP hardware. */
	if (smp_disabled != 0 || cpu_mp_probe() == 0) {
		mp_ncpus = 1;
		all_cpus = PCPU_GET(cpumask);
		return;
	}

	mtx_init(&smp_rv_mtx, "smp rendezvous", NULL, MTX_SPIN);
	cpu_mp_start();
	printf("FreeBSD/SMP: Multiprocessor System Detected: %d CPUs\n",
	    mp_ncpus);
	cpu_mp_announce();
}
SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_SECOND, mp_start, NULL)

void
forward_signal(struct thread *td)
{
	int id;

	/*
	 * signotify() has already set TDF_ASTPENDING and TDF_NEEDSIGCHECK on
	 * this thread, so all we need to do is poke it if it is currently
	 * executing so that it executes ast().
	 */
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(TD_IS_RUNNING(td),
	    ("forward_signal: thread is not TDS_RUNNING"));

	CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);

	if (!smp_started || cold || panicstr)
		return;
	if (!forward_signal_enabled)
		return;

	/* No need to IPI ourself. */
	if (td == curthread)
		return;

	id = td->td_oncpu;
	if (id == NOCPU)
		return;
	ipi_selected(1 << id, IPI_AST);
}
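
/*
 * Example (illustrative sketch, not taken from any in-tree caller): a
 * hypothetical caller that has just posted a signal to td would invoke
 * forward_signal() with sched_lock held, e.g.:
 *
 *	mtx_lock_spin(&sched_lock);
 *	if (TD_IS_RUNNING(td))
 *		forward_signal(td);
 *	mtx_unlock_spin(&sched_lock);
 */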

void
forward_roundrobin(void)
{
	struct pcpu *pc;
	struct thread *td;
	cpumask_t id, map;

	mtx_assert(&sched_lock, MA_OWNED);

	CTR0(KTR_SMP, "forward_roundrobin()");

	if (!smp_started || cold || panicstr)
		return;
	if (!forward_roundrobin_enabled)
		return;
	map = 0;
	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		td = pc->pc_curthread;
		id = pc->pc_cpumask;
		if (id != PCPU_GET(cpumask) && (id & stopped_cpus) == 0 &&
		    td != pc->pc_idlethread) {
			td->td_flags |= TDF_NEEDRESCHED;
			map |= id;
		}
	}
	ipi_selected(map, IPI_AST);
}
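
/*
 * Example (illustrative sketch): a periodic round-robin handler in the
 * scheduler could use this to make remote CPUs notice TDF_NEEDRESCHED
 * promptly; any such caller must hold sched_lock:
 *
 *	mtx_lock_spin(&sched_lock);
 *	forward_roundrobin();
 *	mtx_unlock_spin(&sched_lock);
 */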

/*
 * When called, the executing CPU will send an IPI to all other CPUs
 * requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to stop.
 *  - Waits for each to stop.
 *
 * Returns:
 *  -1: error
 *   0: NA (SMP not started)
 *   1: ok
 *
 * XXX FIXME: this is not MP-safe: it needs a lock to prevent multiple CPUs
 *            from executing at the same time.
 */
int
stop_cpus(cpumask_t map)
{
	int i;

	if (!smp_started)
		return 0;

	CTR1(KTR_SMP, "stop_cpus(%x)", map);

	/* send the stop IPI to all CPUs in map */
	ipi_selected(map, IPI_STOP);

	i = 0;
	while ((atomic_load_acq_int(&stopped_cpus) & map) != map) {
		/* spin */
		i++;
#ifdef DIAGNOSTIC
		if (i == 100000) {
			printf("timeout stopping cpus\n");
			break;
		}
#endif
	}

	return 1;
}

/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 *  - Signals all CPUs in map to restart.
 *  - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA (SMP not started)
 *   1: ok
 */
int
restart_cpus(cpumask_t map)
{

	if (!smp_started)
		return 0;

	CTR1(KTR_SMP, "restart_cpus(%x)", map);

	/* signal other cpus to restart */
	atomic_store_rel_int(&started_cpus, map);

	/* wait for each to clear its bit */
	while ((atomic_load_acq_int(&stopped_cpus) & map) != 0)
		;	/* nothing */

	return 1;
}
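
/*
 * Example (illustrative sketch, not from the original file): a caller that
 * needs every other CPU quiesced while it updates some global state might
 * pair the two calls like this, computing the "everyone but me" mask from
 * all_cpus:
 *
 *	cpumask_t other_cpus;
 *
 *	other_cpus = all_cpus & ~PCPU_GET(cpumask);
 *	if (stop_cpus(other_cpus)) {
 *		... update the global state while the other CPUs spin ...
 *		restart_cpus(stopped_cpus);
 *	}
 */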

/*
 * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
 * (if specified), rendezvous, execute the action function (if specified),
 * rendezvous again, execute the teardown function (if specified), and then
 * resume.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */
void
smp_rendezvous_action(void)
{

	/* setup function */
	if (smp_rv_setup_func != NULL)
		smp_rv_setup_func(smp_rv_func_arg);
	/* spin on entry rendezvous */
	atomic_add_int(&smp_rv_waiters[0], 1);
	while (atomic_load_acq_int(&smp_rv_waiters[0]) < mp_ncpus)
		;	/* nothing */
	/* action function */
	if (smp_rv_action_func != NULL)
		smp_rv_action_func(smp_rv_func_arg);
	/* spin on exit rendezvous */
	atomic_add_int(&smp_rv_waiters[1], 1);
	while (atomic_load_acq_int(&smp_rv_waiters[1]) < mp_ncpus)
		;	/* nothing */
	/* teardown function */
	if (smp_rv_teardown_func != NULL)
		smp_rv_teardown_func(smp_rv_func_arg);
}

void
smp_rendezvous(void (* setup_func)(void *),
	       void (* action_func)(void *),
	       void (* teardown_func)(void *),
	       void *arg)
{

	if (!smp_started) {
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		return;
	}

	/* obtain rendezvous lock */
	mtx_lock_spin(&smp_rv_mtx);

	/* set static function pointers */
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[0] = 0;
	smp_rv_waiters[1] = 0;

	/*
	 * Signal the other processors, which will enter the IPI handler with
	 * interrupts disabled.
	 */
	ipi_all_but_self(IPI_RENDEZVOUS);

	/* call executor function */
	smp_rendezvous_action();

	/* release lock */
	mtx_unlock_spin(&smp_rv_mtx);
}
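
/*
 * Example (illustrative sketch, not from the original file): flushing a
 * hypothetical per-CPU cache on every processor at once.  The action
 * function runs on all CPUs in parallel, so it must only touch per-CPU or
 * otherwise synchronized data:
 *
 *	static void
 *	flush_local_cache(void *arg __unused)
 *	{
 *		... operate on this CPU's private state only ...
 *	}
 *
 *	smp_rendezvous(NULL, flush_local_cache, NULL, NULL);
 */
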
#else /* !SMP */

/*
 * Provide dummy SMP support for UP kernels.  Modules that need to use SMP
 * APIs will still work using this dummy support.
 */
static void
mp_setvariables_for_up(void *dummy)
{
	mp_ncpus = 1;
	mp_maxid = PCPU_GET(cpuid);
	all_cpus = PCPU_GET(cpumask);
	KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero"));
}
SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,
    mp_setvariables_for_up, NULL)

void
smp_rendezvous(void (* setup_func)(void *),
	       void (* action_func)(void *),
	       void (* teardown_func)(void *),
	       void *arg)
{

	if (setup_func != NULL)
		setup_func(arg);
	if (action_func != NULL)
		action_func(arg);
	if (teardown_func != NULL)
		teardown_func(arg);
}
#endif /* SMP */