/*-
 * Copyright (c) 2001
 *	John Baldwin <jhb@FreeBSD.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BALDWIN AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL JOHN BALDWIN OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This module holds the global variables and machine independent functions
 * used for the kernel SMP support.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>
#include <machine/smp.h>

#include "opt_sched.h"

#ifdef SMP
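/*
 * Note on the masks below: stopped_cpus and started_cpus form the handshake
 * used by stop_cpus()/restart_cpus() together with the MD IPI_STOP handler;
 * idle_cpus_mask, hlt_cpus_mask and logical_cpus_mask are maintained
 * elsewhere (by the scheduler and the MD code) to track idle, halted and
 * hyper-threading logical CPUs respectively.
 */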
volatile cpumask_t stopped_cpus;
volatile cpumask_t started_cpus;
cpumask_t idle_cpus_mask;
cpumask_t hlt_cpus_mask;
cpumask_t logical_cpus_mask;

void (*cpustop_restartfunc)(void);
#endif
/* This is used in modules that need to work in both SMP and UP. */
cpumask_t all_cpus;

int mp_ncpus;
/* export this for libkvm consumers. */
int mp_maxcpus = MAXCPU;

struct cpu_top *smp_topology;
volatile int smp_started;
u_int mp_maxid;

SYSCTL_NODE(_kern, OID_AUTO, smp, CTLFLAG_RD, NULL, "Kernel SMP");

SYSCTL_INT(_kern_smp, OID_AUTO, maxcpus, CTLFLAG_RD, &mp_maxcpus, 0,
    "Max number of CPUs that the system was compiled for.");

int smp_active = 0;	/* are the APs allowed to run? */
SYSCTL_INT(_kern_smp, OID_AUTO, active, CTLFLAG_RW, &smp_active, 0,
    "Number of Auxiliary Processors (APs) that were successfully started");

int smp_disabled = 0;	/* has smp been disabled? */
SYSCTL_INT(_kern_smp, OID_AUTO, disabled, CTLFLAG_RDTUN, &smp_disabled, 0,
    "SMP has been disabled from the loader");
TUNABLE_INT("kern.smp.disabled", &smp_disabled);

int smp_cpus = 1;	/* how many CPUs are running */
SYSCTL_INT(_kern_smp, OID_AUTO, cpus, CTLFLAG_RD, &smp_cpus, 0,
    "Number of CPUs online");

#ifdef SMP
/* Enable forwarding of a signal to a process running on a different CPU */
static int forward_signal_enabled = 1;
SYSCTL_INT(_kern_smp, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
	   &forward_signal_enabled, 0,
	   "Forwarding of a signal to a process on a different CPU");

/* Enable forwarding of roundrobin to all other cpus */
static int forward_roundrobin_enabled = 1;
SYSCTL_INT(_kern_smp, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
	   &forward_roundrobin_enabled, 0,
	   "Forwarding of roundrobin to all other CPUs");

/* Variables needed for SMP rendezvous. */
static void (*volatile smp_rv_setup_func)(void *arg);
static void (*volatile smp_rv_action_func)(void *arg);
static void (*volatile smp_rv_teardown_func)(void *arg);
static void * volatile smp_rv_func_arg;
static volatile int smp_rv_waiters[3];
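/*
 * The three counters above implement the rendezvous phases in
 * smp_rendezvous_action(): smp_rv_waiters[0] counts CPUs that have entered
 * the handler (so every CPU has seen the function pointers), [1] counts
 * CPUs that have finished the setup function, and [2] counts CPUs that have
 * finished the action function and may proceed to the teardown.
 */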

/*
 * Shared mutex to restrict busywaits between smp_rendezvous() and
 * smp(_targeted)_tlb_shootdown().  A deadlock occurs if both of these
 * functions trigger at once and cause multiple CPUs to busywait with
 * interrupts disabled.
 */
struct mtx smp_ipi_mtx;

/*
 * Let the MD SMP code initialize mp_maxid very early if it can.
 */
static void
mp_setmaxid(void *dummy)
{
	cpu_mp_setmaxid();
}
SYSINIT(cpu_mp_setmaxid, SI_SUB_TUNABLES, SI_ORDER_FIRST, mp_setmaxid, NULL)

/*
 * Call the MD SMP initialization code.
 */
static void
mp_start(void *dummy)
{

	/* Probe for MP hardware. */
	if (smp_disabled != 0 || cpu_mp_probe() == 0) {
		mp_ncpus = 1;
		all_cpus = PCPU_GET(cpumask);
		return;
	}

	mtx_init(&smp_ipi_mtx, "smp rendezvous", NULL, MTX_SPIN);
	cpu_mp_start();
	printf("FreeBSD/SMP: Multiprocessor System Detected: %d CPUs\n",
	    mp_ncpus);
	cpu_mp_announce();
}
SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_THIRD, mp_start, NULL)

void
forward_signal(struct thread *td)
{
	int id;

	/*
	 * signotify() has already set TDF_ASTPENDING and TDF_NEEDSIGCHK on
	 * this thread, so all we need to do is poke it if it is currently
	 * executing so that it executes ast().
	 */
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_RUNNING(td),
	    ("forward_signal: thread is not TDS_RUNNING"));

	CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);

	if (!smp_started || cold || panicstr)
		return;
	if (!forward_signal_enabled)
		return;

	/* No need to IPI ourself. */
	if (td == curthread)
		return;

	id = td->td_oncpu;
	if (id == NOCPU)
		return;
	ipi_selected(1 << id, IPI_AST);
}

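/*
 * Force the other CPUs to reconsider their running threads: mark the thread
 * currently running on every other started, non-stopped CPU (unless it is
 * the idle thread) with TDF_NEEDRESCHED and send that CPU an AST IPI.  This
 * is typically invoked from the scheduler's round-robin code; it can be
 * disabled via the kern.smp.forward_roundrobin_enabled sysctl above.
 */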
void
forward_roundrobin(void)
{
	struct pcpu *pc;
	struct thread *td;
	cpumask_t id, map, me;

	CTR0(KTR_SMP, "forward_roundrobin()");

	if (!smp_started || cold || panicstr)
		return;
	if (!forward_roundrobin_enabled)
		return;
	map = 0;
	me = PCPU_GET(cpumask);
	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		td = pc->pc_curthread;
		id = pc->pc_cpumask;
		if (id != me && (id & stopped_cpus) == 0 &&
		    !TD_IS_IDLETHREAD(td)) {
			td->td_flags |= TDF_NEEDRESCHED;
			map |= id;
		}
	}
	ipi_selected(map, IPI_AST);
}

/*
 * Stop the CPUs named in 'map': send each of them an IPI_STOP IPI and then
 * spin until all of them have stopped.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 * Returns:
 *   0: SMP not yet started, nothing was done
 *   1: ok
 *
 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
 *            from executing at the same time.
 */
int
stop_cpus(cpumask_t map)
{
	int i;

	if (!smp_started)
		return 0;

	CTR1(KTR_SMP, "stop_cpus(%x)", map);

	/* send the stop IPI to all CPUs in map */
	ipi_selected(map, IPI_STOP);

	i = 0;
	while ((stopped_cpus & map) != map) {
		/* spin */
		cpu_spinwait();
		i++;
#ifdef DIAGNOSTIC
		if (i == 100000) {
			printf("timeout stopping cpus\n");
			break;
		}
#endif
	}

	return 1;
}

/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 * Signals the CPUs in 'map' to restart and then waits for each of them to
 * clear its bit in 'stopped_cpus'.
 *
 * Returns:
 *   0: SMP not yet started, nothing was done
 *   1: ok
 */
int
restart_cpus(cpumask_t map)
{

	if (!smp_started)
		return 0;

	CTR1(KTR_SMP, "restart_cpus(%x)", map);

	/* signal other cpus to restart */
	atomic_store_rel_int(&started_cpus, map);

	/* wait for each to clear its bit */
	while ((stopped_cpus & map) != 0)
		cpu_spinwait();

	return 1;
}
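
/*
 * Illustrative sketch (not part of this file's interfaces): a caller that
 * must briefly exclude all other CPUs might pair the two functions above
 * roughly as
 *
 *	cpumask_t map = all_cpus & ~PCPU_GET(cpumask);
 *
 *	stop_cpus(map);
 *	... work that must not race with the other CPUs ...
 *	restart_cpus(map);
 *
 * restart_cpus() is usually handed the same mask that was stopped (or the
 * 'stopped_cpus' mask itself).
 */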

void
smp_no_rendevous_barrier(void *dummy)
{
#ifdef SMP
	KASSERT((!smp_started),
	    ("smp_no_rendevous_barrier called and smp is started"));
#endif
}

/*
 * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
 * (if specified), rendezvous, execute the action function (if specified),
 * rendezvous again, execute the teardown function (if specified), and then
 * resume.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */
void
smp_rendezvous_action(void)
{
	void *local_func_arg = smp_rv_func_arg;
	void (*local_setup_func)(void *) = smp_rv_setup_func;
	void (*local_action_func)(void *) = smp_rv_action_func;
	void (*local_teardown_func)(void *) = smp_rv_teardown_func;

	/* Ensure we have up-to-date values. */
	atomic_add_acq_int(&smp_rv_waiters[0], 1);
	while (smp_rv_waiters[0] < mp_ncpus)
		cpu_spinwait();

	/* setup function */
	if (local_setup_func != smp_no_rendevous_barrier) {
		if (local_setup_func != NULL)
			local_setup_func(local_func_arg);

		/* spin on entry rendezvous */
		atomic_add_int(&smp_rv_waiters[1], 1);
		while (smp_rv_waiters[1] < mp_ncpus)
			cpu_spinwait();
	}

	/* action function */
	if (local_action_func != NULL)
		local_action_func(local_func_arg);

	/* spin on exit rendezvous */
	atomic_add_int(&smp_rv_waiters[2], 1);
	if (local_teardown_func == smp_no_rendevous_barrier)
		return;
	while (smp_rv_waiters[2] < mp_ncpus)
		cpu_spinwait();

	/* teardown function */
	if (local_teardown_func != NULL)
		local_teardown_func(local_func_arg);
}

void
smp_rendezvous(void (* setup_func)(void *),
	       void (* action_func)(void *),
	       void (* teardown_func)(void *),
	       void *arg)
{

	if (!smp_started) {
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		return;
	}

	/* obtain rendezvous lock */
	mtx_lock_spin(&smp_ipi_mtx);

	/* set static function pointers */
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[1] = 0;
	smp_rv_waiters[2] = 0;
	atomic_store_rel_int(&smp_rv_waiters[0], 0);

	/*
	 * Signal other processors, which will enter the IPI with
	 * interrupts off.
	 */
	ipi_all_but_self(IPI_RENDEZVOUS);

	/* call executor function */
	smp_rendezvous_action();

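	/*
	 * If the caller passed smp_no_rendevous_barrier as the teardown
	 * function, the other CPUs do not block at the exit rendezvous, so
	 * wait here until all of them have finished the action function
	 * before the rendezvous state is released for reuse.
	 */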
	if (teardown_func == smp_no_rendevous_barrier)
		while (atomic_load_acq_int(&smp_rv_waiters[2]) < mp_ncpus)
			cpu_spinwait();

	/* release lock */
	mtx_unlock_spin(&smp_ipi_mtx);
}
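
/*
 * Illustrative example (hypothetical callback name): running a function on
 * every CPU with no separate setup or teardown step might look like
 *
 *	static void
 *	flush_local_state(void *arg)
 *	{
 *		... operate on the calling CPU's private state ...
 *	}
 *
 *	smp_rendezvous(NULL, flush_local_state, NULL, NULL);
 *
 * Passing smp_no_rendevous_barrier as setup_func or teardown_func makes
 * smp_rendezvous_action() skip the corresponding barrier instead of
 * spinning on it.
 */
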
#else /* !SMP */

/*
 * Provide dummy SMP support for UP kernels.  Modules that need to use SMP
 * APIs will still work using this dummy support.
 */
static void
mp_setvariables_for_up(void *dummy)
{
	mp_ncpus = 1;
	mp_maxid = PCPU_GET(cpuid);
	all_cpus = PCPU_GET(cpumask);
	KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero"));
}
SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,
    mp_setvariables_for_up, NULL)

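/*
 * Unlike the SMP version above, this takes no lock and sends no IPIs; the
 * supplied functions simply run in the caller's context on the only CPU.
 */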
void
smp_rendezvous(void (*setup_func)(void *),
	       void (*action_func)(void *),
	       void (*teardown_func)(void *),
	       void *arg)
{

	if (setup_func != NULL)
		setup_func(arg);
	if (action_func != NULL)
		action_func(arg);
	if (teardown_func != NULL)
		teardown_func(arg);
}
#endif /* SMP */