/*-
 * Copyright (c) 2001
 *	John Baldwin <jhb@FreeBSD.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BALDWIN AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL JOHN BALDWIN OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This module holds the global variables and machine independent functions
 * used for the kernel SMP support.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>
#include <machine/smp.h>

#include "opt_sched.h"

#ifdef SMP
volatile cpumask_t stopped_cpus;
volatile cpumask_t started_cpus;
cpumask_t idle_cpus_mask;
cpumask_t hlt_cpus_mask;
cpumask_t logical_cpus_mask;

void (*cpustop_restartfunc)(void);
#endif
/* This is used in modules that need to work in both SMP and UP. */
cpumask_t all_cpus;

int mp_ncpus;
/* export this for libkvm consumers. */
int mp_maxcpus = MAXCPU;

struct cpu_top *smp_topology;
volatile int smp_started;
u_int mp_maxid;

SYSCTL_NODE(_kern, OID_AUTO, smp, CTLFLAG_RD, NULL, "Kernel SMP");

SYSCTL_INT(_kern_smp, OID_AUTO, maxcpus, CTLFLAG_RD, &mp_maxcpus, 0,
    "Max number of CPUs that the system was compiled for.");

int smp_active = 0;	/* are the APs allowed to run? */
SYSCTL_INT(_kern_smp, OID_AUTO, active, CTLFLAG_RW, &smp_active, 0,
    "Number of Auxiliary Processors (APs) that were successfully started");

int smp_disabled = 0;	/* has smp been disabled? */
SYSCTL_INT(_kern_smp, OID_AUTO, disabled, CTLFLAG_RDTUN, &smp_disabled, 0,
    "SMP has been disabled from the loader");
TUNABLE_INT("kern.smp.disabled", &smp_disabled);

int smp_cpus = 1;	/* how many cpus are running */
SYSCTL_INT(_kern_smp, OID_AUTO, cpus, CTLFLAG_RD, &smp_cpus, 0,
    "Number of CPUs online");

#ifdef SMP
/* Enable forwarding of a signal to a process running on a different CPU */
static int forward_signal_enabled = 1;
SYSCTL_INT(_kern_smp, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
	   &forward_signal_enabled, 0,
	   "Forwarding of a signal to a process on a different CPU");

/* Enable forwarding of roundrobin to all other cpus */
static int forward_roundrobin_enabled = 1;
SYSCTL_INT(_kern_smp, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
	   &forward_roundrobin_enabled, 0,
	   "Forwarding of roundrobin to all other CPUs");

/* Variables needed for SMP rendezvous. */
static void (*smp_rv_setup_func)(void *arg);
static void (*smp_rv_action_func)(void *arg);
static void (*smp_rv_teardown_func)(void *arg);
static void *smp_rv_func_arg;
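/*
 * Completion counts for the three rendezvous stages: initial arrival,
 * the entry rendezvous after the setup function, and the exit
 * rendezvous after the action function.
 */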
static volatile int smp_rv_waiters[3];

/*
 * Shared mutex to restrict busywaits between smp_rendezvous() and
 * smp(_targeted)_tlb_shootdown().  A deadlock occurs if both of these
 * functions trigger at once and cause multiple CPUs to busywait with
 * interrupts disabled.
 */
struct mtx smp_ipi_mtx;

/*
 * Let the MD SMP code initialize mp_maxid very early if it can.
 */
static void
mp_setmaxid(void *dummy)
{
	cpu_mp_setmaxid();
}
SYSINIT(cpu_mp_setmaxid, SI_SUB_TUNABLES, SI_ORDER_FIRST, mp_setmaxid, NULL)

/*
 * Call the MD SMP initialization code.
 */
static void
mp_start(void *dummy)
{

	/* Probe for MP hardware. */
	if (smp_disabled != 0 || cpu_mp_probe() == 0) {
		mp_ncpus = 1;
		all_cpus = PCPU_GET(cpumask);
		return;
	}

	mtx_init(&smp_ipi_mtx, "smp rendezvous", NULL, MTX_SPIN);
	cpu_mp_start();
	printf("FreeBSD/SMP: Multiprocessor System Detected: %d CPUs\n",
	    mp_ncpus);
	cpu_mp_announce();
}
SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_SECOND, mp_start, NULL)

void
forward_signal(struct thread *td)
{
	int id;

	/*
	 * signotify() has already set TDF_ASTPENDING and TDF_NEEDSIGCHK on
	 * this thread, so all we need to do is poke it if it is currently
	 * executing so that it executes ast().
	 */
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_RUNNING(td),
	    ("forward_signal: thread is not TDS_RUNNING"));

	CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);

	if (!smp_started || cold || panicstr)
		return;
	if (!forward_signal_enabled)
		return;

	/* No need to IPI ourself. */
	if (td == curthread)
		return;

	id = td->td_oncpu;
	if (id == NOCPU)
		return;
	ipi_selected(1 << id, IPI_AST);
}

void
forward_roundrobin(void)
{
	struct pcpu *pc;
	struct thread *td;
	cpumask_t id, map, me;

	CTR0(KTR_SMP, "forward_roundrobin()");

	if (!smp_started || cold || panicstr)
		return;
	if (!forward_roundrobin_enabled)
		return;
	map = 0;
	me = PCPU_GET(cpumask);
	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		td = pc->pc_curthread;
		id = pc->pc_cpumask;
		if (id != me && (id & stopped_cpus) == 0 &&
		    !TD_IS_IDLETHREAD(td)) {
			td->td_flags |= TDF_NEEDRESCHED;
			map |= id;
		}
	}
	ipi_selected(map, IPI_AST);
}

/*
 * When called the executing CPU will send an IPI to all other CPUs
 *  requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to stop.
 *  - Waits for each to stop.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 *
 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
 *            from executing at same time.
 */
int
stop_cpus(cpumask_t map)
{
	int i;

	if (!smp_started)
		return 0;

	CTR1(KTR_SMP, "stop_cpus(%x)", map);

	/* send the stop IPI to all CPUs in map */
	ipi_selected(map, IPI_STOP);

	i = 0;
	while ((stopped_cpus & map) != map) {
		/* spin */
		cpu_spinwait();
		i++;
#ifdef DIAGNOSTIC
		if (i == 100000) {
			printf("timeout stopping cpus\n");
			break;
		}
#endif
	}

	return 1;
}

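/*
 * Usage sketch (illustrative only, not part of this file): halt every
 * CPU except the caller before doing work that must run alone.  The
 * 'other_cpus' mask stands for the MD "all but self" set mentioned in
 * the comment above; the return value is 0 when SMP is not yet started
 * and 1 once every requested CPU has stopped.
 *
 *	stop_cpus(other_cpus);
 */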
/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 *  - Signals all CPUs in map to restart.
 *  - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
int
restart_cpus(cpumask_t map)
{

	if (!smp_started)
		return 0;

	CTR1(KTR_SMP, "restart_cpus(%x)", map);

	/* signal other cpus to restart */
	atomic_store_rel_int(&started_cpus, map);

	/* wait for each to clear its bit */
	while ((stopped_cpus & map) != 0)
		cpu_spinwait();

	return 1;
}

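/*
 * Usage sketch (illustrative only, not part of this file): release the
 * CPUs halted by the stop_cpus() call shown above.  Passing the global
 * 'stopped_cpus' mask restarts everything that is currently stopped.
 *
 *	restart_cpus(stopped_cpus);
 */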
/*
 * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
 * (if specified), rendezvous, execute the action function (if specified),
 * rendezvous again, execute the teardown function (if specified), and then
 * resume.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */
void
smp_rendezvous_action(void)
{

	/* Ensure we have up-to-date values. */
	atomic_add_acq_int(&smp_rv_waiters[0], 1);
	while (smp_rv_waiters[0] < mp_ncpus)
		cpu_spinwait();

	/* setup function */
	if (smp_rv_setup_func != NULL)
		smp_rv_setup_func(smp_rv_func_arg);

	/* spin on entry rendezvous */
	atomic_add_int(&smp_rv_waiters[1], 1);
	while (smp_rv_waiters[1] < mp_ncpus)
		cpu_spinwait();

	/* action function */
	if (smp_rv_action_func != NULL)
		smp_rv_action_func(smp_rv_func_arg);

	/* spin on exit rendezvous */
	atomic_add_int(&smp_rv_waiters[2], 1);
	while (smp_rv_waiters[2] < mp_ncpus)
		cpu_spinwait();

	/* teardown function */
	if (smp_rv_teardown_func != NULL)
		smp_rv_teardown_func(smp_rv_func_arg);
}

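/*
 * Usage sketch (illustrative only, not part of this file): run a
 * callback on every CPU and return only after all CPUs have executed
 * it.  do_count() and 'ncalled' are hypothetical; the setup and
 * teardown hooks are left NULL because only the action step is needed.
 *
 *	static void
 *	do_count(void *arg)
 *	{
 *		atomic_add_int(arg, 1);
 *	}
 *
 *	static u_int ncalled;
 *
 *	smp_rendezvous(NULL, do_count, NULL, &ncalled);
 *
 * On return every CPU, including the caller, has bumped the counter,
 * so ncalled equals mp_ncpus.
 */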
void
smp_rendezvous(void (* setup_func)(void *),
	       void (* action_func)(void *),
	       void (* teardown_func)(void *),
	       void *arg)
{

	if (!smp_started) {
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		return;
	}

	/* obtain rendezvous lock */
	mtx_lock_spin(&smp_ipi_mtx);

	/* set static function pointers */
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[1] = 0;
	smp_rv_waiters[2] = 0;
	atomic_store_rel_int(&smp_rv_waiters[0], 0);

	/* signal other processors, which will enter the IPI with interrupts off */
	ipi_all_but_self(IPI_RENDEZVOUS);

	/* call executor function */
	smp_rendezvous_action();

	/* release lock */
	mtx_unlock_spin(&smp_ipi_mtx);
}
#else /* !SMP */

/*
 * Provide dummy SMP support for UP kernels.  Modules that need to use SMP
 * APIs will still work using this dummy support.
 */
static void
mp_setvariables_for_up(void *dummy)
{
	mp_ncpus = 1;
	mp_maxid = PCPU_GET(cpuid);
	all_cpus = PCPU_GET(cpumask);
	KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero"));
}
SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,
    mp_setvariables_for_up, NULL)

void
smp_rendezvous(void (* setup_func)(void *),
	       void (* action_func)(void *),
	       void (* teardown_func)(void *),
	       void *arg)
{

	if (setup_func != NULL)
		setup_func(arg);
	if (action_func != NULL)
		action_func(arg);
	if (teardown_func != NULL)
		teardown_func(arg);
}
#endif /* SMP */