xref: /freebsd/sys/kern/subr_smp.c (revision f9218d3d4fd34f082473b3a021c6d4d109fb47cf)
/*
 * Copyright (c) 2001
 *	John Baldwin <jhb@FreeBSD.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BALDWIN AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL JOHN BALDWIN OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * This module holds the global variables and machine independent functions
 * used for the kernel SMP support.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/smp.h>

volatile u_int stopped_cpus;
volatile u_int started_cpus;

void (*cpustop_restartfunc)(void);
int mp_ncpus;

volatile int smp_started;
u_int all_cpus;
u_int mp_maxid;

SYSCTL_NODE(_kern, OID_AUTO, smp, CTLFLAG_RD, NULL, "Kernel SMP");

int smp_active = 0;	/* are the APs allowed to run? */
SYSCTL_INT(_kern_smp, OID_AUTO, active, CTLFLAG_RW, &smp_active, 0, "");

int smp_disabled = 0;	/* has smp been disabled? */
SYSCTL_INT(_kern_smp, OID_AUTO, disabled, CTLFLAG_RD, &smp_disabled, 0, "");
TUNABLE_INT("kern.smp.disabled", &smp_disabled);

int smp_cpus = 1;	/* how many CPUs are running */
SYSCTL_INT(_kern_smp, OID_AUTO, cpus, CTLFLAG_RD, &smp_cpus, 0, "");

/* Enable forwarding of a signal to a process running on a different CPU */
static int forward_signal_enabled = 1;
SYSCTL_INT(_kern_smp, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
	   &forward_signal_enabled, 0, "");

/* Enable forwarding of roundrobin to all other cpus */
static int forward_roundrobin_enabled = 1;
SYSCTL_INT(_kern_smp, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
	   &forward_roundrobin_enabled, 0, "");

/* Variables needed for SMP rendezvous. */
static void (*smp_rv_setup_func)(void *arg);
static void (*smp_rv_action_func)(void *arg);
static void (*smp_rv_teardown_func)(void *arg);
static void *smp_rv_func_arg;
static volatile int smp_rv_waiters[2];
static struct mtx smp_rv_mtx;
static int mp_probe_status;

/*
 * Initialize MI SMP variables.
 */
static void
mp_probe(void *dummy)
{
	mp_probe_status = cpu_mp_probe();
}
SYSINIT(cpu_mp_probe, SI_SUB_TUNABLES, SI_ORDER_FIRST, mp_probe, NULL)

/*
 * Call the MD SMP initialization code.
 */
static void
mp_start(void *dummy)
{

	/* Probe for MP hardware. */
	if (mp_probe_status == 0 || smp_disabled != 0)
		return;

	mtx_init(&smp_rv_mtx, "smp rendezvous", NULL, MTX_SPIN);
	cpu_mp_start();
	printf("FreeBSD/SMP: Multiprocessor System Detected: %d CPUs\n",
	    mp_ncpus);
	cpu_mp_announce();
}
SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_SECOND, mp_start, NULL)

void
forward_signal(struct thread *td)
{
	int id;

	/*
	 * signotify() has already set TDF_ASTPENDING and PS_NEEDSIGCHECK on
	 * this process, so all we need to do is poke it if it is currently
	 * executing so that it executes ast().
	 */
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(TD_IS_RUNNING(td),
	    ("forward_signal: thread is not TDS_RUNNING"));

	CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);

	if (!smp_started || cold || panicstr)
		return;
	if (!forward_signal_enabled)
		return;

	/* No need to IPI ourself. */
	if (td == curthread)
		return;

	id = td->td_kse->ke_oncpu;
	if (id == NOCPU)
		return;
	ipi_selected(1 << id, IPI_AST);
}
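
/*
 * Usage sketch (illustrative only, not lifted from an in-tree caller):
 * forward_signal() expects sched_lock to be held and the target thread to
 * be running, so a caller in the signal delivery path would look roughly
 * like:
 *
 *	mtx_lock_spin(&sched_lock);
 *	if (TD_IS_RUNNING(td))
 *		forward_signal(td);
 *	mtx_unlock_spin(&sched_lock);
 *
 * The function itself skips curthread and threads not on a CPU, so the
 * caller only has to guarantee the lock and the running state.
 */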

void
forward_roundrobin(void)
{
	struct pcpu *pc;
	struct thread *td;
	u_int id, map;

	mtx_assert(&sched_lock, MA_OWNED);

	CTR0(KTR_SMP, "forward_roundrobin()");

	if (!smp_started || cold || panicstr)
		return;
	if (!forward_roundrobin_enabled)
		return;
	map = 0;
	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		td = pc->pc_curthread;
		id = pc->pc_cpumask;
		if (id != PCPU_GET(cpumask) && (id & stopped_cpus) == 0 &&
		    td != pc->pc_idlethread) {
			td->td_flags |= TDF_NEEDRESCHED;
			map |= id;
		}
	}
	ipi_selected(map, IPI_AST);
}

/*
 * When called, the executing CPU sends an IPI to each CPU in 'map'
 * requesting that it halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to stop.
 *  - Waits for each to stop.
 *
 * Returns:
 *  -1: error
 *   0: NA (SMP not started; nothing was done)
 *   1: ok
 *
 * XXX FIXME: this is not MP-safe; it needs a lock to prevent multiple CPUs
 *            from executing it at the same time.
 */
int
stop_cpus(u_int map)
{
	int i;

	if (!smp_started)
		return 0;

	CTR1(KTR_SMP, "stop_cpus(%x)", map);

	/* send the stop IPI to all CPUs in map */
	ipi_selected(map, IPI_STOP);

	i = 0;
	while ((atomic_load_acq_int(&stopped_cpus) & map) != map) {
		/* spin */
		i++;
#ifdef DIAGNOSTIC
		if (i == 100000) {
			printf("timeout stopping cpus\n");
			break;
		}
#endif
	}

	return 1;
}


/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 *  - Signals all CPUs in map to restart.
 *  - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA (SMP not started; nothing was done)
 *   1: ok
 */
int
restart_cpus(u_int map)
{

	if (!smp_started)
		return 0;

	CTR1(KTR_SMP, "restart_cpus(%x)", map);

	/* signal other cpus to restart */
	atomic_store_rel_int(&started_cpus, map);

	/* wait for each to clear its bit */
	while ((atomic_load_acq_int(&stopped_cpus) & map) != 0)
		;	/* nothing */

	return 1;
}
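
/*
 * Usage sketch (illustrative only, not lifted from an in-tree caller):
 * stop_cpus() and restart_cpus() are meant to be used as a pair around
 * work that must not be disturbed by the other CPUs.  The mask below is
 * built from identifiers used elsewhere in this file:
 *
 *	u_int map;
 *
 *	map = all_cpus & ~PCPU_GET(cpumask);
 *	if (stop_cpus(map)) {
 *		... do the work while the other CPUs spin in the stop
 *		    handler ...
 *		restart_cpus(stopped_cpus);
 *	}
 *
 * As the comment above stop_cpus() warns, nothing prevents two CPUs from
 * attempting this at the same time, so callers must provide their own
 * serialization.
 */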

/*
 * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
 * (if specified), rendezvous, execute the action function (if specified),
 * rendezvous again, execute the teardown function (if specified), and then
 * resume.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */
void
smp_rendezvous_action(void)
{

	/* setup function */
	if (smp_rv_setup_func != NULL)
		smp_rv_setup_func(smp_rv_func_arg);
	/* spin on entry rendezvous */
	atomic_add_int(&smp_rv_waiters[0], 1);
	while (atomic_load_acq_int(&smp_rv_waiters[0]) < mp_ncpus)
		;	/* nothing */
	/* action function */
	if (smp_rv_action_func != NULL)
		smp_rv_action_func(smp_rv_func_arg);
	/* spin on exit rendezvous */
	atomic_add_int(&smp_rv_waiters[1], 1);
	while (atomic_load_acq_int(&smp_rv_waiters[1]) < mp_ncpus)
		;	/* nothing */
	/* teardown function */
	if (smp_rv_teardown_func != NULL)
		smp_rv_teardown_func(smp_rv_func_arg);
}

void
smp_rendezvous(void (* setup_func)(void *),
	       void (* action_func)(void *),
	       void (* teardown_func)(void *),
	       void *arg)
{

	if (!smp_started) {
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		return;
	}

	/* obtain rendezvous lock */
	mtx_lock_spin(&smp_rv_mtx);

	/* set static function pointers */
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[0] = 0;
	smp_rv_waiters[1] = 0;

	/* signal other processors, which will enter the IPI with interrupts off */
	ipi_all_but_self(IPI_RENDEZVOUS);

	/* call executor function */
	smp_rendezvous_action();

	/* release lock */
	mtx_unlock_spin(&smp_rv_mtx);
}
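
/*
 * Usage sketch (illustrative only; the counter and function names are made
 * up for the example): a common pattern is to run an action on every CPU
 * with no setup or teardown step, passing NULL for the unused hooks:
 *
 *	static volatile int rv_count;
 *
 *	static void
 *	count_cpu(void *arg)
 *	{
 *
 *		atomic_add_int(&rv_count, 1);
 *	}
 *
 *	...
 *	smp_rendezvous(NULL, count_cpu, NULL, NULL);
 *
 * Once smp_rendezvous() returns, every CPU has executed count_cpu(), so
 * rv_count equals mp_ncpus (when SMP is started; otherwise the action runs
 * only on the calling CPU).  As noted above, the action runs on all CPUs
 * in parallel and must be reentrant.
 */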
326