xref: /freebsd/sys/kern/subr_smp.c (revision 42c159fe388a3765f69860c84183700af37aca8a)
1 /*
2  * Copyright (c) 2001
3  *	John Baldwin <jhb@FreeBSD.org>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 4. Neither the name of the author nor the names of any co-contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY JOHN BALDWIN AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL JOHN BALDWIN OR THE VOICES IN HIS HEAD
21  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27  * THE POSSIBILITY OF SUCH DAMAGE.
28  *
29  * $FreeBSD$
30  */
31 
32 /*
33  * This module holds the global variables and machine independent functions
34  * used for the kernel SMP support.
35  */
36 
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/ktr.h>
41 #include <sys/proc.h>
42 #include <sys/lock.h>
43 #include <sys/mutex.h>
44 #include <sys/pcpu.h>
45 #include <sys/smp.h>
46 #include <sys/sysctl.h>
47 
48 #include <machine/smp.h>
49 
/*
 * Mask of CPUs currently stopped; stop_cpus() spins until each target's
 * bit appears (bit presumably set by the MD IPI_STOP handler -- not
 * visible in this file).
 */
volatile u_int stopped_cpus;
/* Mask written by restart_cpus() telling stopped CPUs they may resume. */
volatile u_int started_cpus;

/*
 * Optional hook for a stopped CPU to run on restart.
 * NOTE(review): not referenced in this file; presumably consumed by the
 * machine-dependent stop/restart code -- confirm against MD sources.
 */
void (*cpustop_restartfunc)(void);
/* Number of CPUs in the system (reported by mp_start()). */
int mp_ncpus;

/* Non-zero once the MI SMP machinery (IPIs, rendezvous) is usable. */
volatile int smp_started;
/* NOTE(review): not used in this file; presumably a mask of all CPUs. */
u_int all_cpus;
/* NOTE(review): not used in this file; presumably the highest CPU id. */
u_int mp_maxid;

SYSCTL_NODE(_kern, OID_AUTO, smp, CTLFLAG_RD, NULL, "Kernel SMP");

int smp_active = 0;	/* are the APs allowed to run? */
SYSCTL_INT(_kern_smp, OID_AUTO, active, CTLFLAG_RW, &smp_active, 0, "");

int smp_cpus = 1;	/* how many cpu's running */
SYSCTL_INT(_kern_smp, OID_AUTO, cpus, CTLFLAG_RD, &smp_cpus, 0, "");

/* Enable forwarding of a signal to a process running on a different CPU */
static int forward_signal_enabled = 1;
SYSCTL_INT(_kern_smp, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
	   &forward_signal_enabled, 0, "");

/* Enable forwarding of roundrobin to all other cpus */
static int forward_roundrobin_enabled = 1;
SYSCTL_INT(_kern_smp, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
	   &forward_roundrobin_enabled, 0, "");

/* Variables needed for SMP rendezvous. */
static void (*smp_rv_setup_func)(void *arg);	/* per-CPU setup callback */
static void (*smp_rv_action_func)(void *arg);	/* parallel action callback */
static void (*smp_rv_teardown_func)(void *arg);	/* per-CPU teardown callback */
static void *smp_rv_func_arg;			/* argument for all three */
static volatile int smp_rv_waiters[2];		/* entry/exit barrier counts */
static struct mtx smp_rv_mtx;			/* serializes rendezvous ops */
static int mp_probe_status;			/* result of cpu_mp_probe() */
87 /*
88  * Initialize MI SMP variables.
89  */
90 static void
91 mp_probe(void *dummy)
92 {
93 	mp_probe_status = cpu_mp_probe();
94 }
95 SYSINIT(cpu_mp_probe, SI_SUB_TUNABLES, SI_ORDER_FIRST, mp_probe, NULL)
96 
97 /*
98  * Call the MD SMP initialization code.
99  */
100 static void
101 mp_start(void *dummy)
102 {
103 
104 	/* Probe for MP hardware. */
105 	if (mp_probe_status == 0)
106 		return;
107 
108 	mtx_init(&smp_rv_mtx, "smp rendezvous", MTX_SPIN);
109 	cpu_mp_start();
110 	printf("FreeBSD/SMP: Multiprocessor System Detected: %d CPUs\n",
111 	    mp_ncpus);
112 	cpu_mp_announce();
113 }
114 SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_SECOND, mp_start, NULL)
115 
116 void
117 forward_signal(struct thread *td)
118 {
119 	int id;
120 
121 	/*
122 	 * signotify() has already set PS_ASTPENDING on this process so all
123 	 * we need to do is poke it if it is currently executing so that it
124 	 * executes ast().
125 	 */
126 	mtx_assert(&sched_lock, MA_OWNED);
127 	KASSERT(td->td_proc->p_stat == SRUN, ("forward_signal: process is not SRUN"));
128 
129 	CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);
130 
131 	if (!smp_started || cold || panicstr)
132 		return;
133 	if (!forward_signal_enabled)
134 		return;
135 
136 	/* No need to IPI ourself. */
137 	if (td == curthread)
138 		return;
139 
140 	id = td->td_kse->ke_oncpu;
141 	if (id == NOCPU)
142 		return;
143 	ipi_selected(1 << id, IPI_AST);
144 }
145 
146 void
147 forward_roundrobin(void)
148 {
149 	struct pcpu *pc;
150 	struct thread *td;
151 	u_int id, map;
152 
153 	mtx_assert(&sched_lock, MA_OWNED);
154 
155 	CTR0(KTR_SMP, "forward_roundrobin()");
156 
157 	if (!smp_started || cold || panicstr)
158 		return;
159 	if (!forward_roundrobin_enabled)
160 		return;
161 	map = 0;
162 	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
163 		td = pc->pc_curthread;
164 		id = pc->pc_cpumask;
165 		if (id != PCPU_GET(cpumask) && (id & stopped_cpus) == 0 &&
166 		    td != pc->pc_idlethread) {
167 			td->td_kse->ke_flags |= KEF_NEEDRESCHED;
168 			map |= id;
169 		}
170 	}
171 	ipi_selected(map, IPI_AST);
172 }
173 
174 /*
175  * When called the executing CPU will send an IPI to all other CPUs
176  *  requesting that they halt execution.
177  *
178  * Usually (but not necessarily) called with 'other_cpus' as its arg.
179  *
180  *  - Signals all CPUs in map to stop.
181  *  - Waits for each to stop.
182  *
 * Returns:
 *  -1: error (documented convention; never actually produced here)
 *   0: SMP not started, nothing done
 *   1: ok
187  *
188  * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
189  *            from executing at same time.
190  */
191 int
192 stop_cpus(u_int map)
193 {
194 	int i;
195 
196 	if (!smp_started)
197 		return 0;
198 
199 	CTR1(KTR_SMP, "stop_cpus(%x)", map);
200 
201 	/* send the stop IPI to all CPUs in map */
202 	ipi_selected(map, IPI_STOP);
203 
204 	i = 0;
205 	while ((atomic_load_acq_int(&stopped_cpus) & map) != map) {
206 		/* spin */
207 		i++;
208 #ifdef DIAGNOSTIC
209 		if (i == 100000) {
210 			printf("timeout stopping cpus\n");
211 			break;
212 		}
213 #endif
214 	}
215 
216 	return 1;
217 }
218 
219 
220 /*
221  * Called by a CPU to restart stopped CPUs.
222  *
223  * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
224  *
225  *  - Signals all CPUs in map to restart.
226  *  - Waits for each to restart.
227  *
 * Returns:
 *  -1: error (documented convention; never actually produced here)
 *   0: SMP not started, nothing done
 *   1: ok
232  */
233 int
234 restart_cpus(u_int map)
235 {
236 
237 	if (!smp_started)
238 		return 0;
239 
240 	CTR1(KTR_SMP, "restart_cpus(%x)", map);
241 
242 	/* signal other cpus to restart */
243 	atomic_store_rel_int(&started_cpus, map);
244 
245 	/* wait for each to clear its bit */
246 	while ((atomic_load_acq_int(&stopped_cpus) & map) != 0)
247 		;	/* nothing */
248 
249 	return 1;
250 }
251 
252 /*
253  * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
254  * (if specified), rendezvous, execute the action function (if specified),
255  * rendezvous again, execute the teardown function (if specified), and then
256  * resume.
257  *
258  * Note that the supplied external functions _must_ be reentrant and aware
259  * that they are running in parallel and in an unknown lock context.
260  */
261 void
262 smp_rendezvous_action(void)
263 {
264 
265 	/* setup function */
266 	if (smp_rv_setup_func != NULL)
267 		smp_rv_setup_func(smp_rv_func_arg);
268 	/* spin on entry rendezvous */
269 	atomic_add_int(&smp_rv_waiters[0], 1);
270 	while (atomic_load_acq_int(&smp_rv_waiters[0]) < mp_ncpus)
271 		;	/* nothing */
272 	/* action function */
273 	if (smp_rv_action_func != NULL)
274 		smp_rv_action_func(smp_rv_func_arg);
275 	/* spin on exit rendezvous */
276 	atomic_add_int(&smp_rv_waiters[1], 1);
277 	while (atomic_load_acq_int(&smp_rv_waiters[1]) < mp_ncpus)
278 		;	/* nothing */
279 	/* teardown function */
280 	if (smp_rv_teardown_func != NULL)
281 		smp_rv_teardown_func(smp_rv_func_arg);
282 }
283 
284 void
285 smp_rendezvous(void (* setup_func)(void *),
286 	       void (* action_func)(void *),
287 	       void (* teardown_func)(void *),
288 	       void *arg)
289 {
290 
291 	if (!smp_started) {
292 		if (setup_func != NULL)
293 			setup_func(arg);
294 		if (action_func != NULL)
295 			action_func(arg);
296 		if (teardown_func != NULL)
297 			teardown_func(arg);
298 		return;
299 	}
300 
301 	/* obtain rendezvous lock */
302 	mtx_lock_spin(&smp_rv_mtx);
303 
304 	/* set static function pointers */
305 	smp_rv_setup_func = setup_func;
306 	smp_rv_action_func = action_func;
307 	smp_rv_teardown_func = teardown_func;
308 	smp_rv_func_arg = arg;
309 	smp_rv_waiters[0] = 0;
310 	smp_rv_waiters[1] = 0;
311 
312 	/* signal other processors, which will enter the IPI with interrupts off */
313 	ipi_all_but_self(IPI_RENDEZVOUS);
314 
315 	/* call executor function */
316 	smp_rendezvous_action();
317 
318 	/* release lock */
319 	mtx_unlock_spin(&smp_rv_mtx);
320 }
321