/*
 * Copyright (c) 2001
 *	John Baldwin <jhb@FreeBSD.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BALDWIN AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL JOHN BALDWIN OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This module holds the global variables and machine independent functions
 * used for the kernel SMP support.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/smp.h>

volatile u_int stopped_cpus;
volatile u_int started_cpus;

void (*cpustop_restartfunc)(void);
int mp_ncpus;

volatile int smp_started;
u_int all_cpus;
u_int mp_maxid;

SYSCTL_NODE(_kern, OID_AUTO, smp, CTLFLAG_RD, NULL, "Kernel SMP");

int smp_active = 0;	/* are the APs allowed to run? */
SYSCTL_INT(_kern_smp, OID_AUTO, active, CTLFLAG_RW, &smp_active, 0,
    "Number of Auxiliary Processors (APs) that were successfully started");

int smp_disabled = 0;	/* has SMP been disabled? */
SYSCTL_INT(_kern_smp, OID_AUTO, disabled, CTLFLAG_RDTUN, &smp_disabled, 0,
    "SMP has been disabled from the loader");
TUNABLE_INT("kern.smp.disabled", &smp_disabled);
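/*
 * A minimal usage sketch for the kern.smp.disabled knob above: because it
 * is declared CTLFLAG_RDTUN and registered with TUNABLE_INT(), it can only
 * be set from the boot loader, e.g. with a line such as the following in
 * /boot/loader.conf:
 *
 *	kern.smp.disabled="1"
 *
 * mp_start() below then sees a non-zero value and falls back to running
 * with a single CPU.
 */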
int smp_cpus = 1;	/* how many CPUs are running */
SYSCTL_INT(_kern_smp, OID_AUTO, cpus, CTLFLAG_RD, &smp_cpus, 0,
    "Number of CPUs online");

/* Enable forwarding of a signal to a process running on a different CPU */
static int forward_signal_enabled = 1;
SYSCTL_INT(_kern_smp, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
    &forward_signal_enabled, 0,
    "Forwarding of a signal to a process on a different CPU");

/* Enable forwarding of roundrobin to all other CPUs */
static int forward_roundrobin_enabled = 1;
SYSCTL_INT(_kern_smp, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
    &forward_roundrobin_enabled, 0,
    "Forwarding of roundrobin to all other CPUs");

/* Variables needed for SMP rendezvous. */
static void (*smp_rv_setup_func)(void *arg);
static void (*smp_rv_action_func)(void *arg);
static void (*smp_rv_teardown_func)(void *arg);
static void *smp_rv_func_arg;
static volatile int smp_rv_waiters[2];
static struct mtx smp_rv_mtx;
static int mp_probe_status;

/*
 * Initialize MI SMP variables.
 */
static void
mp_probe(void *dummy)
{
        mp_probe_status = cpu_mp_probe();
}
SYSINIT(cpu_mp_probe, SI_SUB_TUNABLES, SI_ORDER_FIRST, mp_probe, NULL)

/*
 * Call the MD SMP initialization code.
 */
static void
mp_start(void *dummy)
{

        /* Probe for MP hardware. */
        if (mp_probe_status == 0 || smp_disabled != 0) {
                mp_ncpus = 1;
                return;
        }

        mtx_init(&smp_rv_mtx, "smp rendezvous", NULL, MTX_SPIN);
        cpu_mp_start();
        printf("FreeBSD/SMP: Multiprocessor System Detected: %d CPUs\n",
            mp_ncpus);
        cpu_mp_announce();
}
SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_SECOND, mp_start, NULL)

/*
 * Poke the CPU that a thread is currently running on so that the thread
 * notices its pending signal and calls ast().
 */
void
forward_signal(struct thread *td)
{
        int id;

        /*
         * signotify() has already set TDF_ASTPENDING and TDF_NEEDSIGCHECK on
         * this thread, so all we need to do is poke it if it is currently
         * executing so that it executes ast().
         */
        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT(TD_IS_RUNNING(td),
            ("forward_signal: thread is not TDS_RUNNING"));

        CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);

        if (!smp_started || cold || panicstr)
                return;
        if (!forward_signal_enabled)
                return;

        /* No need to IPI ourself. */
        if (td == curthread)
                return;

        id = td->td_oncpu;
        if (id == NOCPU)
                return;
        ipi_selected(1 << id, IPI_AST);
}

/*
 * Mark the thread running on every other CPU as needing to reschedule and
 * send that CPU an AST IPI, so that round-robin thread switching takes
 * effect machine-wide.
 */
void
forward_roundrobin(void)
{
        struct pcpu *pc;
        struct thread *td;
        u_int id, map;

        mtx_assert(&sched_lock, MA_OWNED);

        CTR0(KTR_SMP, "forward_roundrobin()");

        if (!smp_started || cold || panicstr)
                return;
        if (!forward_roundrobin_enabled)
                return;
        map = 0;
        SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
                td = pc->pc_curthread;
                id = pc->pc_cpumask;
                if (id != PCPU_GET(cpumask) && (id & stopped_cpus) == 0 &&
                    td != pc->pc_idlethread) {
                        td->td_flags |= TDF_NEEDRESCHED;
                        map |= id;
                }
        }
        ipi_selected(map, IPI_AST);
}

/*
 * When called, the executing CPU will send an IPI to all other CPUs
 * requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to stop.
 *  - Waits for each to stop.
 *
 * Returns:
 *   0: SMP not started yet, nothing done
 *   1: ok
 *
 * XXX FIXME: this is not MP-safe; it needs a lock to prevent multiple CPUs
 *            from executing it at the same time.
 */
int
stop_cpus(u_int map)
{
        int i;

        if (!smp_started)
                return 0;

        CTR1(KTR_SMP, "stop_cpus(%x)", map);

        /* send the stop IPI to all CPUs in map */
        ipi_selected(map, IPI_STOP);

        i = 0;
        while ((atomic_load_acq_int(&stopped_cpus) & map) != map) {
                /* spin */
                i++;
#ifdef DIAGNOSTIC
                if (i == 100000) {
                        printf("timeout stopping cpus\n");
                        break;
                }
#endif
        }

        return 1;
}

/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 *  - Signals all CPUs in map to restart.
 *  - Waits for each to restart.
 *
 * Returns:
 *   0: SMP not started yet, nothing done
 *   1: ok
 */
int
restart_cpus(u_int map)
{

        if (!smp_started)
                return 0;

        CTR1(KTR_SMP, "restart_cpus(%x)", map);

        /* signal other CPUs to restart */
        atomic_store_rel_int(&started_cpus, map);

        /* wait for each to clear its bit */
        while ((atomic_load_acq_int(&stopped_cpus) & map) != 0)
                ;       /* nothing */

        return 1;
}
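/*
 * A minimal usage sketch for the stop/restart pair above, assuming the
 * caller holds a CPU mask 'other_cpus' naming every CPU but its own (as
 * suggested by the stop_cpus() comment):
 *
 *	if (stop_cpus(other_cpus)) {
 *		// ... work that must not be disturbed by the other CPUs ...
 *		restart_cpus(stopped_cpus);
 *	}
 *
 * The stopped CPUs spin in the MD stop handler until restart_cpus() sets
 * their bits in started_cpus, at which point they clear their bits in
 * stopped_cpus and resume.
 */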
/*
 * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
 * (if specified), rendezvous, execute the action function (if specified),
 * rendezvous again, execute the teardown function (if specified), and then
 * resume.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */
void
smp_rendezvous_action(void)
{

        /* setup function */
        if (smp_rv_setup_func != NULL)
                smp_rv_setup_func(smp_rv_func_arg);
        /* spin on entry rendezvous */
        atomic_add_int(&smp_rv_waiters[0], 1);
        while (atomic_load_acq_int(&smp_rv_waiters[0]) < mp_ncpus)
                ;       /* nothing */
        /* action function */
        if (smp_rv_action_func != NULL)
                smp_rv_action_func(smp_rv_func_arg);
        /* spin on exit rendezvous */
        atomic_add_int(&smp_rv_waiters[1], 1);
        while (atomic_load_acq_int(&smp_rv_waiters[1]) < mp_ncpus)
                ;       /* nothing */
        /* teardown function */
        if (smp_rv_teardown_func != NULL)
                smp_rv_teardown_func(smp_rv_func_arg);
}

void
smp_rendezvous(void (* setup_func)(void *),
               void (* action_func)(void *),
               void (* teardown_func)(void *),
               void *arg)
{

        if (!smp_started) {
                if (setup_func != NULL)
                        setup_func(arg);
                if (action_func != NULL)
                        action_func(arg);
                if (teardown_func != NULL)
                        teardown_func(arg);
                return;
        }

        /* obtain rendezvous lock */
        mtx_lock_spin(&smp_rv_mtx);

        /* set static function pointers */
        smp_rv_setup_func = setup_func;
        smp_rv_action_func = action_func;
        smp_rv_teardown_func = teardown_func;
        smp_rv_func_arg = arg;
        smp_rv_waiters[0] = 0;
        smp_rv_waiters[1] = 0;

        /* signal other processors, which will enter the IPI with interrupts off */
        ipi_all_but_self(IPI_RENDEZVOUS);

        /* call executor function */
        smp_rendezvous_action();

        /* release lock */
        mtx_unlock_spin(&smp_rv_mtx);
}
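/*
 * A minimal usage sketch for smp_rendezvous(): run an action on every CPU,
 * with all CPUs synchronized before and after the action.  The names
 * example_counter and example_action are illustrative only:
 *
 *	static volatile int example_counter;
 *
 *	static void
 *	example_action(void *arg)
 *	{
 *		atomic_add_int(&example_counter, 1);
 *	}
 *
 *	smp_rendezvous(NULL, example_action, NULL, NULL);
 *
 * Once the call returns (with SMP up and running), every CPU has executed
 * example_action() exactly once, because no CPU may pass the exit
 * rendezvous until all of them have run the action function.
 */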