1 /* 2 * Copyright (c) 2001 3 * John Baldwin <jhb@FreeBSD.org>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 4. Neither the name of the author nor the names of any co-contributors 14 * may be used to endorse or promote products derived from this software 15 * without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY JOHN BALDWIN AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL JOHN BALDWIN OR THE VOICES IN HIS HEAD 21 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 27 * THE POSSIBILITY OF SUCH DAMAGE. 28 * 29 * $FreeBSD$ 30 */ 31 32 /* 33 * This module holds the global variables and machine independent functions 34 * used for the kernel SMP support. 
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

/*
 * Per-CPU bitmasks: CPUs currently halted by a stop IPI, and CPUs that
 * have been told to restart.  Polled below with atomic_load_acq_int();
 * presumably set/cleared by the MD IPI_STOP handler — not visible here.
 */
volatile u_int stopped_cpus;
volatile u_int started_cpus;

/* Optional hook a stopped CPU runs on restart (may be NULL); MD code uses it. */
void (*cpustop_restartfunc) __P((void));
/* Total CPU count; presumably filled in by cpu_mp_start() — used as the
 * barrier target in smp_rendezvous_action(). */
int mp_ncpus;

/* Nonzero once SMP startup is complete (set outside this file); gates all
 * of the IPI-based operations below. */
volatile int smp_started;
/* Bitmask of every CPU present in the system. */
u_int all_cpus;

SYSCTL_NODE(_kern, OID_AUTO, smp, CTLFLAG_RD, NULL, "Kernel SMP");

int smp_active = 0;	/* are the APs allowed to run? */
SYSCTL_INT(_kern_smp, OID_AUTO, active, CTLFLAG_RW, &smp_active, 0, "");

int smp_cpus = 1;	/* how many cpu's running */
SYSCTL_INT(_kern_smp, OID_AUTO, cpus, CTLFLAG_RD, &smp_cpus, 0, "");

/* Enable forwarding of a signal to a process running on a different CPU */
static int forward_signal_enabled = 1;
SYSCTL_INT(_kern_smp, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
    &forward_signal_enabled, 0, "");

/* Enable forwarding of roundrobin to all other cpus */
static int forward_roundrobin_enabled = 1;
SYSCTL_INT(_kern_smp, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
    &forward_roundrobin_enabled, 0, "");

/*
 * Variables needed for SMP rendezvous.  The function pointers and argument
 * are published under smp_rv_mtx by smp_rendezvous(); the two waiter counts
 * are the entry and exit barriers advanced atomically by every CPU.
 */
static void (*smp_rv_setup_func)(void *arg);
static void (*smp_rv_action_func)(void *arg);
static void (*smp_rv_teardown_func)(void *arg);
static void *smp_rv_func_arg;
static volatile int smp_rv_waiters[2];
static struct mtx smp_rv_mtx;

/*
 * Initialize MI SMP variables and call the MD SMP initialization code.
 * Bails out early (leaving smp_rv_mtx uninitialized) when no MP hardware
 * is found, so none of the IPI paths below may run in that case.
 */
static void
mp_start(void *dummy)
{

	/* Probe for MP hardware. */
	if (cpu_mp_probe() == 0)
		return;

	/* Spin mutex: the rendezvous IPI handler runs with interrupts off. */
	mtx_init(&smp_rv_mtx, "smp rendezvous", MTX_SPIN);
	cpu_mp_start();
	printf("FreeBSD/SMP: Multiprocessor System Detected: %d CPUs\n",
	    mp_ncpus);
	cpu_mp_announce();
}
SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_SECOND, mp_start, NULL)

/*
 * Poke the CPU a runnable thread is executing on with an AST IPI so that
 * it notices the signal pending on its process.  Caller must hold
 * sched_lock and the thread must be in SRUN.
 */
void
forward_signal(struct thread *td)
{
	int id;

	/*
	 * signotify() has already set PS_ASTPENDING on this process so all
	 * we need to do is poke it if it is currently executing so that it
	 * executes ast().
	 */
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(td->td_proc->p_stat == SRUN, ("forward_signal: process is not SRUN"));

	CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);

	/* Nothing to forward before SMP is up, or during boot/panic. */
	if (!smp_started || cold || panicstr)
		return;
	if (!forward_signal_enabled)
		return;

	/* No need to IPI ourself. */
	if (td == curthread)
		return;

	/* NOCPU means the thread is not currently on any CPU. */
	id = td->td_kse->ke_oncpu;
	if (id == NOCPU)
		return;
	ipi_selected(1 << id, IPI_AST);
}

/*
 * Force a reschedule on every other busy CPU: mark each remote CPU's
 * current thread as needing a reschedule and send the set an AST IPI.
 * Caller must hold sched_lock.
 */
void
forward_roundrobin(void)
{
	struct pcpu *pc;
	struct thread *td;
	u_int id, map;

	mtx_assert(&sched_lock, MA_OWNED);

	CTR0(KTR_SMP, "forward_roundrobin()");

	if (!smp_started || cold || panicstr)
		return;
	if (!forward_roundrobin_enabled)
		return;
	map = 0;
	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		td = pc->pc_curthread;
		id = pc->pc_cpumask;
		/* Skip ourself, stopped CPUs, and CPUs running idle. */
		if (id != PCPU_GET(cpumask) && (id & stopped_cpus) == 0 &&
		    td != pc->pc_idlethread) {
			td->td_kse->ke_flags |= KEF_NEEDRESCHED;
			map |= id;
		}
	}
	/* One IPI call for the whole accumulated mask. */
	ipi_selected(map, IPI_AST);
}

/*
 * When called the executing CPU will send an IPI to all other CPUs
 * requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 * - Signals all CPUs in map to stop.
 * - Waits for each to stop.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 *
 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
 * from executing at same time.
 */
int
stop_cpus(u_int map)
{
	int i;

	if (!smp_started)
		return 0;

	CTR1(KTR_SMP, "stop_cpus(%x)", map);

	/* send the stop IPI to all CPUs in map */
	ipi_selected(map, IPI_STOP);

	/*
	 * Spin until every CPU in map has set its bit in stopped_cpus.
	 * With DIAGNOSTIC, give up after an arbitrary iteration count
	 * instead of hanging the caller forever.
	 */
	i = 0;
	while ((atomic_load_acq_int(&stopped_cpus) & map) != map) {
		/* spin */
		i++;
#ifdef DIAGNOSTIC
		if (i == 100000) {
			printf("timeout stopping cpus\n");
			break;
		}
#endif
	}

	return 1;
}


/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 * - Signals all CPUs in map to restart.
 * - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
int
restart_cpus(u_int map)
{

	if (!smp_started)
		return 0;

	CTR1(KTR_SMP, "restart_cpus(%x)", map);

	/* signal other cpus to restart */
	atomic_store_rel_int(&started_cpus, map);

	/*
	 * wait for each to clear its bit; presumably the MD stop handler
	 * clears stopped_cpus when it resumes — not visible in this file.
	 */
	while ((atomic_load_acq_int(&stopped_cpus) & map) != 0)
		;	/* nothing */

	return 1;
}

/*
 * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
 * (if specified), rendezvous, execute the action function (if specified),
 * rendezvous again, execute the teardown function (if specified), and then
 * resume.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */
void
smp_rendezvous_action(void)
{

	/* setup function */
	if (smp_rv_setup_func != NULL)
		smp_rv_setup_func(smp_rv_func_arg);
	/* spin on entry rendezvous: wait until all mp_ncpus CPUs arrive */
	atomic_add_int(&smp_rv_waiters[0], 1);
	while (atomic_load_acq_int(&smp_rv_waiters[0]) < mp_ncpus)
		;	/* nothing */
	/* action function */
	if (smp_rv_action_func != NULL)
		smp_rv_action_func(smp_rv_func_arg);
	/* spin on exit rendezvous */
	atomic_add_int(&smp_rv_waiters[1], 1);
	while (atomic_load_acq_int(&smp_rv_waiters[1]) < mp_ncpus)
		;	/* nothing */
	/*
	 * teardown function
	 *
	 * NOTE(review): once every CPU has passed the exit barrier the
	 * initiator may release smp_rv_mtx in smp_rendezvous(), so a new
	 * rendezvous could overwrite smp_rv_teardown_func/smp_rv_func_arg
	 * before a slow CPU reads them here.  Looks racy — confirm whether
	 * a third barrier after teardown is needed.
	 */
	if (smp_rv_teardown_func != NULL)
		smp_rv_teardown_func(smp_rv_func_arg);
}

/*
 * Initiate a rendezvous: publish the three callbacks and their argument,
 * IPI every other CPU into smp_rendezvous_action(), and participate
 * ourselves.  Serialized against concurrent initiators by smp_rv_mtx.
 * Before SMP is up, simply run the callbacks locally in order.
 */
void
smp_rendezvous(void (* setup_func)(void *),
	       void (* action_func)(void *),
	       void (* teardown_func)(void *),
	       void *arg)
{

	if (!smp_started) {
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		return;
	}

	/* obtain rendezvous lock */
	mtx_lock_spin(&smp_rv_mtx);

	/* set static function pointers */
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[0] = 0;
	smp_rv_waiters[1] = 0;

	/* signal other processors, which will enter the IPI with interrupts off */
	ipi_all_but_self(IPI_RENDEZVOUS);

	/* call executor function */
	smp_rendezvous_action();

	/* release lock */
	mtx_unlock_spin(&smp_rv_mtx);
}