/*-
 * Copyright (c) 2001, John Baldwin <jhb@FreeBSD.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module holds the global variables and machine independent functions
 * used for the kernel SMP support.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>
#include <machine/smp.h>

#include "opt_sched.h"

#ifdef SMP
volatile cpuset_t stopped_cpus;
volatile cpuset_t started_cpus;
volatile cpuset_t suspended_cpus;
cpuset_t hlt_cpus_mask;
cpuset_t logical_cpus_mask;

void (*cpustop_restartfunc)(void);
#endif
/* This is used in modules that need to work in both SMP and UP. */
cpuset_t all_cpus;

int mp_ncpus;
/* Export this for libkvm consumers. */
int mp_maxcpus = MAXCPU;

volatile int smp_started;
u_int mp_maxid;
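
/*
 * Usage sketch (illustrative only; the example_* names are hypothetical):
 * a module that must work on both SMP and UP kernels can size per-CPU
 * data with MAXCPU and walk the processors with CPU_FOREACH(), which
 * iterates CPU IDs from 0 to mp_maxid while skipping absent CPUs, without
 * caring which kernel flavor it was loaded into.
 *
 *	static int example_counters[MAXCPU];
 *
 *	static int
 *	example_sum_counters(void)
 *	{
 *		int cpu, sum;
 *
 *		sum = 0;
 *		CPU_FOREACH(cpu)
 *			sum += example_counters[cpu];
 *		return (sum);
 *	}
 */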

static SYSCTL_NODE(_kern, OID_AUTO, smp, CTLFLAG_RD|CTLFLAG_CAPRD, NULL,
    "Kernel SMP");

SYSCTL_INT(_kern_smp, OID_AUTO, maxid, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxid, 0,
    "Max CPU ID.");

SYSCTL_INT(_kern_smp, OID_AUTO, maxcpus, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxcpus,
    0, "Max number of CPUs that the system was compiled for.");

int smp_active = 0;	/* are the APs allowed to run? */
SYSCTL_INT(_kern_smp, OID_AUTO, active, CTLFLAG_RW, &smp_active, 0,
    "Number of Auxiliary Processors (APs) that were successfully started");

int smp_disabled = 0;	/* has smp been disabled? */
SYSCTL_INT(_kern_smp, OID_AUTO, disabled, CTLFLAG_RDTUN|CTLFLAG_CAPRD,
    &smp_disabled, 0, "SMP has been disabled from the loader");
TUNABLE_INT("kern.smp.disabled", &smp_disabled);

int smp_cpus = 1;	/* how many cpus are running */
SYSCTL_INT(_kern_smp, OID_AUTO, cpus, CTLFLAG_RD|CTLFLAG_CAPRD, &smp_cpus, 0,
    "Number of CPUs online");

int smp_topology = 0;	/* Which topology we're using. */
SYSCTL_INT(_kern_smp, OID_AUTO, topology, CTLFLAG_RD, &smp_topology, 0,
    "Topology override setting; 0 is default provided by hardware.");
TUNABLE_INT("kern.smp.topology", &smp_topology);

#ifdef SMP
/* Enable forwarding of a signal to a process running on a different CPU */
static int forward_signal_enabled = 1;
SYSCTL_INT(_kern_smp, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
    &forward_signal_enabled, 0,
    "Forwarding of a signal to a process on a different CPU");

/* Variables needed for SMP rendezvous. */
static volatile int smp_rv_ncpus;
static void (*volatile smp_rv_setup_func)(void *arg);
static void (*volatile smp_rv_action_func)(void *arg);
static void (*volatile smp_rv_teardown_func)(void *arg);
static void *volatile smp_rv_func_arg;
static volatile int smp_rv_waiters[4];

/*
 * Shared mutex to restrict busywaits between smp_rendezvous() and
 * smp(_targeted)_tlb_shootdown().  A deadlock occurs if both of these
 * functions trigger at once and cause multiple CPUs to busywait with
 * interrupts disabled.
 */
struct mtx smp_ipi_mtx;

/*
 * Let the MD SMP code initialize mp_maxid very early if it can.
 */
static void
mp_setmaxid(void *dummy)
{
	cpu_mp_setmaxid();
}
SYSINIT(cpu_mp_setmaxid, SI_SUB_TUNABLES, SI_ORDER_FIRST, mp_setmaxid, NULL);

/*
 * Call the MD SMP initialization code.
 */
static void
mp_start(void *dummy)
{

	mtx_init(&smp_ipi_mtx, "smp rendezvous", NULL, MTX_SPIN);

	/* Probe for MP hardware. */
	if (smp_disabled != 0 || cpu_mp_probe() == 0) {
		mp_ncpus = 1;
		CPU_SETOF(PCPU_GET(cpuid), &all_cpus);
		return;
	}

	cpu_mp_start();
	printf("FreeBSD/SMP: Multiprocessor System Detected: %d CPUs\n",
	    mp_ncpus);
	cpu_mp_announce();
}
SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_THIRD, mp_start, NULL);

void
forward_signal(struct thread *td)
{
	int id;

	/*
	 * signotify() has already set TDF_ASTPENDING and TDF_NEEDSIGCHECK on
	 * this thread, so all we need to do is poke it if it is currently
	 * executing so that it executes ast().
	 */
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_RUNNING(td),
	    ("forward_signal: thread is not TDS_RUNNING"));

	CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);

	if (!smp_started || cold || panicstr)
		return;
	if (!forward_signal_enabled)
		return;

	/* No need to IPI ourself. */
	if (td == curthread)
		return;

	id = td->td_oncpu;
	if (id == NOCPU)
		return;
	ipi_cpu(id, IPI_AST);
}
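
/*
 * Usage sketch (illustrative only; example_notify is hypothetical):
 * forward_signal() expects the target thread to be locked and running,
 * so a caller in the style of the MI signal code looks like this.
 *
 *	static void
 *	example_notify(struct thread *td)
 *	{
 *
 *		thread_lock(td);
 *		if (TD_IS_RUNNING(td))
 *			forward_signal(td);
 *		thread_unlock(td);
 *	}
 */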

/*
 * When called the executing CPU will send an IPI to all other CPUs
 * requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 * - Signals all CPUs in map to stop.
 * - Waits for each to stop.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
static int
generic_stop_cpus(cpuset_t map, u_int type)
{
#ifdef KTR
	char cpusetbuf[CPUSETBUFSIZ];
#endif
	static volatile u_int stopping_cpu = NOCPU;
	int i;
	volatile cpuset_t *cpus;

	KASSERT(
#if defined(__amd64__) || defined(__i386__)
	    type == IPI_STOP || type == IPI_STOP_HARD || type == IPI_SUSPEND,
#else
	    type == IPI_STOP || type == IPI_STOP_HARD,
#endif
	    ("%s: invalid stop type", __func__));

	if (!smp_started)
		return (0);

	CTR2(KTR_SMP, "stop_cpus(%s) with %u type",
	    cpusetobj_strprint(cpusetbuf, &map), type);

	/*
	 * Only one CPU may initiate a stop at a time.  Spin until any
	 * other initiator has finished, then claim the slot via CAS.
	 */
	if (stopping_cpu != PCPU_GET(cpuid))
		while (atomic_cmpset_int(&stopping_cpu, NOCPU,
		    PCPU_GET(cpuid)) == 0)
			while (stopping_cpu != NOCPU)
				cpu_spinwait(); /* spin */

	/* send the stop IPI to all CPUs in map */
	ipi_selected(map, type);

#if defined(__amd64__) || defined(__i386__)
	if (type == IPI_SUSPEND)
		cpus = &suspended_cpus;
	else
#endif
		cpus = &stopped_cpus;

	i = 0;
	while (!CPU_SUBSET(cpus, &map)) {
		/* spin */
		cpu_spinwait();
		i++;
		if (i == 100000000) {
			printf("timeout stopping cpus\n");
			break;
		}
	}

	stopping_cpu = NOCPU;
	return (1);
}

int
stop_cpus(cpuset_t map)
{

	return (generic_stop_cpus(map, IPI_STOP));
}

int
stop_cpus_hard(cpuset_t map)
{

	return (generic_stop_cpus(map, IPI_STOP_HARD));
}

#if defined(__amd64__) || defined(__i386__)
int
suspend_cpus(cpuset_t map)
{

	return (generic_stop_cpus(map, IPI_SUSPEND));
}
#endif

/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 * - Signals all CPUs in map to restart.
 * - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
int
restart_cpus(cpuset_t map)
{
#ifdef KTR
	char cpusetbuf[CPUSETBUFSIZ];
#endif

	if (!smp_started)
		return (0);

	CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));

	/* signal other cpus to restart */
	CPU_COPY_STORE_REL(&map, &started_cpus);

	/* wait for each to clear its bit */
	while (CPU_OVERLAP(&stopped_cpus, &map))
		cpu_spinwait();

	return (1);
}
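
/*
 * Usage sketch (illustrative only; the example_* names are hypothetical):
 * a typical pairing of stop_cpus() and restart_cpus().  The caller parks
 * every other CPU, performs work that must not race with them, and then
 * releases them.
 *
 *	static void
 *	example_patch_quiescent(void (*example_patch)(void))
 *	{
 *		cpuset_t map;
 *
 *		map = all_cpus;
 *		CPU_CLR(PCPU_GET(cpuid), &map);
 *		if (stop_cpus(map) != 0) {
 *			example_patch();
 *			restart_cpus(stopped_cpus);
 *		}
 *	}
 */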

/*
 * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
 * (if specified), rendezvous, execute the action function (if specified),
 * rendezvous again, execute the teardown function (if specified), and then
 * resume.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */
void
smp_rendezvous_action(void)
{
	struct thread *td;
	void *local_func_arg;
	void (*local_setup_func)(void*);
	void (*local_action_func)(void*);
	void (*local_teardown_func)(void*);
#ifdef INVARIANTS
	int owepreempt;
#endif

	/* Ensure we have up-to-date values. */
	atomic_add_acq_int(&smp_rv_waiters[0], 1);
	while (smp_rv_waiters[0] < smp_rv_ncpus)
		cpu_spinwait();

	/* Fetch rendezvous parameters after acquire barrier. */
	local_func_arg = smp_rv_func_arg;
	local_setup_func = smp_rv_setup_func;
	local_action_func = smp_rv_action_func;
	local_teardown_func = smp_rv_teardown_func;

	/*
	 * Use a nested critical section to prevent any preemptions
	 * from occurring during a rendezvous action routine.
	 * Specifically, if a rendezvous handler is invoked via an IPI
	 * and the interrupted thread was in the critical_exit()
	 * function after setting td_critnest to 0 but before
	 * performing a deferred preemption, this routine can be
	 * invoked with td_critnest set to 0 and td_owepreempt true.
	 * In that case, a critical_exit() during the rendezvous
	 * action would trigger a preemption which is not permitted in
	 * a rendezvous action.  To fix this, wrap all of the
	 * rendezvous action handlers in a critical section.  We
	 * cannot use a regular critical section however as having
	 * critical_exit() preempt from this routine would also be
	 * problematic (the preemption must not occur before the IPI
	 * has been acknowledged via an EOI).  Instead, we
	 * intentionally ignore td_owepreempt when leaving the
	 * critical section.  This should be harmless because we do
	 * not permit rendezvous action routines to schedule threads,
	 * and thus td_owepreempt should never transition from 0 to 1
	 * during this routine.
	 */
	td = curthread;
	td->td_critnest++;
#ifdef INVARIANTS
	owepreempt = td->td_owepreempt;
#endif

	/*
	 * If requested, run a setup function before the main action
	 * function.  Ensure all CPUs have completed the setup
	 * function before moving on to the action function.  Use the
	 * local copies fetched above; once this CPU bumps the final
	 * waiter count below, the global smp_rv_* variables may be
	 * reused for a new rendezvous.
	 */
	if (local_setup_func != smp_no_rendevous_barrier) {
		if (local_setup_func != NULL)
			local_setup_func(local_func_arg);
		atomic_add_int(&smp_rv_waiters[1], 1);
		while (smp_rv_waiters[1] < smp_rv_ncpus)
			cpu_spinwait();
	}

	if (local_action_func != NULL)
		local_action_func(local_func_arg);

	if (local_teardown_func != smp_no_rendevous_barrier) {
		/*
		 * Signal that the main action has been completed.  If a
		 * full exit rendezvous is requested, then all CPUs will
		 * wait here until all CPUs have finished the main action.
		 */
		atomic_add_int(&smp_rv_waiters[2], 1);
		while (smp_rv_waiters[2] < smp_rv_ncpus)
			cpu_spinwait();

		if (local_teardown_func != NULL)
			local_teardown_func(local_func_arg);
	}

	/*
	 * Signal that the rendezvous is fully completed by this CPU.
	 * This means that no member of smp_rv_* pseudo-structure will be
	 * accessed by this target CPU after this point; in particular,
	 * memory pointed by smp_rv_func_arg.
	 */
	atomic_add_int(&smp_rv_waiters[3], 1);

	td->td_critnest--;
	KASSERT(owepreempt == td->td_owepreempt,
	    ("rendezvous action changed td_owepreempt"));
}
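
/*
 * Usage sketch (illustrative only; the example_* names are hypothetical):
 * a minimal action handler of the kind dispatched by the function above.
 * Handlers run in IPI context with interrupts disabled, so they must not
 * sleep or take non-spin locks.
 *
 *	static int example_counts[MAXCPU];
 *
 *	static void
 *	example_action(void *arg __unused)
 *	{
 *
 *		example_counts[curcpu]++;
 *	}
 */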

void
smp_rendezvous_cpus(cpuset_t map,
	void (* setup_func)(void *),
	void (* action_func)(void *),
	void (* teardown_func)(void *),
	void *arg)
{
	int curcpumap, i, ncpus = 0;

	/* See the comments in the !SMP case. */
	if (!smp_started) {
		spinlock_enter();
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		spinlock_exit();
		return;
	}

	CPU_FOREACH(i) {
		if (CPU_ISSET(i, &map))
			ncpus++;
	}
	if (ncpus == 0)
		panic("ncpus is 0 with non-zero map");

	mtx_lock_spin(&smp_ipi_mtx);

	/* Pass rendezvous parameters via global variables. */
	smp_rv_ncpus = ncpus;
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[1] = 0;
	smp_rv_waiters[2] = 0;
	smp_rv_waiters[3] = 0;
	atomic_store_rel_int(&smp_rv_waiters[0], 0);

	/*
	 * Signal other processors, which will enter the IPI with
	 * interrupts off.
	 */
	curcpumap = CPU_ISSET(curcpu, &map);
	CPU_CLR(curcpu, &map);
	ipi_selected(map, IPI_RENDEZVOUS);

	/* Check if the current CPU is in the map */
	if (curcpumap != 0)
		smp_rendezvous_action();

	/*
	 * Ensure that the master CPU waits for all the other
	 * CPUs to finish the rendezvous, so that smp_rv_*
	 * pseudo-structure and the arg are guaranteed to not
	 * be in use.
	 */
	while (atomic_load_acq_int(&smp_rv_waiters[3]) < ncpus)
		cpu_spinwait();

	mtx_unlock_spin(&smp_ipi_mtx);
}

void
smp_rendezvous(void (* setup_func)(void *),
	void (* action_func)(void *),
	void (* teardown_func)(void *),
	void *arg)
{

	smp_rendezvous_cpus(all_cpus, setup_func, action_func,
	    teardown_func, arg);
}
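
/*
 * Continuing the usage sketch above: invoking the hypothetical
 * example_action() on every CPU.  Passing smp_no_rendevous_barrier as
 * the setup or teardown function skips the corresponding barrier
 * entirely, so here the CPUs synchronize only on entry to the action.
 *
 *	static void
 *	example_run_everywhere(void)
 *	{
 *
 *		smp_rendezvous(smp_no_rendevous_barrier, example_action,
 *		    smp_no_rendevous_barrier, NULL);
 *	}
 */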

static struct cpu_group group[MAXCPU];

struct cpu_group *
smp_topo(void)
{
	char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
	struct cpu_group *top;

	/*
	 * Check for a fake topology request for debugging purposes.
	 */
	switch (smp_topology) {
	case 1:
		/* Dual core with no sharing. */
		top = smp_topo_1level(CG_SHARE_NONE, 2, 0);
		break;
	case 2:
		/* No topology, all cpus are equal. */
		top = smp_topo_none();
		break;
	case 3:
		/* Dual core with shared L2. */
		top = smp_topo_1level(CG_SHARE_L2, 2, 0);
		break;
	case 4:
		/* Quad core, shared L3 among each package, private L2. */
		top = smp_topo_1level(CG_SHARE_L3, 4, 0);
		break;
	case 5:
		/* Quad core, two dual-core parts on each package share L2. */
		top = smp_topo_2level(CG_SHARE_NONE, 2, CG_SHARE_L2, 2, 0);
		break;
	case 6:
		/* Single-core 2xHTT. */
		top = smp_topo_1level(CG_SHARE_L1, 2, CG_FLAG_HTT);
		break;
	case 7:
		/* Quad core with a shared L3, 8 threads sharing L2. */
		top = smp_topo_2level(CG_SHARE_L3, 4, CG_SHARE_L2, 8,
		    CG_FLAG_SMT);
		break;
	default:
		/* Default, ask the system what it wants. */
		top = cpu_topo();
		break;
	}
	/*
	 * Verify the returned topology.
	 */
	if (top->cg_count != mp_ncpus)
		panic("Built bad topology at %p.  CPU count %d != %d",
		    top, top->cg_count, mp_ncpus);
	if (CPU_CMP(&top->cg_mask, &all_cpus))
		panic("Built bad topology at %p.  CPU mask (%s) != (%s)",
		    top, cpusetobj_strprint(cpusetbuf, &top->cg_mask),
		    cpusetobj_strprint(cpusetbuf2, &all_cpus));
	return (top);
}

struct cpu_group *
smp_topo_none(void)
{
	struct cpu_group *top;

	top = &group[0];
	top->cg_parent = NULL;
	top->cg_child = NULL;
	top->cg_mask = all_cpus;
	top->cg_count = mp_ncpus;
	top->cg_children = 0;
	top->cg_level = CG_SHARE_NONE;
	top->cg_flags = 0;

	return (top);
}

static int
smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share,
    int count, int flags, int start)
{
	char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
	cpuset_t mask;
	int i;

	CPU_ZERO(&mask);
	for (i = 0; i < count; i++, start++)
		CPU_SET(start, &mask);
	child->cg_parent = parent;
	child->cg_child = NULL;
	child->cg_children = 0;
	child->cg_level = share;
	child->cg_count = count;
	child->cg_flags = flags;
	child->cg_mask = mask;
	parent->cg_children++;
	for (; parent != NULL; parent = parent->cg_parent) {
		if (CPU_OVERLAP(&parent->cg_mask, &child->cg_mask))
			panic("Duplicate children in %p.  mask (%s) child (%s)",
			    parent,
			    cpusetobj_strprint(cpusetbuf, &parent->cg_mask),
			    cpusetobj_strprint(cpusetbuf2, &child->cg_mask));
		CPU_OR(&parent->cg_mask, &child->cg_mask);
		parent->cg_count += child->cg_count;
	}

	return (start);
}

struct cpu_group *
smp_topo_1level(int share, int count, int flags)
{
	struct cpu_group *child;
	struct cpu_group *top;
	int packages;
	int cpu;
	int i;

	cpu = 0;
	top = &group[0];
	packages = mp_ncpus / count;
	top->cg_child = child = &group[1];
	top->cg_level = CG_SHARE_NONE;
	for (i = 0; i < packages; i++, child++)
		cpu = smp_topo_addleaf(top, child, share, count, flags, cpu);
	return (top);
}

struct cpu_group *
smp_topo_2level(int l2share, int l2count, int l1share, int l1count,
    int l1flags)
{
	struct cpu_group *top;
	struct cpu_group *l1g;
	struct cpu_group *l2g;
	int cpu;
	int i;
	int j;

	cpu = 0;
	top = &group[0];
	l2g = &group[1];
	top->cg_child = l2g;
	top->cg_level = CG_SHARE_NONE;
	top->cg_children = mp_ncpus / (l2count * l1count);
	l1g = l2g + top->cg_children;
	for (i = 0; i < top->cg_children; i++, l2g++) {
		l2g->cg_parent = top;
		l2g->cg_child = l1g;
		l2g->cg_level = l2share;
		for (j = 0; j < l2count; j++, l1g++)
			cpu = smp_topo_addleaf(l2g, l1g, l1share, l1count,
			    l1flags, cpu);
	}
	return (top);
}

struct cpu_group *
smp_topo_find(struct cpu_group *top, int cpu)
{
	struct cpu_group *cg;
	cpuset_t mask;
	int children;
	int i;

	CPU_SETOF(cpu, &mask);
	cg = top;
	for (;;) {
		if (!CPU_OVERLAP(&cg->cg_mask, &mask))
			return (NULL);
		if (cg->cg_children == 0)
			return (cg);
		children = cg->cg_children;
		for (i = 0, cg = cg->cg_child; i < children; cg++, i++)
			if (CPU_OVERLAP(&cg->cg_mask, &mask))
				break;
	}
	return (NULL);
}
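
/*
 * Usage sketch (illustrative only; example_print_shared is hypothetical):
 * a consumer such as a scheduler can find the leaf group for a CPU and
 * walk toward the root to see successively larger sets of CPUs that
 * share a cache level.
 *
 *	static void
 *	example_print_shared(struct cpu_group *top, int cpu)
 *	{
 *		struct cpu_group *cg;
 *
 *		for (cg = smp_topo_find(top, cpu); cg != NULL;
 *		    cg = cg->cg_parent)
 *			printf("level %d: %d cpus\n", cg->cg_level,
 *			    cg->cg_count);
 *	}
 */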
#else /* !SMP */

void
smp_rendezvous_cpus(cpuset_t map,
	void (*setup_func)(void *),
	void (*action_func)(void *),
	void (*teardown_func)(void *),
	void *arg)
{
	/*
	 * In the !SMP case we just need to ensure the same initial conditions
	 * as the SMP case.
	 */
	spinlock_enter();
	if (setup_func != NULL)
		setup_func(arg);
	if (action_func != NULL)
		action_func(arg);
	if (teardown_func != NULL)
		teardown_func(arg);
	spinlock_exit();
}

void
smp_rendezvous(void (*setup_func)(void *),
	void (*action_func)(void *),
	void (*teardown_func)(void *),
	void *arg)
{

	/* See the comments in the smp_rendezvous_cpus() case. */
	spinlock_enter();
	if (setup_func != NULL)
		setup_func(arg);
	if (action_func != NULL)
		action_func(arg);
	if (teardown_func != NULL)
		teardown_func(arg);
	spinlock_exit();
}

/*
 * Provide dummy SMP support for UP kernels.  Modules that need to use SMP
 * APIs will still work using this dummy support.
 */
static void
mp_setvariables_for_up(void *dummy)
{
	mp_ncpus = 1;
	mp_maxid = PCPU_GET(cpuid);
	CPU_SETOF(mp_maxid, &all_cpus);
	KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero"));
}
SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,
    mp_setvariables_for_up, NULL);
#endif /* SMP */

void
smp_no_rendevous_barrier(void *dummy)
{
#ifdef SMP
	KASSERT((!smp_started), ("smp_no_rendevous called and smp is started"));
#endif
}

/*
 * Wait for the specified idle threads to switch once.  This ensures that
 * even preempted threads have cycled through the switch function once,
 * exiting their codepaths.  This allows us to change global pointers
 * with no other synchronization.
 */
int
quiesce_cpus(cpuset_t map, const char *wmesg, int prio)
{
	struct pcpu *pcpu;
	u_int gen[MAXCPU];
	int error;
	int cpu;

	error = 0;
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
			continue;
		pcpu = pcpu_find(cpu);
		gen[cpu] = pcpu->pc_idlethread->td_generation;
	}
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
			continue;
		pcpu = pcpu_find(cpu);
		thread_lock(curthread);
		sched_bind(curthread, cpu);
		thread_unlock(curthread);
		while (gen[cpu] == pcpu->pc_idlethread->td_generation) {
			/*
			 * Nothing wakes this channel, so tsleep() always
			 * returns EWOULDBLOCK when the one-tick timeout
			 * fires; that is the expected case.  Any other
			 * error (e.g. a signal with PCATCH) aborts.
			 */
			error = tsleep(quiesce_cpus, prio, wmesg, 1);
			if (error != EWOULDBLOCK)
				goto out;
			error = 0;
		}
	}
out:
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	return (error);
}

int
quiesce_all_cpus(const char *wmesg, int prio)
{

	return (quiesce_cpus(all_cpus, wmesg, prio));
}
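
/*
 * Usage sketch (illustrative only; the example_* names are hypothetical,
 * and the pattern assumes readers never sleep while using the pointer):
 * one use of quiesce_all_cpus() is to retire the old copy of a global
 * structure after publishing its replacement, RCU-style.
 *
 *	void
 *	example_replace_table(struct example_table *new)
 *	{
 *		struct example_table *old;
 *
 *		old = example_table_ptr;
 *		example_table_ptr = new;
 *		quiesce_all_cpus("extbl", 0);
 *		free(old, M_TEMP);
 *	}
 */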