/*-
 * Copyright (c) 2001, John Baldwin <jhb@FreeBSD.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module holds the global variables and machine independent functions
 * used for the kernel SMP support.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>
#include <machine/smp.h>

#include "opt_sched.h"

#ifdef SMP
volatile cpuset_t stopped_cpus;
volatile cpuset_t started_cpus;
volatile cpuset_t suspended_cpus;
cpuset_t hlt_cpus_mask;
cpuset_t logical_cpus_mask;

void (*cpustop_restartfunc)(void);
#endif

static int sysctl_kern_smp_active(SYSCTL_HANDLER_ARGS);

/* This is used in modules that need to work in both SMP and UP. */
cpuset_t all_cpus;

int mp_ncpus;
/* export this for libkvm consumers. */
int mp_maxcpus = MAXCPU;

volatile int smp_started;
u_int mp_maxid;

static SYSCTL_NODE(_kern, OID_AUTO, smp, CTLFLAG_RD|CTLFLAG_CAPRD, NULL,
    "Kernel SMP");

SYSCTL_INT(_kern_smp, OID_AUTO, maxid, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxid, 0,
    "Max CPU ID.");

SYSCTL_INT(_kern_smp, OID_AUTO, maxcpus, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxcpus,
    0, "Max number of CPUs that the system was compiled for.");

SYSCTL_PROC(_kern_smp, OID_AUTO, active, CTLFLAG_RD | CTLTYPE_INT, NULL, 0,
    sysctl_kern_smp_active, "I", "Indicates system is running in SMP mode");

int smp_disabled = 0;	/* has smp been disabled? */
SYSCTL_INT(_kern_smp, OID_AUTO, disabled, CTLFLAG_RDTUN|CTLFLAG_CAPRD,
    &smp_disabled, 0, "SMP has been disabled from the loader");

int smp_cpus = 1;	/* how many CPUs are running */
SYSCTL_INT(_kern_smp, OID_AUTO, cpus, CTLFLAG_RD|CTLFLAG_CAPRD, &smp_cpus, 0,
    "Number of CPUs online");

int smp_topology = 0;	/* Which topology we're using. */
SYSCTL_INT(_kern_smp, OID_AUTO, topology, CTLFLAG_RDTUN, &smp_topology, 0,
    "Topology override setting; 0 is default provided by hardware.");

#ifdef SMP
/* Enable forwarding of a signal to a process running on a different CPU */
static int forward_signal_enabled = 1;
SYSCTL_INT(_kern_smp, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
    &forward_signal_enabled, 0,
    "Forwarding of a signal to a process on a different CPU");

/* Variables needed for SMP rendezvous. */
static volatile int smp_rv_ncpus;
static void (*volatile smp_rv_setup_func)(void *arg);
static void (*volatile smp_rv_action_func)(void *arg);
static void (*volatile smp_rv_teardown_func)(void *arg);
static void *volatile smp_rv_func_arg;
static volatile int smp_rv_waiters[4];
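
/*
 * Each entry in smp_rv_waiters[] is a barrier counter that every
 * participating CPU increments as it reaches the matching stage of
 * smp_rendezvous_action(): [0] arrival at the rendezvous, [1] setup
 * function complete, [2] action function complete, and [3] fully
 * done, after which the smp_rv_* variables may be safely reused.
 */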

/*
 * Shared mutex to restrict busywaits between smp_rendezvous() and
 * smp(_targeted)_tlb_shootdown(). A deadlock occurs if both of these
 * functions trigger at once and cause multiple CPUs to busywait with
 * interrupts disabled.
 */
struct mtx smp_ipi_mtx;

/*
 * Let the MD SMP code initialize mp_maxid very early if it can.
 */
static void
mp_setmaxid(void *dummy)
{

	cpu_mp_setmaxid();

	KASSERT(mp_ncpus >= 1, ("%s: CPU count < 1", __func__));
	KASSERT(mp_ncpus > 1 || mp_maxid == 0,
	    ("%s: one CPU but mp_maxid is not zero", __func__));
	KASSERT(mp_maxid >= mp_ncpus - 1,
	    ("%s: counters out of sync: max %d, count %d", __func__,
	    mp_maxid, mp_ncpus));
}
SYSINIT(cpu_mp_setmaxid, SI_SUB_TUNABLES, SI_ORDER_FIRST, mp_setmaxid, NULL);

/*
 * Call the MD SMP initialization code.
 */
static void
mp_start(void *dummy)
{

	mtx_init(&smp_ipi_mtx, "smp rendezvous", NULL, MTX_SPIN);

	/* Probe for MP hardware. */
	if (smp_disabled != 0 || cpu_mp_probe() == 0) {
		mp_ncpus = 1;
		CPU_SETOF(PCPU_GET(cpuid), &all_cpus);
		return;
	}

	cpu_mp_start();
	printf("FreeBSD/SMP: Multiprocessor System Detected: %d CPUs\n",
	    mp_ncpus);
	cpu_mp_announce();
}
SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_THIRD, mp_start, NULL);

void
forward_signal(struct thread *td)
{
	int id;

	/*
	 * signotify() has already set TDF_ASTPENDING and TDF_NEEDSIGCHECK on
	 * this thread, so all we need to do is poke it if it is currently
	 * executing so that it executes ast().
	 */
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_RUNNING(td),
	    ("forward_signal: thread is not TDS_RUNNING"));

	CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);

	if (!smp_started || cold || panicstr)
		return;
	if (!forward_signal_enabled)
		return;

	/* No need to IPI ourself. */
	if (td == curthread)
		return;

	id = td->td_oncpu;
	if (id == NOCPU)
		return;
	ipi_cpu(id, IPI_AST);
}

/*
 * When called, the executing CPU sends an IPI to every other CPU in the
 * map, requesting that it halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 * - Signals all CPUs in map to stop.
 * - Waits for each to stop.
 *
 * Returns:
 *	-1:	error
 *	 0:	NA (SMP not yet started)
 *	 1:	ok
 */
static int
generic_stop_cpus(cpuset_t map, u_int type)
{
#ifdef KTR
	char cpusetbuf[CPUSETBUFSIZ];
#endif
	static volatile u_int stopping_cpu = NOCPU;
	int i;
	volatile cpuset_t *cpus;

	KASSERT(
#if defined(__amd64__) || defined(__i386__)
	    type == IPI_STOP || type == IPI_STOP_HARD || type == IPI_SUSPEND,
#else
	    type == IPI_STOP || type == IPI_STOP_HARD,
#endif
	    ("%s: invalid stop type", __func__));

	if (!smp_started)
		return (0);

	CTR2(KTR_SMP, "stop_cpus(%s) with %u type",
	    cpusetobj_strprint(cpusetbuf, &map), type);

#if defined(__amd64__) || defined(__i386__)
	/*
	 * When suspending, ensure there are no IPIs in progress.
	 * IPIs that have been issued, but not yet delivered (e.g.
	 * not pending on a vCPU when running under virtualization)
	 * will be lost, violating FreeBSD's assumption of reliable
	 * IPI delivery.
	 */
	if (type == IPI_SUSPEND)
		mtx_lock_spin(&smp_ipi_mtx);
#endif

	/* Elect one stopping CPU at a time; later callers spin for a turn. */
	if (stopping_cpu != PCPU_GET(cpuid))
		while (atomic_cmpset_int(&stopping_cpu, NOCPU,
		    PCPU_GET(cpuid)) == 0)
			while (stopping_cpu != NOCPU)
				cpu_spinwait(); /* spin */

	/* send the stop IPI to all CPUs in map */
	ipi_selected(map, type);

#if defined(__amd64__) || defined(__i386__)
	if (type == IPI_SUSPEND)
		cpus = &suspended_cpus;
	else
#endif
		cpus = &stopped_cpus;

	i = 0;
	while (!CPU_SUBSET(cpus, &map)) {
		/* spin */
		cpu_spinwait();
		i++;
		if (i == 100000000) {
			printf("timeout stopping cpus\n");
			break;
		}
	}

#if defined(__amd64__) || defined(__i386__)
	if (type == IPI_SUSPEND)
		mtx_unlock_spin(&smp_ipi_mtx);
#endif

	stopping_cpu = NOCPU;
	return (1);
}

int
stop_cpus(cpuset_t map)
{

	return (generic_stop_cpus(map, IPI_STOP));
}

int
stop_cpus_hard(cpuset_t map)
{

	return (generic_stop_cpus(map, IPI_STOP_HARD));
}

#if defined(__amd64__) || defined(__i386__)
int
suspend_cpus(cpuset_t map)
{

	return (generic_stop_cpus(map, IPI_SUSPEND));
}
#endif
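
/*
 * Usage sketch (illustrative, not taken from a real caller): code that
 * must freeze every other CPU, inspect system state, and then resume
 * would pair the calls like this:
 *
 *	cpuset_t map;
 *
 *	map = all_cpus;
 *	CPU_CLR(PCPU_GET(cpuid), &map);
 *	if (stop_cpus(map) == 1) {
 *		... examine state owned by the stopped CPUs ...
 *		restart_cpus(stopped_cpus);
 *	}
 */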

/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 * - Signals all CPUs in map to restart.
 * - Waits for each to restart.
 *
 * Returns:
 *	-1:	error
 *	 0:	NA (SMP not yet started)
 *	 1:	ok
 */
static int
generic_restart_cpus(cpuset_t map, u_int type)
{
#ifdef KTR
	char cpusetbuf[CPUSETBUFSIZ];
#endif
	volatile cpuset_t *cpus;

	KASSERT(
#if defined(__amd64__) || defined(__i386__)
	    type == IPI_STOP || type == IPI_STOP_HARD || type == IPI_SUSPEND,
#else
	    type == IPI_STOP || type == IPI_STOP_HARD,
#endif
	    ("%s: invalid stop type", __func__));

	if (!smp_started)
		return (0);

	CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));

#if defined(__amd64__) || defined(__i386__)
	if (type == IPI_SUSPEND)
		cpus = &suspended_cpus;
	else
#endif
		cpus = &stopped_cpus;

	/* signal other cpus to restart */
	CPU_COPY_STORE_REL(&map, &started_cpus);

	/* wait for each to clear its bit */
	while (CPU_OVERLAP(cpus, &map))
		cpu_spinwait();

	return (1);
}

int
restart_cpus(cpuset_t map)
{

	return (generic_restart_cpus(map, IPI_STOP));
}

#if defined(__amd64__) || defined(__i386__)
int
resume_cpus(cpuset_t map)
{

	return (generic_restart_cpus(map, IPI_SUSPEND));
}
#endif

/*
 * All-CPU rendezvous. CPUs are signalled, all execute the setup function
 * (if specified), rendezvous, execute the action function (if specified),
 * rendezvous again, execute the teardown function (if specified), and then
 * resume.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */
void
smp_rendezvous_action(void)
{
	struct thread *td;
	void *local_func_arg;
	void (*local_setup_func)(void*);
	void (*local_action_func)(void*);
	void (*local_teardown_func)(void*);
#ifdef INVARIANTS
	int owepreempt;
#endif

	/* Ensure we have up-to-date values. */
	atomic_add_acq_int(&smp_rv_waiters[0], 1);
	while (smp_rv_waiters[0] < smp_rv_ncpus)
		cpu_spinwait();

	/* Fetch rendezvous parameters after acquire barrier. */
	local_func_arg = smp_rv_func_arg;
	local_setup_func = smp_rv_setup_func;
	local_action_func = smp_rv_action_func;
	local_teardown_func = smp_rv_teardown_func;

	/*
	 * Use a nested critical section to prevent any preemptions
	 * from occurring during a rendezvous action routine.
	 * Specifically, if a rendezvous handler is invoked via an IPI
	 * and the interrupted thread was in the critical_exit()
	 * function after setting td_critnest to 0 but before
	 * performing a deferred preemption, this routine can be
	 * invoked with td_critnest set to 0 and td_owepreempt true.
	 * In that case, a critical_exit() during the rendezvous
	 * action would trigger a preemption which is not permitted in
	 * a rendezvous action. To fix this, wrap all of the
	 * rendezvous action handlers in a critical section. We
	 * cannot use a regular critical section however as having
	 * critical_exit() preempt from this routine would also be
	 * problematic (the preemption must not occur before the IPI
	 * has been acknowledged via an EOI). Instead, we
	 * intentionally ignore td_owepreempt when leaving the
	 * critical section. This should be harmless because we do
	 * not permit rendezvous action routines to schedule threads,
	 * and thus td_owepreempt should never transition from 0 to 1
	 * during this routine.
	 */
	td = curthread;
	td->td_critnest++;
#ifdef INVARIANTS
	owepreempt = td->td_owepreempt;
#endif

	/*
	 * If requested, run a setup function before the main action
	 * function. Ensure all CPUs have completed the setup
	 * function before moving on to the action function.
	 */
	if (local_setup_func != smp_no_rendevous_barrier) {
		if (local_setup_func != NULL)
			local_setup_func(local_func_arg);
		atomic_add_int(&smp_rv_waiters[1], 1);
		while (smp_rv_waiters[1] < smp_rv_ncpus)
			cpu_spinwait();
	}

	if (local_action_func != NULL)
		local_action_func(local_func_arg);

	if (local_teardown_func != smp_no_rendevous_barrier) {
		/*
		 * Signal that the main action has been completed. If a
		 * full exit rendezvous is requested, then all CPUs will
		 * wait here until all CPUs have finished the main action.
		 */
		atomic_add_int(&smp_rv_waiters[2], 1);
		while (smp_rv_waiters[2] < smp_rv_ncpus)
			cpu_spinwait();

		if (local_teardown_func != NULL)
			local_teardown_func(local_func_arg);
	}

	/*
	 * Signal that the rendezvous is fully completed by this CPU.
	 * This means that no member of smp_rv_* pseudo-structure will be
	 * accessed by this target CPU after this point; in particular,
	 * memory pointed to by smp_rv_func_arg.
	 *
	 * The release semantic ensures that all accesses performed by
	 * the current CPU are visible when smp_rendezvous_cpus()
	 * returns, by synchronizing with the
	 * atomic_load_acq_int(&smp_rv_waiters[3]).
	 */
	atomic_add_rel_int(&smp_rv_waiters[3], 1);

	td->td_critnest--;
	KASSERT(owepreempt == td->td_owepreempt,
	    ("rendezvous action changed td_owepreempt"));
}

void
smp_rendezvous_cpus(cpuset_t map,
	void (* setup_func)(void *),
	void (* action_func)(void *),
	void (* teardown_func)(void *),
	void *arg)
{
	int curcpumap, i, ncpus = 0;

	/* See the comments in the !SMP case. */
	if (!smp_started) {
		spinlock_enter();
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		spinlock_exit();
		return;
	}

	CPU_FOREACH(i) {
		if (CPU_ISSET(i, &map))
			ncpus++;
	}
	if (ncpus == 0)
		panic("ncpus is 0 with non-zero map");

	mtx_lock_spin(&smp_ipi_mtx);

	/* Pass rendezvous parameters via global variables. */
	smp_rv_ncpus = ncpus;
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[1] = 0;
	smp_rv_waiters[2] = 0;
	smp_rv_waiters[3] = 0;
	atomic_store_rel_int(&smp_rv_waiters[0], 0);

	/*
	 * Signal other processors, which will enter the IPI with
	 * interrupts off.
	 */
	curcpumap = CPU_ISSET(curcpu, &map);
	CPU_CLR(curcpu, &map);
	ipi_selected(map, IPI_RENDEZVOUS);

	/* Check if the current CPU is in the map */
	if (curcpumap != 0)
		smp_rendezvous_action();

	/*
	 * Ensure that the master CPU waits for all the other
	 * CPUs to finish the rendezvous, so that smp_rv_*
	 * pseudo-structure and the arg are guaranteed to not
	 * be in use.
	 *
	 * Load acquire synchronizes with the release add in
	 * smp_rendezvous_action(), which ensures that our caller sees
	 * all memory actions done by the called functions on other
	 * CPUs.
	 */
	while (atomic_load_acq_int(&smp_rv_waiters[3]) < ncpus)
		cpu_spinwait();

	mtx_unlock_spin(&smp_ipi_mtx);
}

void
smp_rendezvous(void (* setup_func)(void *),
	void (* action_func)(void *),
	void (* teardown_func)(void *),
	void *arg)
{

	smp_rendezvous_cpus(all_cpus, setup_func, action_func,
	    teardown_func, arg);
}
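
/*
 * Usage sketch (illustrative; the callback name is made up): to run an
 * action on every CPU without forcing a second barrier after the action,
 * pass smp_no_rendevous_barrier as the teardown function:
 *
 *	static void
 *	invalidate_local_state(void *arg __unused)
 *	{
 *		... per-CPU work, run with interrupts disabled ...
 *	}
 *
 *	smp_rendezvous(NULL, invalidate_local_state,
 *	    smp_no_rendevous_barrier, NULL);
 *
 * A NULL setup or teardown function still takes part in the associated
 * barrier but calls nothing; the smp_no_rendevous_barrier sentinel is
 * compared by address and skips the barrier itself.
 */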

static struct cpu_group group[MAXCPU];

struct cpu_group *
smp_topo(void)
{
	char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
	struct cpu_group *top;

	/*
	 * Check for a fake topology request for debugging purposes.
	 */
	switch (smp_topology) {
	case 1:
		/* Dual core with no sharing. */
		top = smp_topo_1level(CG_SHARE_NONE, 2, 0);
		break;
	case 2:
		/* No topology, all cpus are equal. */
		top = smp_topo_none();
		break;
	case 3:
		/* Dual core with shared L2. */
		top = smp_topo_1level(CG_SHARE_L2, 2, 0);
		break;
	case 4:
		/* quad core, shared l3 among each package, private l2. */
		top = smp_topo_1level(CG_SHARE_L3, 4, 0);
		break;
	case 5:
		/* quad core, 2 dualcore parts on each package share l2. */
		top = smp_topo_2level(CG_SHARE_NONE, 2, CG_SHARE_L2, 2, 0);
		break;
	case 6:
		/* Single-core 2xHTT */
		top = smp_topo_1level(CG_SHARE_L1, 2, CG_FLAG_HTT);
		break;
	case 7:
		/* quad core with a shared l3, 8 threads sharing L2. */
		top = smp_topo_2level(CG_SHARE_L3, 4, CG_SHARE_L2, 8,
		    CG_FLAG_SMT);
		break;
	default:
		/* Default, ask the system what it wants. */
		top = cpu_topo();
		break;
	}
	/*
	 * Verify the returned topology.
	 */
	if (top->cg_count != mp_ncpus)
		panic("Built bad topology at %p. CPU count %d != %d",
		    top, top->cg_count, mp_ncpus);
	if (CPU_CMP(&top->cg_mask, &all_cpus))
		panic("Built bad topology at %p. CPU mask (%s) != (%s)",
		    top, cpusetobj_strprint(cpusetbuf, &top->cg_mask),
		    cpusetobj_strprint(cpusetbuf2, &all_cpus));
	return (top);
}
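
/*
 * Illustrative example: because the topology sysctl is a loader tunable
 * (CTLFLAG_RDTUN), a line such as
 *
 *	kern.smp.topology=3
 *
 * in loader.conf(5) makes smp_topo() report dual cores with a shared L2
 * regardless of what cpu_topo() would have derived from the hardware,
 * which is useful when exercising the scheduler's topology handling.
 */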

struct cpu_group *
smp_topo_none(void)
{
	struct cpu_group *top;

	top = &group[0];
	top->cg_parent = NULL;
	top->cg_child = NULL;
	top->cg_mask = all_cpus;
	top->cg_count = mp_ncpus;
	top->cg_children = 0;
	top->cg_level = CG_SHARE_NONE;
	top->cg_flags = 0;

	return (top);
}

static int
smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share,
    int count, int flags, int start)
{
	char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
	cpuset_t mask;
	int i;

	CPU_ZERO(&mask);
	for (i = 0; i < count; i++, start++)
		CPU_SET(start, &mask);
	child->cg_parent = parent;
	child->cg_child = NULL;
	child->cg_children = 0;
	child->cg_level = share;
	child->cg_count = count;
	child->cg_flags = flags;
	child->cg_mask = mask;
	parent->cg_children++;
	for (; parent != NULL; parent = parent->cg_parent) {
		if (CPU_OVERLAP(&parent->cg_mask, &child->cg_mask))
			panic("Duplicate children in %p. mask (%s) child (%s)",
			    parent,
			    cpusetobj_strprint(cpusetbuf, &parent->cg_mask),
			    cpusetobj_strprint(cpusetbuf2, &child->cg_mask));
		CPU_OR(&parent->cg_mask, &child->cg_mask);
		parent->cg_count += child->cg_count;
	}

	return (start);
}

struct cpu_group *
smp_topo_1level(int share, int count, int flags)
{
	struct cpu_group *child;
	struct cpu_group *top;
	int packages;
	int cpu;
	int i;

	cpu = 0;
	top = &group[0];
	packages = mp_ncpus / count;
	top->cg_child = child = &group[1];
	top->cg_level = CG_SHARE_NONE;
	for (i = 0; i < packages; i++, child++)
		cpu = smp_topo_addleaf(top, child, share, count, flags, cpu);
	return (top);
}

struct cpu_group *
smp_topo_2level(int l2share, int l2count, int l1share, int l1count,
    int l1flags)
{
	struct cpu_group *top;
	struct cpu_group *l1g;
	struct cpu_group *l2g;
	int cpu;
	int i;
	int j;

	cpu = 0;
	top = &group[0];
	l2g = &group[1];
	top->cg_child = l2g;
	top->cg_level = CG_SHARE_NONE;
	top->cg_children = mp_ncpus / (l2count * l1count);
	l1g = l2g + top->cg_children;
	for (i = 0; i < top->cg_children; i++, l2g++) {
		l2g->cg_parent = top;
		l2g->cg_child = l1g;
		l2g->cg_level = l2share;
		for (j = 0; j < l2count; j++, l1g++)
			cpu = smp_topo_addleaf(l2g, l1g, l1share, l1count,
			    l1flags, cpu);
	}
	return (top);
}
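
/*
 * For illustration (hypothetical 16-CPU system):
 * smp_topo_2level(CG_SHARE_L3, 2, CG_SHARE_L2, 4, 0) builds a root with
 * two children, each an 8-CPU group sharing an L3, and each of those in
 * turn holds two 4-CPU leaves sharing an L2; smp_topo_addleaf()
 * propagates every leaf's mask and count up through its parents.
 */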

struct cpu_group *
smp_topo_find(struct cpu_group *top, int cpu)
{
	struct cpu_group *cg;
	cpuset_t mask;
	int children;
	int i;

	CPU_SETOF(cpu, &mask);
	cg = top;
	for (;;) {
		if (!CPU_OVERLAP(&cg->cg_mask, &mask))
			return (NULL);
		if (cg->cg_children == 0)
			return (cg);
		children = cg->cg_children;
		for (i = 0, cg = cg->cg_child; i < children; cg++, i++)
			if (CPU_OVERLAP(&cg->cg_mask, &mask))
				break;
	}
	return (NULL);
}
#else /* !SMP */

void
smp_rendezvous_cpus(cpuset_t map,
	void (*setup_func)(void *),
	void (*action_func)(void *),
	void (*teardown_func)(void *),
	void *arg)
{
	/*
	 * In the !SMP case we just need to ensure the same initial conditions
	 * as the SMP case.
	 */
	spinlock_enter();
	if (setup_func != NULL)
		setup_func(arg);
	if (action_func != NULL)
		action_func(arg);
	if (teardown_func != NULL)
		teardown_func(arg);
	spinlock_exit();
}

void
smp_rendezvous(void (*setup_func)(void *),
	void (*action_func)(void *),
	void (*teardown_func)(void *),
	void *arg)
{

	/* See the comments in smp_rendezvous_cpus() above. */
	spinlock_enter();
	if (setup_func != NULL)
		setup_func(arg);
	if (action_func != NULL)
		action_func(arg);
	if (teardown_func != NULL)
		teardown_func(arg);
	spinlock_exit();
}

/*
 * Provide dummy SMP support for UP kernels. Modules that need to use SMP
 * APIs will still work using this dummy support.
 */
static void
mp_setvariables_for_up(void *dummy)
{
	mp_ncpus = 1;
	mp_maxid = PCPU_GET(cpuid);
	CPU_SETOF(mp_maxid, &all_cpus);
	KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero"));
}
SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,
    mp_setvariables_for_up, NULL);
#endif /* SMP */
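
/*
 * Sentinel passed as a setup or teardown function to skip the
 * corresponding barrier in smp_rendezvous_action(); it is compared by
 * address there, and is only ever actually invoked by the fallback
 * paths before SMP has started, hence the assertion.  (The historical
 * "rendevous" spelling is part of the API name.)
 */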
void
smp_no_rendevous_barrier(void *dummy)
{
#ifdef SMP
	KASSERT((!smp_started),
	    ("smp_no_rendevous called and smp is started"));
#endif
}

/*
 * Wait for the specified idle threads to switch once. This ensures that
 * even preempted threads have cycled through the switch function once,
 * exiting their codepaths. This allows us to change global pointers
 * with no other synchronization.
 */
int
quiesce_cpus(cpuset_t map, const char *wmesg, int prio)
{
	struct pcpu *pcpu;
	u_int gen[MAXCPU];
	int error;
	int cpu;

	error = 0;
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
			continue;
		pcpu = pcpu_find(cpu);
		gen[cpu] = pcpu->pc_idlethread->td_generation;
	}
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
			continue;
		pcpu = pcpu_find(cpu);
		thread_lock(curthread);
		sched_bind(curthread, cpu);
		thread_unlock(curthread);
		while (gen[cpu] == pcpu->pc_idlethread->td_generation) {
			error = tsleep(quiesce_cpus, prio, wmesg, 1);
			if (error != EWOULDBLOCK)
				goto out;
			error = 0;
		}
	}
out:
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	return (error);
}

int
quiesce_all_cpus(const char *wmesg, int prio)
{

	return (quiesce_cpus(all_cpus, wmesg, prio));
}
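
/*
 * Usage sketch (illustrative; the pointer and malloc type are
 * hypothetical): the classic pattern is to swap a global pointer and
 * quiesce before freeing the old object, so that no CPU can still be
 * referencing it:
 *
 *	old = global_ptr;
 *	global_ptr = new;
 *	quiesce_all_cpus("quiesce", 0);
 *	free(old, M_TEMP);
 */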

/* Extra care is taken with this sysctl because the data type is volatile */
static int
sysctl_kern_smp_active(SYSCTL_HANDLER_ARGS)
{
	int error, active;

	active = smp_started;
	error = SYSCTL_OUT(req, &active, sizeof(active));
	return (error);
}