/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001, John Baldwin <jhb@FreeBSD.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module holds the global variables and machine independent functions
 * used for the kernel SMP support.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/smp.h>

#include "opt_sched.h"

#ifdef SMP
MALLOC_DEFINE(M_TOPO, "toponodes", "SMP topology data");

volatile cpuset_t stopped_cpus;
volatile cpuset_t started_cpus;
volatile cpuset_t suspended_cpus;
cpuset_t hlt_cpus_mask;
cpuset_t logical_cpus_mask;

void (*cpustop_restartfunc)(void);
#endif

static int sysctl_kern_smp_active(SYSCTL_HANDLER_ARGS);

/* This is used in modules that need to work in both SMP and UP. */
cpuset_t all_cpus;

int mp_ncpus;
/* export this for libkvm consumers. */
int mp_maxcpus = MAXCPU;

volatile int smp_started;
u_int mp_maxid;

/* Array of CPU contexts saved during a panic. */
struct pcb *stoppcbs;

static SYSCTL_NODE(_kern, OID_AUTO, smp,
    CTLFLAG_RD | CTLFLAG_CAPRD | CTLFLAG_MPSAFE, NULL,
    "Kernel SMP");

SYSCTL_INT(_kern_smp, OID_AUTO, maxid, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxid, 0,
    "Max CPU ID.");

SYSCTL_INT(_kern_smp, OID_AUTO, maxcpus, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxcpus,
    0, "Max number of CPUs that the system was compiled for.");

SYSCTL_PROC(_kern_smp, OID_AUTO, active, CTLFLAG_RD|CTLTYPE_INT|CTLFLAG_MPSAFE,
    NULL, 0, sysctl_kern_smp_active, "I",
    "Indicates system is running in SMP mode");

int smp_disabled = 0;	/* has smp been disabled? */
SYSCTL_INT(_kern_smp, OID_AUTO, disabled, CTLFLAG_RDTUN|CTLFLAG_CAPRD,
    &smp_disabled, 0, "SMP has been disabled from the loader");

int smp_cpus = 1;	/* how many cpu's running */
SYSCTL_INT(_kern_smp, OID_AUTO, cpus, CTLFLAG_RD|CTLFLAG_CAPRD, &smp_cpus, 0,
    "Number of CPUs online");

int smp_threads_per_core = 1;	/* how many SMT threads are running per core */
SYSCTL_INT(_kern_smp, OID_AUTO, threads_per_core, CTLFLAG_RD|CTLFLAG_CAPRD,
    &smp_threads_per_core, 0, "Number of SMT threads online per core");

int mp_ncores = -1;	/* how many physical cores running */
SYSCTL_INT(_kern_smp, OID_AUTO, cores, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_ncores, 0,
    "Number of physical cores online");

int smp_topology = 0;	/* Which topology we're using. */
SYSCTL_INT(_kern_smp, OID_AUTO, topology, CTLFLAG_RDTUN, &smp_topology, 0,
    "Topology override setting; 0 is default provided by hardware.");

#ifdef SMP
/* Enable forwarding of a signal to a process running on a different CPU */
static int forward_signal_enabled = 1;
SYSCTL_INT(_kern_smp, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
    &forward_signal_enabled, 0,
    "Forwarding of a signal to a process on a different CPU");

/* Variables needed for SMP rendezvous. */
static volatile int smp_rv_ncpus;
static void (*volatile smp_rv_setup_func)(void *arg);
static void (*volatile smp_rv_action_func)(void *arg);
static void (*volatile smp_rv_teardown_func)(void *arg);
static void *volatile smp_rv_func_arg;
static volatile int smp_rv_waiters[4];

/*
 * Shared mutex to restrict busywaits between smp_rendezvous() and
 * smp(_targeted)_tlb_shootdown().  A deadlock occurs if both of these
 * functions trigger at once and cause multiple CPUs to busywait with
 * interrupts disabled.
 */
struct mtx smp_ipi_mtx;

/*
 * Let the MD SMP code initialize mp_maxid very early if it can.
 */
static void
mp_setmaxid(void *dummy)
{

	cpu_mp_setmaxid();

	KASSERT(mp_ncpus >= 1, ("%s: CPU count < 1", __func__));
	KASSERT(mp_ncpus > 1 || mp_maxid == 0,
	    ("%s: one CPU but mp_maxid is not zero", __func__));
	KASSERT(mp_maxid >= mp_ncpus - 1,
	    ("%s: counters out of sync: max %d, count %d", __func__,
	    mp_maxid, mp_ncpus));

	cpusetsizemin = howmany(mp_maxid + 1, NBBY);
}
SYSINIT(cpu_mp_setmaxid, SI_SUB_TUNABLES, SI_ORDER_FIRST, mp_setmaxid, NULL);

/*
 * Call the MD SMP initialization code.
 */
static void
mp_start(void *dummy)
{

	mtx_init(&smp_ipi_mtx, "smp rendezvous", NULL, MTX_SPIN);

	/* Probe for MP hardware. */
	if (smp_disabled != 0 || cpu_mp_probe() == 0) {
		mp_ncores = 1;
		mp_ncpus = 1;
		CPU_SETOF(PCPU_GET(cpuid), &all_cpus);
		return;
	}

	cpu_mp_start();
	printf("FreeBSD/SMP: Multiprocessor System Detected: %d CPUs\n",
	    mp_ncpus);

	/* Provide a default for most architectures that don't have SMT/HTT. */
	if (mp_ncores < 0)
		mp_ncores = mp_ncpus;

	stoppcbs = mallocarray(mp_maxid + 1, sizeof(struct pcb), M_DEVBUF,
	    M_WAITOK | M_ZERO);

	cpu_mp_announce();
}
SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_THIRD, mp_start, NULL);

void
forward_signal(struct thread *td)
{
	int id;

	/*
	 * signotify() has already set TDA_AST and TDA_SIG on td_ast for
	 * this thread, so all we need to do is poke it if it is currently
	 * executing so that it executes ast().
	 */
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_RUNNING(td),
	    ("forward_signal: thread is not TDS_RUNNING"));

	CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);

	if (!smp_started || cold || KERNEL_PANICKED())
		return;
	if (!forward_signal_enabled)
		return;

	/* No need to IPI ourself. */
	if (td == curthread)
		return;

	id = td->td_oncpu;
	if (id == NOCPU)
		return;
	ipi_cpu(id, IPI_AST);
}

/*
 * When called the executing CPU will send an IPI to all other CPUs
 * requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 * - Signals all CPUs in map to stop.
 * - Waits for each to stop.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 *
 */
#if defined(__amd64__) || defined(__i386__)
#define	X86	1
#else
#define	X86	0
#endif
static int
generic_stop_cpus(cpuset_t map, u_int type)
{
#ifdef KTR
	char cpusetbuf[CPUSETBUFSIZ];
#endif
	static volatile u_int stopping_cpu = NOCPU;
	int i;
	volatile cpuset_t *cpus;

	KASSERT(
	    type == IPI_STOP || type == IPI_STOP_HARD
#if X86
	    || type == IPI_SUSPEND
#endif
	    , ("%s: invalid stop type", __func__));

	if (!smp_started)
		return (0);

	CTR2(KTR_SMP, "stop_cpus(%s) with %u type",
	    cpusetobj_strprint(cpusetbuf, &map), type);

#if X86
	/*
	 * When suspending, ensure there are no IPIs in progress.
	 * IPIs that have been issued, but not yet delivered (e.g.
	 * not pending on a vCPU when running under virtualization)
	 * will be lost, violating FreeBSD's assumption of reliable
	 * IPI delivery.
	 */
	if (type == IPI_SUSPEND)
		mtx_lock_spin(&smp_ipi_mtx);
#endif

#if X86
	if (!nmi_is_broadcast || nmi_kdb_lock == 0) {
#endif
	if (stopping_cpu != PCPU_GET(cpuid))
		while (atomic_cmpset_int(&stopping_cpu, NOCPU,
		    PCPU_GET(cpuid)) == 0)
			while (stopping_cpu != NOCPU)
				cpu_spinwait(); /* spin */

	/* send the stop IPI to all CPUs in map */
	ipi_selected(map, type);
#if X86
	}
#endif

#if X86
	if (type == IPI_SUSPEND)
		cpus = &suspended_cpus;
	else
#endif
		cpus = &stopped_cpus;

	i = 0;
	while (!CPU_SUBSET(cpus, &map)) {
		/* spin */
		cpu_spinwait();
		i++;
		if (i == 100000000) {
			printf("timeout stopping cpus\n");
			break;
		}
	}

#if X86
	if (type == IPI_SUSPEND)
		mtx_unlock_spin(&smp_ipi_mtx);
#endif

	stopping_cpu = NOCPU;
	return (1);
}

int
stop_cpus(cpuset_t map)
{

	return (generic_stop_cpus(map, IPI_STOP));
}

int
stop_cpus_hard(cpuset_t map)
{

	return (generic_stop_cpus(map, IPI_STOP_HARD));
}

#if X86
int
suspend_cpus(cpuset_t map)
{

	return (generic_stop_cpus(map, IPI_SUSPEND));
}
#endif

/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 * - Signals all CPUs in map to restart.
 * - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
static int
generic_restart_cpus(cpuset_t map, u_int type)
{
#ifdef KTR
	char cpusetbuf[CPUSETBUFSIZ];
#endif
	volatile cpuset_t *cpus;

#if X86
	KASSERT(type == IPI_STOP || type == IPI_STOP_HARD
	    || type == IPI_SUSPEND, ("%s: invalid stop type", __func__));

	if (!smp_started)
		return (0);

	CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));

	if (type == IPI_SUSPEND)
		cpus = &resuming_cpus;
	else
		cpus = &stopped_cpus;

	/* signal other cpus to restart */
	if (type == IPI_SUSPEND)
		CPU_COPY_STORE_REL(&map, &toresume_cpus);
	else
		CPU_COPY_STORE_REL(&map, &started_cpus);

	/*
	 * Wake up any CPUs stopped with MWAIT.  From MI code we can't tell if
	 * MONITOR/MWAIT is enabled, but the potentially redundant writes are
	 * relatively inexpensive.
	 */
	if (type == IPI_STOP) {
		struct monitorbuf *mb;
		u_int id;

		CPU_FOREACH(id) {
			if (!CPU_ISSET(id, &map))
				continue;

			mb = &pcpu_find(id)->pc_monitorbuf;
			atomic_store_int(&mb->stop_state,
			    MONITOR_STOPSTATE_RUNNING);
		}
	}

	if (!nmi_is_broadcast || nmi_kdb_lock == 0) {
		/* wait for each to clear its bit */
		while (CPU_OVERLAP(cpus, &map))
			cpu_spinwait();
	}
#else /* !X86 */
	KASSERT(type == IPI_STOP || type == IPI_STOP_HARD,
	    ("%s: invalid stop type", __func__));

	if (!smp_started)
		return (0);

	CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));

	cpus = &stopped_cpus;

	/* signal other cpus to restart */
	CPU_COPY_STORE_REL(&map, &started_cpus);

	/* wait for each to clear its bit */
	while (CPU_OVERLAP(cpus, &map))
		cpu_spinwait();
#endif
	return (1);
}

int
restart_cpus(cpuset_t map)
{

	return (generic_restart_cpus(map, IPI_STOP));
}

#if X86
int
resume_cpus(cpuset_t map)
{

	return (generic_restart_cpus(map, IPI_SUSPEND));
}
#endif
#undef X86
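/*
 * Illustrative pairing (a sketch, not code from this file): callers such as
 * debugger or shutdown paths typically stop every other CPU, do their work
 * while the others spin, and then release exactly the set that was stopped:
 *
 *	stop_cpus_hard(other_cpus);
 *	...examine or modify global state...
 *	restart_cpus(stopped_cpus);
 *
 * As the comments on generic_stop_cpus() and generic_restart_cpus() above
 * note, 'other_cpus' and the global 'stopped_cpus' set are the usual, but
 * not required, arguments.
 */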
/*
 * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
 * (if specified), rendezvous, execute the action function (if specified),
 * rendezvous again, execute the teardown function (if specified), and then
 * resume.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */
void
smp_rendezvous_action(void)
{
	struct thread *td;
	void *local_func_arg;
	void (*local_setup_func)(void*);
	void (*local_action_func)(void*);
	void (*local_teardown_func)(void*);
#ifdef INVARIANTS
	int owepreempt;
#endif

	/* Ensure we have up-to-date values. */
	atomic_add_acq_int(&smp_rv_waiters[0], 1);
	while (smp_rv_waiters[0] < smp_rv_ncpus)
		cpu_spinwait();

	/* Fetch rendezvous parameters after acquire barrier. */
	local_func_arg = smp_rv_func_arg;
	local_setup_func = smp_rv_setup_func;
	local_action_func = smp_rv_action_func;
	local_teardown_func = smp_rv_teardown_func;

	/*
	 * Use a nested critical section to prevent any preemptions
	 * from occurring during a rendezvous action routine.
	 * Specifically, if a rendezvous handler is invoked via an IPI
	 * and the interrupted thread was in the critical_exit()
	 * function after setting td_critnest to 0 but before
	 * performing a deferred preemption, this routine can be
	 * invoked with td_critnest set to 0 and td_owepreempt true.
	 * In that case, a critical_exit() during the rendezvous
	 * action would trigger a preemption which is not permitted in
	 * a rendezvous action.  To fix this, wrap all of the
	 * rendezvous action handlers in a critical section.  We
	 * cannot use a regular critical section however as having
	 * critical_exit() preempt from this routine would also be
	 * problematic (the preemption must not occur before the IPI
	 * has been acknowledged via an EOI).  Instead, we
	 * intentionally ignore td_owepreempt when leaving the
	 * critical section.  This should be harmless because we do
	 * not permit rendezvous action routines to schedule threads,
	 * and thus td_owepreempt should never transition from 0 to 1
	 * during this routine.
	 */
	td = curthread;
	td->td_critnest++;
#ifdef INVARIANTS
	owepreempt = td->td_owepreempt;
#endif

	/*
	 * If requested, run a setup function before the main action
	 * function.  Ensure all CPUs have completed the setup
	 * function before moving on to the action function.
	 */
	if (local_setup_func != smp_no_rendezvous_barrier) {
		if (local_setup_func != NULL)
			local_setup_func(local_func_arg);
		atomic_add_int(&smp_rv_waiters[1], 1);
		while (smp_rv_waiters[1] < smp_rv_ncpus)
			cpu_spinwait();
	}

	if (local_action_func != NULL)
		local_action_func(local_func_arg);

	if (local_teardown_func != smp_no_rendezvous_barrier) {
		/*
		 * Signal that the main action has been completed.  If a
		 * full exit rendezvous is requested, then all CPUs will
		 * wait here until all CPUs have finished the main action.
		 */
		atomic_add_int(&smp_rv_waiters[2], 1);
		while (smp_rv_waiters[2] < smp_rv_ncpus)
			cpu_spinwait();

		if (local_teardown_func != NULL)
			local_teardown_func(local_func_arg);
	}

	/*
	 * Signal that the rendezvous is fully completed by this CPU.
	 * This means that no member of smp_rv_* pseudo-structure will be
	 * accessed by this target CPU after this point; in particular,
	 * memory pointed by smp_rv_func_arg.
	 *
	 * The release semantic ensures that all accesses performed by
	 * the current CPU are visible when smp_rendezvous_cpus()
	 * returns, by synchronizing with the
	 * atomic_load_acq_int(&smp_rv_waiters[3]).
	 */
	atomic_add_rel_int(&smp_rv_waiters[3], 1);

	td->td_critnest--;
	KASSERT(owepreempt == td->td_owepreempt,
	    ("rendezvous action changed td_owepreempt"));
}

void
smp_rendezvous_cpus(cpuset_t map,
	void (* setup_func)(void *),
	void (* action_func)(void *),
	void (* teardown_func)(void *),
	void *arg)
{
	int curcpumap, i, ncpus = 0;

	/* See comments in the !SMP case. */
	if (!smp_started) {
		spinlock_enter();
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		spinlock_exit();
		return;
	}

	/*
	 * Make sure we come here with interrupts enabled.  Otherwise we
	 * livelock if smp_ipi_mtx is owned by a thread which sent us an IPI.
	 */
	MPASS(curthread->td_md.md_spinlock_count == 0);

	CPU_FOREACH(i) {
		if (CPU_ISSET(i, &map))
			ncpus++;
	}
	if (ncpus == 0)
		panic("ncpus is 0 with non-zero map");

	mtx_lock_spin(&smp_ipi_mtx);

	/* Pass rendezvous parameters via global variables. */
	smp_rv_ncpus = ncpus;
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[1] = 0;
	smp_rv_waiters[2] = 0;
	smp_rv_waiters[3] = 0;
	atomic_store_rel_int(&smp_rv_waiters[0], 0);

	/*
	 * Signal other processors, which will enter the IPI with
	 * interrupts off.
	 */
	curcpumap = CPU_ISSET(curcpu, &map);
	CPU_CLR(curcpu, &map);
	ipi_selected(map, IPI_RENDEZVOUS);

	/* Check if the current CPU is in the map */
	if (curcpumap != 0)
		smp_rendezvous_action();

	/*
	 * Ensure that the master CPU waits for all the other
	 * CPUs to finish the rendezvous, so that smp_rv_*
	 * pseudo-structure and the arg are guaranteed to not
	 * be in use.
	 *
	 * Load acquire synchronizes with the release add in
	 * smp_rendezvous_action(), which ensures that our caller sees
	 * all memory actions done by the called functions on other
	 * CPUs.
	 */
	while (atomic_load_acq_int(&smp_rv_waiters[3]) < ncpus)
		cpu_spinwait();

	mtx_unlock_spin(&smp_ipi_mtx);
}

void
smp_rendezvous(void (* setup_func)(void *),
	void (* action_func)(void *),
	void (* teardown_func)(void *),
	void *arg)
{
	smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func, arg);
}
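/*
 * Usage sketch (not part of this file): a caller that only needs an action
 * function run on every CPU can skip the entry and exit barriers by passing
 * smp_no_rendezvous_barrier for the setup and teardown functions, e.g.
 *
 *	smp_rendezvous(smp_no_rendezvous_barrier, do_invalidate,
 *	    smp_no_rendezvous_barrier, NULL);
 *
 * where do_invalidate is a hypothetical void (*)(void *) handler.  The
 * handler runs inside the nested critical section set up by
 * smp_rendezvous_action() above, so it must not sleep or schedule threads.
 */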
static void
smp_topo_fill(struct cpu_group *cg)
{
	int c;

	for (c = 0; c < cg->cg_children; c++)
		smp_topo_fill(&cg->cg_child[c]);
	cg->cg_first = CPU_FFS(&cg->cg_mask) - 1;
	cg->cg_last = CPU_FLS(&cg->cg_mask) - 1;
}

struct cpu_group *
smp_topo(void)
{
	char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
	static struct cpu_group *top = NULL;

	/*
	 * The first call to smp_topo() is guaranteed to occur
	 * during the kernel boot while we are still single-threaded.
	 */
	if (top != NULL)
		return (top);

	/*
	 * Check for a fake topology request for debugging purposes.
	 */
	switch (smp_topology) {
	case 1:
		/* Dual core with no sharing. */
		top = smp_topo_1level(CG_SHARE_NONE, 2, 0);
		break;
	case 2:
		/* No topology, all cpus are equal. */
		top = smp_topo_none();
		break;
	case 3:
		/* Dual core with shared L2. */
		top = smp_topo_1level(CG_SHARE_L2, 2, 0);
		break;
	case 4:
		/* quad core, shared l3 among each package, private l2. */
		top = smp_topo_1level(CG_SHARE_L3, 4, 0);
		break;
	case 5:
		/* quad core, 2 dualcore parts on each package share l2. */
		top = smp_topo_2level(CG_SHARE_NONE, 2, CG_SHARE_L2, 2, 0);
		break;
	case 6:
		/* Single-core 2xHTT */
		top = smp_topo_1level(CG_SHARE_L1, 2, CG_FLAG_HTT);
		break;
	case 7:
		/* quad core with a shared l3, 8 threads sharing L2. */
		top = smp_topo_2level(CG_SHARE_L3, 4, CG_SHARE_L2, 8,
		    CG_FLAG_SMT);
		break;
	default:
		/* Default, ask the system what it wants. */
		top = cpu_topo();
		break;
	}
	/*
	 * Verify the returned topology.
	 */
	if (top->cg_count != mp_ncpus)
		panic("Built bad topology at %p.  CPU count %d != %d",
		    top, top->cg_count, mp_ncpus);
	if (CPU_CMP(&top->cg_mask, &all_cpus))
		panic("Built bad topology at %p.  CPU mask (%s) != (%s)",
		    top, cpusetobj_strprint(cpusetbuf, &top->cg_mask),
		    cpusetobj_strprint(cpusetbuf2, &all_cpus));

	/*
	 * Collapse nonsense levels that may be created out of convenience by
	 * the MD layers.  They cause extra work in the search functions.
	 */
	while (top->cg_children == 1) {
		top = &top->cg_child[0];
		top->cg_parent = NULL;
	}
	smp_topo_fill(top);
	return (top);
}
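/*
 * For example (a sketch based on the cases above): setting the loader
 * tunable kern.smp.topology=3 on an 8-CPU machine makes the scheduler see
 * four dual-core groups, each pair sharing an L2 cache, regardless of what
 * the hardware actually looks like.  The default of 0 always defers to the
 * machine-dependent cpu_topo().
 */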
struct cpu_group *
smp_topo_alloc(u_int count)
{
	static struct cpu_group *group = NULL;
	static u_int index;
	u_int curr;

	if (group == NULL) {
		group = mallocarray((mp_maxid + 1) * MAX_CACHE_LEVELS + 1,
		    sizeof(*group), M_DEVBUF, M_WAITOK | M_ZERO);
	}
	curr = index;
	index += count;
	return (&group[curr]);
}

struct cpu_group *
smp_topo_none(void)
{
	struct cpu_group *top;

	top = smp_topo_alloc(1);
	top->cg_parent = NULL;
	top->cg_child = NULL;
	top->cg_mask = all_cpus;
	top->cg_count = mp_ncpus;
	top->cg_children = 0;
	top->cg_level = CG_SHARE_NONE;
	top->cg_flags = 0;

	return (top);
}

static int
smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share,
    int count, int flags, int start)
{
	char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
	cpuset_t mask;
	int i;

	CPU_ZERO(&mask);
	for (i = 0; i < count; i++, start++)
		CPU_SET(start, &mask);
	child->cg_parent = parent;
	child->cg_child = NULL;
	child->cg_children = 0;
	child->cg_level = share;
	child->cg_count = count;
	child->cg_flags = flags;
	child->cg_mask = mask;
	parent->cg_children++;
	for (; parent != NULL; parent = parent->cg_parent) {
		if (CPU_OVERLAP(&parent->cg_mask, &child->cg_mask))
			panic("Duplicate children in %p.  mask (%s) child (%s)",
			    parent,
			    cpusetobj_strprint(cpusetbuf, &parent->cg_mask),
			    cpusetobj_strprint(cpusetbuf2, &child->cg_mask));
		CPU_OR(&parent->cg_mask, &parent->cg_mask, &child->cg_mask);
		parent->cg_count += child->cg_count;
	}

	return (start);
}

struct cpu_group *
smp_topo_1level(int share, int count, int flags)
{
	struct cpu_group *child;
	struct cpu_group *top;
	int packages;
	int cpu;
	int i;

	cpu = 0;
	packages = mp_ncpus / count;
	top = smp_topo_alloc(1 + packages);
	top->cg_child = child = top + 1;
	top->cg_level = CG_SHARE_NONE;
	for (i = 0; i < packages; i++, child++)
		cpu = smp_topo_addleaf(top, child, share, count, flags, cpu);
	return (top);
}

struct cpu_group *
smp_topo_2level(int l2share, int l2count, int l1share, int l1count,
    int l1flags)
{
	struct cpu_group *top;
	struct cpu_group *l1g;
	struct cpu_group *l2g;
	int cpu;
	int i;
	int j;

	cpu = 0;
	top = smp_topo_alloc(1 + mp_ncpus / (l2count * l1count) +
	    mp_ncpus / l1count);
	l2g = top + 1;
	top->cg_child = l2g;
	top->cg_level = CG_SHARE_NONE;
	top->cg_children = mp_ncpus / (l2count * l1count);
	l1g = l2g + top->cg_children;
	for (i = 0; i < top->cg_children; i++, l2g++) {
		l2g->cg_parent = top;
		l2g->cg_child = l1g;
		l2g->cg_level = l2share;
		for (j = 0; j < l2count; j++, l1g++)
			cpu = smp_topo_addleaf(l2g, l1g, l1share, l1count,
			    l1flags, cpu);
	}
	return (top);
}

struct cpu_group *
smp_topo_find(struct cpu_group *top, int cpu)
{
	struct cpu_group *cg;
	cpuset_t mask;
	int children;
	int i;

	CPU_SETOF(cpu, &mask);
	cg = top;
	for (;;) {
		if (!CPU_OVERLAP(&cg->cg_mask, &mask))
			return (NULL);
		if (cg->cg_children == 0)
			return (cg);
		children = cg->cg_children;
		for (i = 0, cg = cg->cg_child; i < children; cg++, i++)
			if (CPU_OVERLAP(&cg->cg_mask, &mask))
				break;
	}
	return (NULL);
}
#else /* !SMP */

void
smp_rendezvous_cpus(cpuset_t map,
	void (*setup_func)(void *),
	void (*action_func)(void *),
	void (*teardown_func)(void *),
	void *arg)
{
	/*
	 * In the !SMP case we just need to ensure the same initial conditions
	 * as the SMP case.
	 */
	spinlock_enter();
	if (setup_func != NULL)
		setup_func(arg);
	if (action_func != NULL)
		action_func(arg);
	if (teardown_func != NULL)
		teardown_func(arg);
	spinlock_exit();
}

void
smp_rendezvous(void (*setup_func)(void *),
	void (*action_func)(void *),
	void (*teardown_func)(void *),
	void *arg)
{

	smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func,
	    arg);
}

/*
 * Provide dummy SMP support for UP kernels.  Modules that need to use SMP
 * APIs will still work using this dummy support.
 */
static void
mp_setvariables_for_up(void *dummy)
{
	mp_ncpus = 1;
	mp_ncores = 1;
	mp_maxid = PCPU_GET(cpuid);
	CPU_SETOF(mp_maxid, &all_cpus);
	KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero"));
}
SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,
    mp_setvariables_for_up, NULL);
#endif /* SMP */

void
smp_no_rendezvous_barrier(void *dummy)
{
#ifdef SMP
	KASSERT((!smp_started),("smp_no_rendezvous called and smp is started"));
#endif
}

void
smp_rendezvous_cpus_retry(cpuset_t map,
	void (* setup_func)(void *),
	void (* action_func)(void *),
	void (* teardown_func)(void *),
	void (* wait_func)(void *, int),
	struct smp_rendezvous_cpus_retry_arg *arg)
{
	int cpu;

	CPU_COPY(&map, &arg->cpus);

	/*
	 * Only one CPU to execute on.
	 */
	if (!smp_started) {
		spinlock_enter();
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		spinlock_exit();
		return;
	}

	/*
	 * Execute an action on all specified CPUs while retrying until they
	 * all acknowledge completion.
	 */
	for (;;) {
		smp_rendezvous_cpus(
		    arg->cpus,
		    setup_func,
		    action_func,
		    teardown_func,
		    arg);

		if (CPU_EMPTY(&arg->cpus))
			break;

		CPU_FOREACH(cpu) {
			if (!CPU_ISSET(cpu, &arg->cpus))
				continue;
			wait_func(arg, cpu);
		}
	}
}

void
smp_rendezvous_cpus_done(struct smp_rendezvous_cpus_retry_arg *arg)
{

	CPU_CLR_ATOMIC(curcpu, &arg->cpus);
}

/*
 * If (prio & PDROP) == 0:
 * Wait for specified idle threads to switch once.  This ensures that even
 * preempted threads have cycled through the switch function once,
 * exiting their codepaths.  This allows us to change global pointers
 * with no other synchronization.
 * If (prio & PDROP) != 0:
 * Force the specified CPUs to switch context at least once.
 */
int
quiesce_cpus(cpuset_t map, const char *wmesg, int prio)
{
	struct pcpu *pcpu;
	u_int *gen;
	int error;
	int cpu;

	error = 0;
	if ((prio & PDROP) == 0) {
		gen = mallocarray(sizeof(u_int), mp_maxid + 1, M_TEMP,
		    M_WAITOK);
		for (cpu = 0; cpu <= mp_maxid; cpu++) {
			if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
				continue;
			pcpu = pcpu_find(cpu);
			gen[cpu] = pcpu->pc_idlethread->td_generation;
		}
	}
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
			continue;
		pcpu = pcpu_find(cpu);
		thread_lock(curthread);
		sched_bind(curthread, cpu);
		thread_unlock(curthread);
		if ((prio & PDROP) != 0)
			continue;
		while (gen[cpu] == pcpu->pc_idlethread->td_generation) {
			error = tsleep(quiesce_cpus, prio & ~PDROP, wmesg, 1);
			if (error != EWOULDBLOCK)
				goto out;
			error = 0;
		}
	}
out:
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
	if ((prio & PDROP) == 0)
		free(gen, M_TEMP);

	return (error);
}

int
quiesce_all_cpus(const char *wmesg, int prio)
{

	return quiesce_cpus(all_cpus, wmesg, prio);
}
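/*
 * Typical use (an illustrative sketch, not code from this file): after
 * replacing a global pointer that readers only dereference between context
 * switches, wait for every CPU's idle thread to run once before freeing the
 * old object:
 *
 *	old = some_global;		// hypothetical global pointer
 *	some_global = new;
 *	quiesce_all_cpus("qglob", 0);	// "qglob" is an arbitrary wmesg
 *	free(old, M_TEMP);
 *
 * This mirrors the "change global pointers with no other synchronization"
 * case described in the quiesce_cpus() comment above.
 */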
/*
 * Observe all CPUs not executing in critical section.
 * We are not in one so the check for us is safe.  If the found
 * thread changes to something else we know the section was
 * exited as well.
 */
void
quiesce_all_critical(void)
{
	struct thread *td, *newtd;
	struct pcpu *pcpu;
	int cpu;

	MPASS(curthread->td_critnest == 0);

	CPU_FOREACH(cpu) {
		pcpu = cpuid_to_pcpu[cpu];
		td = pcpu->pc_curthread;
		for (;;) {
			if (td->td_critnest == 0)
				break;
			cpu_spinwait();
			newtd = (struct thread *)
			    atomic_load_acq_ptr((void *)&pcpu->pc_curthread);
			if (td != newtd)
				break;
		}
	}
}

static void
cpus_fence_seq_cst_issue(void *arg __unused)
{

	atomic_thread_fence_seq_cst();
}

/*
 * Send an IPI forcing a sequentially consistent fence.
 *
 * Allows replacement of an explicit fence with a compiler barrier.
 * Trades speed up during normal execution for a significant slowdown when
 * the barrier is needed.
 */
void
cpus_fence_seq_cst(void)
{

#ifdef SMP
	smp_rendezvous(
	    smp_no_rendezvous_barrier,
	    cpus_fence_seq_cst_issue,
	    smp_no_rendezvous_barrier,
	    NULL
	);
#else
	cpus_fence_seq_cst_issue(NULL);
#endif
}

/* Extra care is taken with this sysctl because the data type is volatile */
static int
sysctl_kern_smp_active(SYSCTL_HANDLER_ARGS)
{
	int error, active;

	active = smp_started;
	error = SYSCTL_OUT(req, &active, sizeof(active));
	return (error);
}

#ifdef SMP
void
topo_init_node(struct topo_node *node)
{

	bzero(node, sizeof(*node));
	TAILQ_INIT(&node->children);
}

void
topo_init_root(struct topo_node *root)
{

	topo_init_node(root);
	root->type = TOPO_TYPE_SYSTEM;
}

/*
 * Add a child node with the given ID under the given parent.
 * Do nothing if there is already a child with that ID.
 */
struct topo_node *
topo_add_node_by_hwid(struct topo_node *parent, int hwid,
    topo_node_type type, uintptr_t subtype)
{
	struct topo_node *node;

	TAILQ_FOREACH_REVERSE(node, &parent->children,
	    topo_children, siblings) {
		if (node->hwid == hwid
		    && node->type == type && node->subtype == subtype) {
			return (node);
		}
	}

	node = malloc(sizeof(*node), M_TOPO, M_WAITOK);
	topo_init_node(node);
	node->parent = parent;
	node->hwid = hwid;
	node->type = type;
	node->subtype = subtype;
	TAILQ_INSERT_TAIL(&parent->children, node, siblings);
	parent->nchildren++;

	return (node);
}

/*
 * Find a child node with the given ID under the given parent.
 */
struct topo_node *
topo_find_node_by_hwid(struct topo_node *parent, int hwid,
    topo_node_type type, uintptr_t subtype)
{

	struct topo_node *node;

	TAILQ_FOREACH(node, &parent->children, siblings) {
		if (node->hwid == hwid
		    && node->type == type && node->subtype == subtype) {
			return (node);
		}
	}

	return (NULL);
}

/*
 * Given a node change the order of its parent's child nodes such
 * that the node becomes the first child while preserving the cyclic
 * order of the children.  In other words, the given node is promoted
 * by rotation.
 */
void
topo_promote_child(struct topo_node *child)
{
	struct topo_node *next;
	struct topo_node *node;
	struct topo_node *parent;

	parent = child->parent;
	next = TAILQ_NEXT(child, siblings);
	TAILQ_REMOVE(&parent->children, child, siblings);
	TAILQ_INSERT_HEAD(&parent->children, child, siblings);

	while (next != NULL) {
		node = next;
		next = TAILQ_NEXT(node, siblings);
		TAILQ_REMOVE(&parent->children, node, siblings);
		TAILQ_INSERT_AFTER(&parent->children, child, node, siblings);
		child = node;
	}
}
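/*
 * Worked example (illustrative only): promoting C in the child list
 * A B C D rotates it to C D A B; the cyclic order A->B->C->D->A is kept,
 * only the starting point changes.
 */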
/*
 * Iterate to the next node in the depth-first search (traversal) of
 * the topology tree.
 */
struct topo_node *
topo_next_node(struct topo_node *top, struct topo_node *node)
{
	struct topo_node *next;

	if ((next = TAILQ_FIRST(&node->children)) != NULL)
		return (next);

	if ((next = TAILQ_NEXT(node, siblings)) != NULL)
		return (next);

	while (node != top && (node = node->parent) != top)
		if ((next = TAILQ_NEXT(node, siblings)) != NULL)
			return (next);

	return (NULL);
}

/*
 * Iterate to the next node in the depth-first search of the topology tree,
 * but without descending below the current node.
 */
struct topo_node *
topo_next_nonchild_node(struct topo_node *top, struct topo_node *node)
{
	struct topo_node *next;

	if ((next = TAILQ_NEXT(node, siblings)) != NULL)
		return (next);

	while (node != top && (node = node->parent) != top)
		if ((next = TAILQ_NEXT(node, siblings)) != NULL)
			return (next);

	return (NULL);
}
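/*
 * Illustrative iteration pattern (a sketch, not code used here): a full
 * depth-first walk of a topology tree rooted at 'root' can be written as
 *
 *	for (node = topo_next_node(root, root); node != NULL;
 *	    node = topo_next_node(root, node))
 *		...visit node...
 *
 * Substituting topo_next_nonchild_node() for the second call skips the
 * subtree below the node just visited, as topo_analyze_table() does below.
 */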
/*
 * Assign the given ID to the given topology node that represents a logical
 * processor.
 */
void
topo_set_pu_id(struct topo_node *node, cpuid_t id)
{

	KASSERT(node->type == TOPO_TYPE_PU,
	    ("topo_set_pu_id: wrong node type: %u", node->type));
	KASSERT(CPU_EMPTY(&node->cpuset) && node->cpu_count == 0,
	    ("topo_set_pu_id: cpuset already not empty"));
	node->id = id;
	CPU_SET(id, &node->cpuset);
	node->cpu_count = 1;
	node->subtype = 1;

	while ((node = node->parent) != NULL) {
		KASSERT(!CPU_ISSET(id, &node->cpuset),
		    ("logical ID %u is already set in node %p", id, node));
		CPU_SET(id, &node->cpuset);
		node->cpu_count++;
	}
}

static struct topology_spec {
	topo_node_type	type;
	bool		match_subtype;
	uintptr_t	subtype;
} topology_level_table[TOPO_LEVEL_COUNT] = {
	[TOPO_LEVEL_PKG] = { .type = TOPO_TYPE_PKG, },
	[TOPO_LEVEL_GROUP] = { .type = TOPO_TYPE_GROUP, },
	[TOPO_LEVEL_CACHEGROUP] = {
		.type = TOPO_TYPE_CACHE,
		.match_subtype = true,
		.subtype = CG_SHARE_L3,
	},
	[TOPO_LEVEL_CORE] = { .type = TOPO_TYPE_CORE, },
	[TOPO_LEVEL_THREAD] = { .type = TOPO_TYPE_PU, },
};

static bool
topo_analyze_table(struct topo_node *root, int all, enum topo_level level,
    struct topo_analysis *results)
{
	struct topology_spec *spec;
	struct topo_node *node;
	int count;

	if (level >= TOPO_LEVEL_COUNT)
		return (true);

	spec = &topology_level_table[level];
	count = 0;
	node = topo_next_node(root, root);

	while (node != NULL) {
		if (node->type != spec->type ||
		    (spec->match_subtype && node->subtype != spec->subtype)) {
			node = topo_next_node(root, node);
			continue;
		}
		if (!all && CPU_EMPTY(&node->cpuset)) {
			node = topo_next_nonchild_node(root, node);
			continue;
		}

		count++;

		if (!topo_analyze_table(node, all, level + 1, results))
			return (false);

		node = topo_next_nonchild_node(root, node);
	}

	/* No explicit subgroups is essentially one subgroup. */
	if (count == 0) {
		count = 1;

		if (!topo_analyze_table(root, all, level + 1, results))
			return (false);
	}

	if (results->entities[level] == -1)
		results->entities[level] = count;
	else if (results->entities[level] != count)
		return (false);

	return (true);
}

/*
 * Check if the topology is uniform, that is, each package has the same number
 * of cores in it and each core has the same number of threads (logical
 * processors) in it.  If so, calculate the number of packages, the number of
 * groups per package, the number of cachegroups per group, and the number of
 * logical processors per cachegroup.  The 'all' parameter tells whether to
 * include administratively disabled logical processors in the analysis.
 */
int
topo_analyze(struct topo_node *topo_root, int all,
    struct topo_analysis *results)
{

	results->entities[TOPO_LEVEL_PKG] = -1;
	results->entities[TOPO_LEVEL_CORE] = -1;
	results->entities[TOPO_LEVEL_THREAD] = -1;
	results->entities[TOPO_LEVEL_GROUP] = -1;
	results->entities[TOPO_LEVEL_CACHEGROUP] = -1;

	if (!topo_analyze_table(topo_root, all, TOPO_LEVEL_PKG, results))
		return (0);

	KASSERT(results->entities[TOPO_LEVEL_PKG] > 0,
	    ("bug in topology or analysis"));

	return (1);
}

#endif /* SMP */