/*-
 * Copyright (c) 2001, John Baldwin <jhb@FreeBSD.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module holds the global variables and machine independent functions
 * used for the kernel SMP support.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>
#include <machine/smp.h>

#include "opt_sched.h"

#ifdef SMP
volatile cpumask_t stopped_cpus;
volatile cpumask_t started_cpus;
cpumask_t idle_cpus_mask;
cpumask_t hlt_cpus_mask;
cpumask_t logical_cpus_mask;

void (*cpustop_restartfunc)(void);
#endif
/* This is used in modules that need to work in both SMP and UP. */
cpumask_t all_cpus;

int mp_ncpus;
/* Export this for libkvm consumers. */
int mp_maxcpus = MAXCPU;

volatile int smp_started;
u_int mp_maxid;

SYSCTL_NODE(_kern, OID_AUTO, smp, CTLFLAG_RD, NULL, "Kernel SMP");

SYSCTL_INT(_kern_smp, OID_AUTO, maxid, CTLFLAG_RD, &mp_maxid, 0,
    "Max CPU ID.");

SYSCTL_INT(_kern_smp, OID_AUTO, maxcpus, CTLFLAG_RD, &mp_maxcpus, 0,
    "Max number of CPUs that the system was compiled for.");

int smp_active = 0;	/* Are the APs allowed to run? */
SYSCTL_INT(_kern_smp, OID_AUTO, active, CTLFLAG_RW, &smp_active, 0,
    "Number of Auxiliary Processors (APs) that were successfully started");

int smp_disabled = 0;	/* Has SMP been disabled? */
SYSCTL_INT(_kern_smp, OID_AUTO, disabled, CTLFLAG_RDTUN, &smp_disabled, 0,
    "SMP has been disabled from the loader");
TUNABLE_INT("kern.smp.disabled", &smp_disabled);

int smp_cpus = 1;	/* How many CPUs are running. */
SYSCTL_INT(_kern_smp, OID_AUTO, cpus, CTLFLAG_RD, &smp_cpus, 0,
    "Number of CPUs online");

int smp_topology = 0;	/* Which topology we're using. */
SYSCTL_INT(_kern_smp, OID_AUTO, topology, CTLFLAG_RD, &smp_topology, 0,
    "Topology override setting; 0 is default provided by hardware.");
TUNABLE_INT("kern.smp.topology", &smp_topology);
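/*
 * The kern.smp.* sysctls above are readable from userland.  As a minimal
 * illustrative sketch (not part of this module), a userland program could
 * query the online CPU count via sysctlbyname(3):
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		int cpus;
 *		size_t len = sizeof(cpus);
 *
 *		if (sysctlbyname("kern.smp.cpus", &cpus, &len, NULL, 0) == 0)
 *			printf("%d CPUs online\n", cpus);
 *		return (0);
 *	}
 */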
#ifdef SMP
/* Enable forwarding of a signal to a process running on a different CPU. */
static int forward_signal_enabled = 1;
SYSCTL_INT(_kern_smp, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
    &forward_signal_enabled, 0,
    "Forwarding of a signal to a process on a different CPU");

/* Variables needed for SMP rendezvous. */
static volatile int smp_rv_ncpus;
static void (*volatile smp_rv_setup_func)(void *arg);
static void (*volatile smp_rv_action_func)(void *arg);
static void (*volatile smp_rv_teardown_func)(void *arg);
static void *volatile smp_rv_func_arg;
static volatile int smp_rv_waiters[3];

/*
 * Shared mutex to restrict busywaits between smp_rendezvous() and
 * smp(_targeted)_tlb_shootdown().  A deadlock occurs if both of these
 * functions trigger at once and cause multiple CPUs to busywait with
 * interrupts disabled.
 */
struct mtx smp_ipi_mtx;

/*
 * Let the MD SMP code initialize mp_maxid very early if it can.
 */
static void
mp_setmaxid(void *dummy)
{

	cpu_mp_setmaxid();
}
SYSINIT(cpu_mp_setmaxid, SI_SUB_TUNABLES, SI_ORDER_FIRST, mp_setmaxid, NULL);

/*
 * Call the MD SMP initialization code.
 */
static void
mp_start(void *dummy)
{

	/* Probe for MP hardware. */
	if (smp_disabled != 0 || cpu_mp_probe() == 0) {
		mp_ncpus = 1;
		all_cpus = PCPU_GET(cpumask);
		return;
	}

	mtx_init(&smp_ipi_mtx, "smp rendezvous", NULL, MTX_SPIN);
	cpu_mp_start();
	printf("FreeBSD/SMP: Multiprocessor System Detected: %d CPUs\n",
	    mp_ncpus);
	cpu_mp_announce();
}
SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_THIRD, mp_start, NULL);

void
forward_signal(struct thread *td)
{
	int id;

	/*
	 * signotify() has already set TDF_ASTPENDING and TDF_NEEDSIGCHK on
	 * this thread, so all we need to do is poke it if it is currently
	 * executing so that it executes ast().
	 */
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_RUNNING(td),
	    ("forward_signal: thread is not TDS_RUNNING"));

	CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);

	if (!smp_started || cold || panicstr)
		return;
	if (!forward_signal_enabled)
		return;

	/* No need to IPI ourself. */
	if (td == curthread)
		return;

	id = td->td_oncpu;
	if (id == NOCPU)
		return;
	ipi_selected(1 << id, IPI_AST);
}

/*
 * When called, the executing CPU will send an IPI to all other CPUs in
 * the map requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to stop.
 *  - Waits for each to stop.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 *
 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
 *            from executing at the same time.
 */
static int
generic_stop_cpus(cpumask_t map, u_int type)
{
	int i;

	KASSERT(type == IPI_STOP || type == IPI_STOP_HARD,
	    ("%s: invalid stop type", __func__));

	if (!smp_started)
		return (0);

	CTR2(KTR_SMP, "stop_cpus(%x) with %u type", map, type);

	/* Send the stop IPI to all CPUs in map. */
	ipi_selected(map, type);

	i = 0;
	while ((stopped_cpus & map) != map) {
		/* spin */
		cpu_spinwait();
		i++;
#ifdef DIAGNOSTIC
		if (i == 100000) {
			printf("timeout stopping cpus\n");
			break;
		}
#endif
	}

	return (1);
}

int
stop_cpus(cpumask_t map)
{

	return (generic_stop_cpus(map, IPI_STOP));
}

int
stop_cpus_hard(cpumask_t map)
{

	return (generic_stop_cpus(map, IPI_STOP_HARD));
}

#if defined(__amd64__)
/*
 * When called, the executing CPU will send an IPI to all other CPUs in
 * the map requesting that they suspend execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to suspend.
 *  - Waits for each to suspend.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 *
 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
 *            from executing at the same time.
 */
int
suspend_cpus(cpumask_t map)
{
	int i;

	if (!smp_started)
		return (0);

	CTR1(KTR_SMP, "suspend_cpus(%x)", map);

	/* Send the suspend IPI to all CPUs in map. */
	ipi_selected(map, IPI_SUSPEND);

	i = 0;
	while ((stopped_cpus & map) != map) {
		/* spin */
		cpu_spinwait();
		i++;
#ifdef DIAGNOSTIC
		if (i == 100000) {
			printf("timeout suspending cpus\n");
			break;
		}
#endif
	}

	return (1);
}
#endif

/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 *  - Signals all CPUs in map to restart.
 *  - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
int
restart_cpus(cpumask_t map)
{

	if (!smp_started)
		return (0);

	CTR1(KTR_SMP, "restart_cpus(%x)", map);

	/* Signal the other CPUs to restart. */
	atomic_store_rel_int(&started_cpus, map);

	/* Wait for each CPU to clear its bit. */
	while ((stopped_cpus & map) != 0)
		cpu_spinwait();

	return (1);
}
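/*
 * Illustrative sketch of how a caller pairs the primitives above; the
 * caller and the exact mask used are hypothetical, not part of this file:
 *
 *	cpumask_t map;
 *
 *	map = PCPU_GET(other_cpus);	(every CPU but ourselves)
 *	stop_cpus(map);			(spins until all of map has stopped)
 *	... mutate state no other CPU may observe mid-update ...
 *	restart_cpus(stopped_cpus);	(spins until all bits clear)
 */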
/*
 * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
 * (if specified), rendezvous, execute the action function (if specified),
 * rendezvous again, execute the teardown function (if specified), and then
 * resume.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */
void
smp_rendezvous_action(void)
{
	void *local_func_arg = smp_rv_func_arg;
	void (*local_setup_func)(void *) = smp_rv_setup_func;
	void (*local_action_func)(void *) = smp_rv_action_func;
	void (*local_teardown_func)(void *) = smp_rv_teardown_func;

	/* Ensure we have up-to-date values. */
	atomic_add_acq_int(&smp_rv_waiters[0], 1);
	while (smp_rv_waiters[0] < smp_rv_ncpus)
		cpu_spinwait();

	/* Setup function; use the cached copies read above. */
	if (local_setup_func != smp_no_rendevous_barrier) {
		if (local_setup_func != NULL)
			local_setup_func(local_func_arg);

		/* spin on entry rendezvous */
		atomic_add_int(&smp_rv_waiters[1], 1);
		while (smp_rv_waiters[1] < smp_rv_ncpus)
			cpu_spinwait();
	}

	/* action function */
	if (local_action_func != NULL)
		local_action_func(local_func_arg);

	/* spin on exit rendezvous */
	atomic_add_int(&smp_rv_waiters[2], 1);
	if (local_teardown_func == smp_no_rendevous_barrier)
		return;
	while (smp_rv_waiters[2] < smp_rv_ncpus)
		cpu_spinwait();

	/* teardown function */
	if (local_teardown_func != NULL)
		local_teardown_func(local_func_arg);
}

void
smp_rendezvous_cpus(cpumask_t map,
	void (*setup_func)(void *),
	void (*action_func)(void *),
	void (*teardown_func)(void *),
	void *arg)
{
	int i, ncpus = 0;

	if (!smp_started) {
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		return;
	}

	for (i = 0; i <= mp_maxid; i++)
		if (((1 << i) & map) != 0 && !CPU_ABSENT(i))
			ncpus++;
	if (ncpus == 0)
		panic("ncpus is 0 with map=0x%x", map);

	/* Obtain the rendezvous lock. */
	mtx_lock_spin(&smp_ipi_mtx);

	/* Set static function pointers. */
	smp_rv_ncpus = ncpus;
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[1] = 0;
	smp_rv_waiters[2] = 0;
	atomic_store_rel_int(&smp_rv_waiters[0], 0);

	/*
	 * Signal the other processors, which will enter the IPI handler
	 * with interrupts off.
	 */
	ipi_selected(map & ~(1 << curcpu), IPI_RENDEZVOUS);

	/* Check if the current CPU is in the map. */
	if ((map & (1 << curcpu)) != 0)
		smp_rendezvous_action();

	if (teardown_func == smp_no_rendevous_barrier)
		while (atomic_load_acq_int(&smp_rv_waiters[2]) < ncpus)
			cpu_spinwait();

	/* Release the lock. */
	mtx_unlock_spin(&smp_ipi_mtx);
}

void
smp_rendezvous(void (*setup_func)(void *),
	void (*action_func)(void *),
	void (*teardown_func)(void *),
	void *arg)
{

	smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func,
	    arg);
}
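/*
 * Illustrative sketch of a rendezvous caller (hypothetical, not part of
 * this file): to run a function once on every CPU with full barrier
 * semantics, only an action function is needed; the unused hooks are
 * passed as NULL:
 *
 *	static void
 *	count_cpu(void *arg)
 *	{
 *
 *		atomic_add_int((u_int *)arg, 1);
 *	}
 *
 *	u_int visited = 0;
 *
 *	smp_rendezvous(NULL, count_cpu, NULL, &visited);
 *	KASSERT(visited == mp_ncpus, ("some CPU missed the rendezvous"));
 */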
static struct cpu_group group[MAXCPU];

struct cpu_group *
smp_topo(void)
{
	struct cpu_group *top;

	/*
	 * Check for a fake topology request for debugging purposes.
	 */
	switch (smp_topology) {
	case 1:
		/* Dual core with no sharing. */
		top = smp_topo_1level(CG_SHARE_NONE, 2, 0);
		break;
	case 2:
		/* No topology; all CPUs are equal. */
		top = smp_topo_none();
		break;
	case 3:
		/* Dual core with shared L2. */
		top = smp_topo_1level(CG_SHARE_L2, 2, 0);
		break;
	case 4:
		/* Quad core, shared L3 within each package, private L2. */
		top = smp_topo_1level(CG_SHARE_L3, 4, 0);
		break;
	case 5:
		/* Quad core, two dual-core parts per package, shared L2. */
		top = smp_topo_2level(CG_SHARE_NONE, 2, CG_SHARE_L2, 2, 0);
		break;
	case 6:
		/* Single-core 2xHTT. */
		top = smp_topo_1level(CG_SHARE_L1, 2, CG_FLAG_HTT);
		break;
	case 7:
		/* Quad core with a shared L3, 8 threads sharing L2. */
		top = smp_topo_2level(CG_SHARE_L3, 4, CG_SHARE_L2, 8,
		    CG_FLAG_SMT);
		break;
	default:
		/* Default, ask the system what it wants. */
		top = cpu_topo();
		break;
	}
	/*
	 * Verify the returned topology.
	 */
	if (top->cg_count != mp_ncpus)
		panic("Built bad topology at %p.  CPU count %d != %d",
		    top, top->cg_count, mp_ncpus);
	if (top->cg_mask != all_cpus)
		panic("Built bad topology at %p.  CPU mask 0x%X != 0x%X",
		    top, top->cg_mask, all_cpus);
	return (top);
}

struct cpu_group *
smp_topo_none(void)
{
	struct cpu_group *top;

	top = &group[0];
	top->cg_parent = NULL;
	top->cg_child = NULL;
	top->cg_mask = (1 << mp_ncpus) - 1;
	top->cg_count = mp_ncpus;
	top->cg_children = 0;
	top->cg_level = CG_SHARE_NONE;
	top->cg_flags = 0;

	return (top);
}

static int
smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share,
    int count, int flags, int start)
{
	cpumask_t mask;
	int i;

	for (mask = 0, i = 0; i < count; i++, start++)
		mask |= (1 << start);
	child->cg_parent = parent;
	child->cg_child = NULL;
	child->cg_children = 0;
	child->cg_level = share;
	child->cg_count = count;
	child->cg_flags = flags;
	child->cg_mask = mask;
	parent->cg_children++;
	for (; parent != NULL; parent = parent->cg_parent) {
		if ((parent->cg_mask & child->cg_mask) != 0)
			panic("Duplicate children in %p.  mask 0x%X child 0x%X",
			    parent, parent->cg_mask, child->cg_mask);
		parent->cg_mask |= child->cg_mask;
		parent->cg_count += child->cg_count;
	}

	return (start);
}

struct cpu_group *
smp_topo_1level(int share, int count, int flags)
{
	struct cpu_group *child;
	struct cpu_group *top;
	int packages;
	int cpu;
	int i;

	cpu = 0;
	top = &group[0];
	packages = mp_ncpus / count;
	top->cg_child = child = &group[1];
	top->cg_level = CG_SHARE_NONE;
	for (i = 0; i < packages; i++, child++)
		cpu = smp_topo_addleaf(top, child, share, count, flags, cpu);
	return (top);
}

struct cpu_group *
smp_topo_2level(int l2share, int l2count, int l1share, int l1count,
    int l1flags)
{
	struct cpu_group *top;
	struct cpu_group *l1g;
	struct cpu_group *l2g;
	int cpu;
	int i;
	int j;

	cpu = 0;
	top = &group[0];
	l2g = &group[1];
	top->cg_child = l2g;
	top->cg_level = CG_SHARE_NONE;
	top->cg_children = mp_ncpus / (l2count * l1count);
	l1g = l2g + top->cg_children;
	for (i = 0; i < top->cg_children; i++, l2g++) {
		l2g->cg_parent = top;
		l2g->cg_child = l1g;
		l2g->cg_level = l2share;
		for (j = 0; j < l2count; j++, l1g++)
			cpu = smp_topo_addleaf(l2g, l1g, l1share, l1count,
			    l1flags, cpu);
	}
	return (top);
}

struct cpu_group *
smp_topo_find(struct cpu_group *top, int cpu)
{
	struct cpu_group *cg;
	cpumask_t mask;
	int children;
	int i;

	mask = (1 << cpu);
	cg = top;
	for (;;) {
		if ((cg->cg_mask & mask) == 0)
			return (NULL);
		if (cg->cg_children == 0)
			return (cg);
		children = cg->cg_children;
		for (i = 0, cg = cg->cg_child; i < children; cg++, i++)
			if ((cg->cg_mask & mask) != 0)
				break;
	}
	return (NULL);
}
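/*
 * Illustrative sketch of a topology consumer (hypothetical, not part of
 * this file).  Assuming the CG_SHARE_* constants order cache levels so
 * that deeper (closer to the CPU) levels have larger values, a scheduler
 * can find the set of CPUs sharing an L2 with a given CPU by locating its
 * leaf group and walking toward the root:
 *
 *	struct cpu_group *cg;
 *
 *	cg = smp_topo_find(smp_topo(), cpu);
 *	while (cg != NULL && cg->cg_level > CG_SHARE_L2)
 *		cg = cg->cg_parent;
 *	(if cg != NULL, cg->cg_mask covers the CPUs sharing L2 with 'cpu')
 */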
#else /* !SMP */

void
smp_rendezvous_cpus(cpumask_t map,
	void (*setup_func)(void *),
	void (*action_func)(void *),
	void (*teardown_func)(void *),
	void *arg)
{

	if (setup_func != NULL)
		setup_func(arg);
	if (action_func != NULL)
		action_func(arg);
	if (teardown_func != NULL)
		teardown_func(arg);
}

void
smp_rendezvous(void (*setup_func)(void *),
	void (*action_func)(void *),
	void (*teardown_func)(void *),
	void *arg)
{

	if (setup_func != NULL)
		setup_func(arg);
	if (action_func != NULL)
		action_func(arg);
	if (teardown_func != NULL)
		teardown_func(arg);
}
/*
 * Provide dummy SMP support for UP kernels.  Modules that need to use SMP
 * APIs will still work using this dummy support.
 */
static void
mp_setvariables_for_up(void *dummy)
{

	mp_ncpus = 1;
	mp_maxid = PCPU_GET(cpuid);
	all_cpus = PCPU_GET(cpumask);
	KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero"));
}
SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,
    mp_setvariables_for_up, NULL);
#endif /* SMP */

void
smp_no_rendevous_barrier(void *dummy)
{
#ifdef SMP
	KASSERT((!smp_started), ("smp_no_rendevous called and smp is started"));
#endif
}
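/*
 * Note that smp_no_rendevous_barrier() is a sentinel that the rendezvous
 * code above compares function pointers against, not a callback that is
 * meant to run (the KASSERT enforces this once SMP is started).  An
 * illustrative, hypothetical use: passing it as the teardown hook lets
 * each CPU leave the exit rendezvous as soon as it has finished the
 * action, while the initiating CPU still waits for all CPUs before
 * releasing smp_ipi_mtx:
 *
 *	smp_rendezvous(NULL, do_work, smp_no_rendevous_barrier, arg);
 *
 * Passing it as setup_func similarly skips the entry rendezvous barrier.
 */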