1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * SMP initialisation and IPI support 4 * Based on arch/arm/kernel/smp.c 5 * 6 * Copyright (C) 2012 ARM Ltd. 7 */ 8 9 #include <linux/acpi.h> 10 #include <linux/arm_sdei.h> 11 #include <linux/delay.h> 12 #include <linux/init.h> 13 #include <linux/spinlock.h> 14 #include <linux/sched/mm.h> 15 #include <linux/sched/hotplug.h> 16 #include <linux/sched/task_stack.h> 17 #include <linux/interrupt.h> 18 #include <linux/cache.h> 19 #include <linux/profile.h> 20 #include <linux/errno.h> 21 #include <linux/mm.h> 22 #include <linux/err.h> 23 #include <linux/cpu.h> 24 #include <linux/smp.h> 25 #include <linux/seq_file.h> 26 #include <linux/irq.h> 27 #include <linux/irqchip/arm-gic-v3.h> 28 #include <linux/percpu.h> 29 #include <linux/clockchips.h> 30 #include <linux/completion.h> 31 #include <linux/of.h> 32 #include <linux/irq_work.h> 33 #include <linux/kernel_stat.h> 34 #include <linux/kexec.h> 35 #include <linux/kgdb.h> 36 #include <linux/kvm_host.h> 37 #include <linux/nmi.h> 38 39 #include <asm/alternative.h> 40 #include <asm/atomic.h> 41 #include <asm/cacheflush.h> 42 #include <asm/cpu.h> 43 #include <asm/cputype.h> 44 #include <asm/cpu_ops.h> 45 #include <asm/daifflags.h> 46 #include <asm/kvm_mmu.h> 47 #include <asm/mmu_context.h> 48 #include <asm/numa.h> 49 #include <asm/processor.h> 50 #include <asm/smp_plat.h> 51 #include <asm/sections.h> 52 #include <asm/tlbflush.h> 53 #include <asm/ptrace.h> 54 #include <asm/virt.h> 55 56 #include <trace/events/ipi.h> 57 58 /* 59 * as from 2.5, kernels no longer have an init_tasks structure 60 * so we need some other way of telling a new secondary core 61 * where to place its SVC stack 62 */ 63 struct secondary_data secondary_data; 64 /* Number of CPUs which aren't online, but looping in kernel text. */ 65 static int cpus_stuck_in_kernel; 66 67 static int ipi_irq_base __ro_after_init; 68 static int nr_ipi __ro_after_init = NR_IPI; 69 70 struct ipi_descs { 71 struct irq_desc *descs[MAX_IPI]; 72 }; 73 74 static DEFINE_PER_CPU_READ_MOSTLY(struct ipi_descs, pcpu_ipi_desc); 75 76 #define get_ipi_desc(__cpu, __ipi) (per_cpu_ptr(&pcpu_ipi_desc, __cpu)->descs[__ipi]) 77 78 static bool percpu_ipi_descs __ro_after_init; 79 80 static bool crash_stop; 81 82 static void ipi_setup(int cpu); 83 84 #ifdef CONFIG_HOTPLUG_CPU 85 static void ipi_teardown(int cpu); 86 static int op_cpu_kill(unsigned int cpu); 87 #else 88 static inline int op_cpu_kill(unsigned int cpu) 89 { 90 return -ENOSYS; 91 } 92 #endif 93 94 95 /* 96 * Boot a secondary CPU, and assign it the specified idle task. 97 * This also gives us the initial stack to use for this CPU. 98 */ 99 static int boot_secondary(unsigned int cpu, struct task_struct *idle) 100 { 101 const struct cpu_operations *ops = get_cpu_ops(cpu); 102 103 if (ops->cpu_boot) 104 return ops->cpu_boot(cpu); 105 106 return -EOPNOTSUPP; 107 } 108 109 static DECLARE_COMPLETION(cpu_running); 110 111 int __cpu_up(unsigned int cpu, struct task_struct *idle) 112 { 113 int ret; 114 long status; 115 116 /* 117 * We need to tell the secondary core where to find its stack and the 118 * page tables. 119 */ 120 secondary_data.task = idle; 121 update_cpu_boot_status(CPU_MMU_OFF); 122 123 /* Now bring the CPU into our world */ 124 ret = boot_secondary(cpu, idle); 125 if (ret) { 126 if (ret != -EPERM) 127 pr_err("CPU%u: failed to boot: %d\n", cpu, ret); 128 return ret; 129 } 130 131 /* 132 * CPU was successfully started, wait for it to come online or 133 * time out. 134 */ 135 wait_for_completion_timeout(&cpu_running, 136 msecs_to_jiffies(5000)); 137 if (cpu_online(cpu)) 138 return 0; 139 140 pr_crit("CPU%u: failed to come online\n", cpu); 141 secondary_data.task = NULL; 142 status = READ_ONCE(secondary_data.status); 143 if (status == CPU_MMU_OFF) 144 status = READ_ONCE(__early_cpu_boot_status); 145 146 switch (status & CPU_BOOT_STATUS_MASK) { 147 default: 148 pr_err("CPU%u: failed in unknown state : 0x%lx\n", 149 cpu, status); 150 cpus_stuck_in_kernel++; 151 break; 152 case CPU_KILL_ME: 153 if (!op_cpu_kill(cpu)) { 154 pr_crit("CPU%u: died during early boot\n", cpu); 155 break; 156 } 157 pr_crit("CPU%u: may not have shut down cleanly\n", cpu); 158 fallthrough; 159 case CPU_STUCK_IN_KERNEL: 160 pr_crit("CPU%u: is stuck in kernel\n", cpu); 161 if (status & CPU_STUCK_REASON_52_BIT_VA) 162 pr_crit("CPU%u: does not support 52-bit VAs\n", cpu); 163 if (status & CPU_STUCK_REASON_NO_GRAN) { 164 pr_crit("CPU%u: does not support %luK granule\n", 165 cpu, PAGE_SIZE / SZ_1K); 166 } 167 cpus_stuck_in_kernel++; 168 break; 169 case CPU_PANIC_KERNEL: 170 panic("CPU%u detected unsupported configuration\n", cpu); 171 } 172 173 return -EIO; 174 } 175 176 static void init_gic_priority_masking(void) 177 { 178 u32 cpuflags; 179 180 if (WARN_ON(!gic_enable_sre())) 181 return; 182 183 cpuflags = read_sysreg(daif); 184 185 WARN_ON(!(cpuflags & PSR_I_BIT)); 186 WARN_ON(!(cpuflags & PSR_F_BIT)); 187 188 gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); 189 } 190 191 /* 192 * This is the secondary CPU boot entry. We're using this CPUs 193 * idle thread stack, but a set of temporary page tables. 194 */ 195 asmlinkage notrace void secondary_start_kernel(void) 196 { 197 u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK; 198 struct mm_struct *mm = &init_mm; 199 const struct cpu_operations *ops; 200 unsigned int cpu = smp_processor_id(); 201 202 /* 203 * All kernel threads share the same mm context; grab a 204 * reference and switch to it. 205 */ 206 mmgrab(mm); 207 current->active_mm = mm; 208 209 /* 210 * TTBR0 is only used for the identity mapping at this stage. Make it 211 * point to zero page to avoid speculatively fetching new entries. 212 */ 213 cpu_uninstall_idmap(); 214 215 if (system_uses_irq_prio_masking()) 216 init_gic_priority_masking(); 217 218 rcutree_report_cpu_starting(cpu); 219 trace_hardirqs_off(); 220 221 /* 222 * If the system has established the capabilities, make sure 223 * this CPU ticks all of those. If it doesn't, the CPU will 224 * fail to come online. 225 */ 226 check_local_cpu_capabilities(); 227 228 ops = get_cpu_ops(cpu); 229 if (ops->cpu_postboot) 230 ops->cpu_postboot(); 231 232 /* 233 * Log the CPU info before it is marked online and might get read. 234 */ 235 cpuinfo_store_cpu(); 236 store_cpu_topology(cpu); 237 238 /* 239 * Enable GIC and timers. 240 */ 241 notify_cpu_starting(cpu); 242 243 ipi_setup(cpu); 244 245 numa_add_cpu(cpu); 246 247 /* 248 * OK, now it's safe to let the boot CPU continue. Wait for 249 * the CPU migration code to notice that the CPU is online 250 * before we continue. 251 */ 252 pr_info("CPU%u: Booted secondary processor 0x%010lx [0x%08x]\n", 253 cpu, (unsigned long)mpidr, 254 read_cpuid_id()); 255 update_cpu_boot_status(CPU_BOOT_SUCCESS); 256 set_cpu_online(cpu, true); 257 complete(&cpu_running); 258 259 /* 260 * Secondary CPUs enter the kernel with all DAIF exceptions masked. 261 * 262 * As with setup_arch() we must unmask Debug and SError exceptions, and 263 * as the root irqchip has already been detected and initialized we can 264 * unmask IRQ and FIQ at the same time. 265 */ 266 local_daif_restore(DAIF_PROCCTX); 267 268 /* 269 * OK, it's off to the idle thread for us 270 */ 271 cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); 272 } 273 274 #ifdef CONFIG_HOTPLUG_CPU 275 static int op_cpu_disable(unsigned int cpu) 276 { 277 const struct cpu_operations *ops = get_cpu_ops(cpu); 278 279 /* 280 * If we don't have a cpu_die method, abort before we reach the point 281 * of no return. CPU0 may not have an cpu_ops, so test for it. 282 */ 283 if (!ops || !ops->cpu_die) 284 return -EOPNOTSUPP; 285 286 /* 287 * We may need to abort a hot unplug for some other mechanism-specific 288 * reason. 289 */ 290 if (ops->cpu_disable) 291 return ops->cpu_disable(cpu); 292 293 return 0; 294 } 295 296 /* 297 * __cpu_disable runs on the processor to be shutdown. 298 */ 299 int __cpu_disable(void) 300 { 301 unsigned int cpu = smp_processor_id(); 302 int ret; 303 304 ret = op_cpu_disable(cpu); 305 if (ret) 306 return ret; 307 308 remove_cpu_topology(cpu); 309 numa_remove_cpu(cpu); 310 311 /* 312 * Take this CPU offline. Once we clear this, we can't return, 313 * and we must not schedule until we're ready to give up the cpu. 314 */ 315 set_cpu_online(cpu, false); 316 ipi_teardown(cpu); 317 318 /* 319 * OK - migrate IRQs away from this CPU 320 */ 321 irq_migrate_all_off_this_cpu(); 322 323 return 0; 324 } 325 326 static int op_cpu_kill(unsigned int cpu) 327 { 328 const struct cpu_operations *ops = get_cpu_ops(cpu); 329 330 /* 331 * If we have no means of synchronising with the dying CPU, then assume 332 * that it is really dead. We can only wait for an arbitrary length of 333 * time and hope that it's dead, so let's skip the wait and just hope. 334 */ 335 if (!ops->cpu_kill) 336 return 0; 337 338 return ops->cpu_kill(cpu); 339 } 340 341 /* 342 * Called on the thread which is asking for a CPU to be shutdown after the 343 * shutdown completed. 344 */ 345 void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) 346 { 347 int err; 348 349 pr_debug("CPU%u: shutdown\n", cpu); 350 351 /* 352 * Now that the dying CPU is beyond the point of no return w.r.t. 353 * in-kernel synchronisation, try to get the firwmare to help us to 354 * verify that it has really left the kernel before we consider 355 * clobbering anything it might still be using. 356 */ 357 err = op_cpu_kill(cpu); 358 if (err) 359 pr_warn("CPU%d may not have shut down cleanly: %d\n", cpu, err); 360 } 361 362 /* 363 * Called from the idle thread for the CPU which has been shutdown. 364 * 365 */ 366 void __noreturn cpu_die(void) 367 { 368 unsigned int cpu = smp_processor_id(); 369 const struct cpu_operations *ops = get_cpu_ops(cpu); 370 371 idle_task_exit(); 372 373 local_daif_mask(); 374 375 /* Tell cpuhp_bp_sync_dead() that this CPU is now safe to dispose of */ 376 cpuhp_ap_report_dead(); 377 378 /* 379 * Actually shutdown the CPU. This must never fail. The specific hotplug 380 * mechanism must perform all required cache maintenance to ensure that 381 * no dirty lines are lost in the process of shutting down the CPU. 382 */ 383 ops->cpu_die(cpu); 384 385 BUG(); 386 } 387 #endif 388 389 static void __cpu_try_die(int cpu) 390 { 391 #ifdef CONFIG_HOTPLUG_CPU 392 const struct cpu_operations *ops = get_cpu_ops(cpu); 393 394 if (ops && ops->cpu_die) 395 ops->cpu_die(cpu); 396 #endif 397 } 398 399 /* 400 * Kill the calling secondary CPU, early in bringup before it is turned 401 * online. 402 */ 403 void __noreturn cpu_die_early(void) 404 { 405 int cpu = smp_processor_id(); 406 407 pr_crit("CPU%d: will not boot\n", cpu); 408 409 /* Mark this CPU absent */ 410 set_cpu_present(cpu, 0); 411 rcutree_report_cpu_dead(); 412 413 if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) { 414 update_cpu_boot_status(CPU_KILL_ME); 415 __cpu_try_die(cpu); 416 } 417 418 update_cpu_boot_status(CPU_STUCK_IN_KERNEL); 419 420 cpu_park_loop(); 421 } 422 423 static void __init hyp_mode_check(void) 424 { 425 if (is_hyp_mode_available()) 426 pr_info("CPU: All CPU(s) started at EL2\n"); 427 else if (is_hyp_mode_mismatched()) 428 WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC, 429 "CPU: CPUs started in inconsistent modes"); 430 else 431 pr_info("CPU: All CPU(s) started at EL1\n"); 432 if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode()) { 433 kvm_compute_layout(); 434 kvm_apply_hyp_relocations(); 435 } 436 } 437 438 void __init smp_cpus_done(unsigned int max_cpus) 439 { 440 pr_info("SMP: Total of %d processors activated.\n", num_online_cpus()); 441 hyp_mode_check(); 442 setup_system_features(); 443 setup_user_features(); 444 mark_linear_text_alias_ro(); 445 } 446 447 void __init smp_prepare_boot_cpu(void) 448 { 449 /* 450 * The runtime per-cpu areas have been allocated by 451 * setup_per_cpu_areas(), and CPU0's boot time per-cpu area will be 452 * freed shortly, so we must move over to the runtime per-cpu area. 453 */ 454 set_my_cpu_offset(per_cpu_offset(smp_processor_id())); 455 456 cpuinfo_store_boot_cpu(); 457 setup_boot_cpu_features(); 458 459 /* Conditionally switch to GIC PMR for interrupt masking */ 460 if (system_uses_irq_prio_masking()) 461 init_gic_priority_masking(); 462 463 kasan_init_hw_tags(); 464 /* Init percpu seeds for random tags after cpus are set up. */ 465 kasan_init_sw_tags(); 466 } 467 468 /* 469 * Duplicate MPIDRs are a recipe for disaster. Scan all initialized 470 * entries and check for duplicates. If any is found just ignore the 471 * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid 472 * matching valid MPIDR values. 473 */ 474 static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid) 475 { 476 unsigned int i; 477 478 for (i = 1; (i < cpu) && (i < NR_CPUS); i++) 479 if (cpu_logical_map(i) == hwid) 480 return true; 481 return false; 482 } 483 484 /* 485 * Initialize cpu operations for a logical cpu and 486 * set it in the possible mask on success 487 */ 488 static int __init smp_cpu_setup(int cpu) 489 { 490 const struct cpu_operations *ops; 491 492 if (init_cpu_ops(cpu)) 493 return -ENODEV; 494 495 ops = get_cpu_ops(cpu); 496 if (ops->cpu_init(cpu)) 497 return -ENODEV; 498 499 set_cpu_possible(cpu, true); 500 501 return 0; 502 } 503 504 static bool bootcpu_valid __initdata; 505 static unsigned int cpu_count = 1; 506 507 int arch_register_cpu(int cpu) 508 { 509 acpi_handle acpi_handle = acpi_get_processor_handle(cpu); 510 struct cpu *c = &per_cpu(cpu_devices, cpu); 511 512 if (!acpi_disabled && !acpi_handle && 513 IS_ENABLED(CONFIG_ACPI_HOTPLUG_CPU)) 514 return -EPROBE_DEFER; 515 516 #ifdef CONFIG_ACPI_HOTPLUG_CPU 517 /* For now block anything that looks like physical CPU Hotplug */ 518 if (invalid_logical_cpuid(cpu) || !cpu_present(cpu)) { 519 pr_err_once("Changing CPU present bit is not supported\n"); 520 return -ENODEV; 521 } 522 #endif 523 524 /* 525 * Availability of the acpi handle is sufficient to establish 526 * that _STA has aleady been checked. No need to recheck here. 527 */ 528 c->hotpluggable = arch_cpu_is_hotpluggable(cpu); 529 530 return register_cpu(c, cpu); 531 } 532 533 #ifdef CONFIG_ACPI_HOTPLUG_CPU 534 void arch_unregister_cpu(int cpu) 535 { 536 acpi_handle acpi_handle = acpi_get_processor_handle(cpu); 537 struct cpu *c = &per_cpu(cpu_devices, cpu); 538 acpi_status status; 539 unsigned long long sta; 540 541 if (!acpi_handle) { 542 pr_err_once("Removing a CPU without associated ACPI handle\n"); 543 return; 544 } 545 546 status = acpi_evaluate_integer(acpi_handle, "_STA", NULL, &sta); 547 if (ACPI_FAILURE(status)) 548 return; 549 550 /* For now do not allow anything that looks like physical CPU HP */ 551 if (cpu_present(cpu) && !(sta & ACPI_STA_DEVICE_PRESENT)) { 552 pr_err_once("Changing CPU present bit is not supported\n"); 553 return; 554 } 555 556 unregister_cpu(c); 557 } 558 #endif /* CONFIG_ACPI_HOTPLUG_CPU */ 559 560 #ifdef CONFIG_ACPI 561 static struct acpi_madt_generic_interrupt cpu_madt_gicc[NR_CPUS]; 562 563 struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu) 564 { 565 return &cpu_madt_gicc[cpu]; 566 } 567 EXPORT_SYMBOL_GPL(acpi_cpu_get_madt_gicc); 568 569 /* 570 * acpi_map_gic_cpu_interface - parse processor MADT entry 571 * 572 * Carry out sanity checks on MADT processor entry and initialize 573 * cpu_logical_map on success 574 */ 575 static void __init 576 acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor) 577 { 578 u64 hwid = processor->arm_mpidr; 579 580 if (!(processor->flags & 581 (ACPI_MADT_ENABLED | ACPI_MADT_GICC_ONLINE_CAPABLE))) { 582 pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid); 583 return; 584 } 585 586 if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) { 587 pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid); 588 return; 589 } 590 591 if (is_mpidr_duplicate(cpu_count, hwid)) { 592 pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid); 593 return; 594 } 595 596 /* Check if GICC structure of boot CPU is available in the MADT */ 597 if (cpu_logical_map(0) == hwid) { 598 if (bootcpu_valid) { 599 pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n", 600 hwid); 601 return; 602 } 603 bootcpu_valid = true; 604 cpu_madt_gicc[0] = *processor; 605 return; 606 } 607 608 if (cpu_count >= NR_CPUS) 609 return; 610 611 /* map the logical cpu id to cpu MPIDR */ 612 set_cpu_logical_map(cpu_count, hwid); 613 614 cpu_madt_gicc[cpu_count] = *processor; 615 616 /* 617 * Set-up the ACPI parking protocol cpu entries 618 * while initializing the cpu_logical_map to 619 * avoid parsing MADT entries multiple times for 620 * nothing (ie a valid cpu_logical_map entry should 621 * contain a valid parking protocol data set to 622 * initialize the cpu if the parking protocol is 623 * the only available enable method). 624 */ 625 acpi_set_mailbox_entry(cpu_count, processor); 626 627 cpu_count++; 628 } 629 630 static int __init 631 acpi_parse_gic_cpu_interface(union acpi_subtable_headers *header, 632 const unsigned long end) 633 { 634 struct acpi_madt_generic_interrupt *processor; 635 636 processor = (struct acpi_madt_generic_interrupt *)header; 637 if (BAD_MADT_GICC_ENTRY(processor, end)) 638 return -EINVAL; 639 640 acpi_table_print_madt_entry(&header->common); 641 642 acpi_map_gic_cpu_interface(processor); 643 644 return 0; 645 } 646 647 static void __init acpi_parse_and_init_cpus(void) 648 { 649 int i; 650 651 /* 652 * do a walk of MADT to determine how many CPUs 653 * we have including disabled CPUs, and get information 654 * we need for SMP init. 655 */ 656 acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, 657 acpi_parse_gic_cpu_interface, 0); 658 659 /* 660 * In ACPI, SMP and CPU NUMA information is provided in separate 661 * static tables, namely the MADT and the SRAT. 662 * 663 * Thus, it is simpler to first create the cpu logical map through 664 * an MADT walk and then map the logical cpus to their node ids 665 * as separate steps. 666 */ 667 acpi_map_cpus_to_nodes(); 668 669 for (i = 0; i < nr_cpu_ids; i++) 670 early_map_cpu_to_node(i, acpi_numa_get_nid(i)); 671 } 672 #else 673 #define acpi_parse_and_init_cpus(...) do { } while (0) 674 #endif 675 676 /* 677 * Enumerate the possible CPU set from the device tree and build the 678 * cpu logical map array containing MPIDR values related to logical 679 * cpus. Assumes that cpu_logical_map(0) has already been initialized. 680 */ 681 static void __init of_parse_and_init_cpus(void) 682 { 683 struct device_node *dn; 684 685 for_each_of_cpu_node(dn) { 686 u64 hwid = of_get_cpu_hwid(dn, 0); 687 688 if (hwid & ~MPIDR_HWID_BITMASK) 689 goto next; 690 691 if (is_mpidr_duplicate(cpu_count, hwid)) { 692 pr_err("%pOF: duplicate cpu reg properties in the DT\n", 693 dn); 694 goto next; 695 } 696 697 /* 698 * The numbering scheme requires that the boot CPU 699 * must be assigned logical id 0. Record it so that 700 * the logical map built from DT is validated and can 701 * be used. 702 */ 703 if (hwid == cpu_logical_map(0)) { 704 if (bootcpu_valid) { 705 pr_err("%pOF: duplicate boot cpu reg property in DT\n", 706 dn); 707 goto next; 708 } 709 710 bootcpu_valid = true; 711 early_map_cpu_to_node(0, of_node_to_nid(dn)); 712 713 /* 714 * cpu_logical_map has already been 715 * initialized and the boot cpu doesn't need 716 * the enable-method so continue without 717 * incrementing cpu. 718 */ 719 continue; 720 } 721 722 if (cpu_count >= NR_CPUS) 723 goto next; 724 725 pr_debug("cpu logical map 0x%llx\n", hwid); 726 set_cpu_logical_map(cpu_count, hwid); 727 728 early_map_cpu_to_node(cpu_count, of_node_to_nid(dn)); 729 next: 730 cpu_count++; 731 } 732 } 733 734 /* 735 * Enumerate the possible CPU set from the device tree or ACPI and build the 736 * cpu logical map array containing MPIDR values related to logical 737 * cpus. Assumes that cpu_logical_map(0) has already been initialized. 738 */ 739 void __init smp_init_cpus(void) 740 { 741 int i; 742 743 if (acpi_disabled) 744 of_parse_and_init_cpus(); 745 else 746 acpi_parse_and_init_cpus(); 747 748 if (cpu_count > nr_cpu_ids) 749 pr_warn("Number of cores (%d) exceeds configured maximum of %u - clipping\n", 750 cpu_count, nr_cpu_ids); 751 752 if (!bootcpu_valid) { 753 pr_err("missing boot CPU MPIDR, not enabling secondaries\n"); 754 return; 755 } 756 757 /* 758 * We need to set the cpu_logical_map entries before enabling 759 * the cpus so that cpu processor description entries (DT cpu nodes 760 * and ACPI MADT entries) can be retrieved by matching the cpu hwid 761 * with entries in cpu_logical_map while initializing the cpus. 762 * If the cpu set-up fails, invalidate the cpu_logical_map entry. 763 */ 764 for (i = 1; i < nr_cpu_ids; i++) { 765 if (cpu_logical_map(i) != INVALID_HWID) { 766 if (smp_cpu_setup(i)) 767 set_cpu_logical_map(i, INVALID_HWID); 768 } 769 } 770 } 771 772 void __init smp_prepare_cpus(unsigned int max_cpus) 773 { 774 const struct cpu_operations *ops; 775 int err; 776 unsigned int cpu; 777 unsigned int this_cpu; 778 779 init_cpu_topology(); 780 781 this_cpu = smp_processor_id(); 782 store_cpu_topology(this_cpu); 783 numa_store_cpu_info(this_cpu); 784 numa_add_cpu(this_cpu); 785 786 /* 787 * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set 788 * secondary CPUs present. 789 */ 790 if (max_cpus == 0) 791 return; 792 793 /* 794 * Initialise the present map (which describes the set of CPUs 795 * actually populated at the present time) and release the 796 * secondaries from the bootloader. 797 */ 798 for_each_possible_cpu(cpu) { 799 800 if (cpu == smp_processor_id()) 801 continue; 802 803 ops = get_cpu_ops(cpu); 804 if (!ops) 805 continue; 806 807 err = ops->cpu_prepare(cpu); 808 if (err) 809 continue; 810 811 set_cpu_present(cpu, true); 812 numa_store_cpu_info(cpu); 813 } 814 } 815 816 static const char *ipi_types[MAX_IPI] __tracepoint_string = { 817 [IPI_RESCHEDULE] = "Rescheduling interrupts", 818 [IPI_CALL_FUNC] = "Function call interrupts", 819 [IPI_CPU_STOP] = "CPU stop interrupts", 820 [IPI_CPU_STOP_NMI] = "CPU stop NMIs", 821 [IPI_TIMER] = "Timer broadcast interrupts", 822 [IPI_IRQ_WORK] = "IRQ work interrupts", 823 [IPI_CPU_BACKTRACE] = "CPU backtrace interrupts", 824 [IPI_KGDB_ROUNDUP] = "KGDB roundup interrupts", 825 }; 826 827 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr); 828 829 unsigned long irq_err_count; 830 831 int arch_show_interrupts(struct seq_file *p, int prec) 832 { 833 unsigned int cpu, i; 834 835 for (i = 0; i < MAX_IPI; i++) { 836 seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, 837 prec >= 4 ? " " : ""); 838 for_each_online_cpu(cpu) 839 seq_printf(p, "%10u ", irq_desc_kstat_cpu(get_ipi_desc(cpu, i), cpu)); 840 seq_printf(p, " %s\n", ipi_types[i]); 841 } 842 843 seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count); 844 return 0; 845 } 846 847 void arch_send_call_function_ipi_mask(const struct cpumask *mask) 848 { 849 smp_cross_call(mask, IPI_CALL_FUNC); 850 } 851 852 void arch_send_call_function_single_ipi(int cpu) 853 { 854 smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC); 855 } 856 857 #ifdef CONFIG_IRQ_WORK 858 void arch_irq_work_raise(void) 859 { 860 smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK); 861 } 862 #endif 863 864 static void __noreturn local_cpu_stop(unsigned int cpu) 865 { 866 set_cpu_online(cpu, false); 867 868 local_daif_mask(); 869 sdei_mask_local_cpu(); 870 cpu_park_loop(); 871 } 872 873 /* 874 * We need to implement panic_smp_self_stop() for parallel panic() calls, so 875 * that cpu_online_mask gets correctly updated and smp_send_stop() can skip 876 * CPUs that have already stopped themselves. 877 */ 878 void __noreturn panic_smp_self_stop(void) 879 { 880 local_cpu_stop(smp_processor_id()); 881 } 882 883 static void __noreturn ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs) 884 { 885 #ifdef CONFIG_KEXEC_CORE 886 /* 887 * Use local_daif_mask() instead of local_irq_disable() to make sure 888 * that pseudo-NMIs are disabled. The "crash stop" code starts with 889 * an IRQ and falls back to NMI (which might be pseudo). If the IRQ 890 * finally goes through right as we're timing out then the NMI could 891 * interrupt us. It's better to prevent the NMI and let the IRQ 892 * finish since the pt_regs will be better. 893 */ 894 local_daif_mask(); 895 896 crash_save_cpu(regs, cpu); 897 898 set_cpu_online(cpu, false); 899 900 sdei_mask_local_cpu(); 901 902 if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) 903 __cpu_try_die(cpu); 904 905 /* just in case */ 906 cpu_park_loop(); 907 #else 908 BUG(); 909 #endif 910 } 911 912 static void arm64_send_ipi(const cpumask_t *mask, unsigned int nr) 913 { 914 unsigned int cpu; 915 916 if (!percpu_ipi_descs) 917 __ipi_send_mask(get_ipi_desc(0, nr), mask); 918 else 919 for_each_cpu(cpu, mask) 920 __ipi_send_single(get_ipi_desc(cpu, nr), cpu); 921 } 922 923 static void arm64_backtrace_ipi(cpumask_t *mask) 924 { 925 arm64_send_ipi(mask, IPI_CPU_BACKTRACE); 926 } 927 928 void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu) 929 { 930 /* 931 * NOTE: though nmi_trigger_cpumask_backtrace() has "nmi_" in the name, 932 * nothing about it truly needs to be implemented using an NMI, it's 933 * just that it's _allowed_ to work with NMIs. If ipi_should_be_nmi() 934 * returned false our backtrace attempt will just use a regular IPI. 935 */ 936 nmi_trigger_cpumask_backtrace(mask, exclude_cpu, arm64_backtrace_ipi); 937 } 938 939 #ifdef CONFIG_KGDB 940 void kgdb_roundup_cpus(void) 941 { 942 int this_cpu = raw_smp_processor_id(); 943 int cpu; 944 945 for_each_online_cpu(cpu) { 946 /* No need to roundup ourselves */ 947 if (cpu == this_cpu) 948 continue; 949 950 __ipi_send_single(get_ipi_desc(cpu, IPI_KGDB_ROUNDUP), cpu); 951 } 952 } 953 #endif 954 955 /* 956 * Main handler for inter-processor interrupts 957 */ 958 static void do_handle_IPI(int ipinr) 959 { 960 unsigned int cpu = smp_processor_id(); 961 962 if ((unsigned)ipinr < NR_IPI) 963 trace_ipi_entry(ipi_types[ipinr]); 964 965 switch (ipinr) { 966 case IPI_RESCHEDULE: 967 scheduler_ipi(); 968 break; 969 970 case IPI_CALL_FUNC: 971 generic_smp_call_function_interrupt(); 972 break; 973 974 case IPI_CPU_STOP: 975 case IPI_CPU_STOP_NMI: 976 if (IS_ENABLED(CONFIG_KEXEC_CORE) && crash_stop) { 977 ipi_cpu_crash_stop(cpu, get_irq_regs()); 978 unreachable(); 979 } else { 980 local_cpu_stop(cpu); 981 } 982 break; 983 984 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST 985 case IPI_TIMER: 986 tick_receive_broadcast(); 987 break; 988 #endif 989 990 #ifdef CONFIG_IRQ_WORK 991 case IPI_IRQ_WORK: 992 irq_work_run(); 993 break; 994 #endif 995 996 case IPI_CPU_BACKTRACE: 997 /* 998 * NOTE: in some cases this _won't_ be NMI context. See the 999 * comment in arch_trigger_cpumask_backtrace(). 1000 */ 1001 nmi_cpu_backtrace(get_irq_regs()); 1002 break; 1003 1004 case IPI_KGDB_ROUNDUP: 1005 kgdb_nmicallback(cpu, get_irq_regs()); 1006 break; 1007 1008 default: 1009 pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr); 1010 break; 1011 } 1012 1013 if ((unsigned)ipinr < NR_IPI) 1014 trace_ipi_exit(ipi_types[ipinr]); 1015 } 1016 1017 static irqreturn_t ipi_handler(int irq, void *data) 1018 { 1019 unsigned int ipi = (irq - ipi_irq_base) % nr_ipi; 1020 1021 do_handle_IPI(ipi); 1022 return IRQ_HANDLED; 1023 } 1024 1025 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr) 1026 { 1027 trace_ipi_raise(target, ipi_types[ipinr]); 1028 arm64_send_ipi(target, ipinr); 1029 } 1030 1031 static bool ipi_should_be_nmi(enum ipi_msg_type ipi) 1032 { 1033 if (!system_uses_irq_prio_masking()) 1034 return false; 1035 1036 switch (ipi) { 1037 case IPI_CPU_STOP_NMI: 1038 case IPI_CPU_BACKTRACE: 1039 case IPI_KGDB_ROUNDUP: 1040 return true; 1041 default: 1042 return false; 1043 } 1044 } 1045 1046 static void ipi_setup(int cpu) 1047 { 1048 int i; 1049 1050 if (WARN_ON_ONCE(!ipi_irq_base)) 1051 return; 1052 1053 for (i = 0; i < nr_ipi; i++) { 1054 if (!percpu_ipi_descs) { 1055 if (ipi_should_be_nmi(i)) { 1056 prepare_percpu_nmi(ipi_irq_base + i); 1057 enable_percpu_nmi(ipi_irq_base + i, 0); 1058 } else { 1059 enable_percpu_irq(ipi_irq_base + i, 0); 1060 } 1061 } else { 1062 enable_irq(irq_desc_get_irq(get_ipi_desc(cpu, i))); 1063 } 1064 } 1065 } 1066 1067 #ifdef CONFIG_HOTPLUG_CPU 1068 static void ipi_teardown(int cpu) 1069 { 1070 int i; 1071 1072 if (WARN_ON_ONCE(!ipi_irq_base)) 1073 return; 1074 1075 for (i = 0; i < nr_ipi; i++) { 1076 if (!percpu_ipi_descs) { 1077 if (ipi_should_be_nmi(i)) { 1078 disable_percpu_nmi(ipi_irq_base + i); 1079 teardown_percpu_nmi(ipi_irq_base + i); 1080 } else { 1081 disable_percpu_irq(ipi_irq_base + i); 1082 } 1083 } else { 1084 disable_irq(irq_desc_get_irq(get_ipi_desc(cpu, i))); 1085 } 1086 } 1087 } 1088 #endif 1089 1090 static void ipi_setup_sgi(int ipi) 1091 { 1092 int err, irq, cpu; 1093 1094 irq = ipi_irq_base + ipi; 1095 1096 if (ipi_should_be_nmi(ipi)) { 1097 err = request_percpu_nmi(irq, ipi_handler, "IPI", &irq_stat); 1098 WARN(err, "Could not request IRQ %d as NMI, err=%d\n", irq, err); 1099 } else { 1100 err = request_percpu_irq(irq, ipi_handler, "IPI", &irq_stat); 1101 WARN(err, "Could not request IRQ %d as IRQ, err=%d\n", irq, err); 1102 } 1103 1104 for_each_possible_cpu(cpu) 1105 get_ipi_desc(cpu, ipi) = irq_to_desc(irq); 1106 1107 irq_set_status_flags(irq, IRQ_HIDDEN); 1108 } 1109 1110 static void ipi_setup_lpi(int ipi, int ncpus) 1111 { 1112 for (int cpu = 0; cpu < ncpus; cpu++) { 1113 int err, irq; 1114 1115 irq = ipi_irq_base + (cpu * nr_ipi) + ipi; 1116 1117 err = irq_force_affinity(irq, cpumask_of(cpu)); 1118 WARN(err, "Could not force affinity IRQ %d, err=%d\n", irq, err); 1119 1120 err = request_irq(irq, ipi_handler, IRQF_NO_AUTOEN, "IPI", 1121 NULL); 1122 WARN(err, "Could not request IRQ %d, err=%d\n", irq, err); 1123 1124 irq_set_status_flags(irq, (IRQ_HIDDEN | IRQ_NO_BALANCING_MASK)); 1125 1126 get_ipi_desc(cpu, ipi) = irq_to_desc(irq); 1127 } 1128 } 1129 1130 void __init set_smp_ipi_range_percpu(int ipi_base, int n, int ncpus) 1131 { 1132 int i; 1133 1134 WARN_ON(n < MAX_IPI); 1135 nr_ipi = min(n, MAX_IPI); 1136 1137 percpu_ipi_descs = !!ncpus; 1138 ipi_irq_base = ipi_base; 1139 1140 for (i = 0; i < nr_ipi; i++) { 1141 if (!percpu_ipi_descs) 1142 ipi_setup_sgi(i); 1143 else 1144 ipi_setup_lpi(i, ncpus); 1145 } 1146 1147 /* Setup the boot CPU immediately */ 1148 ipi_setup(smp_processor_id()); 1149 } 1150 1151 void arch_smp_send_reschedule(int cpu) 1152 { 1153 smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); 1154 } 1155 1156 #ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL 1157 void arch_send_wakeup_ipi(unsigned int cpu) 1158 { 1159 /* 1160 * We use a scheduler IPI to wake the CPU as this avoids the need for a 1161 * dedicated IPI and we can safely handle spurious scheduler IPIs. 1162 */ 1163 smp_send_reschedule(cpu); 1164 } 1165 #endif 1166 1167 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST 1168 void tick_broadcast(const struct cpumask *mask) 1169 { 1170 smp_cross_call(mask, IPI_TIMER); 1171 } 1172 #endif 1173 1174 /* 1175 * The number of CPUs online, not counting this CPU (which may not be 1176 * fully online and so not counted in num_online_cpus()). 1177 */ 1178 static inline unsigned int num_other_online_cpus(void) 1179 { 1180 unsigned int this_cpu_online = cpu_online(smp_processor_id()); 1181 1182 return num_online_cpus() - this_cpu_online; 1183 } 1184 1185 void smp_send_stop(void) 1186 { 1187 static unsigned long stop_in_progress; 1188 static cpumask_t mask; 1189 unsigned long timeout; 1190 1191 /* 1192 * If this cpu is the only one alive at this point in time, online or 1193 * not, there are no stop messages to be sent around, so just back out. 1194 */ 1195 if (num_other_online_cpus() == 0) 1196 goto skip_ipi; 1197 1198 /* Only proceed if this is the first CPU to reach this code */ 1199 if (test_and_set_bit(0, &stop_in_progress)) 1200 return; 1201 1202 /* 1203 * Send an IPI to all currently online CPUs except the CPU running 1204 * this code. 1205 * 1206 * NOTE: we don't do anything here to prevent other CPUs from coming 1207 * online after we snapshot `cpu_online_mask`. Ideally, the calling code 1208 * should do something to prevent other CPUs from coming up. This code 1209 * can be called in the panic path and thus it doesn't seem wise to 1210 * grab the CPU hotplug mutex ourselves. Worst case: 1211 * - If a CPU comes online as we're running, we'll likely notice it 1212 * during the 1 second wait below and then we'll catch it when we try 1213 * with an NMI (assuming NMIs are enabled) since we re-snapshot the 1214 * mask before sending an NMI. 1215 * - If we leave the function and see that CPUs are still online we'll 1216 * at least print a warning. Especially without NMIs this function 1217 * isn't foolproof anyway so calling code will just have to accept 1218 * the fact that there could be cases where a CPU can't be stopped. 1219 */ 1220 cpumask_copy(&mask, cpu_online_mask); 1221 cpumask_clear_cpu(smp_processor_id(), &mask); 1222 1223 if (system_state <= SYSTEM_RUNNING) 1224 pr_crit("SMP: stopping secondary CPUs\n"); 1225 1226 /* 1227 * Start with a normal IPI and wait up to one second for other CPUs to 1228 * stop. We do this first because it gives other processors a chance 1229 * to exit critical sections / drop locks and makes the rest of the 1230 * stop process (especially console flush) more robust. 1231 */ 1232 smp_cross_call(&mask, IPI_CPU_STOP); 1233 timeout = USEC_PER_SEC; 1234 while (num_other_online_cpus() && timeout--) 1235 udelay(1); 1236 1237 /* 1238 * If CPUs are still online, try an NMI. There's no excuse for this to 1239 * be slow, so we only give them an extra 10 ms to respond. 1240 */ 1241 if (num_other_online_cpus() && ipi_should_be_nmi(IPI_CPU_STOP_NMI)) { 1242 smp_rmb(); 1243 cpumask_copy(&mask, cpu_online_mask); 1244 cpumask_clear_cpu(smp_processor_id(), &mask); 1245 1246 pr_info("SMP: retry stop with NMI for CPUs %*pbl\n", 1247 cpumask_pr_args(&mask)); 1248 1249 smp_cross_call(&mask, IPI_CPU_STOP_NMI); 1250 timeout = USEC_PER_MSEC * 10; 1251 while (num_other_online_cpus() && timeout--) 1252 udelay(1); 1253 } 1254 1255 if (num_other_online_cpus()) { 1256 smp_rmb(); 1257 cpumask_copy(&mask, cpu_online_mask); 1258 cpumask_clear_cpu(smp_processor_id(), &mask); 1259 1260 pr_warn("SMP: failed to stop secondary CPUs %*pbl\n", 1261 cpumask_pr_args(&mask)); 1262 } 1263 1264 skip_ipi: 1265 sdei_mask_local_cpu(); 1266 } 1267 1268 #ifdef CONFIG_KEXEC_CORE 1269 void crash_smp_send_stop(void) 1270 { 1271 /* 1272 * This function can be called twice in panic path, but obviously 1273 * we execute this only once. 1274 * 1275 * We use this same boolean to tell whether the IPI we send was a 1276 * stop or a "crash stop". 1277 */ 1278 if (crash_stop) 1279 return; 1280 crash_stop = 1; 1281 1282 smp_send_stop(); 1283 1284 sdei_handler_abort(); 1285 } 1286 1287 bool smp_crash_stop_failed(void) 1288 { 1289 return num_other_online_cpus() != 0; 1290 } 1291 #endif 1292 1293 static bool have_cpu_die(void) 1294 { 1295 #ifdef CONFIG_HOTPLUG_CPU 1296 int any_cpu = raw_smp_processor_id(); 1297 const struct cpu_operations *ops = get_cpu_ops(any_cpu); 1298 1299 if (ops && ops->cpu_die) 1300 return true; 1301 #endif 1302 return false; 1303 } 1304 1305 bool cpus_are_stuck_in_kernel(void) 1306 { 1307 bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die()); 1308 1309 return !!cpus_stuck_in_kernel || smp_spin_tables || 1310 is_protected_kvm_enabled(); 1311 } 1312