/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched/mm.h>
#include <linux/sched/topology.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/profile.h>
#include <linux/processor.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/kexec.h>
#include <asm/asm-prototypes.h>
#include <asm/cpu_has_feature.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
	/* Special case - we inhibit secondary thread startup
	 * during boot if the user requests it.
	 */
	if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
			return 0;
		if (smt_enabled_at_boot
		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
			return 0;
	}

	return 1;
}
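/*
 * Worked example (illustrative; assumes smt_enabled_at_boot is set from the
 * "smt-enabled=" command line option, as on ppc64): with threads_per_core = 8
 * and smt_enabled_at_boot = 2, only threads 0 and 1 of each core pass
 * smp_generic_cpu_bootable() during boot; smt_enabled_at_boot = 0 limits
 * boot to thread 0 of each core.
 */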
#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
	if (nr < 0 || nr >= nr_cpu_ids)
		return -EINVAL;

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	if (!paca[nr].cpu_start) {
		paca[nr].cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Ok it's not there, so it might be soft-unplugged, let's
	 * try to bring it back
	 */
	generic_set_cpu_up(nr);
	smp_wmb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
#endif /* CONFIG_PPC64 */

static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
	tick_broadcast_ipi_handler();
	return IRQ_HANDLED;
}

#ifdef CONFIG_NMI_IPI
static irqreturn_t nmi_ipi_action(int irq, void *data)
{
	smp_handle_nmi_ipi(get_irq_regs());
	return IRQ_HANDLED;
}
#endif

static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
#ifdef CONFIG_NMI_IPI
	[PPC_MSG_NMI_IPI] = nmi_ipi_action,
#endif
};

/*
 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
 * than going through the call function infrastructure, and strongly
 * serialized, so it is more appropriate for debugging.
 */
const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
	[PPC_MSG_NMI_IPI] = "nmi ipi",
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_NMI_IPI)
		return -EINVAL;
#ifndef CONFIG_NMI_IPI
	if (msg == PPC_MSG_NMI_IPI)
		return 1;
#endif

	err = request_irq(virq, smp_ipi_action[msg],
			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
	     virq, smp_ipi_name[msg], err);

	return err;
}

#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	long messages;			/* current messages */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_message(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

	/*
	 * Order previous accesses before accesses in the IPI handler.
	 */
	smp_mb();
	message[msg] = 1;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	smp_muxed_ipi_set_message(cpu, msg);

	/*
	 * cause_ipi functions are required to include a full barrier
	 * before doing whatever causes the IPI.
	 */
	smp_ops->cause_ipi(cpu);
}

#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1uL << (8 * (A)))
#endif
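/*
 * Illustrative note (derived from the definitions above; assumes the usual
 * PPC_MSG_* numbering in asm/smp.h where PPC_MSG_CALL_FUNCTION is 0): each
 * message is one byte of info->messages, so smp_muxed_ipi_set_message()
 * writing message[msg] = 1 sets exactly the bit that IPI_MESSAGE(msg) tests.
 * On a 64-bit big-endian kernel IPI_MESSAGE(0) is 1UL << 56, the lowest bit
 * of the most significant byte (byte 0); on little-endian it is 1UL << 0.
 */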
irqreturn_t smp_ipi_demux(void)
{
	mb();	/* order any irq clear */

	return smp_ipi_demux_relaxed();
}

/* sync-free variant. Callers should ensure synchronization */
irqreturn_t smp_ipi_demux_relaxed(void)
{
	struct cpu_messages *info;
	unsigned long all;

	info = this_cpu_ptr(&ipi_message);
	do {
		all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
		/*
		 * Must check for PPC_MSG_RM_HOST_ACTION messages
		 * before PPC_MSG_CALL_FUNCTION messages because when
		 * a VM is destroyed, we call kick_all_cpus_sync()
		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
		 * messages have completed before we free any VCPUs.
		 */
		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
			kvmppc_xics_ipi_action();
#endif
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
			tick_broadcast_ipi_handler();
#ifdef CONFIG_NMI_IPI
		if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
			nmi_ipi_action(0, NULL);
#endif
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */

static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_NMI_IPI

/*
 * "NMI IPI" system.
 *
 * NMI IPIs may not be recoverable, so should not be used as ongoing part of
 * a running system. They can be used for crash, debug, halt/reboot, etc.
 *
 * NMI IPIs are globally single threaded. No more than one in progress at
 * any time.
 *
 * The IPI call waits with interrupts disabled until all targets enter the
 * NMI handler, then the call returns.
 *
 * No new NMI can be initiated until targets exit the handler.
 *
 * The IPI call may time out without all targets entering the NMI handler.
 * In that case, there is some logic to recover (and ignore subsequent
 * NMI interrupts that may eventually be raised), but the platform interrupt
 * handler may not be able to distinguish this from other exception causes,
 * which may cause a crash.
 */
static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
static struct cpumask nmi_ipi_pending_mask;
static int nmi_ipi_busy_count = 0;
static void (*nmi_ipi_function)(struct pt_regs *) = NULL;

static void nmi_ipi_lock_start(unsigned long *flags)
{
	raw_local_irq_save(*flags);
	hard_irq_disable();
	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
		raw_local_irq_restore(*flags);
		cpu_relax();
		raw_local_irq_save(*flags);
		hard_irq_disable();
	}
}

static void nmi_ipi_lock(void)
{
	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
		cpu_relax();
}

static void nmi_ipi_unlock(void)
{
	smp_mb();
	WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
	atomic_set(&__nmi_ipi_lock, 0);
}

static void nmi_ipi_unlock_end(unsigned long *flags)
{
	nmi_ipi_unlock();
	raw_local_irq_restore(*flags);
}

/*
 * Platform NMI handler calls this to ack
 */
int smp_handle_nmi_ipi(struct pt_regs *regs)
{
	void (*fn)(struct pt_regs *);
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 0;

	/*
	 * Unexpected NMIs are possible here because the interrupt may not
	 * be able to distinguish NMI IPIs from other types of NMIs, or
	 * because the caller may have timed out.
	 */
	nmi_ipi_lock_start(&flags);
	if (!nmi_ipi_busy_count)
		goto out;
	if (!cpumask_test_cpu(me, &nmi_ipi_pending_mask))
		goto out;

	fn = nmi_ipi_function;
	if (!fn)
		goto out;

	cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
	nmi_ipi_busy_count++;
	nmi_ipi_unlock();

	ret = 1;

	fn(regs);

	nmi_ipi_lock();
	nmi_ipi_busy_count--;
out:
	nmi_ipi_unlock_end(&flags);

	return ret;
}

static void do_smp_send_nmi_ipi(int cpu)
{
	if (smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
		return;

	if (cpu >= 0) {
		do_message_pass(cpu, PPC_MSG_NMI_IPI);
	} else {
		int c;

		for_each_online_cpu(c) {
			if (c == raw_smp_processor_id())
				continue;
			do_message_pass(c, PPC_MSG_NMI_IPI);
		}
	}
}

/*
 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
 * - fn is the target callback function.
 * - delay_us > 0 is the delay before giving up waiting for targets to
 *   enter the handler, == 0 specifies indefinite delay.
 */
static int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 1;

	BUG_ON(cpu == me);
	BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);

	if (unlikely(!smp_ops))
		return 0;

	/* Take the nmi_ipi_busy count/lock with interrupts hard disabled */
	nmi_ipi_lock_start(&flags);
	while (nmi_ipi_busy_count) {
		nmi_ipi_unlock_end(&flags);
		cpu_relax();
		nmi_ipi_lock_start(&flags);
	}

	nmi_ipi_function = fn;

	if (cpu < 0) {
		/* ALL_OTHERS */
		cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
	} else {
		/* cpumask starts clear */
		cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
	}
	nmi_ipi_busy_count++;
	nmi_ipi_unlock();

	do_smp_send_nmi_ipi(cpu);

	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
		udelay(1);
		if (delay_us) {
			delay_us--;
			if (!delay_us)
				break;
		}
	}

	nmi_ipi_lock();
	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
		/* Could not gather all CPUs */
		ret = 0;
		cpumask_clear(&nmi_ipi_pending_mask);
	}
	nmi_ipi_busy_count--;
	nmi_ipi_unlock_end(&flags);

	return ret;
}
#endif /* CONFIG_NMI_IPI */
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif

#ifdef CONFIG_DEBUGGER
void debugger_ipi_callback(struct pt_regs *regs)
{
	debugger_ipi(regs);
}

void smp_send_debugger_break(void)
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
}
#endif

#ifdef CONFIG_KEXEC_CORE
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
}
#endif

static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

struct thread_info *current_set[NR_CPUS];

static void smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		/*
		 * numa_node_id() works after this.
		 */
		if (cpu_present(cpu)) {
			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
			set_cpu_numa_mem(cpu,
				local_memory_node(numa_cpu_lookup_table[cpu]));
		}
	}

	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (smp_ops && smp_ops->probe)
		smp_ops->probe();
}
void smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca[boot_cpuid].__current = current;
#endif
	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
	current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	/* Update affinity of all IRQs previously aimed at this CPU */
	irq_migrate_all_off_this_cpu();

	/*
	 * Depending on the details of the interrupt controller, it's possible
	 * that one of the interrupts we just migrated away from this CPU is
	 * actually already pending on this CPU. If we leave it in that state
	 * the interrupt will never be EOI'ed, and will never fire again. So
	 * temporarily enable interrupts here, to allow any pending interrupt
	 * to be received (and EOI'ed), before we take this CPU offline.
	 */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (is_cpu_dead(cpu))
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}
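/*
 * cpu_state lifecycle sketch (how the helpers in this file fit together;
 * the offline side lives in platform code and is not shown here):
 * kick_cpu() implementations such as smp_generic_kick_cpu() above call
 * generic_set_cpu_up() to mark CPU_UP_PREPARE, the platform's offline path
 * calls generic_set_cpu_dead() on the dying CPU to mark CPU_DEAD, and
 * generic_cpu_die() above polls is_cpu_dead() until that happens.
 */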
void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(); otherwise
 * cpu_state stays CPU_DEAD from the previous offline (set by
 * generic_set_cpu_dead()), and the wait in generic_cpu_die() would return
 * immediately.
 */
void generic_set_cpu_up(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

int is_cpu_dead(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_DEAD;
}

static bool secondaries_inhibited(void)
{
	return kvm_hv_mode_active();
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()		0

#endif

static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
	struct thread_info *ti = task_thread_info(idle);

#ifdef CONFIG_PPC64
	paca[cpu].__current = idle;
	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	ti->cpu = cpu;
	secondary_ti = current_set[cpu] = ti;
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc, c;

	/*
	 * Don't allow secondary threads to come online if inhibited
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu_thread_in_subcore(cpu))
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/*
	 * The platform might need to allocate resources prior to bringing
	 * up the CPU
	 */
	if (smp_ops->prepare_cpu) {
		rc = smp_ops->prepare_cpu(cpu);
		if (rc)
			return rc;
	}

	/* Make sure callin-map entry is 0 (can be leftover from a CPU
	 * hotplug)
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case. Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online & active maps */
	spin_until_cond(cpu_online(cpu));

	return 0;
}
/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const __be32 *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = be32_to_cpup(reg);
out:
	of_node_put(np);
	return id;
}
EXPORT_SYMBOL_GPL(cpu_to_core_id);

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
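/*
 * Worked example (illustrative): with 8 threads per core, threads_shift
 * is 3, so cpu_core_index_of_thread(19) == 2 and
 * cpu_first_thread_of_core(2) == 16, i.e. logical CPUs 16-23 are the
 * threads of core index 2.
 */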
static void traverse_siblings_chip_id(int cpu, bool add, int chipid)
{
	const struct cpumask *mask;
	struct device_node *np;
	int i, plen;
	const __be32 *prop;

	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = of_get_cpu_node(i, NULL);
		if (!np)
			continue;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int) &&
		    of_read_number(prop, 1) == chipid) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
}

/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}

static void traverse_core_siblings(int cpu, bool add)
{
	struct device_node *l2_cache, *np;
	const struct cpumask *mask;
	int i, chip, plen;
	const __be32 *prop;

	/* First see if we have ibm,chip-id properties in cpu nodes */
	np = of_get_cpu_node(cpu, NULL);
	if (np) {
		chip = -1;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int))
			chip = of_read_number(prop, 1);
		of_node_put(np);
		if (chip >= 0) {
			traverse_siblings_chip_id(cpu, add, chip);
			return;
		}
	}

	l2_cache = cpu_to_l2cache(cpu);
	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);
}

/* Activate a secondary processor. */
void start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	int i, base;

	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;

	vdso_getcpu_init();
#endif
	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i) && (cpu != base + i))
			continue;
		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, true);

	set_numa_node(numa_cpu_lookup_table[cpu]);
	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

	smp_wmb();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	BUG();
}
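/*
 * Worked example for the sibling-map update above (illustrative): with
 * threads_per_core = 8, a secondary coming up as cpu 21 computes
 * base = cpu_first_thread_sibling(21) = 16 and marks itself as a sibling
 * (and core peer) of each online CPU in the range 16-23, including itself.
 */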
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_SCHED_SMT
/* cpumask of CPUs with asymmetric SMT dependency */
static int powerpc_smt_flags(void)
{
	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		flags |= SD_ASYM_PACKING;
	}
	return flags;
}
#endif

static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

static __init long smp_setup_cpu_workfn(void *data __always_unused)
{
	smp_ops->setup_cpu(boot_cpuid);
	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	/*
	 * We want the setup_cpu() here to be called on the boot CPU, but
	 * init might run on any CPU, so make sure it's invoked on the boot
	 * CPU.
	 */
	if (smp_ops && smp_ops->setup_cpu)
		work_on_cpu_safe(boot_cpuid, smp_setup_cpu_workfn, NULL);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();

	set_sched_topology(powerpc_topology);
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	int base, i;
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core && base + i < nr_cpu_ids; i++) {
		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, false);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

void cpu_die(void)
{
	if (ppc_md.cpu_die)
		ppc_md.cpu_die();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}

#endif