/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

int smp_hw_index[NR_CPUS];
struct thread_info *secondary_ti;

cpumask_t cpu_possible_map = CPU_MASK_NONE;
cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t cpu_sibling_map[NR_CPUS] = { [0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

static volatile unsigned int cpu_callin_map[NR_CPUS];

void smp_call_function_interrupt(void);

int smt_enabled_at_boot = 1;

static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;

#ifdef CONFIG_MPIC
int __init smp_mpic_probe(void)
{
        int nr_cpus;

        DBG("smp_mpic_probe()...\n");

        nr_cpus = cpus_weight(cpu_possible_map);

        DBG("nr_cpus: %d\n", nr_cpus);

        if (nr_cpus > 1)
                mpic_request_ipis();

        return nr_cpus;
}

void __devinit smp_mpic_setup_cpu(int cpu)
{
        mpic_setup_this_cpu();
}
#endif /* CONFIG_MPIC */

#ifdef CONFIG_PPC64
void __devinit smp_generic_kick_cpu(int nr)
{
        BUG_ON(nr < 0 || nr >= NR_CPUS);

        /*
         * The processor is currently spinning, waiting for the
         * cpu_start field to become non-zero.  After we set cpu_start,
         * the processor will continue on to secondary_start.
         */
        paca[nr].cpu_start = 1;
        smp_mb();
}
#endif

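/*
 * Handle an incoming inter-processor interrupt: dispatch on the message
 * type that the sending CPU passed through smp_ops->message_pass().
 */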
void smp_message_recv(int msg, struct pt_regs *regs)
{
        switch(msg) {
        case PPC_MSG_CALL_FUNCTION:
                smp_call_function_interrupt();
                break;
        case PPC_MSG_RESCHEDULE:
                /* XXX Do we have to do this? */
                set_need_resched();
                break;
        case PPC_MSG_DEBUGGER_BREAK:
                if (crash_ipi_function_ptr) {
                        crash_ipi_function_ptr(regs);
                        break;
                }
#ifdef CONFIG_DEBUGGER
                debugger_ipi(regs);
                break;
#endif /* CONFIG_DEBUGGER */
                /* FALLTHROUGH */
        default:
                printk("SMP %d: smp_message_recv(): unknown msg %d\n",
                       smp_processor_id(), msg);
                break;
        }
}

void smp_send_reschedule(int cpu)
{
        smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
}

#ifdef CONFIG_DEBUGGER
void smp_send_debugger_break(int cpu)
{
        smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

#ifdef CONFIG_KEXEC
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
        crash_ipi_function_ptr = crash_ipi_callback;
        if (crash_ipi_callback) {
                mb();
                smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_DEBUGGER_BREAK);
        }
}
#endif

static void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        while (1)
                ;
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 1, 0);
}

/*
 * Structure and data for smp_call_function().  This is designed to minimise
 * static memory requirements.  It also looks cleaner.
 * Stolen from the i386 version.
 */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);

static struct call_data_struct {
        void (*func) (void *info);
        void *info;
        atomic_t started;
        atomic_t finished;
        int wait;
} *call_data;

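/*
 * Handshake used by smp_call_function()/smp_call_function_interrupt():
 * the initiating CPU publishes call_data under call_lock and sends a
 * PPC_MSG_CALL_FUNCTION IPI to all other online CPUs, then spins until
 * "started" (and, when wait != 0, "finished") reaches the number of
 * those CPUs.  Each receiver increments "started" before calling func
 * and "finished" after it returns.
 */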
/* delay of at least 8 seconds */
#define SMP_CALL_TIMEOUT        8

/*
 * This function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 *
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code.  Does not return until
 * remote CPUs are nearly ready to execute <<func>>, or have already executed it.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler, or from a bottom half handler.
 */
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
                        int wait)
{
        struct call_data_struct data;
        int ret = -1, cpus;
        u64 timeout;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        spin_lock(&call_lock);
        /* Must grab online cpu count with preempt disabled, otherwise
         * it can change. */
        cpus = num_online_cpus() - 1;
        if (!cpus) {
                ret = 0;
                goto out;
        }

        call_data = &data;
        smp_wmb();
        /* Send a message to all other CPUs and wait for them to respond */
        smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION);

        timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec;

        /* Wait for response */
        while (atomic_read(&data.started) != cpus) {
                HMT_low();
                if (get_tb() >= timeout) {
                        printk("smp_call_function on cpu %d: other cpus not "
                               "responding (%d)\n", smp_processor_id(),
                               atomic_read(&data.started));
                        debugger(NULL);
                        goto out;
                }
        }

        if (wait) {
                while (atomic_read(&data.finished) != cpus) {
                        HMT_low();
                        if (get_tb() >= timeout) {
                                printk("smp_call_function on cpu %d: other "
                                       "cpus not finishing (%d/%d)\n",
                                       smp_processor_id(),
                                       atomic_read(&data.finished),
                                       atomic_read(&data.started));
                                debugger(NULL);
                                goto out;
                        }
                }
        }

        ret = 0;

 out:
        call_data = NULL;
        HMT_medium();
        spin_unlock(&call_lock);
        return ret;
}

EXPORT_SYMBOL(smp_call_function);

void smp_call_function_interrupt(void)
{
        void (*func) (void *info);
        void *info;
        int wait;

        /* call_data will be NULL if the sender timed out while
         * waiting on us to receive the call.
         */
        if (!call_data)
                return;

        func = call_data->func;
        info = call_data->info;
        wait = call_data->wait;

        if (!wait)
                smp_mb__before_atomic_inc();

        /*
         * Notify initiating CPU that I've grabbed the data and am
         * about to execute the function
         */
        atomic_inc(&call_data->started);
        /*
         * At this point the info structure may be out of scope unless wait==1
         */
        (*func)(info);
        if (wait) {
                smp_mb__before_atomic_inc();
                atomic_inc(&call_data->finished);
        }
}

extern struct gettimeofday_struct do_gtod;

struct thread_info *current_set[NR_CPUS];

DECLARE_PER_CPU(unsigned int, pvr);

static void __devinit smp_store_cpu_info(int id)
{
        per_cpu(pvr, id) = mfspr(SPRN_PVR);
}

static void __init smp_create_idle(unsigned int cpu)
{
        struct task_struct *p;

        /* create a process for the processor */
        p = fork_idle(cpu);
        if (IS_ERR(p))
                panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
#ifdef CONFIG_PPC64
        paca[cpu].__current = p;
#endif
        current_set[cpu] = task_thread_info(p);
        task_thread_info(p)->cpu = cpu;
}

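/*
 * Prepare for bringing up secondary CPUs: record the boot CPU's info and
 * mark it as called in, ask the platform's probe() hook how many CPUs to
 * bring up, and create an idle task for every other possible CPU.
 */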
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int cpu;

        DBG("smp_prepare_cpus\n");

        /*
         * setup_cpu may need to be called on the boot cpu.  We haven't
         * spun up any cpus yet, but let's be paranoid.
         */
        BUG_ON(boot_cpuid != smp_processor_id());

        /* Fixup boot cpu */
        smp_store_cpu_info(boot_cpuid);
        cpu_callin_map[boot_cpuid] = 1;

        max_cpus = smp_ops->probe();

        smp_space_timers(max_cpus);

        for_each_possible_cpu(cpu)
                if (cpu != boot_cpuid)
                        smp_create_idle(cpu);
}

void __devinit smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != boot_cpuid);

        cpu_set(boot_cpuid, cpu_online_map);
#ifdef CONFIG_PPC64
        paca[boot_cpuid].__current = current;
#endif
        current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

int generic_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();

        if (cpu == boot_cpuid)
                return -EBUSY;

        cpu_clear(cpu, cpu_online_map);
#ifdef CONFIG_PPC64
        vdso_data->processorCount--;
        fixup_irqs(cpu_online_map);
#endif
        return 0;
}

int generic_cpu_enable(unsigned int cpu)
{
        /* Do the normal bootup if we haven't
         * already bootstrapped. */
        if (system_state != SYSTEM_RUNNING)
                return -ENOSYS;

        /* get the target out of its holding state */
        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
        smp_wmb();

        while (!cpu_online(cpu))
                cpu_relax();

#ifdef CONFIG_PPC64
        fixup_irqs(cpu_online_map);
        /* counter the irq disable in fixup_irqs */
        local_irq_enable();
#endif
        return 0;
}

void generic_cpu_die(unsigned int cpu)
{
        int i;

        for (i = 0; i < 100; i++) {
                smp_rmb();
                if (per_cpu(cpu_state, cpu) == CPU_DEAD)
                        return;
                msleep(100);
        }
        printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_mach_cpu_die(void)
{
        unsigned int cpu;

        local_irq_disable();
        cpu = smp_processor_id();
        printk(KERN_DEBUG "CPU%d offline\n", cpu);
        __get_cpu_var(cpu_state) = CPU_DEAD;
        smp_wmb();
        while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
                cpu_relax();

#ifdef CONFIG_PPC64
        flush_tlb_pending();
#endif
        cpu_set(cpu, cpu_online_map);
        local_irq_enable();
}
#endif

static int __devinit cpu_enable(unsigned int cpu)
{
        if (smp_ops->cpu_enable)
                return smp_ops->cpu_enable(cpu);

        return -ENOSYS;
}

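/*
 * Bring one secondary CPU into operation: hand it its idle thread, kick
 * it through the platform smp_ops, then wait for it to check in via
 * cpu_callin_map and put itself in the online map.
 */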
int __devinit __cpu_up(unsigned int cpu)
{
        int c;

        secondary_ti = current_set[cpu];
        if (!cpu_enable(cpu))
                return 0;

        if (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))
                return -EINVAL;

        /* Make sure callin-map entry is 0 (it can be left over from a
         * CPU hotplug).
         */
        cpu_callin_map[cpu] = 0;

        /* The information for processor bringup must
         * be written out to main store before we release
         * the processor.
         */
        smp_mb();

        /* wake up cpus */
        DBG("smp: kicking cpu %d\n", cpu);
        smp_ops->kick_cpu(cpu);

        /*
         * wait to see if the cpu made a callin (is actually up).
         * use this value that I found through experimentation.
         * -- Cort
         */
        if (system_state < SYSTEM_RUNNING)
                for (c = 50000; c && !cpu_callin_map[cpu]; c--)
                        udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
        else
                /*
                 * CPUs can take much longer to come up in the
                 * hotplug case.  Wait five seconds.
                 */
                for (c = 25; c && !cpu_callin_map[cpu]; c--) {
                        msleep(200);
                }
#endif

        if (!cpu_callin_map[cpu]) {
                printk("Processor %u is stuck.\n", cpu);
                return -ENOENT;
        }

        printk("Processor %u found.\n", cpu);

        if (smp_ops->give_timebase)
                smp_ops->give_timebase();

        /* Wait until cpu puts itself in the online map */
        while (!cpu_online(cpu))
                cpu_relax();

        return 0;
}


/* Activate a secondary processor. */
int __devinit start_secondary(void *unused)
{
        unsigned int cpu = smp_processor_id();

        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        smp_store_cpu_info(cpu);
        set_dec(tb_ticks_per_jiffy);
        preempt_disable();
        cpu_callin_map[cpu] = 1;

        smp_ops->setup_cpu(cpu);
        if (smp_ops->take_timebase)
                smp_ops->take_timebase();

        if (system_state > SYSTEM_BOOTING)
                snapshot_timebase();

        spin_lock(&call_lock);
        cpu_set(cpu, cpu_online_map);
        spin_unlock(&call_lock);

        local_irq_enable();

        cpu_idle();
        return 0;
}

int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        cpumask_t old_mask;

        /* We want the setup_cpu() here to be called from CPU 0, but our
         * init thread may have been "borrowed" by another CPU in the
         * meantime, so we pin ourselves to CPU 0 for a short while.
         */
        old_mask = current->cpus_allowed;
        set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));

        smp_ops->setup_cpu(boot_cpuid);

        set_cpus_allowed(current, old_mask);

        snapshot_timebases();

        dump_numa_cpu_topology();
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
        if (smp_ops->cpu_disable)
                return smp_ops->cpu_disable();

        return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
        if (smp_ops->cpu_die)
                smp_ops->cpu_die(cpu);
}
#endif