/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/xmon.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/mpic.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif

int smp_hw_index[NR_CPUS];
struct thread_info *secondary_ti;

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

cpumask_t cpu_possible_map = CPU_MASK_NONE;
cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t cpu_sibling_map[NR_CPUS] = { [0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

static volatile unsigned int cpu_callin_map[NR_CPUS];

void smp_call_function_interrupt(void);

int smt_enabled_at_boot = 1;

#ifdef CONFIG_MPIC
int __init smp_mpic_probe(void)
{
	int nr_cpus;

	DBG("smp_mpic_probe()...\n");

	nr_cpus = cpus_weight(cpu_possible_map);

	DBG("nr_cpus: %d\n", nr_cpus);

	if (nr_cpus > 1)
		mpic_request_ipis();

	return nr_cpus;
}

void __devinit smp_mpic_setup_cpu(int cpu)
{
	mpic_setup_this_cpu();
}
#endif /* CONFIG_MPIC */

#ifdef CONFIG_PPC64
void __devinit smp_generic_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero.  After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	paca[nr].cpu_start = 1;
	smp_mb();
}
#endif

void smp_message_recv(int msg, struct pt_regs *regs)
{
	switch (msg) {
	case PPC_MSG_CALL_FUNCTION:
		smp_call_function_interrupt();
		break;
	case PPC_MSG_RESCHEDULE:
		/* XXX Do we have to do this? */
		set_need_resched();
		break;
#ifdef CONFIG_DEBUGGER
	case PPC_MSG_DEBUGGER_BREAK:
		debugger_ipi(regs);
		break;
#endif
	default:
		printk("SMP %d: smp_message_recv(): unknown msg %d\n",
		       smp_processor_id(), msg);
		break;
	}
}
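
/*
 * Illustrative sketch (not part of the original file): smp_message_recv()
 * above is the receive side of smp_ops->message_pass().  On a platform
 * whose interrupt controller can only raise an IPI on one CPU at a time,
 * the MSG_ALL_BUT_SELF target used later in this file can be open-coded
 * as a loop over the online CPUs.  example_message_pass() and
 * example_cause_ipi() are hypothetical names, not functions defined
 * anywhere in the tree.
 */
#if 0
static void example_cause_ipi(int cpu, int msg);	/* platform IPI primitive */

static void example_message_pass(int target, int msg)
{
	int cpu;

	if (target < NR_CPUS) {
		/* raise the IPI on a single CPU */
		example_cause_ipi(target, msg);
		return;
	}

	/* MSG_ALL_BUT_SELF: raise it on every online CPU except the sender */
	for_each_online_cpu(cpu)
		if (cpu != smp_processor_id())
			example_cause_ipi(cpu, msg);
}
#endif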

void smp_send_reschedule(int cpu)
{
	smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
}

#ifdef CONFIG_DEBUGGER
void smp_send_debugger_break(int cpu)
{
	smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

static void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}

/*
 * Structure and data for smp_call_function().  This is designed to minimise
 * static memory requirements.  It also looks cleaner.
 * Stolen from the i386 version.
 */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);

static struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
} *call_data;

/* delay of at least 8 seconds */
#define SMP_CALL_TIMEOUT	8

/*
 * This function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 *
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <func> or have already executed it.
 *
 * You must not call this function with interrupts disabled, from a
 * hardware interrupt handler, or from a bottom half handler.
 */
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
		       int wait)
{
	struct call_data_struct data;
	int ret = -1, cpus;
	u64 timeout;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&call_lock);
	/* Must grab online cpu count with preempt disabled, otherwise
	 * it can change. */
	cpus = num_online_cpus() - 1;
	if (!cpus) {
		ret = 0;
		goto out;
	}

	call_data = &data;
	smp_wmb();
	/* Send a message to all other CPUs and wait for them to respond */
	smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION);

	timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec;

	/* Wait for response */
	while (atomic_read(&data.started) != cpus) {
		HMT_low();
		if (get_tb() >= timeout) {
			printk("smp_call_function on cpu %d: other cpus not "
			       "responding (%d)\n", smp_processor_id(),
			       atomic_read(&data.started));
			debugger(NULL);
			goto out;
		}
	}

	if (wait) {
		while (atomic_read(&data.finished) != cpus) {
			HMT_low();
			if (get_tb() >= timeout) {
				printk("smp_call_function on cpu %d: other "
				       "cpus not finishing (%d/%d)\n",
				       smp_processor_id(),
				       atomic_read(&data.finished),
				       atomic_read(&data.started));
				debugger(NULL);
				goto out;
			}
		}
	}

	ret = 0;

 out:
	call_data = NULL;
	HMT_medium();
	spin_unlock(&call_lock);
	return ret;
}

EXPORT_SYMBOL(smp_call_function);

void smp_call_function_interrupt(void)
{
	void (*func) (void *info);
	void *info;
	int wait;

	/* call_data will be NULL if the sender timed out while
	 * waiting on us to receive the call.
	 */
	if (!call_data)
		return;

	func = call_data->func;
	info = call_data->info;
	wait = call_data->wait;

	if (!wait)
		smp_mb__before_atomic_inc();

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	(*func)(info);
	if (wait) {
		smp_mb__before_atomic_inc();
		atomic_inc(&call_data->finished);
	}
}
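
/*
 * Illustrative usage sketch (not part of the original file): per the
 * comment block above smp_call_function(), <func> must be fast and
 * non-blocking and the caller must have interrupts enabled.  With
 * wait=1 the info argument may point at the caller's stack, because
 * the call does not return until every other CPU has finished running
 * <func>.  example_bump() and example_count_cpus() are hypothetical
 * callers, not kernel APIs.
 */
#if 0
static void example_bump(void *info)
{
	atomic_inc((atomic_t *)info);	/* runs in IPI context: no sleeping */
}

static int example_count_cpus(void)
{
	atomic_t hits = ATOMIC_INIT(0);

	/* nonatomic is unused; wait=1 keeps &hits in scope throughout */
	if (smp_call_function(example_bump, &hits, 0, 1) != 0)
		return -1;

	/* smp_call_function() skips the calling CPU, so count it by hand */
	atomic_inc(&hits);
	return atomic_read(&hits);
}
#endif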

extern struct gettimeofday_struct do_gtod;

struct thread_info *current_set[NR_CPUS];

DECLARE_PER_CPU(unsigned int, pvr);

static void __devinit smp_store_cpu_info(int id)
{
	per_cpu(pvr, id) = mfspr(SPRN_PVR);
}

static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/* create a process for the processor */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
#ifdef CONFIG_PPC64
	paca[cpu].__current = p;
#endif
	current_set[cpu] = p->thread_info;
	p->thread_info->cpu = cpu;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	max_cpus = smp_ops->probe();

	smp_space_timers(max_cpus);

	for_each_cpu(cpu)
		if (cpu != boot_cpuid)
			smp_create_idle(cpu);
}

void __devinit smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);

	cpu_set(boot_cpuid, cpu_online_map);
#ifdef CONFIG_PPC64
	paca[boot_cpuid].__current = current;
#endif
	current_set[boot_cpuid] = current->thread_info;
}

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	systemcfg->processorCount--;
	cpu_clear(cpu, cpu_online_map);
	fixup_irqs(cpu_online_map);
	return 0;
}

int generic_cpu_enable(unsigned int cpu)
{
	/* Do the normal bootup if we haven't
	 * already bootstrapped. */
	if (system_state != SYSTEM_RUNNING)
		return -ENOSYS;

	/* get the target out of its holding state */
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
	smp_wmb();

	while (!cpu_online(cpu))
		cpu_relax();

	fixup_irqs(cpu_online_map);
	/* counter the irq disable in fixup_irqs */
	local_irq_enable();
	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_mach_cpu_die(void)
{
	unsigned int cpu;

	local_irq_disable();
	cpu = smp_processor_id();
	printk(KERN_DEBUG "CPU%d offline\n", cpu);
	__get_cpu_var(cpu_state) = CPU_DEAD;
	smp_wmb();
	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
		cpu_relax();

	flush_tlb_pending();
	cpu_set(cpu, cpu_online_map);
	local_irq_enable();
}
#endif

static int __devinit cpu_enable(unsigned int cpu)
{
	if (smp_ops->cpu_enable)
		return smp_ops->cpu_enable(cpu);

	return -ENOSYS;
}

int __devinit __cpu_up(unsigned int cpu)
{
	int c;

	secondary_ti = current_set[cpu];
	if (!cpu_enable(cpu))
		return 0;

	if (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))
		return -EINVAL;

#ifdef CONFIG_PPC64
	paca[cpu].default_decr = tb_ticks_per_jiffy;
#endif

	/* Make sure callin-map entry is 0 (it can be left over
	 * from a previous CPU hotplug). */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	smp_ops->kick_cpu(cpu);

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case.  Wait five seconds.
		 */
		for (c = 25; c && !cpu_callin_map[cpu]; c--) {
			msleep(200);
		}
#endif

	if (!cpu_callin_map[cpu]) {
		printk("Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	printk("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online map */
	while (!cpu_online(cpu))
		cpu_relax();

	return 0;
}

/* Activate a secondary processor. */
int __devinit start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	cpu_callin_map[cpu] = 1;

	smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	spin_lock(&call_lock);
	cpu_set(cpu, cpu_online_map);
	spin_unlock(&call_lock);

	local_irq_enable();

	cpu_idle();
	return 0;
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the
	 * meantime, so we pin ourselves to CPU 0 for a short while.
	 */
	old_mask = current->cpus_allowed;
	set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));

	smp_ops->setup_cpu(boot_cpuid);

	set_cpus_allowed(current, old_mask);
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	if (smp_ops->cpu_disable)
		return smp_ops->cpu_disable();

	return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}
#endif