1 /* 2 * linux/arch/arm/kernel/smp.c 3 * 4 * Copyright (C) 2002 ARM Limited, All Rights Reserved. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 */ 10 #include <linux/config.h> 11 #include <linux/delay.h> 12 #include <linux/init.h> 13 #include <linux/spinlock.h> 14 #include <linux/sched.h> 15 #include <linux/interrupt.h> 16 #include <linux/cache.h> 17 #include <linux/profile.h> 18 #include <linux/errno.h> 19 #include <linux/mm.h> 20 #include <linux/cpu.h> 21 #include <linux/smp.h> 22 #include <linux/seq_file.h> 23 24 #include <asm/atomic.h> 25 #include <asm/cacheflush.h> 26 #include <asm/cpu.h> 27 #include <asm/mmu_context.h> 28 #include <asm/pgtable.h> 29 #include <asm/pgalloc.h> 30 #include <asm/processor.h> 31 #include <asm/tlbflush.h> 32 #include <asm/ptrace.h> 33 34 /* 35 * bitmask of present and online CPUs. 36 * The present bitmask indicates that the CPU is physically present. 37 * The online bitmask indicates that the CPU is up and running. 38 */ 39 cpumask_t cpu_possible_map; 40 cpumask_t cpu_online_map; 41 42 /* 43 * as from 2.5, kernels no longer have an init_tasks structure 44 * so we need some other way of telling a new secondary core 45 * where to place its SVC stack 46 */ 47 struct secondary_data secondary_data; 48 49 /* 50 * structures for inter-processor calls 51 * - A collection of single bit ipi messages. 52 */ 53 struct ipi_data { 54 spinlock_t lock; 55 unsigned long ipi_count; 56 unsigned long bits; 57 }; 58 59 static DEFINE_PER_CPU(struct ipi_data, ipi_data) = { 60 .lock = SPIN_LOCK_UNLOCKED, 61 }; 62 63 enum ipi_msg_type { 64 IPI_TIMER, 65 IPI_RESCHEDULE, 66 IPI_CALL_FUNC, 67 IPI_CPU_STOP, 68 }; 69 70 struct smp_call_struct { 71 void (*func)(void *info); 72 void *info; 73 int wait; 74 cpumask_t pending; 75 cpumask_t unfinished; 76 }; 77 78 static struct smp_call_struct * volatile smp_call_function_data; 79 static DEFINE_SPINLOCK(smp_call_function_lock); 80 81 int __cpuinit __cpu_up(unsigned int cpu) 82 { 83 struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu); 84 struct task_struct *idle = ci->idle; 85 pgd_t *pgd; 86 pmd_t *pmd; 87 int ret; 88 89 /* 90 * Spawn a new process manually, if not already done. 91 * Grab a pointer to its task struct so we can mess with it 92 */ 93 if (!idle) { 94 idle = fork_idle(cpu); 95 if (IS_ERR(idle)) { 96 printk(KERN_ERR "CPU%u: fork() failed\n", cpu); 97 return PTR_ERR(idle); 98 } 99 ci->idle = idle; 100 } 101 102 /* 103 * Allocate initial page tables to allow the new CPU to 104 * enable the MMU safely. This essentially means a set 105 * of our "standard" page tables, with the addition of 106 * a 1:1 mapping for the physical address of the kernel. 107 */ 108 pgd = pgd_alloc(&init_mm); 109 pmd = pmd_offset(pgd, PHYS_OFFSET); 110 *pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) | 111 PMD_TYPE_SECT | PMD_SECT_AP_WRITE); 112 113 /* 114 * We need to tell the secondary core where to find 115 * its stack and the page tables. 116 */ 117 secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; 118 secondary_data.pgdir = virt_to_phys(pgd); 119 wmb(); 120 121 /* 122 * Now bring the CPU into our world. 123 */ 124 ret = boot_secondary(cpu, idle); 125 if (ret == 0) { 126 unsigned long timeout; 127 128 /* 129 * CPU was successfully started, wait for it 130 * to come online or time out. 131 */ 132 timeout = jiffies + HZ; 133 while (time_before(jiffies, timeout)) { 134 if (cpu_online(cpu)) 135 break; 136 137 udelay(10); 138 barrier(); 139 } 140 141 if (!cpu_online(cpu)) 142 ret = -EIO; 143 } 144 145 secondary_data.stack = NULL; 146 secondary_data.pgdir = 0; 147 148 *pmd_offset(pgd, PHYS_OFFSET) = __pmd(0); 149 pgd_free(pgd); 150 151 if (ret) { 152 printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu); 153 154 /* 155 * FIXME: We need to clean up the new idle thread. --rmk 156 */ 157 } 158 159 return ret; 160 } 161 162 #ifdef CONFIG_HOTPLUG_CPU 163 /* 164 * __cpu_disable runs on the processor to be shutdown. 165 */ 166 int __cpuexit __cpu_disable(void) 167 { 168 unsigned int cpu = smp_processor_id(); 169 struct task_struct *p; 170 int ret; 171 172 ret = mach_cpu_disable(cpu); 173 if (ret) 174 return ret; 175 176 /* 177 * Take this CPU offline. Once we clear this, we can't return, 178 * and we must not schedule until we're ready to give up the cpu. 179 */ 180 cpu_clear(cpu, cpu_online_map); 181 182 /* 183 * OK - migrate IRQs away from this CPU 184 */ 185 migrate_irqs(); 186 187 /* 188 * Stop the local timer for this CPU. 189 */ 190 local_timer_stop(cpu); 191 192 /* 193 * Flush user cache and TLB mappings, and then remove this CPU 194 * from the vm mask set of all processes. 195 */ 196 flush_cache_all(); 197 local_flush_tlb_all(); 198 199 read_lock(&tasklist_lock); 200 for_each_process(p) { 201 if (p->mm) 202 cpu_clear(cpu, p->mm->cpu_vm_mask); 203 } 204 read_unlock(&tasklist_lock); 205 206 return 0; 207 } 208 209 /* 210 * called on the thread which is asking for a CPU to be shutdown - 211 * waits until shutdown has completed, or it is timed out. 212 */ 213 void __cpuexit __cpu_die(unsigned int cpu) 214 { 215 if (!platform_cpu_kill(cpu)) 216 printk("CPU%u: unable to kill\n", cpu); 217 } 218 219 /* 220 * Called from the idle thread for the CPU which has been shutdown. 221 * 222 * Note that we disable IRQs here, but do not re-enable them 223 * before returning to the caller. This is also the behaviour 224 * of the other hotplug-cpu capable cores, so presumably coming 225 * out of idle fixes this. 226 */ 227 void __cpuexit cpu_die(void) 228 { 229 unsigned int cpu = smp_processor_id(); 230 231 local_irq_disable(); 232 idle_task_exit(); 233 234 /* 235 * actual CPU shutdown procedure is at least platform (if not 236 * CPU) specific 237 */ 238 platform_cpu_die(cpu); 239 240 /* 241 * Do not return to the idle loop - jump back to the secondary 242 * cpu initialisation. There's some initialisation which needs 243 * to be repeated to undo the effects of taking the CPU offline. 244 */ 245 __asm__("mov sp, %0\n" 246 " b secondary_start_kernel" 247 : 248 : "r" (task_stack_page(current) + THREAD_SIZE - 8)); 249 } 250 #endif /* CONFIG_HOTPLUG_CPU */ 251 252 /* 253 * This is the secondary CPU boot entry. We're using this CPUs 254 * idle thread stack, but a set of temporary page tables. 255 */ 256 asmlinkage void __cpuinit secondary_start_kernel(void) 257 { 258 struct mm_struct *mm = &init_mm; 259 unsigned int cpu = smp_processor_id(); 260 261 printk("CPU%u: Booted secondary processor\n", cpu); 262 263 /* 264 * All kernel threads share the same mm context; grab a 265 * reference and switch to it. 266 */ 267 atomic_inc(&mm->mm_users); 268 atomic_inc(&mm->mm_count); 269 current->active_mm = mm; 270 cpu_set(cpu, mm->cpu_vm_mask); 271 cpu_switch_mm(mm->pgd, mm); 272 enter_lazy_tlb(mm, current); 273 local_flush_tlb_all(); 274 275 cpu_init(); 276 preempt_disable(); 277 278 /* 279 * Give the platform a chance to do its own initialisation. 280 */ 281 platform_secondary_init(cpu); 282 283 /* 284 * Enable local interrupts. 285 */ 286 local_irq_enable(); 287 local_fiq_enable(); 288 289 calibrate_delay(); 290 291 smp_store_cpu_info(cpu); 292 293 /* 294 * OK, now it's safe to let the boot CPU continue 295 */ 296 cpu_set(cpu, cpu_online_map); 297 298 /* 299 * Setup local timer for this CPU. 300 */ 301 local_timer_setup(cpu); 302 303 /* 304 * OK, it's off to the idle thread for us 305 */ 306 cpu_idle(); 307 } 308 309 /* 310 * Called by both boot and secondaries to move global data into 311 * per-processor storage. 312 */ 313 void __cpuinit smp_store_cpu_info(unsigned int cpuid) 314 { 315 struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid); 316 317 cpu_info->loops_per_jiffy = loops_per_jiffy; 318 } 319 320 void __init smp_cpus_done(unsigned int max_cpus) 321 { 322 int cpu; 323 unsigned long bogosum = 0; 324 325 for_each_online_cpu(cpu) 326 bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy; 327 328 printk(KERN_INFO "SMP: Total of %d processors activated " 329 "(%lu.%02lu BogoMIPS).\n", 330 num_online_cpus(), 331 bogosum / (500000/HZ), 332 (bogosum / (5000/HZ)) % 100); 333 } 334 335 void __init smp_prepare_boot_cpu(void) 336 { 337 unsigned int cpu = smp_processor_id(); 338 339 per_cpu(cpu_data, cpu).idle = current; 340 } 341 342 static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg) 343 { 344 unsigned long flags; 345 unsigned int cpu; 346 347 local_irq_save(flags); 348 349 for_each_cpu_mask(cpu, callmap) { 350 struct ipi_data *ipi = &per_cpu(ipi_data, cpu); 351 352 spin_lock(&ipi->lock); 353 ipi->bits |= 1 << msg; 354 spin_unlock(&ipi->lock); 355 } 356 357 /* 358 * Call the platform specific cross-CPU call function. 359 */ 360 smp_cross_call(callmap); 361 362 local_irq_restore(flags); 363 } 364 365 /* 366 * You must not call this function with disabled interrupts, from a 367 * hardware interrupt handler, nor from a bottom half handler. 368 */ 369 static int smp_call_function_on_cpu(void (*func)(void *info), void *info, 370 int retry, int wait, cpumask_t callmap) 371 { 372 struct smp_call_struct data; 373 unsigned long timeout; 374 int ret = 0; 375 376 data.func = func; 377 data.info = info; 378 data.wait = wait; 379 380 cpu_clear(smp_processor_id(), callmap); 381 if (cpus_empty(callmap)) 382 goto out; 383 384 data.pending = callmap; 385 if (wait) 386 data.unfinished = callmap; 387 388 /* 389 * try to get the mutex on smp_call_function_data 390 */ 391 spin_lock(&smp_call_function_lock); 392 smp_call_function_data = &data; 393 394 send_ipi_message(callmap, IPI_CALL_FUNC); 395 396 timeout = jiffies + HZ; 397 while (!cpus_empty(data.pending) && time_before(jiffies, timeout)) 398 barrier(); 399 400 /* 401 * did we time out? 402 */ 403 if (!cpus_empty(data.pending)) { 404 /* 405 * this may be causing our panic - report it 406 */ 407 printk(KERN_CRIT 408 "CPU%u: smp_call_function timeout for %p(%p)\n" 409 " callmap %lx pending %lx, %swait\n", 410 smp_processor_id(), func, info, *cpus_addr(callmap), 411 *cpus_addr(data.pending), wait ? "" : "no "); 412 413 /* 414 * TRACE 415 */ 416 timeout = jiffies + (5 * HZ); 417 while (!cpus_empty(data.pending) && time_before(jiffies, timeout)) 418 barrier(); 419 420 if (cpus_empty(data.pending)) 421 printk(KERN_CRIT " RESOLVED\n"); 422 else 423 printk(KERN_CRIT " STILL STUCK\n"); 424 } 425 426 /* 427 * whatever happened, we're done with the data, so release it 428 */ 429 smp_call_function_data = NULL; 430 spin_unlock(&smp_call_function_lock); 431 432 if (!cpus_empty(data.pending)) { 433 ret = -ETIMEDOUT; 434 goto out; 435 } 436 437 if (wait) 438 while (!cpus_empty(data.unfinished)) 439 barrier(); 440 out: 441 442 return 0; 443 } 444 445 int smp_call_function(void (*func)(void *info), void *info, int retry, 446 int wait) 447 { 448 return smp_call_function_on_cpu(func, info, retry, wait, 449 cpu_online_map); 450 } 451 452 void show_ipi_list(struct seq_file *p) 453 { 454 unsigned int cpu; 455 456 seq_puts(p, "IPI:"); 457 458 for_each_present_cpu(cpu) 459 seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count); 460 461 seq_putc(p, '\n'); 462 } 463 464 void show_local_irqs(struct seq_file *p) 465 { 466 unsigned int cpu; 467 468 seq_printf(p, "LOC: "); 469 470 for_each_present_cpu(cpu) 471 seq_printf(p, "%10u ", irq_stat[cpu].local_timer_irqs); 472 473 seq_putc(p, '\n'); 474 } 475 476 static void ipi_timer(struct pt_regs *regs) 477 { 478 int user = user_mode(regs); 479 480 irq_enter(); 481 profile_tick(CPU_PROFILING, regs); 482 update_process_times(user); 483 irq_exit(); 484 } 485 486 #ifdef CONFIG_LOCAL_TIMERS 487 asmlinkage void do_local_timer(struct pt_regs *regs) 488 { 489 int cpu = smp_processor_id(); 490 491 if (local_timer_ack()) { 492 irq_stat[cpu].local_timer_irqs++; 493 ipi_timer(regs); 494 } 495 } 496 #endif 497 498 /* 499 * ipi_call_function - handle IPI from smp_call_function() 500 * 501 * Note that we copy data out of the cross-call structure and then 502 * let the caller know that we're here and have done with their data 503 */ 504 static void ipi_call_function(unsigned int cpu) 505 { 506 struct smp_call_struct *data = smp_call_function_data; 507 void (*func)(void *info) = data->func; 508 void *info = data->info; 509 int wait = data->wait; 510 511 cpu_clear(cpu, data->pending); 512 513 func(info); 514 515 if (wait) 516 cpu_clear(cpu, data->unfinished); 517 } 518 519 static DEFINE_SPINLOCK(stop_lock); 520 521 /* 522 * ipi_cpu_stop - handle IPI from smp_send_stop() 523 */ 524 static void ipi_cpu_stop(unsigned int cpu) 525 { 526 spin_lock(&stop_lock); 527 printk(KERN_CRIT "CPU%u: stopping\n", cpu); 528 dump_stack(); 529 spin_unlock(&stop_lock); 530 531 cpu_clear(cpu, cpu_online_map); 532 533 local_fiq_disable(); 534 local_irq_disable(); 535 536 while (1) 537 cpu_relax(); 538 } 539 540 /* 541 * Main handler for inter-processor interrupts 542 * 543 * For ARM, the ipimask now only identifies a single 544 * category of IPI (Bit 1 IPIs have been replaced by a 545 * different mechanism): 546 * 547 * Bit 0 - Inter-processor function call 548 */ 549 asmlinkage void do_IPI(struct pt_regs *regs) 550 { 551 unsigned int cpu = smp_processor_id(); 552 struct ipi_data *ipi = &per_cpu(ipi_data, cpu); 553 554 ipi->ipi_count++; 555 556 for (;;) { 557 unsigned long msgs; 558 559 spin_lock(&ipi->lock); 560 msgs = ipi->bits; 561 ipi->bits = 0; 562 spin_unlock(&ipi->lock); 563 564 if (!msgs) 565 break; 566 567 do { 568 unsigned nextmsg; 569 570 nextmsg = msgs & -msgs; 571 msgs &= ~nextmsg; 572 nextmsg = ffz(~nextmsg); 573 574 switch (nextmsg) { 575 case IPI_TIMER: 576 ipi_timer(regs); 577 break; 578 579 case IPI_RESCHEDULE: 580 /* 581 * nothing more to do - eveything is 582 * done on the interrupt return path 583 */ 584 break; 585 586 case IPI_CALL_FUNC: 587 ipi_call_function(cpu); 588 break; 589 590 case IPI_CPU_STOP: 591 ipi_cpu_stop(cpu); 592 break; 593 594 default: 595 printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n", 596 cpu, nextmsg); 597 break; 598 } 599 } while (msgs); 600 } 601 } 602 603 void smp_send_reschedule(int cpu) 604 { 605 send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE); 606 } 607 608 void smp_send_timer(void) 609 { 610 cpumask_t mask = cpu_online_map; 611 cpu_clear(smp_processor_id(), mask); 612 send_ipi_message(mask, IPI_TIMER); 613 } 614 615 void smp_send_stop(void) 616 { 617 cpumask_t mask = cpu_online_map; 618 cpu_clear(smp_processor_id(), mask); 619 send_ipi_message(mask, IPI_CPU_STOP); 620 } 621 622 /* 623 * not supported here 624 */ 625 int __init setup_profiling_timer(unsigned int multiplier) 626 { 627 return -EINVAL; 628 } 629 630 static int 631 on_each_cpu_mask(void (*func)(void *), void *info, int retry, int wait, 632 cpumask_t mask) 633 { 634 int ret = 0; 635 636 preempt_disable(); 637 638 ret = smp_call_function_on_cpu(func, info, retry, wait, mask); 639 if (cpu_isset(smp_processor_id(), mask)) 640 func(info); 641 642 preempt_enable(); 643 644 return ret; 645 } 646 647 /**********************************************************************/ 648 649 /* 650 * TLB operations 651 */ 652 struct tlb_args { 653 struct vm_area_struct *ta_vma; 654 unsigned long ta_start; 655 unsigned long ta_end; 656 }; 657 658 static inline void ipi_flush_tlb_all(void *ignored) 659 { 660 local_flush_tlb_all(); 661 } 662 663 static inline void ipi_flush_tlb_mm(void *arg) 664 { 665 struct mm_struct *mm = (struct mm_struct *)arg; 666 667 local_flush_tlb_mm(mm); 668 } 669 670 static inline void ipi_flush_tlb_page(void *arg) 671 { 672 struct tlb_args *ta = (struct tlb_args *)arg; 673 674 local_flush_tlb_page(ta->ta_vma, ta->ta_start); 675 } 676 677 static inline void ipi_flush_tlb_kernel_page(void *arg) 678 { 679 struct tlb_args *ta = (struct tlb_args *)arg; 680 681 local_flush_tlb_kernel_page(ta->ta_start); 682 } 683 684 static inline void ipi_flush_tlb_range(void *arg) 685 { 686 struct tlb_args *ta = (struct tlb_args *)arg; 687 688 local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end); 689 } 690 691 static inline void ipi_flush_tlb_kernel_range(void *arg) 692 { 693 struct tlb_args *ta = (struct tlb_args *)arg; 694 695 local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end); 696 } 697 698 void flush_tlb_all(void) 699 { 700 on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1); 701 } 702 703 void flush_tlb_mm(struct mm_struct *mm) 704 { 705 cpumask_t mask = mm->cpu_vm_mask; 706 707 on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, 1, mask); 708 } 709 710 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) 711 { 712 cpumask_t mask = vma->vm_mm->cpu_vm_mask; 713 struct tlb_args ta; 714 715 ta.ta_vma = vma; 716 ta.ta_start = uaddr; 717 718 on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, 1, mask); 719 } 720 721 void flush_tlb_kernel_page(unsigned long kaddr) 722 { 723 struct tlb_args ta; 724 725 ta.ta_start = kaddr; 726 727 on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1, 1); 728 } 729 730 void flush_tlb_range(struct vm_area_struct *vma, 731 unsigned long start, unsigned long end) 732 { 733 cpumask_t mask = vma->vm_mm->cpu_vm_mask; 734 struct tlb_args ta; 735 736 ta.ta_vma = vma; 737 ta.ta_start = start; 738 ta.ta_end = end; 739 740 on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, 1, mask); 741 } 742 743 void flush_tlb_kernel_range(unsigned long start, unsigned long end) 744 { 745 struct tlb_args ta; 746 747 ta.ta_start = start; 748 ta.ta_end = end; 749 750 on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1, 1); 751 } 752