/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>

/*
 * bitmask of present and online CPUs.
 * The present bitmask indicates that the CPU is physically present.
 * The online bitmask indicates that the CPU is up and running.
 */
cpumask_t cpu_possible_map;
cpumask_t cpu_online_map;

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

/*
 * structures for inter-processor calls
 * - A collection of single bit ipi messages.
 */
struct ipi_data {
	spinlock_t lock;
	unsigned long ipi_count;
	unsigned long bits;
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
	.lock	= SPIN_LOCK_UNLOCKED,
};

enum ipi_msg_type {
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};

struct smp_call_struct {
	void (*func)(void *info);
	void *info;
	int wait;
	cpumask_t pending;
	cpumask_t unfinished;
};

static struct smp_call_struct * volatile smp_call_function_data;
static DEFINE_SPINLOCK(smp_call_function_lock);

int __cpuinit __cpu_up(unsigned int cpu)
{
	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
	struct task_struct *idle = ci->idle;
	pgd_t *pgd;
	pmd_t *pmd;
	int ret;

	/*
	 * Spawn a new process manually, if not already done.
	 * Grab a pointer to its task struct so we can mess with it
	 */
	if (!idle) {
		idle = fork_idle(cpu);
		if (IS_ERR(idle)) {
			printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
			return PTR_ERR(idle);
		}
		ci->idle = idle;
	}

	/*
	 * Allocate initial page tables to allow the new CPU to
	 * enable the MMU safely.  This essentially means a set
	 * of our "standard" page tables, with the addition of
	 * a 1:1 mapping for the physical address of the kernel.
	 */
	pgd = pgd_alloc(&init_mm);
	pmd = pmd_offset(pgd, PHYS_OFFSET);
	*pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
		     PMD_TYPE_SECT | PMD_SECT_AP_WRITE);

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	secondary_data.pgdir = virt_to_phys(pgd);
	wmb();

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		unsigned long timeout;

		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		timeout = jiffies + HZ;
		while (time_before(jiffies, timeout)) {
			if (cpu_online(cpu))
				break;

			udelay(10);
			barrier();
		}

		if (!cpu_online(cpu))
			ret = -EIO;
	}

	secondary_data.stack = NULL;
	secondary_data.pgdir = 0;

	*pmd_offset(pgd, PHYS_OFFSET) = __pmd(0);
	pgd_free(pgd);

	if (ret) {
		printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);

		/*
		 * FIXME: We need to clean up the new idle thread. --rmk
		 */
	}

	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpuexit __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	struct task_struct *p;
	int ret;

	ret = mach_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	cpu_clear(cpu, cpu_online_map);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	local_timer_stop(cpu);

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();

	read_lock(&tasklist_lock);
	for_each_process(p) {
		if (p->mm)
			cpu_clear(cpu, p->mm->cpu_vm_mask);
	}
	read_unlock(&tasklist_lock);

	return 0;
}

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpuexit __cpu_die(unsigned int cpu)
{
	if (!platform_cpu_kill(cpu))
		printk("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __cpuexit cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	local_irq_disable();
	idle_task_exit();

	/*
	 * actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific
	 */
	platform_cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 * (THREAD_SIZE - 8 is THREAD_START_SP, the same stack top the
	 * CPU was given when it was first brought up in __cpu_up().)
	 */
	__asm__("mov	sp, %0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	printk("CPU%u: Booted secondary processor\n", cpu);

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpu_set(cpu, mm->cpu_vm_mask);
	cpu_switch_mm(mm->pgd, mm);
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	cpu_init();
	preempt_disable();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	platform_secondary_init(cpu);

	/*
	 * Enable local interrupts.
	 */
	local_irq_enable();
	local_fiq_enable();

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue
	 */
	cpu_set(cpu, cpu_online_map);

	/*
	 * Setup local timer for this CPU.
	 */
	local_timer_setup(cpu);

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_idle();
}

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	per_cpu(cpu_data, cpu).idle = current;
}

static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
{
	unsigned long flags;
	unsigned int cpu;

	local_irq_save(flags);

	for_each_cpu_mask(cpu, callmap) {
		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

		spin_lock(&ipi->lock);
		ipi->bits |= 1 << msg;
		spin_unlock(&ipi->lock);
	}

	/*
	 * Call the platform specific cross-CPU call function.
	 */
	smp_cross_call(callmap);

	local_irq_restore(flags);
}

/*
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler, nor from a bottom half handler.
 */
static int smp_call_function_on_cpu(void (*func)(void *info), void *info,
				    int retry, int wait, cpumask_t callmap)
{
	struct smp_call_struct data;
	unsigned long timeout;
	int ret = 0;

	data.func = func;
	data.info = info;
	data.wait = wait;

	cpu_clear(smp_processor_id(), callmap);
	if (cpus_empty(callmap))
		goto out;

	data.pending = callmap;
	if (wait)
		data.unfinished = callmap;

	/*
	 * try to get the mutex on smp_call_function_data
	 */
	spin_lock(&smp_call_function_lock);
	smp_call_function_data = &data;

	send_ipi_message(callmap, IPI_CALL_FUNC);

	timeout = jiffies + HZ;
	while (!cpus_empty(data.pending) && time_before(jiffies, timeout))
		barrier();

	/*
	 * did we time out?
	 */
	if (!cpus_empty(data.pending)) {
		/*
		 * this may be causing our panic - report it
		 */
		printk(KERN_CRIT
		       "CPU%u: smp_call_function timeout for %p(%p)\n"
		       "	callmap %lx pending %lx, %swait\n",
		       smp_processor_id(), func, info, *cpus_addr(callmap),
		       *cpus_addr(data.pending), wait ? "" : "no ");

		/*
		 * TRACE
		 */
		timeout = jiffies + (5 * HZ);
		while (!cpus_empty(data.pending) && time_before(jiffies, timeout))
			barrier();

		if (cpus_empty(data.pending))
			printk(KERN_CRIT "	RESOLVED\n");
		else
			printk(KERN_CRIT "	STILL STUCK\n");
	}

	/*
	 * whatever happened, we're done with the data, so release it
	 */
	smp_call_function_data = NULL;
	spin_unlock(&smp_call_function_lock);

	if (!cpus_empty(data.pending)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	if (wait)
		while (!cpus_empty(data.unfinished))
			barrier();
 out:

	return ret;
}

int smp_call_function(void (*func)(void *info), void *info, int retry,
		      int wait)
{
	return smp_call_function_on_cpu(func, info, retry, wait,
					cpu_online_map);
}

void show_ipi_list(struct seq_file *p)
{
	unsigned int cpu;

	seq_puts(p, "IPI:");

	for_each_present_cpu(cpu)
		seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);

	seq_putc(p, '\n');
}

void show_local_irqs(struct seq_file *p)
{
	unsigned int cpu;

	seq_printf(p, "LOC: ");

	for_each_present_cpu(cpu)
		seq_printf(p, "%10u ", irq_stat[cpu].local_timer_irqs);

	seq_putc(p, '\n');
}

static void ipi_timer(struct pt_regs *regs)
{
	int user = user_mode(regs);

	irq_enter();
	profile_tick(CPU_PROFILING, regs);
	update_process_times(user);
	irq_exit();
}

#ifdef CONFIG_LOCAL_TIMERS
asmlinkage void do_local_timer(struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	if (local_timer_ack()) {
		irq_stat[cpu].local_timer_irqs++;
		ipi_timer(regs);
	}
}
#endif

/*
 * ipi_call_function - handle IPI from smp_call_function()
 *
 * Note that we copy data out of the cross-call structure and then
 * let the caller know that we're here and have done with their data
 */
static void ipi_call_function(unsigned int cpu)
{
	struct smp_call_struct *data = smp_call_function_data;
	void (*func)(void *info) = data->func;
	void *info = data->info;
	int wait = data->wait;

	cpu_clear(cpu, data->pending);

	func(info);

	if (wait)
		cpu_clear(cpu, data->unfinished);
}

static DEFINE_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	spin_lock(&stop_lock);
	printk(KERN_CRIT "CPU%u: stopping\n", cpu);
	dump_stack();
	spin_unlock(&stop_lock);

	cpu_clear(cpu, cpu_online_map);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 *
 * For ARM, the ipimask now only identifies a single
 * category of IPI (Bit 1 IPIs have been replaced by a
 * different mechanism):
 *
 *  Bit 0 - Inter-processor function call
 */
asmlinkage void do_IPI(struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

	ipi->ipi_count++;

	for (;;) {
		unsigned long msgs;

		spin_lock(&ipi->lock);
		msgs = ipi->bits;
		ipi->bits = 0;
		spin_unlock(&ipi->lock);

		if (!msgs)
			break;

		do {
			unsigned nextmsg;

			/*
			 * Pick off the lowest pending message bit, remove
			 * it from the set, and convert the isolated bit
			 * into its message number (ffz of the complement).
			 */
			nextmsg = msgs & -msgs;
			msgs &= ~nextmsg;
			nextmsg = ffz(~nextmsg);

			switch (nextmsg) {
			case IPI_TIMER:
				ipi_timer(regs);
				break;

			case IPI_RESCHEDULE:
				/*
				 * nothing more to do - everything is
				 * done on the interrupt return path
				 */
				break;

			case IPI_CALL_FUNC:
				ipi_call_function(cpu);
				break;

			case IPI_CPU_STOP:
				ipi_cpu_stop(cpu);
				break;

			default:
				printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
				       cpu, nextmsg);
				break;
			}
		} while (msgs);
	}
}

void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
}

void smp_send_timer(void)
{
	cpumask_t mask = cpu_online_map;
	cpu_clear(smp_processor_id(), mask);
	send_ipi_message(mask, IPI_TIMER);
}

void smp_send_stop(void)
{
	cpumask_t mask = cpu_online_map;
	cpu_clear(smp_processor_id(), mask);
	send_ipi_message(mask, IPI_CPU_STOP);
}

/*
 * not supported here
 */
int __init setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

static int
on_each_cpu_mask(void (*func)(void *), void *info, int retry, int wait,
		 cpumask_t mask)
{
	int ret = 0;

	preempt_disable();

	ret = smp_call_function_on_cpu(func, info, retry, wait, mask);
	if (cpu_isset(smp_processor_id(), mask))
		func(info);

	preempt_enable();

	return ret;
}

/**********************************************************************/

/*
 * TLB operations
 */
struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};

static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *arg)
{
	struct mm_struct *mm = (struct mm_struct *)arg;

	local_flush_tlb_mm(mm);
}

static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_kernel_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_page(ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	cpumask_t mask = mm->cpu_vm_mask;

	on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, 1, mask);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	cpumask_t mask = vma->vm_mm->cpu_vm_mask;
	struct tlb_args ta;

	ta.ta_vma = vma;
	ta.ta_start = uaddr;

	on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, 1, mask);
}

void flush_tlb_kernel_page(unsigned long kaddr)
{
	struct tlb_args ta;

	ta.ta_start = kaddr;

	on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1, 1);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	cpumask_t mask = vma->vm_mm->cpu_vm_mask;
	struct tlb_args ta;

	ta.ta_vma = vma;
	ta.ta_start = start;
	ta.ta_end = end;

	on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, 1, mask);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct tlb_args ta;

	ta.ta_start = start;
	ta.ta_end = end;

	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1, 1);
}
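
/*
 * Illustrative sketch, not part of the original file and disabled with
 * "#if 0": how a caller elsewhere in a kernel of this vintage might use
 * the four-argument smp_call_function() interface defined above to run
 * a routine on every other online CPU and wait for completion.  The
 * names example_drain_local_state() and example_sync_all_cpus() are
 * hypothetical and exist only for illustration.
 */
#if 0
static void example_drain_local_state(void *info)
{
	/* Runs on each other CPU, in IPI context via ipi_call_function(). */
}

static void example_sync_all_cpus(void)
{
	/*
	 * retry == 0, wait == 1: block until every target CPU has
	 * finished running the function (the data.unfinished loop).
	 */
	smp_call_function(example_drain_local_state, NULL, 0, 1);

	/*
	 * smp_call_function_on_cpu() removes the calling CPU from the
	 * callmap, so run the routine locally as well if it is needed
	 * on this CPU (compare on_each_cpu_mask() above).
	 */
	example_drain_local_state(NULL);
}
#endif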