/*
 *  arch/s390/kernel/smp.c
 *
 *    Copyright (C) IBM Corp. 1999,2006
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *               Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (got from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one which is causing all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */

#include <linux/module.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>

#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>

#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>

extern volatile int __cpu_logical_map[];

/*
 * An array with a pointer to the lowcore of every CPU.
 */
struct _lowcore *lowcore_ptr[NR_CPUS];

cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t cpu_possible_map = CPU_MASK_NONE;

static struct task_struct *current_set[NR_CPUS];

/*
 * Reboot, halt and power_off routines for SMP.
 */
extern char vmhalt_cmd[];
extern char vmpoff_cmd[];

extern void reipl(unsigned long devno);
extern void reipl_diag(void);

static void smp_ext_bitcall(int, ec_bit_sig);
static void smp_ext_bitcall_others(ec_bit_sig);

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

static struct call_data_struct *call_data;

/*
 * 'Call function' interrupt callback
 */
static void do_call_function(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	atomic_inc(&call_data->started);
	(*func)(info);
	if (wait)
		atomic_inc(&call_data->finished);
}
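
/*
 * Caller/callee handshake used by the smp_call_function* routines below:
 * the sending cpu publishes its request in call_data under call_lock and
 * then spins until call_data->started (and, if wait was requested,
 * call_data->finished) reaches the number of target cpus; each target
 * runs do_call_function() from the external interrupt handler and
 * increments the counters.
 *
 * Illustrative use only (my_flush is a made-up, caller-defined helper):
 *
 *	static void my_flush(void *info)
 *	{
 *		... fast, non-blocking work ...
 *	}
 *
 *	smp_call_function(my_flush, NULL, 0, 1);
 */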

/*
 * smp_call_function: send a 'generic call function' IPI to all other
 * CPUs in the system and run <func> there.
 *
 * <func>: The function to run. This must be fast and non-blocking.
 * <info>: An arbitrary pointer to pass to the function.
 * <nonatomic>: Currently unused.
 * <wait>: If true, wait (atomically) until the function has completed
 *	   on the other CPUs.
 *
 * Returns 0 on success, else a negative status code. Does not return
 * until the remote CPUs are nearly ready to execute <func> or have
 * already executed it.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
		      int wait)
{
	struct call_data_struct data;
	int cpus = num_online_cpus() - 1;

	if (cpus <= 0)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&call_lock);
	call_data = &data;
	/* Send a message to all other CPUs and wait for them to respond */
	smp_ext_bitcall_others(ec_call_function);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	spin_unlock(&call_lock);

	return 0;
}

/*
 * Call a function on one CPU.
 * cpu: the CPU the function should be executed on
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler. You may call it from a bottom half.
 *
 * It is guaranteed that the called function runs on the specified CPU;
 * preemption is disabled while it runs.
 */
int smp_call_function_on(void (*func) (void *info), void *info,
			 int nonatomic, int wait, int cpu)
{
	struct call_data_struct data;
	int curr_cpu;

	if (!cpu_online(cpu))
		return -EINVAL;

	/* disable preemption for local function call */
	curr_cpu = get_cpu();

	if (curr_cpu == cpu) {
		/* direct call to function */
		func(info);
		put_cpu();
		return 0;
	}

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock_bh(&call_lock);
	call_data = &data;
	smp_ext_bitcall(cpu, ec_call_function);

	/* Wait for response */
	while (atomic_read(&data.started) != 1)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != 1)
			cpu_relax();

	spin_unlock_bh(&call_lock);
	put_cpu();
	return 0;
}
EXPORT_SYMBOL(smp_call_function_on);

static inline void do_send_stop(void)
{
	int cpu, rc;

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor(cpu, sigp_stop);
		} while (rc == sigp_busy);
	}
}

static inline void do_store_status(void)
{
	int cpu, rc;

	/* store status of all processors in their lowcores (real 0) */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor_p(
				(__u32)(unsigned long) lowcore_ptr[cpu], cpu,
				sigp_store_status_at_address);
		} while (rc == sigp_busy);
	}
}

/*
 * this function sends a 'stop' sigp to all other CPUs in the system.
 * it goes straight through.
 */
void smp_send_stop(void)
{
	/* write magic number to zero page (absolute 0) */
	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;

	/* stop other processors. */
	do_send_stop();

	/* store status of other processors. */
	do_store_status();
}
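
/*
 * smp_send_stop() leaves the calling cpu as the only one still running:
 * the other cpus are sent a sigp_stop order and their register state is
 * saved to their lowcores. The halt and power_off callbacks below call
 * it before shutting the machine down.
 */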

/*
 * Reboot, halt and power_off routines for SMP.
 */

static void do_machine_restart(void * __unused)
{
	int cpu;
	static atomic_t cpuid = ATOMIC_INIT(-1);

	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
		signal_processor(smp_processor_id(), sigp_stop);

	/* Wait for all other cpus to enter stopped state */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		while (!smp_cpu_not_running(cpu))
			cpu_relax();
	}

	/* Store status of other cpus. */
	do_store_status();

	/*
	 * Finally call reipl. Because we waited for all other
	 * cpus to enter this function we know that they do
	 * not hold any s390irq-locks (the cpus have been
	 * interrupted by an external interrupt and s390irq
	 * locks are always held disabled).
	 */
	reipl_diag();

	if (MACHINE_IS_VM)
		cpcmd("IPL", NULL, 0, NULL);
	else
		reipl(0x10000 | S390_lowcore.ipl_device);
}

void machine_restart_smp(char * __unused)
{
	on_each_cpu(do_machine_restart, NULL, 0, 0);
}

static void do_wait_for_stop(void)
{
	unsigned long cr[16];

	__ctl_store(cr, 0, 15);
	cr[0] &= ~0xffff;
	cr[6] = 0;
	__ctl_load(cr, 0, 15);
	for (;;)
		enabled_wait();
}

static void do_machine_halt(void * __unused)
{
	static atomic_t cpuid = ATOMIC_INIT(-1);

	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
		smp_send_stop();
		if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
			cpcmd(vmhalt_cmd, NULL, 0, NULL);
		signal_processor(smp_processor_id(),
				 sigp_stop_and_store_status);
	}
	do_wait_for_stop();
}

void machine_halt_smp(void)
{
	on_each_cpu(do_machine_halt, NULL, 0, 0);
}

static void do_machine_power_off(void * __unused)
{
	static atomic_t cpuid = ATOMIC_INIT(-1);

	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
		smp_send_stop();
		if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
			cpcmd(vmpoff_cmd, NULL, 0, NULL);
		signal_processor(smp_processor_id(),
				 sigp_stop_and_store_status);
	}
	do_wait_for_stop();
}

void machine_power_off_smp(void)
{
	on_each_cpu(do_machine_power_off, NULL, 0, 0);
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
void do_ext_call_interrupt(struct pt_regs *regs, __u16 code)
{
	unsigned long bits;

	/*
	 * handle bit signal external calls
	 *
	 * For the ec_schedule signal we have to do nothing. All the work
	 * is done automatically when we return from the interrupt.
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		do_call_function();
}

/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
		udelay(10);
}
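
/*
 * The emergency signal order raises external interrupt 0x1201 on the
 * target cpu; smp_prepare_cpus() registers do_ext_call_interrupt() for
 * exactly that code, which then acts on the bits found in ext_call_fast.
 * A sigp_busy condition only means the target has not yet accepted a
 * previous order, so the senders above and below retry after a short
 * delay.
 */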

/*
 * Send an external call sigp to every other cpu in the system and
 * return without waiting for its completion.
 */
static void smp_ext_bitcall_others(ec_bit_sig sig)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		/*
		 * Set signaling bit in lowcore of target cpu and kick it
		 */
		set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
		while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
			udelay(10);
	}
}

#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
void smp_ptlb_callback(void *info)
{
	local_flush_tlb();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
typedef struct {
	__u16 start_ctl;
	__u16 end_ctl;
	unsigned long orvals[16];
	unsigned long andvals[16];
} ec_creg_mask_parms;

/*
 * callback for setting/clearing control bits
 */
void smp_ctl_bit_callback(void *info)
{
	ec_creg_mask_parms *pp;
	unsigned long cregs[16];
	int i;

	pp = (ec_creg_mask_parms *) info;
	__ctl_store(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
	for (i = pp->start_ctl; i <= pp->end_ctl; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	ec_creg_mask_parms parms;

	parms.start_ctl = cr;
	parms.end_ctl = cr;
	parms.orvals[cr] = 1 << bit;
	parms.andvals[cr] = -1L;
	preempt_disable();
	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
	__ctl_set_bit(cr, bit);
	preempt_enable();
}

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	ec_creg_mask_parms parms;

	parms.start_ctl = cr;
	parms.end_ctl = cr;
	parms.orvals[cr] = 0;
	parms.andvals[cr] = ~(1L << bit);
	preempt_disable();
	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
	__ctl_clear_bit(cr, bit);
	preempt_enable();
}

/*
 * Let's check how many CPUs we have.
 */
static unsigned int __init smp_count_cpus(void)
{
	unsigned int cpu, num_cpus;
	__u16 boot_cpu_addr;

	/*
	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
	 */
	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	current_thread_info()->cpu = 0;
	num_cpus = 1;
	for (cpu = 0; cpu <= 65535; cpu++) {
		if ((__u16) cpu == boot_cpu_addr)
			continue;
		__cpu_logical_map[1] = (__u16) cpu;
		if (signal_processor(1, sigp_sense) == sigp_not_operational)
			continue;
		num_cpus++;
	}

	printk("Detected %d CPUs\n", (int) num_cpus);
	printk("Boot cpu address %2X\n", boot_cpu_addr);

	return num_cpus;
}
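
/*
 * Note on the detection loop above: smp_count_cpus() probes every
 * possible cpu address (0 through 65535) by temporarily binding logical
 * cpu 1 to that address and issuing sigp_sense; any address that does
 * not answer with sigp_not_operational is counted as an available cpu.
 */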

/*
 * Activate a secondary processor.
 */
extern void init_cpu_timer(void);
extern void init_cpu_vtimer(void);
extern int pfault_init(void);
extern void pfault_fini(void);

int __devinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	preempt_disable();
	/* init per CPU timer */
	init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
	init_cpu_vtimer();
#endif
#ifdef CONFIG_PFAULT
	/* Enable pfault pseudo page faults on this cpu. */
	if (MACHINE_IS_VM)
		pfault_init();
#endif
	/* Mark this cpu as online */
	cpu_set(smp_processor_id(), cpu_online_map);
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info(&S390_lowcore.cpu_data);
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}

static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/*
	 * don't care about the psw and regs settings since we'll never
	 * reschedule the forked task.
	 */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	current_set[cpu] = p;
}

/* Reserving and releasing of CPUs */

static DEFINE_SPINLOCK(smp_reserve_lock);
static int smp_cpu_reserved[NR_CPUS];

int smp_get_cpu(cpumask_t cpu_mask)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&smp_reserve_lock, flags);
	/* Try to find an already reserved cpu. */
	for_each_cpu_mask(cpu, cpu_mask) {
		if (smp_cpu_reserved[cpu] != 0) {
			smp_cpu_reserved[cpu]++;
			/* Found one. */
			goto out;
		}
	}
	/* Reserve a new cpu from cpu_mask. */
	for_each_cpu_mask(cpu, cpu_mask) {
		if (cpu_online(cpu)) {
			smp_cpu_reserved[cpu]++;
			goto out;
		}
	}
	cpu = -ENODEV;
out:
	spin_unlock_irqrestore(&smp_reserve_lock, flags);
	return cpu;
}

void smp_put_cpu(int cpu)
{
	unsigned long flags;

	spin_lock_irqsave(&smp_reserve_lock, flags);
	smp_cpu_reserved[cpu]--;
	spin_unlock_irqrestore(&smp_reserve_lock, flags);
}

static inline int cpu_stopped(int cpu)
{
	__u32 status;

	/* Check for stopped state */
	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
	    sigp_status_stored) {
		if (status & 0x40)
			return 1;
	}
	return 0;
}

/* Upping and downing of CPUs */

int __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	struct _lowcore *cpu_lowcore;
	struct stack_frame *sf;
	sigp_ccode ccode;
	int curr_cpu;

	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
		__cpu_logical_map[cpu] = (__u16) curr_cpu;
		if (cpu_stopped(cpu))
			break;
	}

	if (!cpu_stopped(cpu))
		return -ENODEV;

	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
				   cpu, sigp_set_prefix);
	if (ccode) {
		printk("sigp_set_prefix failed for cpu %d "
		       "with condition code %d\n",
		       (int) cpu, (int) ccode);
		return -EIO;
	}

	idle = current_set[cpu];
	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		task_stack_page(idle) + (THREAD_SIZE);
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
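	/*
	 * Hand the new cpu its initial context through its lowcore: the
	 * current control registers (stored just above), the access
	 * registers (stored by the stam below), the per-cpu offset, the
	 * idle task and the cpu number. They take effect once the cpu is
	 * kicked with sigp_restart further down.
	 */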
	__asm__ __volatile__("stam 0,15,0(%0)"
			     : : "a" (&cpu_lowcore->access_regs_save_area)
			     : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_data.cpu_nr = cpu;
	eieio();

	while (signal_processor(cpu, sigp_restart) == sigp_busy)
		udelay(10);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

static unsigned int __initdata additional_cpus;
static unsigned int __initdata possible_cpus;

void __init smp_setup_cpu_possible_map(void)
{
	unsigned int phy_cpus, pos_cpus, cpu;

	phy_cpus = smp_count_cpus();
	pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);

	if (possible_cpus)
		pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);

	for (cpu = 0; cpu < pos_cpus; cpu++)
		cpu_set(cpu, cpu_possible_map);

	phy_cpus = min(phy_cpus, pos_cpus);

	for (cpu = 0; cpu < phy_cpus; cpu++)
		cpu_set(cpu, cpu_present_map);
}

#ifdef CONFIG_HOTPLUG_CPU

static int __init setup_additional_cpus(char *s)
{
	additional_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("additional_cpus", setup_additional_cpus);

static int __init setup_possible_cpus(char *s)
{
	possible_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);

int __cpu_disable(void)
{
	unsigned long flags;
	ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	spin_lock_irqsave(&smp_reserve_lock, flags);
	if (smp_cpu_reserved[cpu] != 0) {
		spin_unlock_irqrestore(&smp_reserve_lock, flags);
		return -EBUSY;
	}
	cpu_clear(cpu, cpu_online_map);

#ifdef CONFIG_PFAULT
	/* Disable pfault pseudo page faults on this cpu. */
	if (MACHINE_IS_VM)
		pfault_fini();
#endif

	/* disable all external interrupts */
	cr_parms.start_ctl = 0;
	cr_parms.end_ctl = 0;
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
				1<<11 | 1<<10 | 1<< 6 | 1<< 4);
	smp_ctl_bit_callback(&cr_parms);

	/* disable all I/O interrupts */
	cr_parms.start_ctl = 6;
	cr_parms.end_ctl = 6;
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
				1<<27 | 1<<26 | 1<<25 | 1<<24);
	smp_ctl_bit_callback(&cr_parms);

	/* disable most machine checks */
	cr_parms.start_ctl = 14;
	cr_parms.end_ctl = 14;
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);
	smp_ctl_bit_callback(&cr_parms);

	spin_unlock_irqrestore(&smp_reserve_lock, flags);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!smp_cpu_not_running(cpu))
		cpu_relax();
	printk("Processor %d spun down\n", cpu);
}

void cpu_die(void)
{
	idle_task_exit();
	signal_processor(smp_processor_id(), sigp_stop);
	BUG();
	for (;;);
}

#endif /* CONFIG_HOTPLUG_CPU */
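
/*
 * Hotplug summary for the block above: __cpu_disable() runs on the cpu
 * that is going away and shuts off its external, I/O and machine check
 * interrupt sources; cpu_die() then stops it with sigp_stop, while
 * __cpu_die(), running on another cpu, spins until smp_cpu_not_running()
 * reports that the target really is down.
 */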

/*
 * Cycle through the processors and set up structures.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned long stack;
	unsigned int cpu;
	int i;

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
	/*
	 * Initialize prefix pages and stacks for all possible cpus
	 */
	print_cpu_info(&S390_lowcore.cpu_data);

	for_each_possible_cpu(i) {
		lowcore_ptr[i] = (struct _lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA,
					 sizeof(void *) == 8 ? 1 : 0);
		stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		if (lowcore_ptr[i] == NULL || stack == 0ULL)
			panic("smp_boot_cpus failed to allocate memory\n");

		*(lowcore_ptr[i]) = S390_lowcore;
		lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE);
		stack = __get_free_pages(GFP_KERNEL, 0);
		if (stack == 0ULL)
			panic("smp_boot_cpus failed to allocate memory\n");
		lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE);
#ifndef CONFIG_64BIT
		if (MACHINE_HAS_IEEE) {
			lowcore_ptr[i]->extended_save_area_addr =
				(__u32) __get_free_pages(GFP_KERNEL, 0);
			if (lowcore_ptr[i]->extended_save_area_addr == 0)
				panic("smp_boot_cpus failed to "
				      "allocate memory\n");
		}
#endif
	}
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		ctl_set_bit(14, 29); /* enable extended save area */
#endif
	set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);

	for_each_possible_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}

void __devinit smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	cpu_set(0, cpu_online_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
}

void smp_cpus_done(unsigned int max_cpus)
{
	cpu_present_map = cpu_possible_map;
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static int __init topology_init(void)
{
	int cpu;
	int ret;

	for_each_possible_cpu(cpu) {
		ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
		if (ret)
			printk(KERN_WARNING "topology_init: register_cpu %d "
			       "failed (%d)\n", cpu, ret);
	}
	return 0;
}

subsys_initcall(topology_init);

EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
EXPORT_SYMBOL(lowcore_ptr);
EXPORT_SYMBOL(smp_ctl_set_bit);
EXPORT_SYMBOL(smp_ctl_clear_bit);
EXPORT_SYMBOL(smp_call_function);
EXPORT_SYMBOL(smp_get_cpu);
EXPORT_SYMBOL(smp_put_cpu);