/*
 *  arch/s390/kernel/smp.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *               Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (got from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one which is causing all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */

#include <linux/module.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>

#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>

#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>

/* prototypes */

extern volatile int __cpu_logical_map[];

/*
 * An array with a pointer to the lowcore of every CPU.
 */

struct _lowcore *lowcore_ptr[NR_CPUS];

cpumask_t cpu_online_map;
cpumask_t cpu_possible_map;

static struct task_struct *current_set[NR_CPUS];

EXPORT_SYMBOL(cpu_online_map);

/*
 * Reboot, halt and power_off routines for SMP.
 */
extern char vmhalt_cmd[];
extern char vmpoff_cmd[];

extern void reipl(unsigned long devno);

static void smp_ext_bitcall(int, ec_bit_sig);
static void smp_ext_bitcall_others(ec_bit_sig);

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

static struct call_data_struct * call_data;

/*
 * 'Call function' interrupt callback
 */
static void do_call_function(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	atomic_inc(&call_data->started);
	(*func)(info);
	if (wait)
		atomic_inc(&call_data->finished);
}

/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */

int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
			int wait)
/*
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or have executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
{
	struct call_data_struct data;
	int cpus = num_online_cpus() - 1;

	if (cpus <= 0)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&call_lock);
	call_data = &data;
	/* Send a message to all other CPUs and wait for them to respond */
	smp_ext_bitcall_others(ec_call_function);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	spin_unlock(&call_lock);

	return 0;
}
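/*
 * Usage sketch (illustrative commentary, not part of the original file):
 * run a fast, non-blocking function on all other cpus and wait for it to
 * finish everywhere. The callback runs in external interrupt context, so
 * it must not sleep. The names example_drain_counter and example_caller
 * are hypothetical.
 */
#if 0
static void example_drain_counter(void *info)
{
	atomic_inc((atomic_t *) info);	/* fast, non-blocking work only */
}

static void example_caller(void)
{
	static atomic_t hits = ATOMIC_INIT(0);

	/* interrupts must be enabled here, see the WARN_ON above */
	smp_call_function(example_drain_counter, &hits, 0, 1);
}
#endif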
/*
 * Call a function on one CPU
 * cpu : the CPU the function should be executed on
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler. You may call it from a bottom half.
 *
 * It is guaranteed that the called function runs on the specified CPU
 * with preemption disabled.
 */
int smp_call_function_on(void (*func) (void *info), void *info,
			 int nonatomic, int wait, int cpu)
{
	struct call_data_struct data;
	int curr_cpu;

	if (!cpu_online(cpu))
		return -EINVAL;

	/* disable preemption for local function call */
	curr_cpu = get_cpu();

	if (curr_cpu == cpu) {
		/* direct call to function */
		func(info);
		put_cpu();
		return 0;
	}

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock_bh(&call_lock);
	call_data = &data;
	smp_ext_bitcall(cpu, ec_call_function);

	/* Wait for response */
	while (atomic_read(&data.started) != 1)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != 1)
			cpu_relax();

	spin_unlock_bh(&call_lock);
	put_cpu();
	return 0;
}
EXPORT_SYMBOL(smp_call_function_on);

static inline void do_send_stop(void)
{
	int cpu, rc;

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor(cpu, sigp_stop);
		} while (rc == sigp_busy);
	}
}

static inline void do_store_status(void)
{
	int cpu, rc;

	/* store status of all processors in their lowcores (real 0) */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor_p(
				(__u32)(unsigned long) lowcore_ptr[cpu], cpu,
				sigp_store_status_at_address);
		} while (rc == sigp_busy);
	}
}

/*
 * this function sends a 'stop' sigp to all other CPUs in the system.
 * it goes straight through.
 */
void smp_send_stop(void)
{
	/* write magic number to zero page (absolute 0) */
	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;

	/* stop other processors. */
	do_send_stop();

	/* store status of other processors. */
	do_store_status();
}
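/*
 * Usage sketch (illustrative commentary, not part of the original file):
 * run a function on one particular cpu; smp_call_function_on() falls back
 * to a direct call when we already run there. The names
 * example_read_local_state and example_query_cpu are hypothetical.
 */
#if 0
static void example_read_local_state(void *info)
{
	/* runs on the target cpu with preemption disabled */
	*(int *) info = smp_processor_id();
}

static int example_query_cpu(int cpu)
{
	int where = -1;

	if (smp_call_function_on(example_read_local_state, &where, 0, 1, cpu))
		return -EINVAL;	/* cpu was not online */
	return where;
}
#endif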
/*
 * Reboot, halt and power_off routines for SMP.
 */

static void do_machine_restart(void * __unused)
{
	int cpu;
	static atomic_t cpuid = ATOMIC_INIT(-1);

	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
		signal_processor(smp_processor_id(), sigp_stop);

	/* Wait for all other cpus to enter stopped state */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		while (!smp_cpu_not_running(cpu))
			cpu_relax();
	}

	/* Store status of other cpus. */
	do_store_status();

	/*
	 * Finally call reipl. Because we waited for all other
	 * cpus to enter this function we know that they do
	 * not hold any s390irq-locks (the cpus have been
	 * interrupted by an external interrupt and s390irq
	 * locks are always held disabled).
	 */
	if (MACHINE_IS_VM)
		cpcmd("IPL", NULL, 0, NULL);
	else
		reipl(0x10000 | S390_lowcore.ipl_device);
}

void machine_restart_smp(char * __unused)
{
	on_each_cpu(do_machine_restart, NULL, 0, 0);
}

static void do_wait_for_stop(void)
{
	unsigned long cr[16];

	__ctl_store(cr, 0, 15);
	cr[0] &= ~0xffff;
	cr[6] = 0;
	__ctl_load(cr, 0, 15);
	for (;;)
		enabled_wait();
}

static void do_machine_halt(void * __unused)
{
	static atomic_t cpuid = ATOMIC_INIT(-1);

	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
		smp_send_stop();
		if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
			cpcmd(vmhalt_cmd, NULL, 0, NULL);
		signal_processor(smp_processor_id(),
				 sigp_stop_and_store_status);
	}
	do_wait_for_stop();
}

void machine_halt_smp(void)
{
	on_each_cpu(do_machine_halt, NULL, 0, 0);
}

static void do_machine_power_off(void * __unused)
{
	static atomic_t cpuid = ATOMIC_INIT(-1);

	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
		smp_send_stop();
		if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
			cpcmd(vmpoff_cmd, NULL, 0, NULL);
		signal_processor(smp_processor_id(),
				 sigp_stop_and_store_status);
	}
	do_wait_for_stop();
}

void machine_power_off_smp(void)
{
	on_each_cpu(do_machine_power_off, NULL, 0, 0);
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */

void do_ext_call_interrupt(struct pt_regs *regs, __u16 code)
{
	unsigned long bits;

	/*
	 * handle bit signal external calls
	 *
	 * For the ec_schedule signal we have to do nothing. All the work
	 * is done automatically when we return from the interrupt.
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		do_call_function();
}

/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
		udelay(10);
}
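/*
 * Note on the signaling protocol (added commentary, not from the original
 * source): the sender publishes a request by setting a bit in the target
 * cpu's lowcore ext_call_fast word and then raises an emergency-signal
 * external interrupt via sigp, retrying while the target is busy. The
 * receiver atomically swaps the word to zero in do_ext_call_interrupt()
 * above, so several bits set between two interrupts are collected and
 * handled in a single pass, and no request is lost.
 */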
/*
 * Send an external call sigp to every other cpu in the system and
 * return without waiting for its completion.
 */
static void smp_ext_bitcall_others(ec_bit_sig sig)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		/*
		 * Set signaling bit in lowcore of target cpu and kick it
		 */
		set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
		while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
			udelay(10);
	}
}

#ifndef CONFIG_ARCH_S390X
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
void smp_ptlb_callback(void *info)
{
	local_flush_tlb();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_ARCH_S390X */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
typedef struct {
	__u16 start_ctl;
	__u16 end_ctl;
	unsigned long orvals[16];
	unsigned long andvals[16];
} ec_creg_mask_parms;

/*
 * callback for setting/clearing control bits
 */
void smp_ctl_bit_callback(void *info)
{
	ec_creg_mask_parms *pp;
	unsigned long cregs[16];
	int i;

	pp = (ec_creg_mask_parms *) info;
	__ctl_store(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
	for (i = pp->start_ctl; i <= pp->end_ctl; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	ec_creg_mask_parms parms;

	parms.start_ctl = cr;
	parms.end_ctl = cr;
	parms.orvals[cr] = 1 << bit;
	parms.andvals[cr] = -1L;
	preempt_disable();
	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
	__ctl_set_bit(cr, bit);
	preempt_enable();
}

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	ec_creg_mask_parms parms;

	parms.start_ctl = cr;
	parms.end_ctl = cr;
	parms.orvals[cr] = 0;
	parms.andvals[cr] = ~(1L << bit);
	preempt_disable();
	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
	__ctl_clear_bit(cr, bit);
	preempt_enable();
}
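/*
 * Usage sketch (illustrative commentary, not from the original source):
 * mirror a control register update on every cpu, then apply it locally.
 * The bit argument is the shift count used for 1 << bit, as in the
 * ctl_set_bit(14, 29) "enable extended save area" call further down.
 */
#if 0
static void example_toggle_cr14_bit(void)
{
	smp_ctl_set_bit(14, 29);	/* set on all other cpus, then here */
	smp_ctl_clear_bit(14, 29);	/* and clear it again everywhere */
}
#endif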
/*
 * Let's check how many CPUs we have.
 */

void
__init smp_check_cpus(unsigned int max_cpus)
{
	int cpu, num_cpus;
	__u16 boot_cpu_addr;

	/*
	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
	 */

	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	current_thread_info()->cpu = 0;
	num_cpus = 1;
	for (cpu = 0; cpu <= 65535 && num_cpus < max_cpus; cpu++) {
		if ((__u16) cpu == boot_cpu_addr)
			continue;
		__cpu_logical_map[num_cpus] = (__u16) cpu;
		if (signal_processor(num_cpus, sigp_sense) ==
		    sigp_not_operational)
			continue;
		cpu_set(num_cpus, cpu_present_map);
		num_cpus++;
	}

	for (cpu = 1; cpu < max_cpus; cpu++)
		cpu_set(cpu, cpu_possible_map);

	printk("Detected %d CPUs\n", num_cpus);
	printk("Boot cpu address %2X\n", boot_cpu_addr);
}

/*
 * Activate a secondary processor.
 */
extern void init_cpu_timer(void);
extern void init_cpu_vtimer(void);
extern int pfault_init(void);
extern void pfault_fini(void);

int __devinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	/* init per CPU timer */
	init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
	init_cpu_vtimer();
#endif
#ifdef CONFIG_PFAULT
	/* Enable pfault pseudo page faults on this cpu. */
	if (MACHINE_IS_VM)
		pfault_init();
#endif
	/* Mark this cpu as online */
	cpu_set(smp_processor_id(), cpu_online_map);
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info(&S390_lowcore.cpu_data);
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}

static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/*
	 * don't care about the psw and regs settings since we'll never
	 * reschedule the forked task.
	 */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	current_set[cpu] = p;
}

/* Reserving and releasing of CPUs */

static DEFINE_SPINLOCK(smp_reserve_lock);
static int smp_cpu_reserved[NR_CPUS];

int
smp_get_cpu(cpumask_t cpu_mask)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&smp_reserve_lock, flags);
	/* Try to find an already reserved cpu. */
	for_each_cpu_mask(cpu, cpu_mask) {
		if (smp_cpu_reserved[cpu] != 0) {
			smp_cpu_reserved[cpu]++;
			/* Found one. */
			goto out;
		}
	}
	/* Reserve a new cpu from cpu_mask. */
	for_each_cpu_mask(cpu, cpu_mask) {
		if (cpu_online(cpu)) {
			smp_cpu_reserved[cpu]++;
			goto out;
		}
	}
	cpu = -ENODEV;
out:
	spin_unlock_irqrestore(&smp_reserve_lock, flags);
	return cpu;
}

void
smp_put_cpu(int cpu)
{
	unsigned long flags;

	spin_lock_irqsave(&smp_reserve_lock, flags);
	smp_cpu_reserved[cpu]--;
	spin_unlock_irqrestore(&smp_reserve_lock, flags);
}

static inline int
cpu_stopped(int cpu)
{
	__u32 status;

	/* Check for stopped state */
	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
	    sigp_status_stored) {
		if (status & 0x40)
			return 1;
	}
	return 0;
}
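/*
 * Usage sketch (illustrative commentary, not from the original source):
 * reserve any online cpu, run work on it via smp_call_function_on(), and
 * drop the reservation afterwards so the cpu may be taken offline again.
 * The names example_probe and example_run_reserved are hypothetical.
 */
#if 0
static void example_probe(void *info)
{
	/* runs on the reserved cpu with preemption disabled */
}

static int example_run_reserved(void)
{
	int cpu;

	cpu = smp_get_cpu(cpu_online_map);
	if (cpu < 0)
		return cpu;	/* -ENODEV: no online cpu in the mask */
	smp_call_function_on(example_probe, NULL, 0, 1, cpu);
	smp_put_cpu(cpu);
	return 0;
}
#endif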
/* Upping and downing of CPUs */

int
__cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	struct _lowcore *cpu_lowcore;
	struct stack_frame *sf;
	sigp_ccode ccode;
	int curr_cpu;

	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
		__cpu_logical_map[cpu] = (__u16) curr_cpu;
		if (cpu_stopped(cpu))
			break;
	}

	if (!cpu_stopped(cpu))
		return -ENODEV;

	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
				   cpu, sigp_set_prefix);
	if (ccode) {
		printk("sigp_set_prefix failed for cpu %d "
		       "with condition code %d\n",
		       (int) cpu, (int) ccode);
		return -EIO;
	}

	idle = current_set[cpu];
	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		idle->thread_info + (THREAD_SIZE);
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
	__asm__ __volatile__("stam  0,15,0(%0)"
			     : : "a" (&cpu_lowcore->access_regs_save_area)
			     : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_data.cpu_nr = cpu;
	eieio();
	signal_processor(cpu, sigp_restart);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

int
__cpu_disable(void)
{
	unsigned long flags;
	ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	spin_lock_irqsave(&smp_reserve_lock, flags);
	if (smp_cpu_reserved[cpu] != 0) {
		spin_unlock_irqrestore(&smp_reserve_lock, flags);
		return -EBUSY;
	}
	cpu_clear(cpu, cpu_online_map);

#ifdef CONFIG_PFAULT
	/* Disable pfault pseudo page faults on this cpu. */
	if (MACHINE_IS_VM)
		pfault_fini();
#endif

	/* disable all external interrupts */

	cr_parms.start_ctl = 0;
	cr_parms.end_ctl = 0;
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
				1<<11 | 1<<10 | 1<< 6 | 1<< 4);
	smp_ctl_bit_callback(&cr_parms);

	/* disable all I/O interrupts */

	cr_parms.start_ctl = 6;
	cr_parms.end_ctl = 6;
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
				1<<27 | 1<<26 | 1<<25 | 1<<24);
	smp_ctl_bit_callback(&cr_parms);

	/* disable most machine checks */

	cr_parms.start_ctl = 14;
	cr_parms.end_ctl = 14;
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);
	smp_ctl_bit_callback(&cr_parms);

	spin_unlock_irqrestore(&smp_reserve_lock, flags);
	return 0;
}

void
__cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!smp_cpu_not_running(cpu))
		cpu_relax();
	printk("Processor %d spun down\n", cpu);
}

void
cpu_die(void)
{
	idle_task_exit();
	signal_processor(smp_processor_id(), sigp_stop);
	BUG();
	for (;;);
}
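/*
 * Note on the hotplug sequence (added commentary, not from the original
 * source): __cpu_up() scans all 65536 possible cpu addresses for one in
 * the stopped state, installs the new cpu's prefix (lowcore) area and
 * initial stack, and restarts it via sigp, which eventually enters
 * start_secondary(). On the down path, __cpu_disable() fences external,
 * I/O and machine check interrupts on the dying cpu, cpu_die() stops it
 * with a sigp_stop to itself, and __cpu_die() spins on another cpu until
 * the target is really down.
 */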
/*
 * Cycle through the processors and setup structures.
 */

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned long stack;
	unsigned int cpu;
	int i;

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	smp_check_cpus(max_cpus);
	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
	/*
	 * Initialize prefix pages and stacks for all possible cpus
	 */
	print_cpu_info(&S390_lowcore.cpu_data);

	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_possible(i))
			continue;
		lowcore_ptr[i] = (struct _lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA,
					 sizeof(void *) == 8 ? 1 : 0);
		stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		if (lowcore_ptr[i] == NULL || stack == 0ULL)
			panic("smp_boot_cpus failed to allocate memory\n");

		*(lowcore_ptr[i]) = S390_lowcore;
		lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE);
		stack = __get_free_pages(GFP_KERNEL, 0);
		if (stack == 0ULL)
			panic("smp_boot_cpus failed to allocate memory\n");
		lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE);
#ifndef __s390x__
		if (MACHINE_HAS_IEEE) {
			lowcore_ptr[i]->extended_save_area_addr =
				(__u32) __get_free_pages(GFP_KERNEL, 0);
			if (lowcore_ptr[i]->extended_save_area_addr == 0)
				panic("smp_boot_cpus failed to "
				      "allocate memory\n");
		}
#endif
	}
#ifndef __s390x__
	if (MACHINE_HAS_IEEE)
		ctl_set_bit(14, 29); /* enable extended save area */
#endif
	set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);

	for_each_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}

void __devinit smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	cpu_set(0, cpu_online_map);
	cpu_set(0, cpu_present_map);
	cpu_set(0, cpu_possible_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
}

void smp_cpus_done(unsigned int max_cpus)
{
	cpu_present_map = cpu_possible_map;
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static int __init topology_init(void)
{
	int cpu;
	int ret;

	for_each_cpu(cpu) {
		ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
		if (ret)
			printk(KERN_WARNING "topology_init: register_cpu %d "
			       "failed (%d)\n", cpu, ret);
	}
	return 0;
}

subsys_initcall(topology_init);

EXPORT_SYMBOL(cpu_possible_map);
EXPORT_SYMBOL(lowcore_ptr);
EXPORT_SYMBOL(smp_ctl_set_bit);
EXPORT_SYMBOL(smp_ctl_clear_bit);
EXPORT_SYMBOL(smp_call_function);
EXPORT_SYMBOL(smp_get_cpu);
EXPORT_SYMBOL(smp_put_cpu);