/*
 *  arch/s390/kernel/smp.c
 *
 *    Copyright (C) IBM Corp. 1999,2006
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *               Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (got from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one which is causing all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */

#include <linux/module.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>

#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>

#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>

extern volatile int __cpu_logical_map[];

/*
 * An array with a pointer to the lowcore of every CPU.
 */

struct _lowcore *lowcore_ptr[NR_CPUS];

cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t cpu_possible_map = CPU_MASK_NONE;

static struct task_struct *current_set[NR_CPUS];

/*
 * Reboot, halt and power_off routines for SMP.
 */
extern char vmhalt_cmd[];
extern char vmpoff_cmd[];

static void smp_ext_bitcall(int, ec_bit_sig);
static void smp_ext_bitcall_others(ec_bit_sig);

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

static struct call_data_struct * call_data;

/*
 * 'Call function' interrupt callback
 */
static void do_call_function(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	atomic_inc(&call_data->started);
	(*func)(info);
	if (wait)
		atomic_inc(&call_data->finished);
}

/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */

int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
			int wait)
/*
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <func> or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
{
	struct call_data_struct data;
	int cpus = num_online_cpus() - 1;

	if (cpus <= 0)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&call_lock);
	call_data = &data;
	/* Send a message to all other CPUs and wait for them to respond */
	smp_ext_bitcall_others(ec_call_function);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	spin_unlock(&call_lock);

	return 0;
}

/*
 * Call a function on one CPU
 * cpu : the CPU the function should be executed on
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler. You may call it from a bottom half.
 *
 * It is guaranteed that the called function runs on the specified CPU;
 * preemption is disabled while it runs.
 */
int smp_call_function_on(void (*func) (void *info), void *info,
			 int nonatomic, int wait, int cpu)
{
	struct call_data_struct data;
	int curr_cpu;

	if (!cpu_online(cpu))
		return -EINVAL;

	/* disable preemption for local function call */
	curr_cpu = get_cpu();

	if (curr_cpu == cpu) {
		/* direct call to function */
		func(info);
		put_cpu();
		return 0;
	}

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock_bh(&call_lock);
	call_data = &data;
	smp_ext_bitcall(cpu, ec_call_function);

	/* Wait for response */
	while (atomic_read(&data.started) != 1)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != 1)
			cpu_relax();

	spin_unlock_bh(&call_lock);
	put_cpu();
	return 0;
}
EXPORT_SYMBOL(smp_call_function_on);

static inline void do_send_stop(void)
{
	int cpu, rc;

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor(cpu, sigp_stop);
		} while (rc == sigp_busy);
	}
}

static inline void do_store_status(void)
{
	int cpu, rc;

	/* store status of all processors in their lowcores (real 0) */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor_p(
				(__u32)(unsigned long) lowcore_ptr[cpu], cpu,
				sigp_store_status_at_address);
		} while (rc == sigp_busy);
	}
}

static inline void do_wait_for_stop(void)
{
	int cpu;

	/* Wait for all other cpus to enter stopped state */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		while (!smp_cpu_not_running(cpu))
			cpu_relax();
	}
}

/*
 * this function sends a 'stop' sigp to all other CPUs in the system.
 * it goes straight through.
 */
void smp_send_stop(void)
{
	/* Disable all interrupts/machine checks */
	__load_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK);

	/* write magic number to zero page (absolute 0) */
	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;

	/* stop other processors. */
	do_send_stop();

	/* wait until other processors are stopped */
	do_wait_for_stop();

	/* store status of other processors. */
	do_store_status();
}

/*
 * Reboot, halt and power_off routines for SMP.
 */

void machine_restart_smp(char * __unused)
{
	smp_send_stop();
	do_reipl();
}

void machine_halt_smp(void)
{
	smp_send_stop();
	if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
		__cpcmd(vmhalt_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
	for (;;);
}

void machine_power_off_smp(void)
{
	smp_send_stop();
	if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
		__cpcmd(vmpoff_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
	for (;;);
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */

void do_ext_call_interrupt(__u16 code)
{
	unsigned long bits;

	/*
	 * handle bit signal external calls
	 *
	 * For the ec_schedule signal we have to do nothing. All the work
	 * is done automatically when we return from the interrupt.
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		do_call_function();
}

/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
		udelay(10);
}

/*
 * Send an external call sigp to every other cpu in the system and
 * return without waiting for its completion.
 */
static void smp_ext_bitcall_others(ec_bit_sig sig)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		/*
		 * Set signaling bit in lowcore of target cpu and kick it
		 */
		set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
		while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
			udelay(10);
	}
}

#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
void smp_ptlb_callback(void *info)
{
	local_flush_tlb();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orvals[16];
	unsigned long andvals[16];
};

/*
 * callback for setting/clearing control bits
 */
void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];
	int i;

	__ctl_store(cregs, 0, 15);
	for (i = 0; i <= 15; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.orvals[cr] = 1 << bit;
	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.andvals[cr] = ~(1L << bit);
	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}

/*
 * Let's check how many CPUs we have.
 */

static unsigned int
__init smp_count_cpus(void)
{
	unsigned int cpu, num_cpus;
	__u16 boot_cpu_addr;

	/*
	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
	 */

	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	current_thread_info()->cpu = 0;
	num_cpus = 1;
	for (cpu = 0; cpu <= 65535; cpu++) {
		if ((__u16) cpu == boot_cpu_addr)
			continue;
		__cpu_logical_map[1] = (__u16) cpu;
		if (signal_processor(1, sigp_sense) ==
		    sigp_not_operational)
			continue;
		num_cpus++;
	}

	printk("Detected %d CPUs\n", (int) num_cpus);
	printk("Boot cpu address %2X\n", boot_cpu_addr);

	return num_cpus;
}

/*
 * Activate a secondary processor.
 */
extern void init_cpu_timer(void);
extern void init_cpu_vtimer(void);

int __devinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	preempt_disable();
	/* init per CPU timer */
	init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
	init_cpu_vtimer();
#endif
	/* Enable pfault pseudo page faults on this cpu. */
	pfault_init();

	/* Mark this cpu as online */
	cpu_set(smp_processor_id(), cpu_online_map);
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info(&S390_lowcore.cpu_data);
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}

static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/*
	 * don't care about the psw and regs settings since we'll never
	 * reschedule the forked task.
	 */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	current_set[cpu] = p;
}

/* Reserving and releasing of CPUs */

static DEFINE_SPINLOCK(smp_reserve_lock);
static int smp_cpu_reserved[NR_CPUS];

int
smp_get_cpu(cpumask_t cpu_mask)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&smp_reserve_lock, flags);
	/* Try to find an already reserved cpu. */
	for_each_cpu_mask(cpu, cpu_mask) {
		if (smp_cpu_reserved[cpu] != 0) {
			smp_cpu_reserved[cpu]++;
			/* Found one. */
			goto out;
		}
	}
	/* Reserve a new cpu from cpu_mask. */
	for_each_cpu_mask(cpu, cpu_mask) {
		if (cpu_online(cpu)) {
			smp_cpu_reserved[cpu]++;
			goto out;
		}
	}
	cpu = -ENODEV;
out:
	spin_unlock_irqrestore(&smp_reserve_lock, flags);
	return cpu;
}

void
smp_put_cpu(int cpu)
{
	unsigned long flags;

	spin_lock_irqsave(&smp_reserve_lock, flags);
	smp_cpu_reserved[cpu]--;
	spin_unlock_irqrestore(&smp_reserve_lock, flags);
}

static inline int
cpu_stopped(int cpu)
{
	__u32 status;

	/* Check for stopped state */
	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
	    sigp_status_stored) {
		if (status & 0x40)
			return 1;
	}
	return 0;
}

/* Upping and downing of CPUs */

int
__cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	struct _lowcore *cpu_lowcore;
	struct stack_frame *sf;
	sigp_ccode ccode;
	int curr_cpu;

	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
		__cpu_logical_map[cpu] = (__u16) curr_cpu;
		if (cpu_stopped(cpu))
			break;
	}

	if (!cpu_stopped(cpu))
		return -ENODEV;

	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
				   cpu, sigp_set_prefix);
	if (ccode) {
		printk("sigp_set_prefix failed for cpu %d "
		       "with condition code %d\n",
		       (int) cpu, (int) ccode);
		return -EIO;
	}

	idle = current_set[cpu];
	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		task_stack_page(idle) + (THREAD_SIZE);
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
	asm volatile(
		"	stam	0,15,0(%0)"
		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_data.cpu_nr = cpu;
	eieio();

	while (signal_processor(cpu, sigp_restart) == sigp_busy)
		udelay(10);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

static unsigned int __initdata additional_cpus;
static unsigned int __initdata possible_cpus;

void __init smp_setup_cpu_possible_map(void)
{
	unsigned int phy_cpus, pos_cpus, cpu;

	phy_cpus = smp_count_cpus();
	pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);

	if (possible_cpus)
		pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);

	for (cpu = 0; cpu < pos_cpus; cpu++)
		cpu_set(cpu, cpu_possible_map);

	phy_cpus = min(phy_cpus, pos_cpus);

	for (cpu = 0; cpu < phy_cpus; cpu++)
		cpu_set(cpu, cpu_present_map);
}

#ifdef CONFIG_HOTPLUG_CPU

static int __init setup_additional_cpus(char *s)
{
	additional_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("additional_cpus", setup_additional_cpus);

static int __init setup_possible_cpus(char *s)
{
	possible_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);

int
__cpu_disable(void)
{
	unsigned long flags;
	struct ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	spin_lock_irqsave(&smp_reserve_lock, flags);
	if (smp_cpu_reserved[cpu] != 0) {
		spin_unlock_irqrestore(&smp_reserve_lock, flags);
		return -EBUSY;
	}
	cpu_clear(cpu, cpu_online_map);

	/* Disable pfault pseudo page faults on this cpu. */
	pfault_fini();

	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));

	/* disable all external interrupts */
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
				1<<11 | 1<<10 | 1<< 6 | 1<< 4);
	/* disable all I/O interrupts */
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
				1<<27 | 1<<26 | 1<<25 | 1<<24);
	/* disable most machine checks */
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);

	smp_ctl_bit_callback(&cr_parms);

	spin_unlock_irqrestore(&smp_reserve_lock, flags);
	return 0;
}

void
__cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!smp_cpu_not_running(cpu))
		cpu_relax();
	printk("Processor %d spun down\n", cpu);
}

void
cpu_die(void)
{
	idle_task_exit();
	signal_processor(smp_processor_id(), sigp_stop);
	BUG();
	for (;;);
}

#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Cycle through the processors and setup structures.
 */

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned long stack;
	unsigned int cpu;
	int i;

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
	/*
	 * Initialize prefix pages and stacks for all possible cpus
	 */
	print_cpu_info(&S390_lowcore.cpu_data);

	for_each_possible_cpu(i) {
		lowcore_ptr[i] = (struct _lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA,
					 sizeof(void*) == 8 ? 1 : 0);
		stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		if (lowcore_ptr[i] == NULL || stack == 0ULL)
			panic("smp_boot_cpus failed to allocate memory\n");

		*(lowcore_ptr[i]) = S390_lowcore;
		lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE);
		stack = __get_free_pages(GFP_KERNEL, 0);
		if (stack == 0ULL)
			panic("smp_boot_cpus failed to allocate memory\n");
		lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE);
#ifndef CONFIG_64BIT
		if (MACHINE_HAS_IEEE) {
			lowcore_ptr[i]->extended_save_area_addr =
				(__u32) __get_free_pages(GFP_KERNEL, 0);
			if (lowcore_ptr[i]->extended_save_area_addr == 0)
				panic("smp_boot_cpus failed to "
				      "allocate memory\n");
		}
#endif
	}
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		ctl_set_bit(14, 29); /* enable extended save area */
#endif
	set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);

	for_each_possible_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}

void __devinit smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	cpu_set(0, cpu_online_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
}

void smp_cpus_done(unsigned int max_cpus)
{
	cpu_present_map = cpu_possible_map;
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static int __init topology_init(void)
{
	int cpu;
	int ret;

	for_each_possible_cpu(cpu) {
		ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu);
		if (ret)
			printk(KERN_WARNING "topology_init: register_cpu %d "
			       "failed (%d)\n", cpu, ret);
	}
	return 0;
}

subsys_initcall(topology_init);

EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
EXPORT_SYMBOL(lowcore_ptr);
EXPORT_SYMBOL(smp_ctl_set_bit);
EXPORT_SYMBOL(smp_ctl_clear_bit);
EXPORT_SYMBOL(smp_call_function);
EXPORT_SYMBOL(smp_get_cpu);
EXPORT_SYMBOL(smp_put_cpu);