/*
 *  arch/s390/kernel/smp.c
 *
 *    Copyright IBM Corp. 1999,2007
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *               Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (got from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one which is causing all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/timex.h>
#include <linux/bootmem.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/lowcore.h>
#include <asm/cpu.h>

/*
 * An array with a pointer to the lowcore of every CPU.
 */
struct _lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);

cpumask_t cpu_online_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_possible_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_possible_map);

static struct task_struct *current_set[NR_CPUS];

static void smp_ext_bitcall(int, ec_bit_sig);

/*
 * Structure and data for __smp_call_function_map(). This is designed to
 * minimise static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	cpumask_t started;
	cpumask_t finished;
	int wait;
};

static struct call_data_struct *call_data;

/*
 * 'Call function' interrupt callback
 */
static void do_call_function(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	cpu_set(smp_processor_id(), call_data->started);
	(*func)(info);
	if (wait)
		cpu_set(smp_processor_id(), call_data->finished);
}

static void __smp_call_function_map(void (*func) (void *info), void *info,
				    int nonatomic, int wait, cpumask_t map)
{
	struct call_data_struct data;
	int cpu, local = 0;

	/*
	 * Can deadlock when interrupts are disabled or if in wrong context.
	 */
	WARN_ON(irqs_disabled() || in_irq());

	/*
	 * Check for local function call. We have to have the same call order
	 * as in on_each_cpu() because of machine_restart_smp().
	 */
	if (cpu_isset(smp_processor_id(), map)) {
		local = 1;
		cpu_clear(smp_processor_id(), map);
	}

	cpus_and(map, map, cpu_online_map);
	if (cpus_empty(map))
		goto out;

	data.func = func;
	data.info = info;
	data.started = CPU_MASK_NONE;
	data.wait = wait;
	if (wait)
		data.finished = CPU_MASK_NONE;

	spin_lock(&call_lock);
	call_data = &data;

	for_each_cpu_mask(cpu, map)
		smp_ext_bitcall(cpu, ec_call_function);

	/* Wait for response */
	while (!cpus_equal(map, data.started))
		cpu_relax();
	if (wait)
		while (!cpus_equal(map, data.finished))
			cpu_relax();
	spin_unlock(&call_lock);
out:
	if (local) {
		local_irq_disable();
		func(info);
		local_irq_enable();
	}
}

/*
 * smp_call_function:
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @nonatomic: unused
 * @wait: if true, wait (atomically) until function has completed
 *        on other CPUs
 *
 * Run a function on all other CPUs.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half.
 */
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
		      int wait)
{
	cpumask_t map;

	preempt_disable();
	map = cpu_online_map;
	cpu_clear(smp_processor_id(), map);
	__smp_call_function_map(func, info, nonatomic, wait, map);
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(smp_call_function);

/*
 * smp_call_function_single:
 * @cpu: the CPU where func should run
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @nonatomic: unused
 * @wait: if true, wait (atomically) until function has completed
 *        on that CPU
 *
 * Run a function on one processor.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int nonatomic, int wait)
{
	preempt_disable();
	__smp_call_function_map(func, info, nonatomic, wait,
				cpumask_of_cpu(cpu));
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
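/*
 * Illustrative example (not part of the call paths above; the names are
 * made up): running a fast, non-blocking callback on all other CPUs and
 * on one specific CPU.  The usual rules apply: do not call with
 * interrupts disabled, from a hardware interrupt handler or from a
 * bottom half.
 *
 *	static atomic_t example_hits = ATOMIC_INIT(0);
 *
 *	static void example_bump(void *info)
 *	{
 *		atomic_inc((atomic_t *) info);
 *	}
 *
 *	smp_call_function(example_bump, &example_hits, 0, 1);
 *	smp_call_function_single(1, example_bump, &example_hits, 0, 1);
 */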
static void do_send_stop(void)
{
	int cpu, rc;

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor(cpu, sigp_stop);
		} while (rc == sigp_busy);
	}
}

static void do_store_status(void)
{
	int cpu, rc;

	/* store status of all processors in their lowcores (real 0) */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor_p(
				(__u32)(unsigned long) lowcore_ptr[cpu], cpu,
				sigp_store_status_at_address);
		} while (rc == sigp_busy);
	}
}

static void do_wait_for_stop(void)
{
	int cpu;

	/* Wait for all other cpus to enter stopped state */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		while (!smp_cpu_not_running(cpu))
			cpu_relax();
	}
}

/*
 * this function sends a 'stop' sigp to all other CPUs in the system.
 * it goes straight through.
 */
void smp_send_stop(void)
{
	/* Disable all interrupts/machine checks */
	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);

	/* write magic number to zero page (absolute 0) */
	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;

	/* stop other processors. */
	do_send_stop();

	/* wait until other processors are stopped */
	do_wait_for_stop();

	/* store status of other processors. */
	do_store_status();
}

/*
 * Reboot, halt and power_off routines for SMP.
 */
void machine_restart_smp(char *__unused)
{
	smp_send_stop();
	do_reipl();
}

void machine_halt_smp(void)
{
	smp_send_stop();
	if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
		__cpcmd(vmhalt_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
	for (;;);
}

void machine_power_off_smp(void)
{
	smp_send_stop();
	if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
		__cpcmd(vmpoff_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
	for (;;);
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */

static void do_ext_call_interrupt(__u16 code)
{
	unsigned long bits;

	/*
	 * handle bit signal external calls
	 *
	 * For the ec_schedule signal we have to do nothing. All the work
	 * is done automatically when we return from the interrupt.
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		do_call_function();
}

/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
		udelay(10);
}
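/*
 * Summary of the cross-call round trip implemented above: the sender sets
 * the requested ec bit in the target's lowcore and kicks it with a sigp
 * emergency signal (smp_ext_bitcall).  The target takes the 0x1201
 * external interrupt, do_ext_call_interrupt() collects the pending bits
 * with xchg(), and for ec_call_function do_call_function() marks itself
 * in 'started', runs the callback and, if requested, marks 'finished' so
 * that the sender can stop spinning in __smp_call_function_map().
 */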
#ifndef CONFIG_64BIT
/*
 * smp_ptlb_all() sends a 'purge tlb' request to all CPUs; the callback
 * below flushes the TLB on the local CPU.
 */
void smp_ptlb_callback(void *info)
{
	__tlb_flush_local();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orvals[16];
	unsigned long andvals[16];
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];
	int i;

	__ctl_store(cregs, 0, 15);
	for (i = 0; i <= 15; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.orvals[cr] = 1 << bit;
	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.andvals[cr] = ~(1L << bit);
	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);
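/*
 * Illustrative example (the register/bit pair below is arbitrary and only
 * shows the calling convention): both helpers take a control register
 * number (0-15) and a bit number; the bit is set or cleared in that
 * register on every online cpu via on_each_cpu().
 *
 *	smp_ctl_set_bit(14, 29);
 *	smp_ctl_clear_bit(14, 29);
 */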
#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)

/*
 * zfcpdump_prefix_array holds prefix registers for the following scenario:
 * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to
 * save its prefix registers, since they get lost when switching from 31 bit
 * to 64 bit.
 */
unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \
	__attribute__((__section__(".data")));

static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
{
	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return;
	if (cpu >= NR_CPUS) {
		printk(KERN_WARNING "Registers for cpu %i not saved since dump "
		       "kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS);
		return;
	}
	zfcpdump_save_areas[cpu] = alloc_bootmem(sizeof(union save_area));
	__cpu_logical_map[1] = (__u16) phy_cpu;
	while (signal_processor(1, sigp_stop_and_store_status) == sigp_busy)
		cpu_relax();
	memcpy(zfcpdump_save_areas[cpu],
	       (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
	       SAVE_AREA_SIZE);
#ifdef CONFIG_64BIT
	/* copy original prefix register */
	zfcpdump_save_areas[cpu]->s390x.pref_reg = zfcpdump_prefix_array[cpu];
#endif
}

union save_area *zfcpdump_save_areas[NR_CPUS + 1];
EXPORT_SYMBOL_GPL(zfcpdump_save_areas);

#else

static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }

#endif /* CONFIG_ZFCPDUMP || CONFIG_ZFCPDUMP_MODULE */

/*
 * Let's check how many CPUs we have.
 */
static unsigned int __init smp_count_cpus(void)
{
	unsigned int cpu, num_cpus;
	__u16 boot_cpu_addr;

	/*
	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
	 */
	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	current_thread_info()->cpu = 0;
	num_cpus = 1;
	for (cpu = 0; cpu <= 65535; cpu++) {
		if ((__u16) cpu == boot_cpu_addr)
			continue;
		__cpu_logical_map[1] = (__u16) cpu;
		if (signal_processor(1, sigp_sense) == sigp_not_operational)
			continue;
		smp_get_save_area(num_cpus, cpu);
		num_cpus++;
	}
	printk("Detected %d CPUs\n", (int) num_cpus);
	printk("Boot cpu address %2X\n", boot_cpu_addr);
	return num_cpus;
}

/*
 * Activate a secondary processor.
 */
int __cpuinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	preempt_disable();
	/* Enable TOD clock interrupts on the secondary cpu. */
	init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
	/* Enable cpu timer interrupts on the secondary cpu. */
	init_cpu_vtimer();
#endif
	/* Enable pfault pseudo page faults on this cpu. */
	pfault_init();

	/* Mark this cpu as online */
	cpu_set(smp_processor_id(), cpu_online_map);
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info(&S390_lowcore.cpu_data);
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}
DEFINE_PER_CPU(struct s390_idle_data, s390_idle);

static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/*
	 * don't care about the psw and regs settings since we'll never
	 * reschedule the forked task.
	 */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	current_set[cpu] = p;
	spin_lock_init(&(&per_cpu(s390_idle, cpu))->lock);
}

static int cpu_stopped(int cpu)
{
	__u32 status;

	/* Check for stopped state */
	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
	    sigp_status_stored) {
		if (status & 0x40)
			return 1;
	}
	return 0;
}

/* Upping and downing of CPUs */

int __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	struct _lowcore *cpu_lowcore;
	struct stack_frame *sf;
	sigp_ccode ccode;
	int curr_cpu;

	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
		__cpu_logical_map[cpu] = (__u16) curr_cpu;
		if (cpu_stopped(cpu))
			break;
	}

	if (!cpu_stopped(cpu))
		return -ENODEV;

	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
				   cpu, sigp_set_prefix);
	if (ccode) {
		printk("sigp_set_prefix failed for cpu %d "
		       "with condition code %d\n",
		       (int) cpu, (int) ccode);
		return -EIO;
	}

	idle = current_set[cpu];
	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		task_stack_page(idle) + THREAD_SIZE;
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
	asm volatile(
		"	stam	0,15,0(%0)"
		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_data.cpu_nr = cpu;
	eieio();

	while (signal_processor(cpu, sigp_restart) == sigp_busy)
		udelay(10);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}
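/*
 * Note: __cpu_up() is not called directly.  The architecture-independent
 * cpu_up() path invokes it, for example when a cpu is brought online
 * through the sysfs 'online' attribute with CONFIG_HOTPLUG_CPU.  An
 * illustrative in-kernel call, assuming cpu 1 is present but offline:
 *
 *	cpu_up(1);
 */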
static unsigned int __initdata additional_cpus;
static unsigned int __initdata possible_cpus;

void __init smp_setup_cpu_possible_map(void)
{
	unsigned int phy_cpus, pos_cpus, cpu;

	phy_cpus = smp_count_cpus();
	pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);

	if (possible_cpus)
		pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);

	for (cpu = 0; cpu < pos_cpus; cpu++)
		cpu_set(cpu, cpu_possible_map);

	phy_cpus = min(phy_cpus, pos_cpus);

	for (cpu = 0; cpu < phy_cpus; cpu++)
		cpu_set(cpu, cpu_present_map);
}

#ifdef CONFIG_HOTPLUG_CPU

static int __init setup_additional_cpus(char *s)
{
	additional_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("additional_cpus", setup_additional_cpus);

static int __init setup_possible_cpus(char *s)
{
	possible_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);

int __cpu_disable(void)
{
	struct ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	cpu_clear(cpu, cpu_online_map);

	/* Disable pfault pseudo page faults on this cpu. */
	pfault_fini();

	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));

	/* disable all external interrupts */
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
				1 << 11 | 1 << 10 | 1 << 6 | 1 << 4);
	/* disable all I/O interrupts */
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
				1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
	/* disable most machine checks */
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
				 1 << 25 | 1 << 24);

	smp_ctl_bit_callback(&cr_parms);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!smp_cpu_not_running(cpu))
		cpu_relax();
	printk("Processor %d spun down\n", cpu);
}

void cpu_die(void)
{
	idle_task_exit();
	signal_processor(smp_processor_id(), sigp_stop);
	BUG();
	for (;;);
}

#endif /* CONFIG_HOTPLUG_CPU */
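/*
 * Illustrative example: the early parameters defined above (parsed only
 * with CONFIG_HOTPLUG_CPU) shape the maps built by
 * smp_setup_cpu_possible_map().  A kernel command line such as
 *
 *	additional_cpus=2 possible_cpus=4
 *
 * reserves room in cpu_possible_map for cpus that may be attached later;
 * if possible_cpus is given it overrides the detected-plus-additional
 * count, and both values are capped at NR_CPUS.
 */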
/*
 * Cycle through the processors and setup structures.
 */

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned long stack;
	unsigned int cpu;
	int i;

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
	/*
	 * Initialize prefix pages and stacks for all possible cpus
	 */
	print_cpu_info(&S390_lowcore.cpu_data);

	for_each_possible_cpu(i) {
		lowcore_ptr[i] = (struct _lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA,
					 sizeof(void*) == 8 ? 1 : 0);
		stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		if (!lowcore_ptr[i] || !stack)
			panic("smp_boot_cpus failed to allocate memory\n");

		*(lowcore_ptr[i]) = S390_lowcore;
		lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE;
		stack = __get_free_pages(GFP_KERNEL, 0);
		if (!stack)
			panic("smp_boot_cpus failed to allocate memory\n");
		lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE;
#ifndef CONFIG_64BIT
		if (MACHINE_HAS_IEEE) {
			lowcore_ptr[i]->extended_save_area_addr =
				(__u32) __get_free_pages(GFP_KERNEL, 0);
			if (!lowcore_ptr[i]->extended_save_area_addr)
				panic("smp_boot_cpus failed to "
				      "allocate memory\n");
		}
#endif
	}
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		ctl_set_bit(14, 29); /* enable extended save area */
#endif
	set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);

	for_each_possible_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}

void __init smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	cpu_set(0, cpu_online_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
	spin_lock_init(&(&__get_cpu_var(s390_idle))->lock);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	cpu_present_map = cpu_possible_map;
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static ssize_t show_capability(struct sys_device *dev, char *buf)
{
	unsigned int capability;
	int rc;

	rc = get_cpu_capability(&capability);
	if (rc)
		return rc;
	return sprintf(buf, "%u\n", capability);
}
static SYSDEV_ATTR(capability, 0444, show_capability, NULL);

static ssize_t show_idle_count(struct sys_device *dev, char *buf)
{
	struct s390_idle_data *idle;
	unsigned long long idle_count;

	idle = &per_cpu(s390_idle, dev->id);
	spin_lock_irq(&idle->lock);
	idle_count = idle->idle_count;
	spin_unlock_irq(&idle->lock);
	return sprintf(buf, "%llu\n", idle_count);
}
static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);

static ssize_t show_idle_time(struct sys_device *dev, char *buf)
{
	struct s390_idle_data *idle;
	unsigned long long new_time;

	idle = &per_cpu(s390_idle, dev->id);
	spin_lock_irq(&idle->lock);
	if (idle->in_idle) {
		new_time = get_clock();
		idle->idle_time += new_time - idle->idle_enter;
		idle->idle_enter = new_time;
	}
	new_time = idle->idle_time;
	spin_unlock_irq(&idle->lock);
	return sprintf(buf, "%llu\n", new_time >> 12);
}
static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);

static struct attribute *cpu_attrs[] = {
	&attr_capability.attr,
	&attr_idle_count.attr,
	&attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_attr_group = {
	.attrs = cpu_attrs,
};
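/*
 * Illustrative note: once smp_cpu_notify() or topology_init() below has
 * registered this attribute group for an online cpu, the files appear in
 * that cpu's sysdev directory and can be read from user space, e.g.
 *
 *	/sys/devices/system/cpu/cpu0/idle_time_us
 *
 * idle_time_us reports the accumulated idle time in microseconds (the
 * TOD clock delta shifted right by 12).
 */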
static int __cpuinit smp_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	struct s390_idle_data *idle;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		idle = &per_cpu(s390_idle, cpu);
		spin_lock_irq(&idle->lock);
		idle->idle_enter = 0;
		idle->idle_time = 0;
		idle->idle_count = 0;
		spin_unlock_irq(&idle->lock);
		if (sysfs_create_group(&s->kobj, &cpu_attr_group))
			return NOTIFY_BAD;
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		sysfs_remove_group(&s->kobj, &cpu_attr_group);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata smp_cpu_nb = {
	.notifier_call = smp_cpu_notify,
};

static int __init topology_init(void)
{
	int cpu;
	int rc;

	register_cpu_notifier(&smp_cpu_nb);

	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);
		struct sys_device *s = &c->sysdev;

		c->hotpluggable = 1;
		register_cpu(c, cpu);
		if (!cpu_online(cpu))
			continue;
		s = &c->sysdev;
		rc = sysfs_create_group(&s->kobj, &cpu_attr_group);
		if (rc)
			return rc;
	}
	return 0;
}
subsys_initcall(topology_init);