// SPDX-License-Identifier: GPL-2.0
/*
 * SMP related functions
 *
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Denis Joseph Barrow,
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *
 * based on other smp stuff by
 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
 * (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/cpufeature.h>
#include <linux/workqueue.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/irq_work.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/crash_dump.h>
#include <linux/kprobes.h>
#include <asm/access-regs.h>
#include <asm/asm-offsets.h>
#include <asm/machine.h>
#include <asm/ctlreg.h>
#include <asm/pfault.h>
#include <asm/diag.h>
#include <asm/facility.h>
#include <asm/fpu.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/abs_lowcore.h>
#include <asm/sclp.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include <asm/idle.h>
#include <asm/nmi.h>
#include <asm/stacktrace.h>
#include <asm/topology.h>
#include <asm/vdso.h>
#include <asm/maccess.h>
#include "entry.h"

enum {
	ec_schedule = 0,
	ec_call_function_single,
	ec_stop_cpu,
	ec_mcck_pending,
	ec_irq_work,
};

enum {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

static u8 boot_core_type;
DEFINE_PER_CPU(struct pcpu, pcpu_devices);
/*
 * Pointer to the pcpu area of the boot CPU. This is required when a restart
 * interrupt is triggered on an offline CPU. For that case accessing percpu
 * data with the common primitives does not work, since the percpu offset is
 * stored in a non existent lowcore.
 */
static struct pcpu *ipl_pcpu;

unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);

unsigned int smp_cpu_mtid;
EXPORT_SYMBOL(smp_cpu_mtid);

#ifdef CONFIG_CRASH_DUMP
__vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
#endif

static unsigned int smp_max_threads __initdata = -1U;
cpumask_t cpu_setup_mask;

static int __init early_smt(char *s)
{
	get_option(&s, &smp_max_threads);
	return 0;
}
early_param("smt", early_smt);

/*
 * The smp_cpu_state_mutex must be held when changing the state or polarization
 * member of a pcpu data structure within the pcpu_devices array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);

/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
{
	int cc;

	while (1) {
		cc = __pcpu_sigp(addr, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			return cc;
		cpu_relax();
	}
}

static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			break;
		if (retry >= 3)
			udelay(10);
	}
	return cc;
}

static inline int pcpu_stopped(struct pcpu *pcpu)
{
	u32 status;

	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
			0, &status) != SIGP_CC_STATUS_STORED)
		return 0;
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
			0, NULL) != SIGP_CC_STATUS_STORED)
		return 1;
	/* Status stored condition code is equivalent to cpu not running. */
	return 0;
}

/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
	int cpu;

	for_each_cpu(cpu, mask)
		if (per_cpu(pcpu_devices, cpu).address == address)
			return &per_cpu(pcpu_devices, cpu);
	return NULL;
}

static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
	if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
		return;
	pcpu->ec_clk = get_tod_clock_fast();
	pcpu_sigp_retry(pcpu, SIGP_EXTERNAL_CALL, 0);
}

static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	unsigned long async_stack, nodat_stack, mcck_stack;
	struct lowcore *lc;

	lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
	nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
	async_stack = stack_alloc();
	mcck_stack = stack_alloc();
	if (!lc || !nodat_stack || !async_stack || !mcck_stack)
		goto out;
	memcpy(lc, get_lowcore(), 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = async_stack + STACK_INIT_OFFSET;
	lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET;
	lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->spinlock_index = 0;
	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
	lc->preempt_count = PREEMPT_DISABLED;
	if (nmi_alloc_mcesa(&lc->mcesad))
		goto out;
	if (abs_lowcore_map(cpu, lc, true))
		goto out_mcesa;
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, __pa(lc));
	return 0;

out_mcesa:
	nmi_free_mcesa(&lc->mcesad);
out:
	stack_free(mcck_stack);
	stack_free(async_stack);
	free_pages(nodat_stack, THREAD_SIZE_ORDER);
	free_pages((unsigned long) lc, LC_ORDER);
	return -ENOMEM;
}

static void pcpu_free_lowcore(struct pcpu *pcpu, int cpu)
{
	unsigned long async_stack, nodat_stack, mcck_stack;
	struct lowcore *lc;

	lc = lowcore_ptr[cpu];
	nodat_stack = lc->nodat_stack - STACK_INIT_OFFSET;
	async_stack = lc->async_stack - STACK_INIT_OFFSET;
	mcck_stack = lc->mcck_stack - STACK_INIT_OFFSET;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
	lowcore_ptr[cpu] = NULL;
	abs_lowcore_unmap(cpu);
	nmi_free_mcesa(&lc->mcesad);
	stack_free(async_stack);
	stack_free(mcck_stack);
	free_pages(nodat_stack, THREAD_SIZE_ORDER);
	free_pages((unsigned long) lc, LC_ORDER);
}

static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct lowcore *lc, *abs_lc;

	lc = lowcore_ptr[cpu];
	cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
	lc->cpu_nr = cpu;
	lc->pcpu = (unsigned long)pcpu;
	lc->restart_flags = RESTART_FLAG_CTLREGS;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->spinlock_index = 0;
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = get_lowcore()->kernel_asce;
	lc->user_asce = s390_invalid_asce;
	lc->user_timer = lc->system_timer =
		lc->steal_timer = lc->avg_steal_timer = 0;
	abs_lc = get_abs_lowcore();
	memcpy(lc->cregs_save_area, abs_lc->cregs_save_area, sizeof(lc->cregs_save_area));
	put_abs_lowcore(abs_lc);
	lc->cregs_save_area[1] = lc->user_asce;
	lc->cregs_save_area[7] = lc->user_asce;
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	arch_spin_lock_setup(cpu);
}

static void pcpu_attach_task(int cpu, struct task_struct *tsk)
{
	struct lowcore *lc;

	lc = lowcore_ptr[cpu];
	lc->kernel_stack = (unsigned long)task_stack_page(tsk) + STACK_INIT_OFFSET;
	lc->current_task = (unsigned long)tsk;
	lc->lpp = LPP_MAGIC;
	lc->current_pid = tsk->pid;
	lc->user_timer = tsk->thread.user_timer;
	lc->guest_timer = tsk->thread.guest_timer;
	lc->system_timer = tsk->thread.system_timer;
	lc->hardirq_timer = tsk->thread.hardirq_timer;
	lc->softirq_timer = tsk->thread.softirq_timer;
	lc->steal_timer = 0;
}

static void pcpu_start_fn(int cpu, void (*func)(void *), void *data)
{
	struct lowcore *lc;

	lc = lowcore_ptr[cpu];
	lc->restart_stack = lc->kernel_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1U;
	pcpu_sigp_retry(per_cpu_ptr(&pcpu_devices, cpu), SIGP_RESTART, 0);
}

typedef void (pcpu_delegate_fn)(void *);

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void __pcpu_delegate(pcpu_delegate_fn *func, void *data)
{
	func(data);	/* should not return */
}

static void __noreturn pcpu_delegate(struct pcpu *pcpu, int cpu,
				     pcpu_delegate_fn *func,
				     void *data, unsigned long stack)
{
	struct lowcore *lc, *abs_lc;
	unsigned int source_cpu;

	lc = lowcore_ptr[cpu];
	source_cpu = stap();

	if (pcpu->address == source_cpu) {
		call_on_stack(2, stack, void, __pcpu_delegate,
			      pcpu_delegate_fn *, func, void *, data);
	}
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
	pcpu_sigp_retry(pcpu, SIGP_CPU_RESET, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	if (lc) {
		lc->restart_stack = stack;
		lc->restart_fn = (unsigned long)func;
		lc->restart_data = (unsigned long)data;
		lc->restart_source = source_cpu;
	} else {
		abs_lc = get_abs_lowcore();
		abs_lc->restart_stack = stack;
		abs_lc->restart_fn = (unsigned long)func;
		abs_lc->restart_data = (unsigned long)data;
		abs_lc->restart_source = source_cpu;
		put_abs_lowcore(abs_lc);
	}
	asm volatile(
		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
		"	brc	2,0b	# busy, try again\n"
		"1:	sigp	0,%1,%3	# sigp stop to current cpu\n"
		"	brc	2,1b	# busy, try again"
		: : "d" (pcpu->address), "d" (source_cpu),
		    "K" (SIGP_RESTART), "K" (SIGP_STOP)
		: "0", "1", "cc");
	for (;;) ;
}

/*
 * Enable additional logical cpus for multi-threading.
 */
static int pcpu_set_smt(unsigned int mtid)
{
	int cc;

	if (smp_cpu_mtid == mtid)
		return 0;
	cc = __pcpu_sigp(0, SIGP_SET_MULTI_THREADING, mtid, NULL);
	if (cc == 0) {
		smp_cpu_mtid = mtid;
		smp_cpu_mt_shift = 0;
		while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
			smp_cpu_mt_shift++;
		per_cpu(pcpu_devices, 0).address = stap();
	}
	return cc;
}

/*
 * Call function on the ipl CPU.
 */
void __noreturn smp_call_ipl_cpu(void (*func)(void *), void *data)
{
	struct lowcore *lc = lowcore_ptr[0];

	if (ipl_pcpu->address == stap())
		lc = get_lowcore();

	pcpu_delegate(ipl_pcpu, 0, func, data, lc->nodat_stack);
}

int smp_find_processor_id(u16 address)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (per_cpu(pcpu_devices, cpu).address == address)
			return cpu;
	return -1;
}

void schedule_mcck_handler(void)
{
	pcpu_ec_call(this_cpu_ptr(&pcpu_devices), ec_mcck_pending);
}

bool notrace arch_vcpu_is_preempted(int cpu)
{
	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
		return false;
	if (pcpu_running(per_cpu_ptr(&pcpu_devices, cpu)))
		return false;
	return true;
}
EXPORT_SYMBOL(arch_vcpu_is_preempted);

void notrace smp_yield_cpu(int cpu)
{
	if (!machine_has_diag9c())
		return;
	diag_stat_inc_norecursion(DIAG_STAT_X09C);
	asm volatile("diag %0,0,0x9c"
		     : : "d" (per_cpu(pcpu_devices, cpu).address));
}
EXPORT_SYMBOL_GPL(smp_yield_cpu);

/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
void notrace smp_emergency_stop(void)
{
	static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
	static cpumask_t cpumask;
	u64 end;
	int cpu;

	arch_spin_lock(&lock);
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	end = get_tod_clock_monotonic() + (1000000UL << 12);
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = per_cpu_ptr(&pcpu_devices, cpu);

		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
				   0, NULL) == SIGP_CC_BUSY &&
		       get_tod_clock_monotonic() < end)
			cpu_relax();
	}
	while (get_tod_clock_monotonic() < end) {
		for_each_cpu(cpu, &cpumask)
			if (pcpu_stopped(per_cpu_ptr(&pcpu_devices, cpu)))
				cpumask_clear_cpu(cpu, &cpumask);
		if (cpumask_empty(&cpumask))
			break;
		cpu_relax();
	}
	arch_spin_unlock(&lock);
}
NOKPROBE_SYMBOL(smp_emergency_stop);

/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	struct pcpu *pcpu;
	int cpu;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(PSW_KERNEL_BITS);
	trace_hardirqs_off();

	debug_set_critical();

	if (oops_in_progress)
		smp_emergency_stop();

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		pcpu = per_cpu_ptr(&pcpu_devices, cpu);
		pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
		while (!pcpu_stopped(pcpu))
			cpu_relax();
	}
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void smp_handle_ext_call(void)
{
	unsigned long bits;

	/* handle bit signal external calls */
	bits = this_cpu_xchg(pcpu_devices.ec_mask, 0);
	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();
	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();
	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
	if (test_bit(ec_mcck_pending, &bits))
		s390_handle_mcck();
	if (test_bit(ec_irq_work, &bits))
		irq_work_run();
}

static void do_ext_call_interrupt(struct ext_code ext_code,
				  unsigned int param32, unsigned long param64)
{
	inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
	smp_handle_ext_call();
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		pcpu_ec_call(per_cpu_ptr(&pcpu_devices, cpu), ec_call_function_single);
}

void arch_send_call_function_single_ipi(int cpu)
{
	pcpu_ec_call(per_cpu_ptr(&pcpu_devices, cpu), ec_call_function_single);
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void arch_smp_send_reschedule(int cpu)
{
	pcpu_ec_call(per_cpu_ptr(&pcpu_devices, cpu), ec_schedule);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	pcpu_ec_call(this_cpu_ptr(&pcpu_devices), ec_irq_work);
}
#endif

#ifdef CONFIG_CRASH_DUMP

int smp_store_status(int cpu)
{
	struct lowcore *lc;
	struct pcpu *pcpu;
	unsigned long pa;

	pcpu = per_cpu_ptr(&pcpu_devices, cpu);
	lc = lowcore_ptr[cpu];
	pa = __pa(&lc->floating_pt_save_area);
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	if (!cpu_has_vx() && !cpu_has_gs())
		return 0;
	pa = lc->mcesad & MCESA_ORIGIN_MASK;
	if (cpu_has_gs())
		pa |= lc->mcesad & MCESA_LC_MASK;
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	return 0;
}

/*
 * Collect CPU state of the previous, crashed system.
 * There are three cases:
 * 1) standard zfcp/nvme dump
 *    condition: OLDMEM_BASE == NULL && is_ipl_type_dump() == true
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The boot CPU state is located in
 *    the absolute lowcore of the memory stored in the HSA. The zcore code
 *    will copy the boot CPU state from the HSA.
 * 2) stand-alone kdump for SCSI/NVMe (zfcp/nvme dump with swapped memory)
 *    condition: OLDMEM_BASE != NULL && is_ipl_type_dump() == true
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The firmware or the boot-loader
 *    stored the registers of the boot CPU in the absolute lowcore in the
 *    memory of the old system.
 * 3) kdump or stand-alone kdump for DASD
 *    condition: OLDMEM_BASE != NULL && is_ipl_type_dump() == false
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The kexec code or the boot-loader
 *    stored the registers of the boot CPU in the memory of the old system.
 *
 * Note that the legacy kdump mode where the old kernel stored the CPU states
 * does no longer exist: setup_arch() explicitly deactivates the elfcorehdr=
 * kernel parameter. The is_kdump_kernel() implementation on s390 is independent
 * of the elfcorehdr= parameter.
 */
static bool dump_available(void)
{
	return oldmem_data.start || is_ipl_type_dump();
}

void __init smp_save_dump_ipl_cpu(void)
{
	struct save_area *sa;
	void *regs;

	if (!dump_available())
		return;
	sa = save_area_alloc(true);
	regs = memblock_alloc_or_panic(512, 8);
	copy_oldmem_kernel(regs, __LC_FPREGS_SAVE_AREA, 512);
	save_area_add_regs(sa, regs);
	memblock_free(regs, 512);
	if (cpu_has_vx())
		save_area_add_vxrs(sa, boot_cpu_vector_save_area);
}

void __init smp_save_dump_secondary_cpus(void)
{
	int addr, boot_cpu_addr, max_cpu_addr;
	struct save_area *sa;
	void *page;

	if (!dump_available())
		return;
	/* Allocate a page as dumping area for the store status sigps */
	page = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
	if (!page)
		panic("ERROR: Failed to allocate %lx bytes below %lx\n",
		      PAGE_SIZE, 1UL << 31);

	/* Set multi-threading state to the previous system. */
	pcpu_set_smt(sclp.mtid_prev);
	boot_cpu_addr = stap();
	max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
	for (addr = 0; addr <= max_cpu_addr; addr++) {
		if (addr == boot_cpu_addr)
			continue;
		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
		    SIGP_CC_NOT_OPERATIONAL)
			continue;
		sa = save_area_alloc(false);
		__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, __pa(page));
		save_area_add_regs(sa, page);
		if (cpu_has_vx()) {
			__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, __pa(page));
			save_area_add_vxrs(sa, page);
		}
	}
	memblock_free(page, PAGE_SIZE);
	diag_amode31_ops.diag308_reset();
	pcpu_set_smt(0);
}
#endif /* CONFIG_CRASH_DUMP */

void smp_cpu_set_polarization(int cpu, int val)
{
	per_cpu(pcpu_devices, cpu).polarization = val;
}

int smp_cpu_get_polarization(int cpu)
{
	return per_cpu(pcpu_devices, cpu).polarization;
}

void smp_cpu_set_capacity(int cpu, unsigned long val)
{
	per_cpu(pcpu_devices, cpu).capacity = val;
}

unsigned long smp_cpu_get_capacity(int cpu)
{
	return per_cpu(pcpu_devices, cpu).capacity;
}

void smp_set_core_capacity(int cpu, unsigned long val)
{
	int i;

	cpu = smp_get_base_cpu(cpu);
	for (i = cpu; (i <= cpu + smp_cpu_mtid) && (i < nr_cpu_ids); i++)
		smp_cpu_set_capacity(i, val);
}

int smp_cpu_get_cpu_address(int cpu)
{
	return per_cpu(pcpu_devices, cpu).address;
}

static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
{
	static int use_sigp_detection;
	int address;

	if (use_sigp_detection || sclp_get_core_info(info, early)) {
		use_sigp_detection = 1;
		for (address = 0;
		     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
		     address += (1U << smp_cpu_mt_shift)) {
			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0) ==
			    SIGP_CC_NOT_OPERATIONAL)
				continue;
			info->core[info->configured].core_id =
				address >> smp_cpu_mt_shift;
			info->core[info->configured].type = boot_core_type;
			info->configured++;
		}
		info->combined = info->configured;
	}
}

static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
			bool configured, bool early)
{
	struct pcpu *pcpu;
	int cpu, nr, i;
	u16 address;

	nr = 0;
	if (sclp.has_core_type && core->type != boot_core_type)
		return nr;
	cpu = cpumask_first(avail);
	address = core->core_id << smp_cpu_mt_shift;
	for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) {
		if (pcpu_find_address(cpu_present_mask, address + i))
			continue;
		pcpu = per_cpu_ptr(&pcpu_devices, cpu);
		pcpu->address = address + i;
		if (configured)
			pcpu->state = CPU_STATE_CONFIGURED;
		else
			pcpu->state = CPU_STATE_STANDBY;
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		smp_cpu_set_capacity(cpu, CPU_CAPACITY_HIGH);
		set_cpu_present(cpu, true);
		if (!early && arch_register_cpu(cpu))
			set_cpu_present(cpu, false);
		else
			nr++;
		cpumask_clear_cpu(cpu, avail);
		cpu = cpumask_next(cpu, avail);
	}
	return nr;
}

static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
{
	struct sclp_core_entry *core;
	static cpumask_t avail;
	bool configured;
	u16 core_id;
	int nr, i;

	cpus_read_lock();
	mutex_lock(&smp_cpu_state_mutex);
	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	/*
	 * Add IPL core first (which got logical CPU number 0) to make sure
	 * that all SMT threads get subsequent logical CPU numbers.
	 */
	if (early) {
		core_id = per_cpu(pcpu_devices, 0).address >> smp_cpu_mt_shift;
		for (i = 0; i < info->configured; i++) {
			core = &info->core[i];
			if (core->core_id == core_id) {
				nr += smp_add_core(core, &avail, true, early);
				break;
			}
		}
	}
	for (i = 0; i < info->combined; i++) {
		configured = i < info->configured;
		nr += smp_add_core(&info->core[i], &avail, configured, early);
	}
	mutex_unlock(&smp_cpu_state_mutex);
	cpus_read_unlock();
	return nr;
}

void __init smp_detect_cpus(void)
{
	unsigned int cpu, mtid, c_cpus, s_cpus;
	struct sclp_core_info *info;
	u16 address;

	/* Get CPU information */
	info = memblock_alloc_or_panic(sizeof(*info), 8);
	smp_get_core_info(info, 1);
	/* Find boot CPU type */
	if (sclp.has_core_type) {
		address = stap();
		for (cpu = 0; cpu < info->combined; cpu++)
			if (info->core[cpu].core_id == address) {
				/* The boot cpu dictates the cpu type. */
				boot_core_type = info->core[cpu].type;
				break;
			}
		if (cpu >= info->combined)
			panic("Could not find boot CPU type");
	}

	/* Set multi-threading state for the current system */
	mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
	pcpu_set_smt(mtid);
	cpu_smt_set_num_threads(smp_cpu_mtid + 1, smp_cpu_mtid + 1);

	/* Print number of CPUs */
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (sclp.has_core_type &&
		    info->core[cpu].type != boot_core_type)
			continue;
		if (cpu < info->configured)
			c_cpus += smp_cpu_mtid + 1;
		else
			s_cpus += smp_cpu_mtid + 1;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
	memblock_free(info, sizeof(*info));
}

/*
 * Activate a secondary processor.
 */
static void smp_start_secondary(void *cpuvoid)
{
	struct lowcore *lc = get_lowcore();
	int cpu = raw_smp_processor_id();

	lc->last_update_clock = get_tod_clock();
	lc->restart_stack = (unsigned long)restart_stack;
	lc->restart_fn = (unsigned long)do_restart;
	lc->restart_data = 0;
	lc->restart_source = -1U;
	lc->restart_flags = 0;
	restore_access_regs(lc->access_regs_save_area);
	cpu_init();
	rcutree_report_cpu_starting(cpu);
	init_cpu_timer();
	vtime_init();
	vdso_getcpu_init();
	pfault_init();
	cpumask_set_cpu(cpu, &cpu_setup_mask);
	update_cpu_masks();
	notify_cpu_starting(cpu);
	if (topology_cpu_dedicated(cpu))
		set_cpu_flag(CIF_DEDICATED_CPU);
	else
		clear_cpu_flag(CIF_DEDICATED_CPU);
	set_cpu_online(cpu, true);
	inc_irq_stat(CPU_RST);
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu = per_cpu_ptr(&pcpu_devices, cpu);
	int rc;

	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
	    SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;

	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	/*
	 * Make sure global control register contents do not change
	 * until new CPU has initialized control registers.
	 */
	system_ctlreg_lock();
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(cpu, tidle);
	pcpu_start_fn(cpu, smp_start_secondary, NULL);
	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu))
		cpu_relax();
	system_ctlreg_unlock();
	return 0;
}

static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
{
	get_option(&s, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);

int __cpu_disable(void)
{
	struct ctlreg cregs[16];
	int cpu;

	/* Handle possible pending IPIs */
	smp_handle_ext_call();
	cpu = smp_processor_id();
	set_cpu_online(cpu, false);
	cpumask_clear_cpu(cpu, &cpu_setup_mask);
	update_cpu_masks();
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__local_ctl_store(0, 15, cregs);
	cregs[0].val &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6].val &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14].val &= ~0x1f000000UL;	/* disable most machine checks */
	__local_ctl_load(0, 15, cregs);
	clear_cpu_flag(CIF_NOHZ_DELAY);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = per_cpu_ptr(&pcpu_devices, cpu);
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu, cpu);
	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
	cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
	pcpu->flags = 0;
}

void __noreturn cpu_die(void)
{
	idle_task_exit();
	pcpu_sigp_retry(this_cpu_ptr(&pcpu_devices), SIGP_STOP, 0);
	for (;;) ;
}

void __init smp_fill_possible_mask(void)
{
	unsigned int possible, sclp_max, cpu;

	sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
	sclp_max = min(smp_max_threads, sclp_max);
	sclp_max = (sclp.max_cores * sclp_max) ?: nr_cpu_ids;
	possible = setup_possible_cpus ?: nr_cpu_ids;
	possible = min(possible, sclp_max);
	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1201");
	system_ctl_set_bit(0, 14);
	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1202");
	system_ctl_set_bit(0, 13);
	smp_rescan_cpus(true);
}

void __init smp_prepare_boot_cpu(void)
{
	struct lowcore *lc = get_lowcore();

	WARN_ON(!cpu_present(0) || !cpu_online(0));
	lc->percpu_offset = __per_cpu_offset[0];
	ipl_pcpu = per_cpu_ptr(&pcpu_devices, 0);
	ipl_pcpu->state = CPU_STATE_CONFIGURED;
	lc->pcpu = (unsigned long)ipl_pcpu;
	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
	smp_cpu_set_capacity(0, CPU_CAPACITY_HIGH);
}

void __init smp_setup_processor_id(void)
{
	struct lowcore *lc = get_lowcore();

	lc->cpu_nr = 0;
	per_cpu(pcpu_devices, 0).address = stap();
	lc->spinlock_lockval = arch_spin_lockval(0);
	lc->spinlock_index = 0;
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sysfs_emit(buf, "%d\n", per_cpu(pcpu_devices, dev->id).state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc, i;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	cpus_read_lock();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus */
	cpu = dev->id;
	cpu = smp_get_base_cpu(cpu);
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_online(cpu + i))
			goto out;
	pcpu = per_cpu_ptr(&pcpu_devices, cpu);
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			per_cpu(pcpu_devices, cpu + i).state = CPU_STATE_STANDBY;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			per_cpu(pcpu_devices, cpu + i).state = CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	cpus_read_unlock();
	return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);

static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", per_cpu(pcpu_devices, dev->id).address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
	&dev_attr_configure.attr,
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

static int smp_cpu_online(unsigned int cpu)
{
	struct cpu *c = per_cpu_ptr(&cpu_devices, cpu);

	return sysfs_create_group(&c->dev.kobj, &cpu_online_attr_group);
}

static int smp_cpu_pre_down(unsigned int cpu)
{
	struct cpu *c = per_cpu_ptr(&cpu_devices, cpu);

	sysfs_remove_group(&c->dev.kobj, &cpu_online_attr_group);
	return 0;
}

bool arch_cpu_is_hotpluggable(int cpu)
{
	return !!cpu;
}

int arch_register_cpu(int cpu)
{
	struct cpu *c = per_cpu_ptr(&cpu_devices, cpu);
	int rc;

	c->hotpluggable = arch_cpu_is_hotpluggable(cpu);
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&c->dev.kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	sysfs_remove_group(&c->dev.kobj, &cpu_common_attr_group);
out_cpu:
	unregister_cpu(c);
out:
	return rc;
}

int __ref smp_rescan_cpus(bool early)
{
	struct sclp_core_info *info;
	int nr;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	smp_get_core_info(info, 0);
	nr = __smp_rescan_cpus(info, early);
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}

static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = lock_device_hotplug_sysfs();
	if (rc)
		return rc;
	rc = smp_rescan_cpus(false);
	unlock_device_hotplug();
	return rc ? rc : count;
}
static DEVICE_ATTR_WO(rescan);

static int __init s390_smp_init(void)
{
	struct device *dev_root;
	int rc;

	dev_root = bus_get_dev_root(&cpu_subsys);
	if (dev_root) {
		rc = device_create_file(dev_root, &dev_attr_rescan);
		put_device(dev_root);
		if (rc)
			return rc;
	}
	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "s390/smp:online",
			       smp_cpu_online, smp_cpu_pre_down);
	rc = rc <= 0 ? rc : 0;
	return rc;
}
subsys_initcall(s390_smp_init);