xref: /linux/arch/s390/kernel/smp.c (revision 08b7174fb8d126e607e385e34b9e1da4f3be274f)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  SMP related functions
4  *
5  *    Copyright IBM Corp. 1999, 2012
6  *    Author(s): Denis Joseph Barrow,
7  *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
8  *
9  *  based on other smp stuff by
10  *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
11  *    (c) 1998 Ingo Molnar
12  *
13  * The code outside of smp.c uses logical cpu numbers; only smp.c does
14  * the translation of logical to physical cpu ids. All new code that
15  * operates on physical cpu numbers needs to go into smp.c.
16  */
17 
18 #define KMSG_COMPONENT "cpu"
19 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
20 
21 #include <linux/workqueue.h>
22 #include <linux/memblock.h>
23 #include <linux/export.h>
24 #include <linux/init.h>
25 #include <linux/mm.h>
26 #include <linux/err.h>
27 #include <linux/spinlock.h>
28 #include <linux/kernel_stat.h>
29 #include <linux/delay.h>
30 #include <linux/interrupt.h>
31 #include <linux/irqflags.h>
32 #include <linux/irq_work.h>
33 #include <linux/cpu.h>
34 #include <linux/slab.h>
35 #include <linux/sched/hotplug.h>
36 #include <linux/sched/task_stack.h>
37 #include <linux/crash_dump.h>
38 #include <linux/kprobes.h>
39 #include <asm/asm-offsets.h>
40 #include <asm/pfault.h>
41 #include <asm/diag.h>
42 #include <asm/switch_to.h>
43 #include <asm/facility.h>
44 #include <asm/ipl.h>
45 #include <asm/setup.h>
46 #include <asm/irq.h>
47 #include <asm/tlbflush.h>
48 #include <asm/vtimer.h>
49 #include <asm/abs_lowcore.h>
50 #include <asm/sclp.h>
51 #include <asm/debug.h>
52 #include <asm/os_info.h>
53 #include <asm/sigp.h>
54 #include <asm/idle.h>
55 #include <asm/nmi.h>
56 #include <asm/stacktrace.h>
57 #include <asm/topology.h>
58 #include <asm/vdso.h>
59 #include <asm/maccess.h>
60 #include "entry.h"
61 
62 enum {
63 	ec_schedule = 0,
64 	ec_call_function_single,
65 	ec_stop_cpu,
66 	ec_mcck_pending,
67 	ec_irq_work,
68 };
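/*
 * The ec_* values are bit numbers in the per-cpu ec_mask below. A sender
 * sets the bit with pcpu_ec_call() and delivers a SIGP external call or
 * emergency signal; the receiving CPU clears the whole mask in
 * smp_handle_ext_call() and runs the handler for every bit that was set.
 */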
69 
70 enum {
71 	CPU_STATE_STANDBY,
72 	CPU_STATE_CONFIGURED,
73 };
74 
75 static DEFINE_PER_CPU(struct cpu *, cpu_device);
76 
77 struct pcpu {
78 	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
79 	unsigned long ec_clk;		/* sigp timestamp for ec_xxx */
80 	signed char state;		/* physical cpu state */
81 	signed char polarization;	/* physical polarization */
82 	u16 address;			/* physical cpu address */
83 };
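/*
 * pcpu_devices[] is indexed by logical CPU number; the address member
 * holds the physical CPU address used for SIGP orders. This array is
 * where the logical-to-physical translation mentioned in the header
 * comment actually lives.
 */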
84 
85 static u8 boot_core_type;
86 static struct pcpu pcpu_devices[NR_CPUS];
87 
88 unsigned int smp_cpu_mt_shift;
89 EXPORT_SYMBOL(smp_cpu_mt_shift);
90 
91 unsigned int smp_cpu_mtid;
92 EXPORT_SYMBOL(smp_cpu_mtid);
93 
94 #ifdef CONFIG_CRASH_DUMP
95 __vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
96 #endif
97 
98 static unsigned int smp_max_threads __initdata = -1U;
99 cpumask_t cpu_setup_mask;
100 
101 static int __init early_nosmt(char *s)
102 {
103 	smp_max_threads = 1;
104 	return 0;
105 }
106 early_param("nosmt", early_nosmt);
107 
108 static int __init early_smt(char *s)
109 {
110 	get_option(&s, &smp_max_threads);
111 	return 0;
112 }
113 early_param("smt", early_smt);
114 
115 /*
116  * The smp_cpu_state_mutex must be held when changing the state or polarization
117  * member of a pcpu data structure within the pcpu_devices array.
118  */
119 DEFINE_MUTEX(smp_cpu_state_mutex);
120 
121 /*
122  * Signal processor helper functions.
123  */
124 static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
125 {
126 	int cc;
127 
128 	while (1) {
129 		cc = __pcpu_sigp(addr, order, parm, NULL);
130 		if (cc != SIGP_CC_BUSY)
131 			return cc;
132 		cpu_relax();
133 	}
134 }
135 
136 static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
137 {
138 	int cc, retry;
139 
140 	for (retry = 0; ; retry++) {
141 		cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
142 		if (cc != SIGP_CC_BUSY)
143 			break;
144 		if (retry >= 3)
145 			udelay(10);
146 	}
147 	return cc;
148 }
149 
150 static inline int pcpu_stopped(struct pcpu *pcpu)
151 {
152 	u32 status;
153 
154 	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
155 			0, &status) != SIGP_CC_STATUS_STORED)
156 		return 0;
157 	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
158 }
159 
160 static inline int pcpu_running(struct pcpu *pcpu)
161 {
162 	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
163 			0, NULL) != SIGP_CC_STATUS_STORED)
164 		return 1;
165 	/* Status stored condition code is equivalent to cpu not running. */
166 	return 0;
167 }
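/*
 * Note the asymmetry of the two sense orders above: for SIGP_SENSE a
 * stored status with the stopped or check-stop bit set means the CPU is
 * down, while for SIGP_SENSE_RUNNING any stored status already means
 * "not running" and every other accepted condition code counts as
 * running.
 */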
168 
169 /*
170  * Find struct pcpu by cpu address.
171  */
172 static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
173 {
174 	int cpu;
175 
176 	for_each_cpu(cpu, mask)
177 		if (pcpu_devices[cpu].address == address)
178 			return pcpu_devices + cpu;
179 	return NULL;
180 }
181 
182 static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
183 {
184 	int order;
185 
186 	if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
187 		return;
188 	order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
189 	pcpu->ec_clk = get_tod_clock_fast();
190 	pcpu_sigp_retry(pcpu, order, 0);
191 }
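/*
 * pcpu_ec_call() is the common IPI path: the requested ec_* bit is set
 * in the target's ec_mask and the target is kicked with an external call
 * if it is running, or an emergency signal if it is not; ec_clk keeps
 * the TOD timestamp of the last kick. The test_and_set_bit() avoids
 * sending another signal while the previous one is still pending.
 */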
192 
193 static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
194 {
195 	unsigned long async_stack, nodat_stack, mcck_stack;
196 	struct lowcore *lc;
197 
198 	lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
199 	nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
200 	async_stack = stack_alloc();
201 	mcck_stack = stack_alloc();
202 	if (!lc || !nodat_stack || !async_stack || !mcck_stack)
203 		goto out;
204 	memcpy(lc, &S390_lowcore, 512);
205 	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
206 	lc->async_stack = async_stack + STACK_INIT_OFFSET;
207 	lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET;
208 	lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;
209 	lc->cpu_nr = cpu;
210 	lc->spinlock_lockval = arch_spin_lockval(cpu);
211 	lc->spinlock_index = 0;
212 	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
213 	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
214 	lc->preempt_count = PREEMPT_DISABLED;
215 	if (nmi_alloc_mcesa(&lc->mcesad))
216 		goto out;
217 	if (abs_lowcore_map(cpu, lc, true))
218 		goto out_mcesa;
219 	lowcore_ptr[cpu] = lc;
220 	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, __pa(lc));
221 	return 0;
222 
223 out_mcesa:
224 	nmi_free_mcesa(&lc->mcesad);
225 out:
226 	stack_free(mcck_stack);
227 	stack_free(async_stack);
228 	free_pages(nodat_stack, THREAD_SIZE_ORDER);
229 	free_pages((unsigned long) lc, LC_ORDER);
230 	return -ENOMEM;
231 }
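/*
 * pcpu_alloc_lowcore() builds the target CPU's prefix page: it is
 * allocated with GFP_DMA, presumably so the prefix address stays in the
 * low range the SIGP set-prefix order can address; the first 512 bytes
 * are cloned from the boot CPU's lowcore and the remainder cleared, and
 * separate async, nodat and machine check stacks are attached before
 * SIGP_SET_PREFIX points the target CPU at the new lowcore.
 */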
232 
233 static void pcpu_free_lowcore(struct pcpu *pcpu)
234 {
235 	unsigned long async_stack, nodat_stack, mcck_stack;
236 	struct lowcore *lc;
237 	int cpu;
238 
239 	cpu = pcpu - pcpu_devices;
240 	lc = lowcore_ptr[cpu];
241 	nodat_stack = lc->nodat_stack - STACK_INIT_OFFSET;
242 	async_stack = lc->async_stack - STACK_INIT_OFFSET;
243 	mcck_stack = lc->mcck_stack - STACK_INIT_OFFSET;
244 	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
245 	lowcore_ptr[cpu] = NULL;
246 	abs_lowcore_unmap(cpu);
247 	nmi_free_mcesa(&lc->mcesad);
248 	stack_free(async_stack);
249 	stack_free(mcck_stack);
250 	free_pages(nodat_stack, THREAD_SIZE_ORDER);
251 	free_pages((unsigned long) lc, LC_ORDER);
252 }
253 
254 static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
255 {
256 	struct lowcore *lc, *abs_lc;
257 
258 	lc = lowcore_ptr[cpu];
259 	cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
260 	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
261 	lc->cpu_nr = cpu;
262 	lc->restart_flags = RESTART_FLAG_CTLREGS;
263 	lc->spinlock_lockval = arch_spin_lockval(cpu);
264 	lc->spinlock_index = 0;
265 	lc->percpu_offset = __per_cpu_offset[cpu];
266 	lc->kernel_asce = S390_lowcore.kernel_asce;
267 	lc->user_asce = s390_invalid_asce;
268 	lc->machine_flags = S390_lowcore.machine_flags;
269 	lc->user_timer = lc->system_timer =
270 		lc->steal_timer = lc->avg_steal_timer = 0;
271 	abs_lc = get_abs_lowcore();
272 	memcpy(lc->cregs_save_area, abs_lc->cregs_save_area, sizeof(lc->cregs_save_area));
273 	put_abs_lowcore(abs_lc);
274 	lc->cregs_save_area[1] = lc->kernel_asce;
275 	lc->cregs_save_area[7] = lc->user_asce;
276 	save_access_regs((unsigned int *) lc->access_regs_save_area);
277 	arch_spin_lock_setup(cpu);
278 }
279 
280 static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
281 {
282 	struct lowcore *lc;
283 	int cpu;
284 
285 	cpu = pcpu - pcpu_devices;
286 	lc = lowcore_ptr[cpu];
287 	lc->kernel_stack = (unsigned long)task_stack_page(tsk) + STACK_INIT_OFFSET;
288 	lc->current_task = (unsigned long)tsk;
289 	lc->lpp = LPP_MAGIC;
290 	lc->current_pid = tsk->pid;
291 	lc->user_timer = tsk->thread.user_timer;
292 	lc->guest_timer = tsk->thread.guest_timer;
293 	lc->system_timer = tsk->thread.system_timer;
294 	lc->hardirq_timer = tsk->thread.hardirq_timer;
295 	lc->softirq_timer = tsk->thread.softirq_timer;
296 	lc->steal_timer = 0;
297 }
298 
299 static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
300 {
301 	struct lowcore *lc;
302 	int cpu;
303 
304 	cpu = pcpu - pcpu_devices;
305 	lc = lowcore_ptr[cpu];
306 	lc->restart_stack = lc->kernel_stack;
307 	lc->restart_fn = (unsigned long) func;
308 	lc->restart_data = (unsigned long) data;
309 	lc->restart_source = -1U;
310 	pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
311 }
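/*
 * pcpu_start_fn() parks func and data in the restart_* fields of the
 * target's lowcore and kicks it with SIGP_RESTART; the restart interrupt
 * entry code is then expected to switch to restart_stack and call
 * restart_fn with restart_data. restart_source records which CPU
 * requested the restart, or -1U if none does.
 */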
312 
313 typedef void (pcpu_delegate_fn)(void *);
314 
315 /*
316  * Call function via PSW restart on pcpu and stop the current cpu.
317  */
318 static void __pcpu_delegate(pcpu_delegate_fn *func, void *data)
319 {
320 	func(data);	/* should not return */
321 }
322 
323 static void pcpu_delegate(struct pcpu *pcpu,
324 			  pcpu_delegate_fn *func,
325 			  void *data, unsigned long stack)
326 {
327 	struct lowcore *lc, *abs_lc;
328 	unsigned int source_cpu;
329 
330 	lc = lowcore_ptr[pcpu - pcpu_devices];
331 	source_cpu = stap();
332 
333 	if (pcpu->address == source_cpu) {
334 		call_on_stack(2, stack, void, __pcpu_delegate,
335 			      pcpu_delegate_fn *, func, void *, data);
336 	}
337 	/* Stop target cpu (if func returns this stops the current cpu). */
338 	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
339 	pcpu_sigp_retry(pcpu, SIGP_CPU_RESET, 0);
340 	/* Restart func on the target cpu and stop the current cpu. */
341 	if (lc) {
342 		lc->restart_stack = stack;
343 		lc->restart_fn = (unsigned long)func;
344 		lc->restart_data = (unsigned long)data;
345 		lc->restart_source = source_cpu;
346 	} else {
347 		abs_lc = get_abs_lowcore();
348 		abs_lc->restart_stack = stack;
349 		abs_lc->restart_fn = (unsigned long)func;
350 		abs_lc->restart_data = (unsigned long)data;
351 		abs_lc->restart_source = source_cpu;
352 		put_abs_lowcore(abs_lc);
353 	}
354 	asm volatile(
355 		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
356 		"	brc	2,0b	# busy, try again\n"
357 		"1:	sigp	0,%1,%3	# sigp stop to current cpu\n"
358 		"	brc	2,1b	# busy, try again\n"
359 		: : "d" (pcpu->address), "d" (source_cpu),
360 		    "K" (SIGP_RESTART), "K" (SIGP_STOP)
361 		: "0", "1", "cc");
362 	for (;;) ;
363 }
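/*
 * pcpu_delegate(): when the target is the current CPU, func is simply
 * run on the given stack via call_on_stack() and is expected not to
 * return. Otherwise the target is stopped and reset, the restart
 * parameters are written either into its lowcore or, if that is not set
 * up yet, into the absolute lowcore, and the closing inline assembly
 * restarts the target and then stops the calling CPU, retrying both
 * sigp orders for as long as they come back busy.
 */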
364 
365 /*
366  * Enable additional logical cpus for multi-threading.
367  */
368 static int pcpu_set_smt(unsigned int mtid)
369 {
370 	int cc;
371 
372 	if (smp_cpu_mtid == mtid)
373 		return 0;
374 	cc = __pcpu_sigp(0, SIGP_SET_MULTI_THREADING, mtid, NULL);
375 	if (cc == 0) {
376 		smp_cpu_mtid = mtid;
377 		smp_cpu_mt_shift = 0;
378 		while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
379 			smp_cpu_mt_shift++;
380 		pcpu_devices[0].address = stap();
381 	}
382 	return cc;
383 }
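/*
 * Worked example for the multi-threading layout: with mtid = 1 (two
 * threads per core) smp_cpu_mt_shift becomes 1, so a core with core_id 5
 * provides the physical CPU addresses 10 and 11 (core_id << shift up to
 * (core_id << shift) + mtid). smp_add_core() below relies on exactly
 * this addressing when it turns one SCLP core entry into mtid + 1
 * logical CPUs.
 */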
384 
385 /*
386  * Call function on an online CPU.
387  */
388 void smp_call_online_cpu(void (*func)(void *), void *data)
389 {
390 	struct pcpu *pcpu;
391 
392 	/* Use the current cpu if it is online. */
393 	pcpu = pcpu_find_address(cpu_online_mask, stap());
394 	if (!pcpu)
395 		/* Use the first online cpu. */
396 		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
397 	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
398 }
399 
400 /*
401  * Call function on the ipl CPU.
402  */
403 void smp_call_ipl_cpu(void (*func)(void *), void *data)
404 {
405 	struct lowcore *lc = lowcore_ptr[0];
406 
407 	if (pcpu_devices[0].address == stap())
408 		lc = &S390_lowcore;
409 
410 	pcpu_delegate(&pcpu_devices[0], func, data,
411 		      lc->nodat_stack);
412 }
413 
414 int smp_find_processor_id(u16 address)
415 {
416 	int cpu;
417 
418 	for_each_present_cpu(cpu)
419 		if (pcpu_devices[cpu].address == address)
420 			return cpu;
421 	return -1;
422 }
423 
424 void schedule_mcck_handler(void)
425 {
426 	pcpu_ec_call(pcpu_devices + smp_processor_id(), ec_mcck_pending);
427 }
428 
429 bool notrace arch_vcpu_is_preempted(int cpu)
430 {
431 	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
432 		return false;
433 	if (pcpu_running(pcpu_devices + cpu))
434 		return false;
435 	return true;
436 }
437 EXPORT_SYMBOL(arch_vcpu_is_preempted);
438 
439 void notrace smp_yield_cpu(int cpu)
440 {
441 	if (!MACHINE_HAS_DIAG9C)
442 		return;
443 	diag_stat_inc_norecursion(DIAG_STAT_X09C);
444 	asm volatile("diag %0,0,0x9c"
445 		     : : "d" (pcpu_devices[cpu].address));
446 }
447 EXPORT_SYMBOL_GPL(smp_yield_cpu);
448 
449 /*
450  * Send cpus emergency shutdown signal. This gives the cpus the
451  * opportunity to complete outstanding interrupts.
452  */
453 void notrace smp_emergency_stop(void)
454 {
455 	static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
456 	static cpumask_t cpumask;
457 	u64 end;
458 	int cpu;
459 
460 	arch_spin_lock(&lock);
461 	cpumask_copy(&cpumask, cpu_online_mask);
462 	cpumask_clear_cpu(smp_processor_id(), &cpumask);
463 
464 	end = get_tod_clock() + (1000000UL << 12);
465 	for_each_cpu(cpu, &cpumask) {
466 		struct pcpu *pcpu = pcpu_devices + cpu;
467 		set_bit(ec_stop_cpu, &pcpu->ec_mask);
468 		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
469 				   0, NULL) == SIGP_CC_BUSY &&
470 		       get_tod_clock() < end)
471 			cpu_relax();
472 	}
473 	while (get_tod_clock() < end) {
474 		for_each_cpu(cpu, &cpumask)
475 			if (pcpu_stopped(pcpu_devices + cpu))
476 				cpumask_clear_cpu(cpu, &cpumask);
477 		if (cpumask_empty(&cpumask))
478 			break;
479 		cpu_relax();
480 	}
481 	arch_spin_unlock(&lock);
482 }
483 NOKPROBE_SYMBOL(smp_emergency_stop);
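/*
 * The timeout used above, (1000000UL << 12) TOD clock units, is about
 * one second: bit 51 of the s390 TOD clock ticks once per microsecond,
 * so shifting a microsecond count left by 12 converts it into TOD units.
 */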
484 
485 /*
486  * Stop all cpus but the current one.
487  */
488 void smp_send_stop(void)
489 {
490 	int cpu;
491 
492 	/* Disable all interrupts/machine checks */
493 	__load_psw_mask(PSW_KERNEL_BITS);
494 	trace_hardirqs_off();
495 
496 	debug_set_critical();
497 
498 	if (oops_in_progress)
499 		smp_emergency_stop();
500 
501 	/* stop all processors */
502 	for_each_online_cpu(cpu) {
503 		if (cpu == smp_processor_id())
504 			continue;
505 		pcpu_sigp_retry(pcpu_devices + cpu, SIGP_STOP, 0);
506 		while (!pcpu_stopped(pcpu_devices + cpu))
507 			cpu_relax();
508 	}
509 }
510 
511 /*
512  * This is the main routine where commands issued by other
513  * cpus are handled.
514  */
515 static void smp_handle_ext_call(void)
516 {
517 	unsigned long bits;
518 
519 	/* handle bit signal external calls */
520 	bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
521 	if (test_bit(ec_stop_cpu, &bits))
522 		smp_stop_cpu();
523 	if (test_bit(ec_schedule, &bits))
524 		scheduler_ipi();
525 	if (test_bit(ec_call_function_single, &bits))
526 		generic_smp_call_function_single_interrupt();
527 	if (test_bit(ec_mcck_pending, &bits))
528 		s390_handle_mcck();
529 	if (test_bit(ec_irq_work, &bits))
530 		irq_work_run();
531 }
532 
533 static void do_ext_call_interrupt(struct ext_code ext_code,
534 				  unsigned int param32, unsigned long param64)
535 {
536 	inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
537 	smp_handle_ext_call();
538 }
539 
540 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
541 {
542 	int cpu;
543 
544 	for_each_cpu(cpu, mask)
545 		pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
546 }
547 
548 void arch_send_call_function_single_ipi(int cpu)
549 {
550 	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
551 }
552 
553 /*
554  * This function sends a 'reschedule' IPI to another CPU.
555  * It goes straight through and wastes no time serializing
556  * anything. Worst case is that we lose a reschedule.
557  */
558 void arch_smp_send_reschedule(int cpu)
559 {
560 	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
561 }
562 
563 #ifdef CONFIG_IRQ_WORK
564 void arch_irq_work_raise(void)
565 {
566 	pcpu_ec_call(pcpu_devices + smp_processor_id(), ec_irq_work);
567 }
568 #endif
569 
570 /*
571  * parameter area for the set/clear control bit callbacks
572  */
573 struct ec_creg_mask_parms {
574 	unsigned long orval;
575 	unsigned long andval;
576 	int cr;
577 };
578 
579 /*
580  * callback for setting/clearing control bits
581  */
582 static void smp_ctl_bit_callback(void *info)
583 {
584 	struct ec_creg_mask_parms *pp = info;
585 	unsigned long cregs[16];
586 
587 	__ctl_store(cregs, 0, 15);
588 	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
589 	__ctl_load(cregs, 0, 15);
590 }
591 
592 static DEFINE_SPINLOCK(ctl_lock);
593 
594 void smp_ctl_set_clear_bit(int cr, int bit, bool set)
595 {
596 	struct ec_creg_mask_parms parms = { .cr = cr, };
597 	struct lowcore *abs_lc;
598 	u64 ctlreg;
599 
600 	if (set) {
601 		parms.orval = 1UL << bit;
602 		parms.andval = -1UL;
603 	} else {
604 		parms.orval = 0;
605 		parms.andval = ~(1UL << bit);
606 	}
607 	spin_lock(&ctl_lock);
608 	abs_lc = get_abs_lowcore();
609 	ctlreg = abs_lc->cregs_save_area[cr];
610 	ctlreg = (ctlreg & parms.andval) | parms.orval;
611 	abs_lc->cregs_save_area[cr] = ctlreg;
612 	put_abs_lowcore(abs_lc);
613 	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
614 	spin_unlock(&ctl_lock);
615 }
616 EXPORT_SYMBOL(smp_ctl_set_clear_bit);
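/*
 * A minimal usage sketch (control register and bit number are chosen
 * only for illustration):
 *
 *	smp_ctl_set_clear_bit(0, 6, true);
 *
 * would OR 1UL << 6 into control register 0 on every online CPU and
 * also into the cregs_save_area copy in the absolute lowcore, so CPUs
 * brought up later inherit the new setting via pcpu_prepare_secondary().
 */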
617 
618 #ifdef CONFIG_CRASH_DUMP
619 
620 int smp_store_status(int cpu)
621 {
622 	struct lowcore *lc;
623 	struct pcpu *pcpu;
624 	unsigned long pa;
625 
626 	pcpu = pcpu_devices + cpu;
627 	lc = lowcore_ptr[cpu];
628 	pa = __pa(&lc->floating_pt_save_area);
629 	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
630 			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
631 		return -EIO;
632 	if (!MACHINE_HAS_VX && !MACHINE_HAS_GS)
633 		return 0;
634 	pa = lc->mcesad & MCESA_ORIGIN_MASK;
635 	if (MACHINE_HAS_GS)
636 		pa |= lc->mcesad & MCESA_LC_MASK;
637 	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
638 			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
639 		return -EIO;
640 	return 0;
641 }
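/*
 * For the crash dump case the register state of a remote CPU is pulled
 * in two steps: SIGP_STORE_STATUS_AT_ADDRESS stores the 512 byte status
 * block at the given address, chosen here so the registers land in the
 * save area fields of that CPU's lowcore, and, if the vector or guarded
 * storage facility is present, SIGP_STORE_ADDITIONAL_STATUS stores the
 * extended state into the area described by mcesad (the MCESA_LC_MASK
 * bits appear to carry the size information needed for guarded storage).
 */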
642 
643 /*
644  * Collect CPU state of the previous, crashed system.
645  * There are four cases:
646  * 1) standard zfcp/nvme dump
647  *    condition: OLDMEM_BASE == NULL && is_ipl_type_dump() == true
648  *    The state for all CPUs except the boot CPU needs to be collected
649  *    with sigp stop-and-store-status. The boot CPU state is located in
650  *    the absolute lowcore of the memory stored in the HSA. The zcore code
651  *    will copy the boot CPU state from the HSA.
652  * 2) stand-alone kdump for SCSI/NVMe (zfcp/nvme dump with swapped memory)
653  *    condition: OLDMEM_BASE != NULL && is_ipl_type_dump() == true
654  *    The state for all CPUs except the boot CPU needs to be collected
655  *    with sigp stop-and-store-status. The firmware or the boot-loader
656  *    stored the registers of the boot CPU in the absolute lowcore in the
657  *    memory of the old system.
658  * 3) kdump and the old kernel did not store the CPU state,
659  *    or stand-alone kdump for DASD
660  *    condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
661  *    The state for all CPUs except the boot CPU needs to be collected
662  *    with sigp stop-and-store-status. The kexec code or the boot-loader
663  *    stored the registers of the boot CPU in the memory of the old system.
664  * 4) kdump and the old kernel stored the CPU state
665  *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
666  *    This case does not exist for s390 anymore, setup_arch explicitly
667  *    deactivates the elfcorehdr= kernel parameter
668  */
669 static bool dump_available(void)
670 {
671 	return oldmem_data.start || is_ipl_type_dump();
672 }
673 
674 void __init smp_save_dump_ipl_cpu(void)
675 {
676 	struct save_area *sa;
677 	void *regs;
678 
679 	if (!dump_available())
680 		return;
681 	sa = save_area_alloc(true);
682 	regs = memblock_alloc(512, 8);
683 	if (!sa || !regs)
684 		panic("could not allocate memory for boot CPU save area\n");
685 	copy_oldmem_kernel(regs, __LC_FPREGS_SAVE_AREA, 512);
686 	save_area_add_regs(sa, regs);
687 	memblock_free(regs, 512);
688 	if (MACHINE_HAS_VX)
689 		save_area_add_vxrs(sa, boot_cpu_vector_save_area);
690 }
691 
692 void __init smp_save_dump_secondary_cpus(void)
693 {
694 	int addr, boot_cpu_addr, max_cpu_addr;
695 	struct save_area *sa;
696 	void *page;
697 
698 	if (!dump_available())
699 		return;
700 	/* Allocate a page as dumping area for the store status sigps */
701 	page = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
702 	if (!page)
703 		panic("ERROR: Failed to allocate %lx bytes below %lx\n",
704 		      PAGE_SIZE, 1UL << 31);
705 
706 	/* Set multi-threading state to the previous system. */
707 	pcpu_set_smt(sclp.mtid_prev);
708 	boot_cpu_addr = stap();
709 	max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
710 	for (addr = 0; addr <= max_cpu_addr; addr++) {
711 		if (addr == boot_cpu_addr)
712 			continue;
713 		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
714 		    SIGP_CC_NOT_OPERATIONAL)
715 			continue;
716 		sa = save_area_alloc(false);
717 		if (!sa)
718 			panic("could not allocate memory for save area\n");
719 		__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, __pa(page));
720 		save_area_add_regs(sa, page);
721 		if (MACHINE_HAS_VX) {
722 			__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, __pa(page));
723 			save_area_add_vxrs(sa, page);
724 		}
725 	}
726 	memblock_free(page, PAGE_SIZE);
727 	diag_amode31_ops.diag308_reset();
728 	pcpu_set_smt(0);
729 }
730 #endif /* CONFIG_CRASH_DUMP */
731 
732 void smp_cpu_set_polarization(int cpu, int val)
733 {
734 	pcpu_devices[cpu].polarization = val;
735 }
736 
737 int smp_cpu_get_polarization(int cpu)
738 {
739 	return pcpu_devices[cpu].polarization;
740 }
741 
742 int smp_cpu_get_cpu_address(int cpu)
743 {
744 	return pcpu_devices[cpu].address;
745 }
746 
747 static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
748 {
749 	static int use_sigp_detection;
750 	int address;
751 
752 	if (use_sigp_detection || sclp_get_core_info(info, early)) {
753 		use_sigp_detection = 1;
754 		for (address = 0;
755 		     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
756 		     address += (1U << smp_cpu_mt_shift)) {
757 			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0) ==
758 			    SIGP_CC_NOT_OPERATIONAL)
759 				continue;
760 			info->core[info->configured].core_id =
761 				address >> smp_cpu_mt_shift;
762 			info->configured++;
763 		}
764 		info->combined = info->configured;
765 	}
766 }
767 
768 static int smp_add_present_cpu(int cpu);
769 
770 static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
771 			bool configured, bool early)
772 {
773 	struct pcpu *pcpu;
774 	int cpu, nr, i;
775 	u16 address;
776 
777 	nr = 0;
778 	if (sclp.has_core_type && core->type != boot_core_type)
779 		return nr;
780 	cpu = cpumask_first(avail);
781 	address = core->core_id << smp_cpu_mt_shift;
782 	for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) {
783 		if (pcpu_find_address(cpu_present_mask, address + i))
784 			continue;
785 		pcpu = pcpu_devices + cpu;
786 		pcpu->address = address + i;
787 		if (configured)
788 			pcpu->state = CPU_STATE_CONFIGURED;
789 		else
790 			pcpu->state = CPU_STATE_STANDBY;
791 		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
792 		set_cpu_present(cpu, true);
793 		if (!early && smp_add_present_cpu(cpu) != 0)
794 			set_cpu_present(cpu, false);
795 		else
796 			nr++;
797 		cpumask_clear_cpu(cpu, avail);
798 		cpu = cpumask_next(cpu, avail);
799 	}
800 	return nr;
801 }
802 
803 static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
804 {
805 	struct sclp_core_entry *core;
806 	static cpumask_t avail;
807 	bool configured;
808 	u16 core_id;
809 	int nr, i;
810 
811 	cpus_read_lock();
812 	mutex_lock(&smp_cpu_state_mutex);
813 	nr = 0;
814 	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
815 	/*
816 	 * Add IPL core first (which got logical CPU number 0) to make sure
817 	 * that all SMT threads get subsequent logical CPU numbers.
818 	 */
819 	if (early) {
820 		core_id = pcpu_devices[0].address >> smp_cpu_mt_shift;
821 		for (i = 0; i < info->configured; i++) {
822 			core = &info->core[i];
823 			if (core->core_id == core_id) {
824 				nr += smp_add_core(core, &avail, true, early);
825 				break;
826 			}
827 		}
828 	}
829 	for (i = 0; i < info->combined; i++) {
830 		configured = i < info->configured;
831 		nr += smp_add_core(&info->core[i], &avail, configured, early);
832 	}
833 	mutex_unlock(&smp_cpu_state_mutex);
834 	cpus_read_unlock();
835 	return nr;
836 }
837 
838 void __init smp_detect_cpus(void)
839 {
840 	unsigned int cpu, mtid, c_cpus, s_cpus;
841 	struct sclp_core_info *info;
842 	u16 address;
843 
844 	/* Get CPU information */
845 	info = memblock_alloc(sizeof(*info), 8);
846 	if (!info)
847 		panic("%s: Failed to allocate %zu bytes align=0x%x\n",
848 		      __func__, sizeof(*info), 8);
849 	smp_get_core_info(info, 1);
850 	/* Find boot CPU type */
851 	if (sclp.has_core_type) {
852 		address = stap();
853 		for (cpu = 0; cpu < info->combined; cpu++)
854 			if (info->core[cpu].core_id == address) {
855 				/* The boot cpu dictates the cpu type. */
856 				boot_core_type = info->core[cpu].type;
857 				break;
858 			}
859 		if (cpu >= info->combined)
860 			panic("Could not find boot CPU type");
861 	}
862 
863 	/* Set multi-threading state for the current system */
864 	mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
865 	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
866 	pcpu_set_smt(mtid);
867 
868 	/* Print number of CPUs */
869 	c_cpus = s_cpus = 0;
870 	for (cpu = 0; cpu < info->combined; cpu++) {
871 		if (sclp.has_core_type &&
872 		    info->core[cpu].type != boot_core_type)
873 			continue;
874 		if (cpu < info->configured)
875 			c_cpus += smp_cpu_mtid + 1;
876 		else
877 			s_cpus += smp_cpu_mtid + 1;
878 	}
879 	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
880 
881 	/* Add CPUs present at boot */
882 	__smp_rescan_cpus(info, true);
883 	memblock_free(info, sizeof(*info));
884 }
885 
886 /*
887  *	Activate a secondary processor.
888  */
889 static void smp_start_secondary(void *cpuvoid)
890 {
891 	int cpu = raw_smp_processor_id();
892 
893 	S390_lowcore.last_update_clock = get_tod_clock();
894 	S390_lowcore.restart_stack = (unsigned long)restart_stack;
895 	S390_lowcore.restart_fn = (unsigned long)do_restart;
896 	S390_lowcore.restart_data = 0;
897 	S390_lowcore.restart_source = -1U;
898 	S390_lowcore.restart_flags = 0;
899 	restore_access_regs(S390_lowcore.access_regs_save_area);
900 	cpu_init();
901 	rcu_cpu_starting(cpu);
902 	init_cpu_timer();
903 	vtime_init();
904 	vdso_getcpu_init();
905 	pfault_init();
906 	cpumask_set_cpu(cpu, &cpu_setup_mask);
907 	update_cpu_masks();
908 	notify_cpu_starting(cpu);
909 	if (topology_cpu_dedicated(cpu))
910 		set_cpu_flag(CIF_DEDICATED_CPU);
911 	else
912 		clear_cpu_flag(CIF_DEDICATED_CPU);
913 	set_cpu_online(cpu, true);
914 	inc_irq_stat(CPU_RST);
915 	local_irq_enable();
916 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
917 }
918 
919 /* Upping and downing of CPUs */
920 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
921 {
922 	struct pcpu *pcpu = pcpu_devices + cpu;
923 	int rc;
924 
925 	if (pcpu->state != CPU_STATE_CONFIGURED)
926 		return -EIO;
927 	if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
928 	    SIGP_CC_ORDER_CODE_ACCEPTED)
929 		return -EIO;
930 
931 	rc = pcpu_alloc_lowcore(pcpu, cpu);
932 	if (rc)
933 		return rc;
934 	/*
935 	 * Make sure global control register contents do not change
936 	 * until new CPU has initialized control registers.
937 	 */
938 	spin_lock(&ctl_lock);
939 	pcpu_prepare_secondary(pcpu, cpu);
940 	pcpu_attach_task(pcpu, tidle);
941 	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
942 	/* Wait until cpu puts itself in the online & active maps */
943 	while (!cpu_online(cpu))
944 		cpu_relax();
945 	spin_unlock(&ctl_lock);
946 	return 0;
947 }
948 
949 static unsigned int setup_possible_cpus __initdata;
950 
951 static int __init _setup_possible_cpus(char *s)
952 {
953 	get_option(&s, &setup_possible_cpus);
954 	return 0;
955 }
956 early_param("possible_cpus", _setup_possible_cpus);
957 
958 int __cpu_disable(void)
959 {
960 	unsigned long cregs[16];
961 	int cpu;
962 
963 	/* Handle possible pending IPIs */
964 	smp_handle_ext_call();
965 	cpu = smp_processor_id();
966 	set_cpu_online(cpu, false);
967 	cpumask_clear_cpu(cpu, &cpu_setup_mask);
968 	update_cpu_masks();
969 	/* Disable pseudo page faults on this cpu. */
970 	pfault_fini();
971 	/* Disable interrupt sources via control register. */
972 	__ctl_store(cregs, 0, 15);
973 	cregs[0]  &= ~0x0000ee70UL;	/* disable all external interrupts */
974 	cregs[6]  &= ~0xff000000UL;	/* disable all I/O interrupts */
975 	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
976 	__ctl_load(cregs, 0, 15);
977 	clear_cpu_flag(CIF_NOHZ_DELAY);
978 	return 0;
979 }
980 
981 void __cpu_die(unsigned int cpu)
982 {
983 	struct pcpu *pcpu;
984 
985 	/* Wait until target cpu is down */
986 	pcpu = pcpu_devices + cpu;
987 	while (!pcpu_stopped(pcpu))
988 		cpu_relax();
989 	pcpu_free_lowcore(pcpu);
990 	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
991 	cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
992 }
993 
994 void __noreturn cpu_die(void)
995 {
996 	idle_task_exit();
997 	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
998 	for (;;) ;
999 }
1000 
1001 void __init smp_fill_possible_mask(void)
1002 {
1003 	unsigned int possible, sclp_max, cpu;
1004 
1005 	sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
1006 	sclp_max = min(smp_max_threads, sclp_max);
1007 	sclp_max = (sclp.max_cores * sclp_max) ?: nr_cpu_ids;
1008 	possible = setup_possible_cpus ?: nr_cpu_ids;
1009 	possible = min(possible, sclp_max);
1010 	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
1011 		set_cpu_possible(cpu, true);
1012 }
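/*
 * Worked example for smp_fill_possible_mask(), with assumed SCLP values:
 * if sclp.max_cores = 8, sclp.mtid = sclp.mtid_cp = 1 and neither smt=
 * nor possible_cpus= was given, sclp_max becomes 8 * 2 = 16, so CPUs
 * 0..15 are marked possible (further capped by nr_cpu_ids).
 */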
1013 
1014 void __init smp_prepare_cpus(unsigned int max_cpus)
1015 {
1016 	/* request the 0x1201 emergency signal external interrupt */
1017 	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
1018 		panic("Couldn't request external interrupt 0x1201");
1019 	/* request the 0x1202 external call external interrupt */
1020 	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
1021 		panic("Couldn't request external interrupt 0x1202");
1022 }
1023 
1024 void __init smp_prepare_boot_cpu(void)
1025 {
1026 	struct pcpu *pcpu = pcpu_devices;
1027 
1028 	WARN_ON(!cpu_present(0) || !cpu_online(0));
1029 	pcpu->state = CPU_STATE_CONFIGURED;
1030 	S390_lowcore.percpu_offset = __per_cpu_offset[0];
1031 	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
1032 }
1033 
1034 void __init smp_setup_processor_id(void)
1035 {
1036 	pcpu_devices[0].address = stap();
1037 	S390_lowcore.cpu_nr = 0;
1038 	S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
1039 	S390_lowcore.spinlock_index = 0;
1040 }
1041 
1042 /*
1043  * The frequency of the profiling timer can be changed
1044  * by writing a multiplier value into /proc/profile.
1045  *
1046  * Usually you want to run this on all CPUs ;)
1047  */
1048 int setup_profiling_timer(unsigned int multiplier)
1049 {
1050 	return 0;
1051 }
1052 
1053 static ssize_t cpu_configure_show(struct device *dev,
1054 				  struct device_attribute *attr, char *buf)
1055 {
1056 	ssize_t count;
1057 
1058 	mutex_lock(&smp_cpu_state_mutex);
1059 	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
1060 	mutex_unlock(&smp_cpu_state_mutex);
1061 	return count;
1062 }
1063 
1064 static ssize_t cpu_configure_store(struct device *dev,
1065 				   struct device_attribute *attr,
1066 				   const char *buf, size_t count)
1067 {
1068 	struct pcpu *pcpu;
1069 	int cpu, val, rc, i;
1070 	char delim;
1071 
1072 	if (sscanf(buf, "%d %c", &val, &delim) != 1)
1073 		return -EINVAL;
1074 	if (val != 0 && val != 1)
1075 		return -EINVAL;
1076 	cpus_read_lock();
1077 	mutex_lock(&smp_cpu_state_mutex);
1078 	rc = -EBUSY;
1079 	/* disallow configuration changes of online cpus and cpu 0 */
1080 	cpu = dev->id;
1081 	cpu = smp_get_base_cpu(cpu);
1082 	if (cpu == 0)
1083 		goto out;
1084 	for (i = 0; i <= smp_cpu_mtid; i++)
1085 		if (cpu_online(cpu + i))
1086 			goto out;
1087 	pcpu = pcpu_devices + cpu;
1088 	rc = 0;
1089 	switch (val) {
1090 	case 0:
1091 		if (pcpu->state != CPU_STATE_CONFIGURED)
1092 			break;
1093 		rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
1094 		if (rc)
1095 			break;
1096 		for (i = 0; i <= smp_cpu_mtid; i++) {
1097 			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
1098 				continue;
1099 			pcpu[i].state = CPU_STATE_STANDBY;
1100 			smp_cpu_set_polarization(cpu + i,
1101 						 POLARIZATION_UNKNOWN);
1102 		}
1103 		topology_expect_change();
1104 		break;
1105 	case 1:
1106 		if (pcpu->state != CPU_STATE_STANDBY)
1107 			break;
1108 		rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
1109 		if (rc)
1110 			break;
1111 		for (i = 0; i <= smp_cpu_mtid; i++) {
1112 			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
1113 				continue;
1114 			pcpu[i].state = CPU_STATE_CONFIGURED;
1115 			smp_cpu_set_polarization(cpu + i,
1116 						 POLARIZATION_UNKNOWN);
1117 		}
1118 		topology_expect_change();
1119 		break;
1120 	default:
1121 		break;
1122 	}
1123 out:
1124 	mutex_unlock(&smp_cpu_state_mutex);
1125 	cpus_read_unlock();
1126 	return rc ? rc : count;
1127 }
1128 static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
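/*
 * This attribute backs the /sys/devices/system/cpu/cpuN/configure file:
 * writing 1 asks the SCLP to configure a standby core, writing 0
 * deconfigures a configured one. The operation always applies to the
 * whole core (all of its SMT threads), is refused while any of those
 * threads is online, and is never allowed for the core that contains
 * CPU 0.
 */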
1129 
1130 static ssize_t show_cpu_address(struct device *dev,
1131 				struct device_attribute *attr, char *buf)
1132 {
1133 	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
1134 }
1135 static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);
1136 
1137 static struct attribute *cpu_common_attrs[] = {
1138 	&dev_attr_configure.attr,
1139 	&dev_attr_address.attr,
1140 	NULL,
1141 };
1142 
1143 static struct attribute_group cpu_common_attr_group = {
1144 	.attrs = cpu_common_attrs,
1145 };
1146 
1147 static struct attribute *cpu_online_attrs[] = {
1148 	&dev_attr_idle_count.attr,
1149 	&dev_attr_idle_time_us.attr,
1150 	NULL,
1151 };
1152 
1153 static struct attribute_group cpu_online_attr_group = {
1154 	.attrs = cpu_online_attrs,
1155 };
1156 
1157 static int smp_cpu_online(unsigned int cpu)
1158 {
1159 	struct device *s = &per_cpu(cpu_device, cpu)->dev;
1160 
1161 	return sysfs_create_group(&s->kobj, &cpu_online_attr_group);
1162 }
1163 
1164 static int smp_cpu_pre_down(unsigned int cpu)
1165 {
1166 	struct device *s = &per_cpu(cpu_device, cpu)->dev;
1167 
1168 	sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
1169 	return 0;
1170 }
1171 
1172 static int smp_add_present_cpu(int cpu)
1173 {
1174 	struct device *s;
1175 	struct cpu *c;
1176 	int rc;
1177 
1178 	c = kzalloc(sizeof(*c), GFP_KERNEL);
1179 	if (!c)
1180 		return -ENOMEM;
1181 	per_cpu(cpu_device, cpu) = c;
1182 	s = &c->dev;
1183 	c->hotpluggable = 1;
1184 	rc = register_cpu(c, cpu);
1185 	if (rc)
1186 		goto out;
1187 	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
1188 	if (rc)
1189 		goto out_cpu;
1190 	rc = topology_cpu_init(c);
1191 	if (rc)
1192 		goto out_topology;
1193 	return 0;
1194 
1195 out_topology:
1196 	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
1197 out_cpu:
1198 	unregister_cpu(c);
1199 out:
1200 	return rc;
1201 }
1202 
1203 int __ref smp_rescan_cpus(void)
1204 {
1205 	struct sclp_core_info *info;
1206 	int nr;
1207 
1208 	info = kzalloc(sizeof(*info), GFP_KERNEL);
1209 	if (!info)
1210 		return -ENOMEM;
1211 	smp_get_core_info(info, 0);
1212 	nr = __smp_rescan_cpus(info, false);
1213 	kfree(info);
1214 	if (nr)
1215 		topology_schedule_update();
1216 	return 0;
1217 }
1218 
1219 static ssize_t __ref rescan_store(struct device *dev,
1220 				  struct device_attribute *attr,
1221 				  const char *buf,
1222 				  size_t count)
1223 {
1224 	int rc;
1225 
1226 	rc = lock_device_hotplug_sysfs();
1227 	if (rc)
1228 		return rc;
1229 	rc = smp_rescan_cpus();
1230 	unlock_device_hotplug();
1231 	return rc ? rc : count;
1232 }
1233 static DEVICE_ATTR_WO(rescan);
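/*
 * The write-only rescan attribute ends up as
 * /sys/devices/system/cpu/rescan; writing anything to it triggers
 * smp_rescan_cpus(), which re-reads the core information from the SCLP
 * and registers cores that appeared since boot as new present CPUs.
 */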
1234 
1235 static int __init s390_smp_init(void)
1236 {
1237 	struct device *dev_root;
1238 	int cpu, rc = 0;
1239 
1240 	dev_root = bus_get_dev_root(&cpu_subsys);
1241 	if (dev_root) {
1242 		rc = device_create_file(dev_root, &dev_attr_rescan);
1243 		put_device(dev_root);
1244 		if (rc)
1245 			return rc;
1246 	}
1247 
1248 	for_each_present_cpu(cpu) {
1249 		rc = smp_add_present_cpu(cpu);
1250 		if (rc)
1251 			goto out;
1252 	}
1253 
1254 	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "s390/smp:online",
1255 			       smp_cpu_online, smp_cpu_pre_down);
1256 	rc = rc <= 0 ? rc : 0;
1257 out:
1258 	return rc;
1259 }
1260 subsys_initcall(s390_smp_init);
1261 
1262 static __always_inline void set_new_lowcore(struct lowcore *lc)
1263 {
1264 	union register_pair dst, src;
1265 	u32 pfx;
1266 
1267 	src.even = (unsigned long) &S390_lowcore;
1268 	src.odd  = sizeof(S390_lowcore);
1269 	dst.even = (unsigned long) lc;
1270 	dst.odd  = sizeof(*lc);
1271 	pfx = __pa(lc);
1272 
1273 	asm volatile(
1274 		"	mvcl	%[dst],%[src]\n"
1275 		"	spx	%[pfx]\n"
1276 		: [dst] "+&d" (dst.pair), [src] "+&d" (src.pair)
1277 		: [pfx] "Q" (pfx)
1278 		: "memory", "cc");
1279 }
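/*
 * set_new_lowcore() copies the currently active lowcore into the freshly
 * allocated one with a single mvcl and then issues "spx" to load the new
 * prefix, i.e. the area this CPU's low-memory references are redirected
 * to from then on. It is __always_inline, presumably so that no
 * out-of-line call happens while the prefix (and with it the view of low
 * memory) is being switched.
 */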
1280 
1281 int __init smp_reinit_ipl_cpu(void)
1282 {
1283 	unsigned long async_stack, nodat_stack, mcck_stack;
1284 	struct lowcore *lc, *lc_ipl;
1285 	unsigned long flags, cr0;
1286 	u64 mcesad;
1287 
1288 	lc_ipl = lowcore_ptr[0];
1289 	lc = (struct lowcore *)	__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
1290 	nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
1291 	async_stack = stack_alloc();
1292 	mcck_stack = stack_alloc();
1293 	if (!lc || !nodat_stack || !async_stack || !mcck_stack || nmi_alloc_mcesa(&mcesad))
1294 		panic("Couldn't allocate memory");
1295 
1296 	local_irq_save(flags);
1297 	local_mcck_disable();
1298 	set_new_lowcore(lc);
1299 	S390_lowcore.nodat_stack = nodat_stack + STACK_INIT_OFFSET;
1300 	S390_lowcore.async_stack = async_stack + STACK_INIT_OFFSET;
1301 	S390_lowcore.mcck_stack = mcck_stack + STACK_INIT_OFFSET;
1302 	__ctl_store(cr0, 0, 0);
1303 	__ctl_clear_bit(0, 28); /* disable lowcore protection */
1304 	S390_lowcore.mcesad = mcesad;
1305 	__ctl_load(cr0, 0, 0);
1306 	if (abs_lowcore_map(0, lc, false))
1307 		panic("Couldn't remap absolute lowcore");
1308 	lowcore_ptr[0] = lc;
1309 	local_mcck_enable();
1310 	local_irq_restore(flags);
1311 
1312 	memblock_free_late(__pa(lc_ipl->mcck_stack - STACK_INIT_OFFSET), THREAD_SIZE);
1313 	memblock_free_late(__pa(lc_ipl->async_stack - STACK_INIT_OFFSET), THREAD_SIZE);
1314 	memblock_free_late(__pa(lc_ipl->nodat_stack - STACK_INIT_OFFSET), THREAD_SIZE);
1315 	memblock_free_late(__pa(lc_ipl), sizeof(*lc_ipl));
1316 	return 0;
1317 }
1318