xref: /linux/arch/s390/kernel/nmi.c (revision 98587c2d894c34c9af5cd84ca169e1cd493aa692)
1a17ae4c3SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2f5daba1dSHeiko Carstens /*
3f5daba1dSHeiko Carstens  *   Machine check handler
4f5daba1dSHeiko Carstens  *
5f5daba1dSHeiko Carstens  *    Copyright IBM Corp. 2000, 2009
6f5daba1dSHeiko Carstens  *    Author(s): Ingo Adlung <adlung@de.ibm.com>,
7f5daba1dSHeiko Carstens  *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
8f5daba1dSHeiko Carstens  *		 Cornelia Huck <cornelia.huck@de.ibm.com>,
9f5daba1dSHeiko Carstens  *		 Heiko Carstens <heiko.carstens@de.ibm.com>,
10f5daba1dSHeiko Carstens  */
11f5daba1dSHeiko Carstens 
12052ff461SHeiko Carstens #include <linux/kernel_stat.h>
13f5daba1dSHeiko Carstens #include <linux/init.h>
14f5daba1dSHeiko Carstens #include <linux/errno.h>
1581f64b87SHeiko Carstens #include <linux/hardirq.h>
166c81511cSMartin Schwidefsky #include <linux/log2.h>
1700a8f886SMartin Schwidefsky #include <linux/kprobes.h>
18514c6032SRandy Dunlap #include <linux/kmemleak.h>
19f5daba1dSHeiko Carstens #include <linux/time.h>
203f07c014SIngo Molnar #include <linux/module.h>
213f07c014SIngo Molnar #include <linux/sched/signal.h>
223f07c014SIngo Molnar 
233994a52bSPaul Gortmaker #include <linux/export.h>
24f5daba1dSHeiko Carstens #include <asm/lowcore.h>
25f5daba1dSHeiko Carstens #include <asm/smp.h>
26fd5ada04SMartin Schwidefsky #include <asm/stp.h>
2776d4e00aSMartin Schwidefsky #include <asm/cputime.h>
28f5daba1dSHeiko Carstens #include <asm/nmi.h>
29f5daba1dSHeiko Carstens #include <asm/crw.h>
3080703617SMartin Schwidefsky #include <asm/switch_to.h>
31cad49cfcSHeiko Carstens #include <asm/ctl_reg.h>
32c929500dSQingFeng Hao #include <asm/asm-offsets.h>
33da72ca4dSQingFeng Hao #include <linux/kvm_host.h>
34f5daba1dSHeiko Carstens 
35f5daba1dSHeiko Carstens struct mcck_struct {
3636324963SHeiko Carstens 	unsigned int kill_task : 1;
3736324963SHeiko Carstens 	unsigned int channel_report : 1;
3836324963SHeiko Carstens 	unsigned int warning : 1;
3929b0a825SHeiko Carstens 	unsigned int stp_queue : 1;
40dc6e1555SHeiko Carstens 	unsigned long mcck_code;
41f5daba1dSHeiko Carstens };
42f5daba1dSHeiko Carstens 
43f5daba1dSHeiko Carstens static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck);
446c81511cSMartin Schwidefsky static struct kmem_cache *mcesa_cache;
456c81511cSMartin Schwidefsky static unsigned long mcesa_origin_lc;
466c81511cSMartin Schwidefsky 
476c81511cSMartin Schwidefsky static inline int nmi_needs_mcesa(void)
486c81511cSMartin Schwidefsky {
496c81511cSMartin Schwidefsky 	return MACHINE_HAS_VX || MACHINE_HAS_GS;
506c81511cSMartin Schwidefsky }
516c81511cSMartin Schwidefsky 
526c81511cSMartin Schwidefsky static inline unsigned long nmi_get_mcesa_size(void)
536c81511cSMartin Schwidefsky {
546c81511cSMartin Schwidefsky 	if (MACHINE_HAS_GS)
556c81511cSMartin Schwidefsky 		return MCESA_MAX_SIZE;
566c81511cSMartin Schwidefsky 	return MCESA_MIN_SIZE;
576c81511cSMartin Schwidefsky }
586c81511cSMartin Schwidefsky 
596c81511cSMartin Schwidefsky /*
606c81511cSMartin Schwidefsky  * The initial machine check extended save area for the boot CPU.
616c81511cSMartin Schwidefsky  * It will be replaced by nmi_init() with an allocated structure.
626c81511cSMartin Schwidefsky  * The structure is required for machine check happening early in
636c81511cSMartin Schwidefsky  * the boot process.
646c81511cSMartin Schwidefsky  */
656c81511cSMartin Schwidefsky static struct mcesa boot_mcesa __initdata __aligned(MCESA_MAX_SIZE);
666c81511cSMartin Schwidefsky 
676c81511cSMartin Schwidefsky void __init nmi_alloc_boot_cpu(struct lowcore *lc)
686c81511cSMartin Schwidefsky {
696c81511cSMartin Schwidefsky 	if (!nmi_needs_mcesa())
706c81511cSMartin Schwidefsky 		return;
716c81511cSMartin Schwidefsky 	lc->mcesad = (unsigned long) &boot_mcesa;
726c81511cSMartin Schwidefsky 	if (MACHINE_HAS_GS)
736c81511cSMartin Schwidefsky 		lc->mcesad |= ilog2(MCESA_MAX_SIZE);
746c81511cSMartin Schwidefsky }
756c81511cSMartin Schwidefsky 
/*
 * Allocate the final machine check extended save area for the boot CPU
 * and replace the static boot_mcesa with it. Also sets up the slab
 * cache used by nmi_alloc_per_cpu() for all other CPUs.
 * Panics on allocation failure since machine check handling would be
 * broken without a save area.
 */
static int __init nmi_init(void)
{
	unsigned long origin, cr0, size;

	if (!nmi_needs_mcesa())
		return 0;
	size = nmi_get_mcesa_size();
	/* Encode the size (log2) into the low bits only for the large layout */
	if (size > MCESA_MIN_SIZE)
		mcesa_origin_lc = ilog2(size);
	/* create slab cache for the machine-check-extended-save-areas */
	mcesa_cache = kmem_cache_create("nmi_save_areas", size, size, 0, NULL);
	if (!mcesa_cache)
		panic("Couldn't create nmi save area cache");
	origin = (unsigned long) kmem_cache_alloc(mcesa_cache, GFP_KERNEL);
	if (!origin)
		panic("Couldn't allocate nmi save area");
	/* The pointer is stored with mcesa_bits ORed in */
	kmemleak_not_leak((void *) origin);
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28); /* disable lowcore protection */
	/* Replace boot_mcesa on the boot CPU */
	S390_lowcore.mcesad = origin | mcesa_origin_lc;
	__ctl_load(cr0, 0, 0);	/* restore cr0, re-enabling lowcore protection */
	return 0;
}
early_initcall(nmi_init);
1026c81511cSMartin Schwidefsky 
1036c81511cSMartin Schwidefsky int nmi_alloc_per_cpu(struct lowcore *lc)
1046c81511cSMartin Schwidefsky {
1056c81511cSMartin Schwidefsky 	unsigned long origin;
1066c81511cSMartin Schwidefsky 
1076c81511cSMartin Schwidefsky 	if (!nmi_needs_mcesa())
1086c81511cSMartin Schwidefsky 		return 0;
1096c81511cSMartin Schwidefsky 	origin = (unsigned long) kmem_cache_alloc(mcesa_cache, GFP_KERNEL);
1106c81511cSMartin Schwidefsky 	if (!origin)
1116c81511cSMartin Schwidefsky 		return -ENOMEM;
1126c81511cSMartin Schwidefsky 	/* The pointer is stored with mcesa_bits ORed in */
1136c81511cSMartin Schwidefsky 	kmemleak_not_leak((void *) origin);
1146c81511cSMartin Schwidefsky 	lc->mcesad = origin | mcesa_origin_lc;
1156c81511cSMartin Schwidefsky 	return 0;
1166c81511cSMartin Schwidefsky }
1176c81511cSMartin Schwidefsky 
1186c81511cSMartin Schwidefsky void nmi_free_per_cpu(struct lowcore *lc)
1196c81511cSMartin Schwidefsky {
1206c81511cSMartin Schwidefsky 	if (!nmi_needs_mcesa())
1216c81511cSMartin Schwidefsky 		return;
1226c81511cSMartin Schwidefsky 	kmem_cache_free(mcesa_cache, (void *)(lc->mcesad & MCESA_ORIGIN_MASK));
1236c81511cSMartin Schwidefsky }
124f5daba1dSHeiko Carstens 
12500a8f886SMartin Schwidefsky static notrace void s390_handle_damage(void)
126f5daba1dSHeiko Carstens {
12700a8f886SMartin Schwidefsky 	smp_emergency_stop();
128*98587c2dSMartin Schwidefsky 	disabled_wait();
129f5daba1dSHeiko Carstens 	while (1);
130f5daba1dSHeiko Carstens }
13100a8f886SMartin Schwidefsky NOKPROBE_SYMBOL(s390_handle_damage);
132f5daba1dSHeiko Carstens 
/*
 * Main machine check handler function. Will be called with interrupts enabled
 * or disabled and machine checks enabled or disabled.
 *
 * Consumes the per-CPU accumulated mcck state (filled in by
 * s390_do_machine_check) and performs the deferred work: channel report
 * handling, warning notification, STP work queueing and task termination.
 */
void s390_handle_mcck(void)
{
	unsigned long flags;
	struct mcck_struct mcck;

	/*
	 * Disable machine checks and get the current state of accumulated
	 * machine checks. Afterwards delete the old state and enable machine
	 * checks again.
	 */
	local_irq_save(flags);
	local_mcck_disable();
	/* Snapshot and reset atomically w.r.t. further machine checks */
	mcck = *this_cpu_ptr(&cpu_mcck);
	memset(this_cpu_ptr(&cpu_mcck), 0, sizeof(mcck));
	clear_cpu_flag(CIF_MCCK_PENDING);
	local_mcck_enable();
	local_irq_restore(flags);

	if (mcck.channel_report)
		crw_handle_channel_report();
	/*
	 * A warning may remain for a prolonged period on the bare iron.
	 * (actually until the machine is powered off, or the problem is gone)
	 * So we just stop listening for the WARNING MCH and avoid continuously
	 * being interrupted.  One caveat is however, that we must do this per
	 * processor and cannot use the smp version of ctl_clear_bit().
	 * On VM we only get one interrupt per virtually presented machinecheck.
	 * Though one suffices, we may get one interrupt per (virtual) cpu.
	 */
	if (mcck.warning) {	/* WARNING pending ? */
		static int mchchk_wng_posted = 0;

		/* Use single cpu clear, as we cannot handle smp here. */
		__ctl_clear_bit(14, 24);	/* Disable WARNING MCH */
		/* xchg guarantees SIGPWR is posted only once system-wide */
		if (xchg(&mchchk_wng_posted, 1) == 0)
			kill_cad_pid(SIGPWR, 1);
	}
	if (mcck.stp_queue)
		stp_queue_work();
	if (mcck.kill_task) {
		/* Register content was lost for this task -> terminate it */
		local_irq_enable();
		printk(KERN_EMERG "mcck: Terminating task because of machine "
		       "malfunction (code 0x%016lx).\n", mcck.mcck_code);
		printk(KERN_EMERG "mcck: task: %s, pid: %d.\n",
		       current->comm, current->pid);
		do_exit(SIGSEGV);
	}
}
EXPORT_SYMBOL_GPL(s390_handle_mcck);
186f5daba1dSHeiko Carstens 
/*
 * Validate the register validity bits of the machine check interruption
 * code (mci). Conditions that make the kernel itself unreliable stop the
 * machine via s390_handle_damage(); conditions that only affect the
 * current user process mark it for termination.
 *
 * returns 0 if all required registers are available
 * returns 1 otherwise (caller must terminate the current task)
 */
static int notrace s390_check_registers(union mci mci, int umode)
{
	union ctlreg2 cr2;
	int kill_task;

	kill_task = 0;

	if (!mci.gr) {
		/*
		 * General purpose registers couldn't be restored and have
		 * unknown contents. Stop system or terminate process.
		 */
		if (!umode)
			s390_handle_damage();
		kill_task = 1;
	}
	/* Check control registers */
	if (!mci.cr) {
		/*
		 * Control registers have unknown contents.
		 * Can't recover and therefore stopping machine.
		 */
		s390_handle_damage();
	}
	if (!mci.fp) {
		/*
		 * Floating point registers can't be restored. If the
		 * kernel currently uses floating point registers the
		 * system is stopped. If the process has its floating
		 * pointer registers loaded it is terminated.
		 */
		if (S390_lowcore.fpu_flags & KERNEL_VXR_V0V7)
			s390_handle_damage();
		if (!test_cpu_flag(CIF_FPU))
			kill_task = 1;
	}
	if (!mci.fc) {
		/*
		 * Floating point control register can't be restored.
		 * If the kernel currently uses the floating pointer
		 * registers and needs the FPC register the system is
		 * stopped. If the process has its floating pointer
		 * registers loaded it is terminated.
		 */
		if (S390_lowcore.fpu_flags & KERNEL_FPC)
			s390_handle_damage();
		if (!test_cpu_flag(CIF_FPU))
			kill_task = 1;
	}

	if (MACHINE_HAS_VX) {
		if (!mci.vr) {
			/*
			 * Vector registers can't be restored. If the kernel
			 * currently uses vector registers the system is
			 * stopped. If the process has its vector registers
			 * loaded it is terminated.
			 */
			if (S390_lowcore.fpu_flags & KERNEL_VXR)
				s390_handle_damage();
			if (!test_cpu_flag(CIF_FPU))
				kill_task = 1;
		}
	}
	/* Check if access registers are valid */
	if (!mci.ar) {
		/*
		 * Access registers have unknown contents.
		 * Terminating task.
		 */
		kill_task = 1;
	}
	/* Check guarded storage registers (only if enabled via cr2.gse) */
	cr2.val = S390_lowcore.cregs_save_area[2];
	if (cr2.gse) {
		if (!mci.gs) {
			/*
			 * Guarded storage register can't be restored and
			 * the current processes uses guarded storage.
			 * It has to be terminated.
			 */
			kill_task = 1;
		}
	}
	/* Check if old PSW is valid */
	if (!mci.wp) {
		/*
		 * Can't tell if we come from user or kernel mode
		 * -> stopping machine.
		 */
		s390_handle_damage();
	}
	/* Check for invalid kernel instruction address */
	if (!mci.ia && !umode) {
		/*
		 * The instruction address got lost while running
		 * in the kernel -> stopping machine.
		 */
		s390_handle_damage();
	}

	/* Storage logical validity / PSW mask+key / instruction address */
	if (!mci.ms || !mci.pm || !mci.ia)
		kill_task = 1;

	return kill_task;
}
NOKPROBE_SYMBOL(s390_check_registers);
298f5daba1dSHeiko Carstens 
/*
 * Backup the guest's machine check info to its description block
 *
 * Called when a machine check hits while a guest was running (SIE).
 * Copies the volatile machine check information from the lowcore into
 * the guest's sie_page so KVM can later reinject the machine check.
 */
static void notrace s390_backup_mcck_info(struct pt_regs *regs)
{
	struct mcck_volatile_info *mcck_backup;
	struct sie_page *sie_page;

	/* r14 contains the sie block, which was set in sie64a */
	struct kvm_s390_sie_block *sie_block =
			(struct kvm_s390_sie_block *) regs->gprs[14];

	if (sie_block == NULL)
		/* Something's seriously wrong, stop system. */
		s390_handle_damage();

	sie_page = container_of(sie_block, struct sie_page, sie_block);
	mcck_backup = &sie_page->mcck_info;
	/*
	 * Strip channel-report-pending and external-damage bits: those
	 * refer to host conditions and are not forwarded to the guest.
	 */
	mcck_backup->mcic = S390_lowcore.mcck_interruption_code &
				~(MCCK_CODE_CP | MCCK_CODE_EXT_DAMAGE);
	mcck_backup->ext_damage_code = S390_lowcore.external_damage_code;
	mcck_backup->failing_storage_address
			= S390_lowcore.failing_storage_address;
}
NOKPROBE_SYMBOL(s390_backup_mcck_info);
325f5daba1dSHeiko Carstens #define MAX_IPD_COUNT	29
326f5daba1dSHeiko Carstens #define MAX_IPD_TIME	(5 * 60 * USEC_PER_SEC) /* 5 minutes */
327f5daba1dSHeiko Carstens 
328f5daba1dSHeiko Carstens #define ED_STP_ISLAND	6	/* External damage STP island check */
329f5daba1dSHeiko Carstens #define ED_STP_SYNC	7	/* External damage STP sync check */
330f5daba1dSHeiko Carstens 
331c929500dSQingFeng Hao #define MCCK_CODE_NO_GUEST	(MCCK_CODE_CP | MCCK_CODE_EXT_DAMAGE)
332c929500dSQingFeng Hao 
/*
 * machine check handler.
 *
 * First-level machine check handler, entered via the low-level entry
 * code with machine checks disabled. Decides per interruption code bit
 * whether to stop the machine, terminate the task, reinject into a KVM
 * guest, or accumulate work in the per-CPU mcck state for later
 * processing by s390_handle_mcck().
 */
void notrace s390_do_machine_check(struct pt_regs *regs)
{
	static int ipd_count;
	static DEFINE_SPINLOCK(ipd_lock);
	static unsigned long long last_ipd;
	struct mcck_struct *mcck;
	unsigned long long tmp;
	union mci mci;
	unsigned long mcck_dam_code;

	nmi_enter();
	inc_irq_stat(NMI_NMI);
	mci.val = S390_lowcore.mcck_interruption_code;
	mcck = this_cpu_ptr(&cpu_mcck);

	if (mci.sd) {
		/* System damage -> stopping machine */
		s390_handle_damage();
	}

	/*
	 * Reinject the instruction processing damages' machine checks
	 * including Delayed Access Exception into the guest
	 * instead of damaging the host if they happen in the guest.
	 */
	if (mci.pd && !test_cpu_flag(CIF_MCCK_GUEST)) {
		if (mci.b) {
			/* Processing backup -> verify if we can survive this */
			u64 z_mcic, o_mcic, t_mcic;
			/* z_mcic: bits that must be zero to survive */
			z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29);
			/* o_mcic: bits that must all be one to survive */
			o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
				  1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
				  1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 |
				  1ULL<<16);
			t_mcic = mci.val;

			if (((t_mcic & z_mcic) != 0) ||
			    ((t_mcic & o_mcic) != o_mcic)) {
				s390_handle_damage();
			}

			/*
			 * Nullifying exigent condition, therefore we might
			 * retry this instruction.
			 */
			spin_lock(&ipd_lock);
			tmp = get_tod_clock();
			/*
			 * Throttle instruction-processing-damage retries:
			 * count IPDs within a MAX_IPD_TIME window; too many
			 * in a row means the machine is broken.
			 */
			if (((tmp - last_ipd) >> 12) < MAX_IPD_TIME)
				ipd_count++;
			else
				ipd_count = 1;
			last_ipd = tmp;
			if (ipd_count == MAX_IPD_COUNT)
				s390_handle_damage();
			spin_unlock(&ipd_lock);
		} else {
			/* Processing damage -> stopping machine */
			s390_handle_damage();
		}
	}
	if (s390_check_registers(mci, user_mode(regs))) {
		/*
		 * Couldn't restore all register contents for the
		 * user space process -> mark task for termination.
		 */
		mcck->kill_task = 1;
		mcck->mcck_code = mci.val;
		set_cpu_flag(CIF_MCCK_PENDING);
	}

	/*
	 * Backup the machine check's info if it happens when the guest
	 * is running.
	 */
	if (test_cpu_flag(CIF_MCCK_GUEST))
		s390_backup_mcck_info(regs);

	if (mci.cd) {
		/* Timing facility damage */
		s390_handle_damage();
	}
	if (mci.ed && mci.ec) {
		/* External damage */
		if (S390_lowcore.external_damage_code & (1U << ED_STP_SYNC))
			mcck->stp_queue |= stp_sync_check();
		if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND))
			mcck->stp_queue |= stp_island_check();
		if (mcck->stp_queue)
			set_cpu_flag(CIF_MCCK_PENDING);
	}

	/*
	 * Reinject storage related machine checks into the guest if they
	 * happen when the guest is running.
	 */
	if (!test_cpu_flag(CIF_MCCK_GUEST)) {
		if (mci.se)
			/* Storage error uncorrected */
			s390_handle_damage();
		if (mci.ke)
			/* Storage key-error uncorrected */
			s390_handle_damage();
		if (mci.ds && mci.fa)
			/* Storage degradation */
			s390_handle_damage();
	}
	if (mci.cp) {
		/* Channel report word pending */
		mcck->channel_report = 1;
		set_cpu_flag(CIF_MCCK_PENDING);
	}
	if (mci.w) {
		/* Warning pending */
		mcck->warning = 1;
		set_cpu_flag(CIF_MCCK_PENDING);
	}

	/*
	 * If there are only Channel Report Pending and External Damage
	 * machine checks, they will not be reinjected into the guest
	 * because they refer to host conditions only.
	 */
	mcck_dam_code = (mci.val & MCIC_SUBCLASS_MASK);
	if (test_cpu_flag(CIF_MCCK_GUEST) &&
	(mcck_dam_code & MCCK_CODE_NO_GUEST) != mcck_dam_code) {
		/* Set exit reason code for host's later handling */
		*((long *)(regs->gprs[15] + __SF_SIE_REASON)) = -EINTR;
	}
	clear_cpu_flag(CIF_MCCK_GUEST);
	nmi_exit();
}
NOKPROBE_SYMBOL(s390_do_machine_check);
468f5daba1dSHeiko Carstens 
469f5daba1dSHeiko Carstens static int __init machine_check_init(void)
470f5daba1dSHeiko Carstens {
471f5daba1dSHeiko Carstens 	ctl_set_bit(14, 25);	/* enable external damage MCH */
472f5daba1dSHeiko Carstens 	ctl_set_bit(14, 27);	/* enable system recovery MCH */
473f5daba1dSHeiko Carstens 	ctl_set_bit(14, 24);	/* enable warning MCH */
474f5daba1dSHeiko Carstens 	return 0;
475f5daba1dSHeiko Carstens }
47624d05ff8SHeiko Carstens early_initcall(machine_check_init);
477