/* xref: /linux/arch/s390/kernel/nmi.c (revision bc46b7cbc58c4cb562b6a45a1fbc7b8e7b23df58) */
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *   Machine check handler
4  *
5  *    Copyright IBM Corp. 2000, 2009
6  *    Author(s): Ingo Adlung <adlung@de.ibm.com>,
7  *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
8  *		 Cornelia Huck <cornelia.huck@de.ibm.com>,
9  */
10 
11 #include <linux/kernel_stat.h>
12 #include <linux/utsname.h>
13 #include <linux/cpufeature.h>
14 #include <linux/init.h>
15 #include <linux/errno.h>
16 #include <linux/entry-common.h>
17 #include <linux/hardirq.h>
18 #include <linux/log2.h>
19 #include <linux/kprobes.h>
20 #include <linux/kmemleak.h>
21 #include <linux/time.h>
22 #include <linux/module.h>
23 #include <linux/sched/signal.h>
24 #include <linux/kvm_host.h>
25 #include <asm/lowcore.h>
26 #include <asm/ctlreg.h>
27 #include <asm/fpu.h>
28 #include <asm/smp.h>
29 #include <asm/stp.h>
30 #include <asm/cputime.h>
31 #include <asm/nmi.h>
32 #include <asm/crw.h>
33 #include <asm/asm-offsets.h>
34 #include <asm/pai.h>
35 #include <asm/vtime.h>
36 
/*
 * Per-CPU accumulator for machine check conditions. Filled in by the
 * machine check handler s390_do_machine_check() and consumed later,
 * in a normal kernel context, by s390_handle_mcck().
 */
struct mcck_struct {
	unsigned int kill_task : 1;	/* terminate current (user) task */
	unsigned int channel_report : 1;	/* channel report word pending */
	unsigned int warning : 1;	/* WARNING machine check pending */
	unsigned int stp_queue : 1;	/* STP sync/island work requested */
	unsigned long mcck_code;	/* machine check interruption code */
};

static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck);
46 
/*
 * A machine check extended save area (MCESA) is required when either
 * the vector facility or the guarded storage facility is available.
 */
static inline int nmi_needs_mcesa(void)
{
	if (cpu_has_vx())
		return 1;
	return cpu_has_gs();
}
51 
/*
 * The initial machine check extended save area for the boot CPU.
 * It will be replaced on the boot CPU reinit with an allocated
 * structure (see nmi_alloc_mcesa()). The structure is required for
 * machine checks happening early in the boot process.
 */
static struct mcesa boot_mcesa __aligned(MCESA_MAX_SIZE);
59 
nmi_alloc_mcesa_early(u64 * mcesad)60 void __init nmi_alloc_mcesa_early(u64 *mcesad)
61 {
62 	if (!nmi_needs_mcesa())
63 		return;
64 	*mcesad = __pa(&boot_mcesa);
65 	if (cpu_has_gs())
66 		*mcesad |= ilog2(MCESA_MAX_SIZE);
67 }
68 
/*
 * Allocate a machine check extended save area for a CPU and store its
 * physical origin in *mcesad. When the guarded storage facility is
 * available the maximum size is used and log2 of the size is encoded
 * in the low bits of *mcesad.
 * Returns 0 on success, -ENOMEM if the allocation failed.
 */
int nmi_alloc_mcesa(u64 *mcesad)
{
	unsigned long size;
	void *origin;

	*mcesad = 0;
	if (!nmi_needs_mcesa())
		return 0;
	if (cpu_has_gs())
		size = MCESA_MAX_SIZE;
	else
		size = MCESA_MIN_SIZE;
	origin = kmalloc(size, GFP_KERNEL);
	if (!origin)
		return -ENOMEM;
	/* The pointer is stored with mcesa_bits ORed in */
	kmemleak_not_leak(origin);
	*mcesad = __pa(origin);
	if (cpu_has_gs())
		*mcesad |= ilog2(MCESA_MAX_SIZE);
	return 0;
}
88 
/*
 * Free a machine check extended save area previously set up with
 * nmi_alloc_mcesa(). The size bits ORed into *mcesad are masked off
 * to recover the allocation origin.
 */
void nmi_free_mcesa(u64 *mcesad)
{
	void *origin;

	if (!nmi_needs_mcesa())
		return;
	origin = __va(*mcesad & MCESA_ORIGIN_MASK);
	kfree(origin);
}
95 
/*
 * Copy the NUL terminated string @src to @dest and return a pointer
 * to the terminating NUL byte written to @dest, so that consecutive
 * calls append to the same buffer. Self-contained on purpose: used
 * on the machine check damage path instead of library helpers.
 */
static __always_inline char *nmi_puts(char *dest, const char *src)
{
	for (; *src; src++, dest++)
		*dest = *src;
	*dest = '\0';
	return dest;
}
103 
/*
 * Write the 16 character, zero padded, upper case hexadecimal
 * representation of @val to @dest (most significant nibble first)
 * and return a pointer to the terminating NUL byte.
 */
static __always_inline char *u64_to_hex(char *dest, u64 val)
{
	int shift, nib;

	for (shift = 60; shift >= 0; shift -= 4) {
		nib = (val >> shift) & 0xf;
		*dest++ = nib < 10 ? '0' + nib : 'A' + nib - 10;
	}
	*dest = '\0';
	return dest;
}
118 
/*
 * Print machine check information to the SCLP console. Called from
 * s390_handle_damage() just before the system is stopped, therefore
 * only the self-contained nmi_puts()/u64_to_hex() helpers and a
 * small on-stack buffer are used for formatting.
 */
static notrace void nmi_print_info(void)
{
	struct lowcore *lc = get_lowcore();
	char message[100];
	char *ptr;
	int i;

	/* Machine check interruption code from the lowcore */
	ptr = nmi_puts(message, "Unrecoverable machine check, code: ");
	ptr = u64_to_hex(ptr, lc->mcck_interruption_code);
	ptr = nmi_puts(ptr, "\n");
	sclp_emergency_printk(message);

	/* Kernel release string */
	ptr = nmi_puts(message, init_utsname()->release);
	ptr = nmi_puts(ptr, "\n");
	sclp_emergency_printk(message);

	/* Hardware description string */
	ptr = nmi_puts(message, arch_hw_string);
	ptr = nmi_puts(ptr, "\n");
	sclp_emergency_printk(message);

	/* Machine check old PSW and prefix (lowcore) address */
	ptr = nmi_puts(message, "PSW: ");
	ptr = u64_to_hex(ptr, lc->mcck_old_psw.mask);
	ptr = nmi_puts(ptr, " ");
	ptr = u64_to_hex(ptr, lc->mcck_old_psw.addr);
	ptr = nmi_puts(ptr, " PFX: ");
	ptr = u64_to_hex(ptr, (u64)get_lowcore());
	ptr = nmi_puts(ptr, "\n");
	sclp_emergency_printk(message);

	/* Last break, external damage code, failing storage address */
	ptr = nmi_puts(message, "LBA: ");
	ptr = u64_to_hex(ptr, lc->last_break_save_area);
	ptr = nmi_puts(ptr, " EDC: ");
	ptr = u64_to_hex(ptr, lc->external_damage_code);
	ptr = nmi_puts(ptr, " FSA: ");
	ptr = u64_to_hex(ptr, lc->failing_storage_address);
	ptr = nmi_puts(ptr, "\n");
	sclp_emergency_printk(message);

	/* Control registers from the lowcore save area, four per line */
	ptr = nmi_puts(message, "CRS:\n");
	sclp_emergency_printk(message);
	ptr = message;
	for (i = 0; i < 16; i++) {
		ptr = u64_to_hex(ptr, lc->cregs_save_area[i].val);
		ptr = nmi_puts(ptr, " ");
		if ((i + 1) % 4 == 0) {
			ptr = nmi_puts(ptr, "\n");
			sclp_emergency_printk(message);
			ptr = message;
		}
	}

	/* General purpose registers from the lowcore save area, four per line */
	ptr = nmi_puts(message, "GPRS:\n");
	sclp_emergency_printk(message);
	ptr = message;
	for (i = 0; i < 16; i++) {
		ptr = u64_to_hex(ptr, lc->gpregs_save_area[i]);
		ptr = nmi_puts(ptr, " ");
		if ((i + 1) % 4 == 0) {
			ptr = nmi_puts(ptr, "\n");
			sclp_emergency_printk(message);
			ptr = message;
		}
	}

	ptr = nmi_puts(message, "System stopped\n");
	sclp_emergency_printk(message);
}
186 
/*
 * Stop the machine after an unrecoverable machine check: stop all
 * other CPUs, reset the machine, emit diagnostic information via
 * SCLP, and finally enter a disabled wait. Never returns.
 */
static notrace void s390_handle_damage(void)
{
	struct lowcore *lc = get_lowcore();
	union ctlreg0 cr0, cr0_new;
	psw_t psw_save;

	smp_emergency_stop();
	diag_amode31_ops.diag308_reset();

	/*
	 * Disable low address protection and make machine check new PSW a
	 * disabled wait PSW. Any additional machine check cannot be handled.
	 */
	local_ctl_store(0, &cr0.reg);
	cr0_new = cr0;
	cr0_new.lap = 0;
	local_ctl_load(0, &cr0_new.reg);
	psw_save = lc->mcck_new_psw;
	psw_bits(lc->mcck_new_psw).io = 0;
	psw_bits(lc->mcck_new_psw).ext = 0;
	psw_bits(lc->mcck_new_psw).wait = 1;
	nmi_print_info();

	/*
	 * Restore machine check new PSW and control register 0 to original
	 * values. This makes possible system dump analysis easier.
	 */
	lc->mcck_new_psw = psw_save;
	local_ctl_load(0, &cr0.reg);
	disabled_wait();
	/* Not reached; guard against disabled_wait() falling through. */
	while (1);
}
NOKPROBE_SYMBOL(s390_handle_damage);
220 
/*
 * Main machine check handler function. Will be called with interrupts
 * disabled and machine checks enabled. Processes the per-CPU state
 * accumulated by s390_do_machine_check() in a context where printk,
 * signal delivery and workqueues may be used.
 */
void s390_handle_mcck(void)
{
	struct mcck_struct mcck;
	unsigned long mflags;

	/*
	 * Disable machine checks and get the current state of accumulated
	 * machine checks. Afterwards delete the old state and enable machine
	 * checks again.
	 */
	local_mcck_save(mflags);
	mcck = *this_cpu_ptr(&cpu_mcck);
	memset(this_cpu_ptr(&cpu_mcck), 0, sizeof(mcck));
	local_mcck_restore(mflags);

	if (mcck.channel_report)
		crw_handle_channel_report();
	/*
	 * A warning may remain for a prolonged period on the bare iron.
	 * (actually until the machine is powered off, or the problem is gone)
	 * So we just stop listening for the WARNING MCH and avoid continuously
	 * being interrupted.  One caveat is however, that we must do this per
	 * processor and cannot use the smp version of ctl_clear_bit().
	 * On VM we only get one interrupt per virtually presented machinecheck.
	 * Though one suffices, we may get one interrupt per (virtual) cpu.
	 */
	if (mcck.warning) {	/* WARNING pending ? */
		static int mchchk_wng_posted = 0;

		/* Use single cpu clear, as we cannot handle smp here. */
		local_ctl_clear_bit(14, CR14_WARNING_SUBMASK_BIT);
		/* Post SIGPWR only once, system wide. */
		if (xchg(&mchchk_wng_posted, 1) == 0)
			kill_cad_pid(SIGPWR, 1);
	}
	if (mcck.stp_queue)
		stp_queue_work();
	if (mcck.kill_task) {
		printk(KERN_EMERG "mcck: Terminating task because of machine "
		       "malfunction (code 0x%016lx).\n", mcck.mcck_code);
		printk(KERN_EMERG "mcck: task: %s, pid: %d.\n",
		       current->comm, current->pid);
		/* Killing init would leave the system unusable - panic instead. */
		if (is_global_init(current))
			panic("mcck: Attempting to kill init!\n");
		do_send_sig_info(SIGKILL, SEND_SIG_PRIV, current, PIDTYPE_PID);
	}
}
271 
/**
 * nmi_registers_valid - verify if registers are valid
 * @mci: machine check interruption code
 *
 * Inspect a machine check interruption code and verify if all required
 * registers are valid. For some registers the corresponding validity bit is
 * ignored and the registers are set to the expected value.
 * Returns true if all registers are valid, otherwise false.
 */
static bool notrace nmi_registers_valid(union mci mci)
{
	union ctlreg2 cr2;

	/*
	 * The getcpu vdso syscall reads the CPU number from the programmable
	 * field of the TOD clock. Disregard the TOD programmable register
	 * validity bit and load the CPU number into the TOD programmable field
	 * unconditionally.
	 */
	set_tod_programmable_field(raw_smp_processor_id());
	/*
	 * Set the clock comparator register to the next expected value.
	 */
	set_clock_comparator(get_lowcore()->clock_comparator);
	/* General purpose, floating point and FP control register validity */
	if (!mci.gr || !mci.fp || !mci.fc)
		return false;
	/*
	 * The vector validity must only be checked if not running a
	 * KVM guest. For KVM guests the machine check is forwarded by
	 * KVM and it is the responsibility of the guest to take
	 * appropriate actions. The host vector or FPU values have been
	 * saved by KVM and will be restored by KVM.
	 */
	if (!mci.vr && !test_cpu_flag(CIF_MCCK_GUEST))
		return false;
	/* Access register validity */
	if (!mci.ar)
		return false;
	/*
	 * Two cases for guarded storage registers:
	 * - machine check in kernel or userspace
	 * - machine check while running SIE (KVM guest)
	 * For kernel or userspace the userspace values of guarded storage
	 * control can not be recreated, the process must be terminated.
	 * For SIE the guest values of guarded storage can not be recreated.
	 * This is either due to a bug or due to GS being disabled in the
	 * guest. The guest will be notified by KVM code and the guests machine
	 * check handling must take care of this. The host values are saved by
	 * KVM and are not affected.
	 */
	cr2.reg = get_lowcore()->cregs_save_area[2];
	if (cr2.gse && !mci.gs && !test_cpu_flag(CIF_MCCK_GUEST))
		return false;
	/*
	 * Remaining required validity bits; if any of these is unset the
	 * register state cannot be fully restored.
	 */
	if (!mci.ms || !mci.pm || !mci.ia)
		return false;
	return true;
}
NOKPROBE_SYMBOL(nmi_registers_valid);
329 
/*
 * Backup the guest's machine check info to its description block
 * (the mcck_info field of the sie_page), so that it can later be
 * handled on the KVM side.
 */
static void notrace s390_backup_mcck_info(struct pt_regs *regs)
{
	struct mcck_volatile_info *mcck_backup;
	struct sie_page *sie_page;

	/* r14 contains the sie block, which was set in sie64a */
	struct kvm_s390_sie_block *sie_block = phys_to_virt(regs->gprs[14]);

	if (sie_block == NULL)
		/* Something's seriously wrong, stop system. */
		s390_handle_damage();

	sie_page = container_of(sie_block, struct sie_page, sie_block);
	mcck_backup = &sie_page->mcck_info;
	/* Strip the host-only subclasses (channel report, external damage) */
	mcck_backup->mcic = get_lowcore()->mcck_interruption_code &
				~(MCCK_CODE_CP | MCCK_CODE_EXT_DAMAGE);
	mcck_backup->ext_damage_code = get_lowcore()->external_damage_code;
	mcck_backup->failing_storage_address = get_lowcore()->failing_storage_address;
}
NOKPROBE_SYMBOL(s390_backup_mcck_info);
353 
/*
 * Give up and stop the machine if MAX_IPD_COUNT instruction processing
 * damage machine checks occur within a MAX_IPD_TIME window.
 */
#define MAX_IPD_COUNT	29
#define MAX_IPD_TIME	(5 * 60 * USEC_PER_SEC) /* 5 minutes */

#define ED_STP_ISLAND	6	/* External damage STP island check */
#define ED_STP_SYNC	7	/* External damage STP sync check */

/* Machine check subclasses that are not reinjected into a KVM guest */
#define MCCK_CODE_NO_GUEST	(MCCK_CODE_CP | MCCK_CODE_EXT_DAMAGE)
361 
/*
 * machine check handler. Entered from the machine check new PSW with
 * interrupts disabled. Collects state into the per-CPU cpu_mcck
 * structure and schedules s390_handle_mcck() for everything that
 * needs normal kernel context; unrecoverable conditions end up in
 * s390_handle_damage().
 */
void notrace s390_do_machine_check(struct pt_regs *regs)
{
	static int ipd_count;
	static DEFINE_SPINLOCK(ipd_lock);
	static unsigned long long last_ipd;
	struct lowcore *lc = get_lowcore();
	struct mcck_struct *mcck;
	unsigned long long tmp;
	irqentry_state_t irq_state;
	union mci mci;
	unsigned long mcck_dam_code;
	int mcck_pending = 0;

	irq_state = irqentry_nmi_enter(regs);

	if (user_mode(regs))
		update_timer_mcck();
	inc_irq_stat(NMI_NMI);
	mci.val = lc->mcck_interruption_code;
	mcck = this_cpu_ptr(&cpu_mcck);

	/*
	 * Reinject the instruction processing damages' machine checks
	 * including Delayed Access Exception into the guest
	 * instead of damaging the host if they happen in the guest.
	 */
	if (mci.pd && !test_cpu_flag(CIF_MCCK_GUEST)) {
		if (mci.b) {
			/* Processing backup -> verify if we can survive this */
			u64 z_mcic, o_mcic, t_mcic;

			/* Bits that must be zero in the interruption code */
			z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29);
			/* Bits that must all be set in the interruption code */
			o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
				  1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
				  1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 |
				  1ULL<<16);
			t_mcic = mci.val;

			if (((t_mcic & z_mcic) != 0) ||
			    ((t_mcic & o_mcic) != o_mcic)) {
				s390_handle_damage();
			}

			/*
			 * Nullifying exigent condition, therefore we might
			 * retry this instruction.
			 */
			spin_lock(&ipd_lock);
			tmp = get_tod_clock();
			/* TOD delta >> 12 yields microseconds (TOD bit 51 = 1us) */
			if (((tmp - last_ipd) >> 12) < MAX_IPD_TIME)
				ipd_count++;
			else
				ipd_count = 1;
			last_ipd = tmp;
			/* Too many retries within MAX_IPD_TIME -> give up */
			if (ipd_count == MAX_IPD_COUNT)
				s390_handle_damage();
			spin_unlock(&ipd_lock);
		} else {
			/* Processing damage -> stopping machine */
			s390_handle_damage();
		}
	}
	if (!nmi_registers_valid(mci)) {
		/* Invalid registers in kernel context are fatal. */
		if (!user_mode(regs))
			s390_handle_damage();
		/*
		 * Couldn't restore all register contents for the
		 * user space process -> mark task for termination.
		 */
		mcck->kill_task = 1;
		mcck->mcck_code = mci.val;
		mcck_pending = 1;
	}

	/*
	 * Backup the machine check's info if it happens when the guest
	 * is running.
	 */
	if (test_cpu_flag(CIF_MCCK_GUEST))
		s390_backup_mcck_info(regs);

	if (mci.cd) {
		/* Timing facility damage */
		s390_handle_damage();
	}
	if (mci.ed && mci.ec) {
		/* External damage */
		if (lc->external_damage_code & (1U << ED_STP_SYNC))
			mcck->stp_queue |= stp_sync_check();
		if (lc->external_damage_code & (1U << ED_STP_ISLAND))
			mcck->stp_queue |= stp_island_check();
		mcck_pending = 1;
	}
	/*
	 * Reinject storage related machine checks into the guest if they
	 * happen when the guest is running.
	 */
	if (!test_cpu_flag(CIF_MCCK_GUEST)) {
		/* Storage error uncorrected */
		if (mci.se)
			s390_handle_damage();
		/* Storage key-error uncorrected */
		if (mci.ke)
			s390_handle_damage();
		/* Storage degradation */
		if (mci.ds && mci.fa)
			s390_handle_damage();
	}
	if (mci.cp) {
		/* Channel report word pending */
		mcck->channel_report = 1;
		mcck_pending = 1;
	}
	if (mci.w) {
		/* Warning pending */
		mcck->warning = 1;
		mcck_pending = 1;
	}

	/*
	 * If there are only Channel Report Pending and External Damage
	 * machine checks, they will not be reinjected into the guest
	 * because they refer to host conditions only.
	 */
	mcck_dam_code = (mci.val & MCIC_SUBCLASS_MASK);
	if (test_cpu_flag(CIF_MCCK_GUEST) &&
	    (mcck_dam_code & MCCK_CODE_NO_GUEST) != mcck_dam_code) {
		/* Set exit reason code for host's later handling */
		*((long *)(regs->gprs[15] + __SF_SIE_REASON)) = -EINTR;
	}
	clear_cpu_flag(CIF_MCCK_GUEST);

	/* Defer the remaining work to s390_handle_mcck(). */
	if (mcck_pending)
		schedule_mcck_handler();

	irqentry_nmi_exit(regs, irq_state);
}
NOKPROBE_SYMBOL(s390_do_machine_check);
502 
machine_check_init(void)503 static int __init machine_check_init(void)
504 {
505 	system_ctl_set_bit(14, CR14_EXTERNAL_DAMAGE_SUBMASK_BIT);
506 	system_ctl_set_bit(14, CR14_RECOVERY_SUBMASK_BIT);
507 	system_ctl_set_bit(14, CR14_WARNING_SUBMASK_BIT);
508 	return 0;
509 }
510 early_initcall(machine_check_init);
511