xref: /linux/arch/x86/kernel/cpu/mce/inject.c (revision 702648721db590b3425c31ade294000e18808345)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Machine check injection support.
4  * Copyright 2008 Intel Corporation.
5  *
6  * Authors:
7  * Andi Kleen
8  * Ying Huang
9  *
10  * The AMD part (from mce_amd_inj.c): a simple MCE injection facility
11  * for testing different aspects of the RAS code. This driver should be
12  * built as module so that it can be loaded on production kernels for
13  * testing purposes.
14  *
15  * Copyright (c) 2010-17:  Borislav Petkov <bp@alien8.de>
16  *			   Advanced Micro Devices Inc.
17  */
18 
19 #include <linux/cpu.h>
20 #include <linux/debugfs.h>
21 #include <linux/kernel.h>
22 #include <linux/module.h>
23 #include <linux/notifier.h>
24 #include <linux/pci.h>
25 #include <linux/uaccess.h>
26 
27 #include <asm/amd_nb.h>
28 #include <asm/apic.h>
29 #include <asm/irq_vectors.h>
30 #include <asm/mce.h>
31 #include <asm/nmi.h>
32 #include <asm/smp.h>
33 
34 #include "internal.h"
35 
/* Cleared by check_hw_inj_possible() when the platform ignores MCA_STATUS writes. */
static bool hw_injection_possible = true;

/*
 * Collect all the MCi_XXX settings
 */
/* Staging area for the error record being built up via the debugfs files. */
static struct mce i_mce;
/* Root of the "mce-inject" debugfs directory; removed on module exit. */
static struct dentry *dfs_inj;

/* Size of the stack buffer used to parse/print the "flags" file. */
#define MAX_FLAG_OPT_SIZE	4
/* D18F3x44 (NB Configuration) PCI config register offset. */
#define NBCFG			0x44

enum injection_type {
	SW_INJ = 0,	/* SW injection, simply decode the error */
	HW_INJ,		/* Trigger a #MC */
	DFR_INT_INJ,    /* Trigger Deferred error interrupt */
	THR_INT_INJ,    /* Trigger threshold interrupt */
	N_INJ_TYPES,
};

/* Strings accepted by the "flags" debugfs file, indexed by injection_type. */
static const char * const flags_options[] = {
	[SW_INJ] = "sw",
	[HW_INJ] = "hw",
	[DFR_INT_INJ] = "df",
	[THR_INT_INJ] = "th",
	NULL
};

/* Set default injection to SW_INJ */
static enum injection_type inj_type = SW_INJ;
65 
/*
 * Generate a debugfs setter which stores @val into the given field of the
 * struct mce passed in as @data (always &i_mce here).
 */
#define MCE_INJECT_SET(reg)						\
static int inj_##reg##_set(void *data, u64 val)				\
{									\
	struct mce *m = (struct mce *)data;				\
									\
	m->reg = val;							\
	return 0;							\
}

MCE_INJECT_SET(status);
MCE_INJECT_SET(misc);
MCE_INJECT_SET(addr);
MCE_INJECT_SET(synd);

/* Generate the matching debugfs getter for a struct mce field. */
#define MCE_INJECT_GET(reg)						\
static int inj_##reg##_get(void *data, u64 *val)			\
{									\
	struct mce *m = (struct mce *)data;				\
									\
	*val = m->reg;							\
	return 0;							\
}

MCE_INJECT_GET(status);
MCE_INJECT_GET(misc);
MCE_INJECT_GET(addr);
MCE_INJECT_GET(synd);
MCE_INJECT_GET(ipid);

/* Hook the generated getters/setters up as debugfs file operations. */
DEFINE_SIMPLE_ATTRIBUTE(status_fops, inj_status_get, inj_status_set, "%llx\n");
DEFINE_SIMPLE_ATTRIBUTE(misc_fops, inj_misc_get, inj_misc_set, "%llx\n");
DEFINE_SIMPLE_ATTRIBUTE(addr_fops, inj_addr_get, inj_addr_set, "%llx\n");
DEFINE_SIMPLE_ATTRIBUTE(synd_fops, inj_synd_get, inj_synd_set, "%llx\n");
99 
100 /* Use the user provided IPID value on a sw injection. */
101 static int inj_ipid_set(void *data, u64 val)
102 {
103 	struct mce *m = (struct mce *)data;
104 
105 	if (cpu_feature_enabled(X86_FEATURE_SMCA)) {
106 		if (inj_type == SW_INJ)
107 			m->ipid = val;
108 	}
109 
110 	return 0;
111 }
112 
113 DEFINE_SIMPLE_ATTRIBUTE(ipid_fops, inj_ipid_get, inj_ipid_set, "%llx\n");
114 
115 static void setup_inj_struct(struct mce *m)
116 {
117 	memset(m, 0, sizeof(struct mce));
118 
119 	m->cpuvendor = boot_cpu_data.x86_vendor;
120 	m->time	     = ktime_get_real_seconds();
121 	m->cpuid     = cpuid_eax(1);
122 	m->microcode = boot_cpu_data.microcode;
123 }
124 
/*
 * Update fake mce registers on current CPU.
 *
 * Copy @m into the per-CPU injectm slot of m->extcpu. The readers of
 * injectm check ->finished, so the record is published in stages with
 * full barriers: clear ->finished, write the fields, then set
 * ->finished last so no reader can observe a partially-written record.
 */
static void inject_mce(struct mce *m)
{
	struct mce *i = &per_cpu(injectm, m->extcpu);

	/* Make sure no one reads partially written injectm */
	i->finished = 0;
	mb();
	/* Source copy is marked unfinished too; memcpy below propagates it. */
	m->finished = 0;
	/* First set the fields after finished */
	i->extcpu = m->extcpu;
	mb();
	/* Now write record in order, finished last (except above) */
	memcpy(i, m, sizeof(struct mce));
	/* Finally activate it */
	mb();
	i->finished = 1;
}
143 
/*
 * Deliver the injected error via the CMCI/poll path: run
 * machine_check_poll() over all banks (mask set to all-ones) with
 * interrupts off, then mark the injected record consumed.
 */
static void raise_poll(struct mce *m)
{
	unsigned long flags;
	mce_banks_t b;

	/* Poll every bank so the injected one is certainly scanned. */
	memset(&b, 0xff, sizeof(mce_banks_t));
	local_irq_save(flags);
	machine_check_poll(0, &b);
	local_irq_restore(flags);
	/* Record consumed; allow the next injection. */
	m->finished = 0;
}
155 
/*
 * Deliver the injected error as a #MC exception by calling
 * do_machine_check() directly. If no pt_regs are supplied (process
 * context injection), fake a minimal frame from the record's ip/cs.
 */
static void raise_exception(struct mce *m, struct pt_regs *pregs)
{
	struct pt_regs regs;
	unsigned long flags;

	if (!pregs) {
		memset(&regs, 0, sizeof(struct pt_regs));
		regs.ip = m->ip;
		regs.cs = m->cs;
		pregs = &regs;
	}
	/* do_machine_check() expects interrupts disabled -- at least */
	local_irq_save(flags);
	do_machine_check(pregs);
	local_irq_restore(flags);
	/* Record consumed; allow the next injection. */
	m->finished = 0;
}
173 
/* CPUs still expected to raise their part of a broadcast injection. */
static cpumask_var_t mce_inject_cpumask;
/* Serializes raise_mce() invocations from the injector chain. */
static DEFINE_MUTEX(mce_inject_mutex);

/*
 * NMI handler for MCJ_NMI_BROADCAST injections: if this CPU is flagged
 * in mce_inject_cpumask, consume the flag and raise the injected error
 * locally (exception or poll, depending on the record).
 */
static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	struct mce *m = this_cpu_ptr(&injectm);
	if (!cpumask_test_cpu(cpu, mce_inject_cpumask))
		return NMI_DONE;	/* not ours -- pass the NMI on */
	cpumask_clear_cpu(cpu, mce_inject_cpumask);
	if (m->inject_flags & MCJ_EXCEPTION)
		raise_exception(m, regs);
	else if (m->status)
		raise_poll(m);
	return NMI_HANDLED;
}
190 
191 static void mce_irq_ipi(void *info)
192 {
193 	int cpu = smp_processor_id();
194 	struct mce *m = this_cpu_ptr(&injectm);
195 
196 	if (cpumask_test_cpu(cpu, mce_inject_cpumask) &&
197 			m->inject_flags & MCJ_EXCEPTION) {
198 		cpumask_clear_cpu(cpu, mce_inject_cpumask);
199 		raise_exception(m, NULL);
200 	}
201 }
202 
/*
 * Inject mce on current CPU.
 *
 * Dispatch the per-CPU injectm record on the CPU we are running on:
 * raise a #MC exception (for IRQ/process contexts), run the poll path,
 * or -- if the record carries no status -- just mark it consumed.
 * Returns 0 on success, -EINVAL for an unknown injection context.
 */
static int raise_local(void)
{
	struct mce *m = this_cpu_ptr(&injectm);
	int context = MCJ_CTX(m->inject_flags);
	int ret = 0;
	int cpu = m->extcpu;

	if (m->inject_flags & MCJ_EXCEPTION) {
		pr_info("Triggering MCE exception on CPU %d\n", cpu);
		switch (context) {
		case MCJ_CTX_IRQ:
			/*
			 * Could do more to fake interrupts like
			 * calling irq_enter, but the necessary
			 * machinery isn't exported currently.
			 */
			fallthrough;
		case MCJ_CTX_PROCESS:
			raise_exception(m, NULL);
			break;
		default:
			pr_info("Invalid MCE context\n");
			ret = -EINVAL;
		}
		pr_info("MCE exception done on CPU %d\n", cpu);
	} else if (m->status) {
		pr_info("Starting machine check poll CPU %d\n", cpu);
		raise_poll(m);
		/* Kick the work that notifies userspace consumers. */
		mce_notify_irq();
		pr_info("Machine check poll done on CPU %d\n", cpu);
	} else
		m->finished = 0;	/* nothing to do; release the record */

	return ret;
}
239 
/*
 * Publish the record via inject_mce() and then raise it, optionally
 * broadcasting to other online CPUs first (IRQ IPI or NMI IPI) and
 * waiting up to 2s for them to consume their copy before raising
 * locally. MCJ_CTX_RANDOM records are only published, never raised
 * here -- they are picked up by the regular machinery later.
 */
static void __maybe_unused raise_mce(struct mce *m)
{
	int context = MCJ_CTX(m->inject_flags);

	inject_mce(m);

	if (context == MCJ_CTX_RANDOM)
		return;

	if (m->inject_flags & (MCJ_IRQ_BROADCAST | MCJ_NMI_BROADCAST)) {
		unsigned long start;
		int cpu;

		cpus_read_lock();
		cpumask_copy(mce_inject_cpumask, cpu_online_mask);
		/* Exclude ourselves: we raise locally at the end. */
		cpumask_clear_cpu(get_cpu(), mce_inject_cpumask);
		/* Only target CPUs holding a finished CTX_RANDOM record. */
		for_each_online_cpu(cpu) {
			struct mce *mcpu = &per_cpu(injectm, cpu);
			if (!mcpu->finished ||
			    MCJ_CTX(mcpu->inject_flags) != MCJ_CTX_RANDOM)
				cpumask_clear_cpu(cpu, mce_inject_cpumask);
		}
		if (!cpumask_empty(mce_inject_cpumask)) {
			if (m->inject_flags & MCJ_IRQ_BROADCAST) {
				/*
				 * don't wait because mce_irq_ipi is necessary
				 * to be sync with following raise_local
				 */
				preempt_disable();
				smp_call_function_many(mce_inject_cpumask,
					mce_irq_ipi, NULL, 0);
				preempt_enable();
			} else if (m->inject_flags & MCJ_NMI_BROADCAST)
				apic->send_IPI_mask(mce_inject_cpumask,
						NMI_VECTOR);
		}
		/* Wait (bounded) for all targeted CPUs to clear their bit. */
		start = jiffies;
		while (!cpumask_empty(mce_inject_cpumask)) {
			if (!time_before(jiffies, start + 2*HZ)) {
				pr_err("Timeout waiting for mce inject %lx\n",
					*cpumask_bits(mce_inject_cpumask));
				break;
			}
			cpu_relax();
		}
		raise_local();
		put_cpu();
		cpus_read_unlock();
	} else {
		preempt_disable();
		raise_local();
		preempt_enable();
	}
}
294 
295 static int mce_inject_raise(struct notifier_block *nb, unsigned long val,
296 			    void *data)
297 {
298 	struct mce *m = (struct mce *)data;
299 
300 	if (!m)
301 		return NOTIFY_DONE;
302 
303 	mutex_lock(&mce_inject_mutex);
304 	raise_mce(m);
305 	mutex_unlock(&mce_inject_mutex);
306 
307 	return NOTIFY_DONE;
308 }
309 
310 static struct notifier_block inject_nb = {
311 	.notifier_call  = mce_inject_raise,
312 };
313 
314 /*
315  * Caller needs to be make sure this cpu doesn't disappear
316  * from under us, i.e.: get_cpu/put_cpu.
317  */
318 static int toggle_hw_mce_inject(unsigned int cpu, bool enable)
319 {
320 	u32 l, h;
321 	int err;
322 
323 	err = rdmsr_on_cpu(cpu, MSR_K7_HWCR, &l, &h);
324 	if (err) {
325 		pr_err("%s: error reading HWCR\n", __func__);
326 		return err;
327 	}
328 
329 	enable ? (l |= BIT(18)) : (l &= ~BIT(18));
330 
331 	err = wrmsr_on_cpu(cpu, MSR_K7_HWCR, l, h);
332 	if (err)
333 		pr_err("%s: error writing HWCR\n", __func__);
334 
335 	return err;
336 }
337 
338 static int __set_inj(const char *buf)
339 {
340 	int i;
341 
342 	for (i = 0; i < N_INJ_TYPES; i++) {
343 		if (!strncmp(flags_options[i], buf, strlen(flags_options[i]))) {
344 			if (i > SW_INJ && !hw_injection_possible)
345 				continue;
346 			inj_type = i;
347 			return 0;
348 		}
349 	}
350 	return -EINVAL;
351 }
352 
353 static ssize_t flags_read(struct file *filp, char __user *ubuf,
354 			  size_t cnt, loff_t *ppos)
355 {
356 	char buf[MAX_FLAG_OPT_SIZE];
357 	int n;
358 
359 	n = sprintf(buf, "%s\n", flags_options[inj_type]);
360 
361 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
362 }
363 
/*
 * Select the injection type: copy the user string (at most
 * MAX_FLAG_OPT_SIZE bytes including the terminator), strip whitespace
 * and match it against flags_options[] via __set_inj().
 */
static ssize_t flags_write(struct file *filp, const char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	char buf[MAX_FLAG_OPT_SIZE], *__buf;
	int err;

	if (!cnt || cnt > MAX_FLAG_OPT_SIZE)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	/* Terminate over the last copied byte (usually the '\n'). */
	buf[cnt - 1] = 0;

	/* strip whitespace */
	__buf = strstrip(buf);

	err = __set_inj(__buf);
	if (err) {
		pr_err("%s: Invalid flags value: %s\n", __func__, __buf);
		return err;
	}

	*ppos += cnt;

	return cnt;
}

static const struct file_operations flags_fops = {
	.read           = flags_read,
	.write          = flags_write,
	.llseek         = generic_file_llseek,
};
397 
/*
 * On which CPU to inject?
 */
MCE_INJECT_GET(extcpu);

/*
 * Set the target CPU for the injection; rejects out-of-range and
 * offline CPUs. Note: the CPU may still go offline between this check
 * and the actual injection in inj_bank_set()/do_inject(), which
 * re-checks under cpus_read_lock().
 */
static int inj_extcpu_set(void *data, u64 val)
{
	struct mce *m = (struct mce *)data;

	if (val >= nr_cpu_ids || !cpu_online(val)) {
		pr_err("%s: Invalid CPU: %llu\n", __func__, val);
		return -EINVAL;
	}
	m->extcpu = val;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(extcpu_fops, inj_extcpu_get, inj_extcpu_set, "%llu\n");
416 
/* Raise a #MC on this CPU via software interrupt (vector 18 = X86_TRAP_MC). */
static void trigger_mce(void *info)
{
	asm volatile("int $18");
}
421 
/* Raise the deferred-error APIC interrupt on this CPU. */
static void trigger_dfr_int(void *info)
{
	asm volatile("int %0" :: "i" (DEFERRED_ERROR_VECTOR));
}
426 
/* Raise the threshold APIC interrupt on this CPU. */
static void trigger_thr_int(void *info)
{
	asm volatile("int %0" :: "i" (THRESHOLD_APIC_VECTOR));
}
431 
/*
 * Return the node base core (first CPU number) of @node_id, computed
 * from cores * SMT siblings divided over the nodes per socket.
 * Assumes CPU numbering is contiguous per node -- TODO confirm for the
 * affected (pre-Fam17h DCM) parts this is used on.
 */
static u32 get_nbc_for_node(int node_id)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	u32 cores_per_node;

	cores_per_node = (c->x86_max_cores * smp_num_siblings) / amd_get_nodes_per_socket();

	return cores_per_node * node_id;
}
441 
/*
 * Ensure D18F3x44[NbMcaToMstCpuEn] (bit 27) is set for node @nid so
 * that NB (bank 4) MCA errors are reported on the node's master core.
 * Best-effort: errors are logged and the function simply returns.
 */
static void toggle_nb_mca_mst_cpu(u16 nid)
{
	struct amd_northbridge *nb;
	struct pci_dev *F3;
	u32 val;
	int err;

	nb = node_to_amd_nb(nid);
	if (!nb)
		return;

	/* F3 is the misc (function 3) device of this node's northbridge. */
	F3 = nb->misc;
	if (!F3)
		return;

	err = pci_read_config_dword(F3, NBCFG, &val);
	if (err) {
		pr_err("%s: Error reading F%dx%03x.\n",
		       __func__, PCI_FUNC(F3->devfn), NBCFG);
		return;
	}

	/* Already enabled by BIOS -- nothing to do. */
	if (val & BIT(27))
		return;

	pr_err("%s: Set D18F3x44[NbMcaToMstCpuEn] which BIOS hasn't done.\n",
	       __func__);

	val |= BIT(27);
	err = pci_write_config_dword(F3, NBCFG, val);
	if (err)
		pr_err("%s: Error writing F%dx%03x.\n",
		       __func__, PCI_FUNC(F3->devfn), NBCFG);
}
476 
/*
 * Write the injection record into the real MCA MSRs of the CPU this
 * runs on (invoked via smp_call_function_single() from do_inject()).
 * On SMCA parts, deferred errors go to the DE* alias registers;
 * everything else uses the regular (or legacy) bank registers.
 */
static void prepare_msrs(void *info)
{
	struct mce m = *(struct mce *)info;
	u8 b = m.bank;

	wrmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);

	if (boot_cpu_has(X86_FEATURE_SMCA)) {
		/* inject_flags holds the raw inj_type here, set by do_inject(). */
		if (m.inject_flags == DFR_INT_INJ) {
			wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(b), m.status);
			wrmsrl(MSR_AMD64_SMCA_MCx_DEADDR(b), m.addr);
		} else {
			wrmsrl(MSR_AMD64_SMCA_MCx_STATUS(b), m.status);
			wrmsrl(MSR_AMD64_SMCA_MCx_ADDR(b), m.addr);
		}

		wrmsrl(MSR_AMD64_SMCA_MCx_MISC(b), m.misc);
		wrmsrl(MSR_AMD64_SMCA_MCx_SYND(b), m.synd);
	} else {
		wrmsrl(MSR_IA32_MCx_STATUS(b), m.status);
		wrmsrl(MSR_IA32_MCx_ADDR(b), m.addr);
		wrmsrl(MSR_IA32_MCx_MISC(b), m.misc);
	}
}
501 
/*
 * Perform the injection described by i_mce. For SW_INJ, just feed the
 * record to the decoder via mce_log(). For HW-backed types, write the
 * MSRs on the target CPU (with HWCR[McStatusWrEn] temporarily set) and
 * then trigger the matching interrupt/exception there.
 */
static void do_inject(void)
{
	u64 mcg_status = 0;
	unsigned int cpu = i_mce.extcpu;
	u8 b = i_mce.bank;

	i_mce.tsc = rdtsc_ordered();

	/* A record is only honored with VAL set; MISCV/SYNDV mark the
	 * auxiliary registers as valid when the user provided them. */
	i_mce.status |= MCI_STATUS_VAL;

	if (i_mce.misc)
		i_mce.status |= MCI_STATUS_MISCV;

	if (i_mce.synd)
		i_mce.status |= MCI_STATUS_SYNDV;

	if (inj_type == SW_INJ) {
		mce_log(&i_mce);
		return;
	}

	/* prep MCE global settings for the injection */
	mcg_status = MCG_STATUS_MCIP | MCG_STATUS_EIPV;

	/* RIPV only when the error is not processor-context corrupt. */
	if (!(i_mce.status & MCI_STATUS_PCC))
		mcg_status |= MCG_STATUS_RIPV;

	/*
	 * Ensure necessary status bits for deferred errors:
	 * - MCx_STATUS[Deferred]: make sure it is a deferred error
	 * - MCx_STATUS[UC] cleared: deferred errors are _not_ UC
	 */
	if (inj_type == DFR_INT_INJ) {
		i_mce.status |= MCI_STATUS_DEFERRED;
		i_mce.status &= ~MCI_STATUS_UC;
	}

	/*
	 * For multi node CPUs, logging and reporting of bank 4 errors happens
	 * only on the node base core. Refer to D18F3x44[NbMcaToMstCpuEn] for
	 * Fam10h and later BKDGs.
	 */
	if (boot_cpu_has(X86_FEATURE_AMD_DCM) &&
	    b == 4 &&
	    boot_cpu_data.x86 < 0x17) {
		toggle_nb_mca_mst_cpu(topology_die_id(cpu));
		cpu = get_nbc_for_node(topology_die_id(cpu));
	}

	/* Hold the hotplug lock so the target stays online throughout. */
	cpus_read_lock();
	if (!cpu_online(cpu))
		goto err;

	/* Allow MCA status MSR writes while we set up the record. */
	toggle_hw_mce_inject(cpu, true);

	i_mce.mcgstatus = mcg_status;
	i_mce.inject_flags = inj_type;
	/* wait=0: prepare_msrs() runs asynchronously on the target. */
	smp_call_function_single(cpu, prepare_msrs, &i_mce, 0);

	toggle_hw_mce_inject(cpu, false);

	switch (inj_type) {
	case DFR_INT_INJ:
		smp_call_function_single(cpu, trigger_dfr_int, NULL, 0);
		break;
	case THR_INT_INJ:
		smp_call_function_single(cpu, trigger_thr_int, NULL, 0);
		break;
	default:
		smp_call_function_single(cpu, trigger_mce, NULL, 0);
	}

err:
	cpus_read_unlock();

}
578 
/*
 * This denotes into which bank we're injecting and triggers
 * the injection, at the same time.
 */
static int inj_bank_set(void *data, u64 val)
{
	struct mce *m = (struct mce *)data;
	u8 n_banks;
	u64 cap;

	/* Get bank count on target CPU so we can handle non-uniform values. */
	rdmsrl_on_cpu(m->extcpu, MSR_IA32_MCG_CAP, &cap);
	n_banks = cap & MCG_BANKCNT_MASK;

	if (val >= n_banks) {
		pr_err("MCA bank %llu non-existent on CPU%d\n", val, m->extcpu);
		return -EINVAL;
	}

	m->bank = val;

	/*
	 * sw-only injection allows to write arbitrary values into the MCA
	 * registers because it tests only the decoding paths.
	 */
	if (inj_type == SW_INJ)
		goto inject;

	/*
	 * Read IPID value to determine if a bank is populated on the target
	 * CPU.
	 */
	if (cpu_feature_enabled(X86_FEATURE_SMCA)) {
		u64 ipid;

		if (rdmsrl_on_cpu(m->extcpu, MSR_AMD64_SMCA_MCx_IPID(val), &ipid)) {
			pr_err("Error reading IPID on CPU%d\n", m->extcpu);
			return -EINVAL;
		}

		/* IPID of zero means the bank is unpopulated on this CPU. */
		if (!ipid) {
			pr_err("Cannot inject into unpopulated bank %llu\n", val);
			return -ENODEV;
		}
	}

inject:
	do_inject();

	/* Reset injection struct */
	setup_inj_struct(&i_mce);

	return 0;
}

MCE_INJECT_GET(bank);

DEFINE_SIMPLE_ATTRIBUTE(bank_fops, inj_bank_get, inj_bank_set, "%llu\n");
637 
/* Usage text served by the "README" debugfs file (see readme_fops). */
static const char readme_msg[] =
"Description of the files and their usages:\n"
"\n"
"Note1: i refers to the bank number below.\n"
"Note2: See respective BKDGs for the exact bit definitions of the files below\n"
"as they mirror the hardware registers.\n"
"\n"
"status:\t Set MCi_STATUS: the bits in that MSR control the error type and\n"
"\t attributes of the error which caused the MCE.\n"
"\n"
"misc:\t Set MCi_MISC: provide auxiliary info about the error. It is mostly\n"
"\t used for error thresholding purposes and its validity is indicated by\n"
"\t MCi_STATUS[MiscV].\n"
"\n"
"synd:\t Set MCi_SYND: provide syndrome info about the error. Only valid on\n"
"\t Scalable MCA systems, and its validity is indicated by MCi_STATUS[SyndV].\n"
"\n"
"addr:\t Error address value to be written to MCi_ADDR. Log address information\n"
"\t associated with the error.\n"
"\n"
"cpu:\t The CPU to inject the error on.\n"
"\n"
"bank:\t Specify the bank you want to inject the error into: the number of\n"
"\t banks in a processor varies and is family/model-specific, therefore, the\n"
"\t supplied value is sanity-checked. Setting the bank value also triggers the\n"
"\t injection.\n"
"\n"
"flags:\t Injection type to be performed. Writing to this file will trigger a\n"
"\t real machine check, an APIC interrupt or invoke the error decoder routines\n"
"\t for AMD processors.\n"
"\n"
"\t Allowed error injection types:\n"
"\t  - \"sw\": Software error injection. Decode error to a human-readable \n"
"\t    format only. Safe to use.\n"
"\t  - \"hw\": Hardware error injection. Causes the #MC exception handler to \n"
"\t    handle the error. Be warned: might cause system panic if MCi_STATUS[PCC] \n"
"\t    is set. Therefore, consider setting (debugfs_mountpoint)/mce/fake_panic \n"
"\t    before injecting.\n"
"\t  - \"df\": Trigger APIC interrupt for Deferred error. Causes deferred \n"
"\t    error APIC interrupt handler to handle the error if the feature is \n"
"\t    present in hardware. \n"
"\t  - \"th\": Trigger APIC interrupt for Threshold errors. Causes threshold \n"
"\t    APIC interrupt handler to handle the error. \n"
"\n"
"ipid:\t IPID (AMD-specific)\n"
"\n";
684 
/* Serve the static readme_msg text to userspace (read-only). */
static ssize_t
inj_readme_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
					readme_msg, strlen(readme_msg));
}

static const struct file_operations readme_fops = {
	.read		= inj_readme_read,
};
696 
/* Table of debugfs files to create under "mce-inject" (see debugfs_init()). */
static struct dfs_node {
	char *name;			/* file name */
	const struct file_operations *fops;
	umode_t perm;			/* file permissions */
} dfs_fls[] = {
	{ .name = "status",	.fops = &status_fops, .perm = S_IRUSR | S_IWUSR },
	{ .name = "misc",	.fops = &misc_fops,   .perm = S_IRUSR | S_IWUSR },
	{ .name = "addr",	.fops = &addr_fops,   .perm = S_IRUSR | S_IWUSR },
	{ .name = "synd",	.fops = &synd_fops,   .perm = S_IRUSR | S_IWUSR },
	{ .name = "ipid",	.fops = &ipid_fops,   .perm = S_IRUSR | S_IWUSR },
	{ .name = "bank",	.fops = &bank_fops,   .perm = S_IRUSR | S_IWUSR },
	{ .name = "flags",	.fops = &flags_fops,  .perm = S_IRUSR | S_IWUSR },
	{ .name = "cpu",	.fops = &extcpu_fops, .perm = S_IRUSR | S_IWUSR },
	{ .name = "README",	.fops = &readme_fops, .perm = S_IRUSR | S_IRGRP | S_IROTH },
};
712 
713 static void __init debugfs_init(void)
714 {
715 	unsigned int i;
716 
717 	dfs_inj = debugfs_create_dir("mce-inject", NULL);
718 
719 	for (i = 0; i < ARRAY_SIZE(dfs_fls); i++)
720 		debugfs_create_file(dfs_fls[i].name, dfs_fls[i].perm, dfs_inj,
721 				    &i_mce, dfs_fls[i].fops);
722 }
723 
724 static void check_hw_inj_possible(void)
725 {
726 	int cpu;
727 	u8 bank;
728 
729 	/*
730 	 * This behavior exists only on SMCA systems though its not directly
731 	 * related to SMCA.
732 	 */
733 	if (!cpu_feature_enabled(X86_FEATURE_SMCA))
734 		return;
735 
736 	cpu = get_cpu();
737 
738 	for (bank = 0; bank < MAX_NR_BANKS; ++bank) {
739 		u64 status = MCI_STATUS_VAL, ipid;
740 
741 		/* Check whether bank is populated */
742 		rdmsrl(MSR_AMD64_SMCA_MCx_IPID(bank), ipid);
743 		if (!ipid)
744 			continue;
745 
746 		toggle_hw_mce_inject(cpu, true);
747 
748 		wrmsrl_safe(mca_msr_reg(bank, MCA_STATUS), status);
749 		rdmsrl_safe(mca_msr_reg(bank, MCA_STATUS), &status);
750 
751 		if (!status) {
752 			hw_injection_possible = false;
753 			pr_warn("Platform does not allow *hardware* error injection."
754 				"Try using APEI EINJ instead.\n");
755 		}
756 
757 		toggle_hw_mce_inject(cpu, false);
758 
759 		break;
760 	}
761 
762 	put_cpu();
763 }
764 
/*
 * Module init: probe HW-injection capability, create the debugfs
 * interface, hook the NMI handler (for NMI broadcast) and the MCE
 * injector chain, and initialize the staging record. Note the module
 * loads even when only SW injection is possible.
 */
static int __init inject_init(void)
{
	if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
		return -ENOMEM;

	check_hw_inj_possible();

	debugfs_init();

	register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0, "mce_notify");
	mce_register_injector_chain(&inject_nb);

	setup_inj_struct(&i_mce);

	pr_info("Machine check injector initialized\n");

	return 0;
}
783 
/*
 * Module exit: tear down in reverse order of inject_init() --
 * notifier chain and NMI handler first, then the debugfs tree,
 * then the broadcast cpumask.
 */
static void __exit inject_exit(void)
{

	mce_unregister_injector_chain(&inject_nb);
	unregister_nmi_handler(NMI_LOCAL, "mce_notify");

	debugfs_remove_recursive(dfs_inj);
	dfs_inj = NULL;

	/* Defensive scrub of the (static) file table; presumably to catch
	 * any stale use after unload -- not strictly required. */
	memset(&dfs_fls, 0, sizeof(dfs_fls));

	free_cpumask_var(mce_inject_cpumask);
}
797 
798 module_init(inject_init);
799 module_exit(inject_exit);
800 MODULE_LICENSE("GPL");
801