xref: /linux/arch/x86/kernel/cpu/mce/inject.c (revision da1d9caf95def6f0320819cf941c9fd1069ba9e1)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Machine check injection support.
4  * Copyright 2008 Intel Corporation.
5  *
6  * Authors:
7  * Andi Kleen
8  * Ying Huang
9  *
10  * The AMD part (from mce_amd_inj.c): a simple MCE injection facility
11  * for testing different aspects of the RAS code. This driver should be
12  * built as module so that it can be loaded on production kernels for
13  * testing purposes.
14  *
15  * Copyright (c) 2010-17:  Borislav Petkov <bp@alien8.de>
16  *			   Advanced Micro Devices Inc.
17  */
18 
19 #include <linux/cpu.h>
20 #include <linux/debugfs.h>
21 #include <linux/kernel.h>
22 #include <linux/module.h>
23 #include <linux/notifier.h>
24 #include <linux/pci.h>
25 #include <linux/uaccess.h>
26 
27 #include <asm/amd_nb.h>
28 #include <asm/apic.h>
29 #include <asm/irq_vectors.h>
30 #include <asm/mce.h>
31 #include <asm/nmi.h>
32 #include <asm/smp.h>
33 
34 #include "internal.h"
35 
/*
 * Collect all the MCi_XXX settings
 */
static struct mce i_mce;	/* injection record being built via debugfs writes */
static struct dentry *dfs_inj;	/* root of the "mce-inject" debugfs directory */

/* Maximum accepted write size for the "flags" file (see flags_write()). */
#define MAX_FLAG_OPT_SIZE	4
/* PCI config offset of the AMD northbridge F3x44 (MCA configuration) reg. */
#define NBCFG			0x44
44 
/* Supported injection methods, selected through the "flags" debugfs file. */
enum injection_type {
	SW_INJ = 0,	/* SW injection, simply decode the error */
	HW_INJ,		/* Trigger a #MC */
	DFR_INT_INJ,    /* Trigger Deferred error interrupt */
	THR_INT_INJ,    /* Trigger threshold interrupt */
	N_INJ_TYPES,
};

/* User-visible strings for the "flags" file, indexed by injection_type. */
static const char * const flags_options[] = {
	[SW_INJ] = "sw",
	[HW_INJ] = "hw",
	[DFR_INT_INJ] = "df",
	[THR_INT_INJ] = "th",
	NULL
};

/* Set default injection to SW_INJ */
static enum injection_type inj_type = SW_INJ;
63 
/*
 * Generate a debugfs setter inj_<reg>_set() which stores @val into the
 * corresponding struct mce member of the injection record.
 */
#define MCE_INJECT_SET(reg)						\
static int inj_##reg##_set(void *data, u64 val)				\
{									\
	struct mce *m = (struct mce *)data;				\
									\
	m->reg = val;							\
	return 0;							\
}

MCE_INJECT_SET(status);
MCE_INJECT_SET(misc);
MCE_INJECT_SET(addr);
MCE_INJECT_SET(synd);
77 
/*
 * Generate a debugfs getter inj_<reg>_get() which reads back the
 * corresponding struct mce member of the injection record.
 */
#define MCE_INJECT_GET(reg)						\
static int inj_##reg##_get(void *data, u64 *val)			\
{									\
	struct mce *m = (struct mce *)data;				\
									\
	*val = m->reg;							\
	return 0;							\
}

MCE_INJECT_GET(status);
MCE_INJECT_GET(misc);
MCE_INJECT_GET(addr);
MCE_INJECT_GET(synd);
MCE_INJECT_GET(ipid);

/* Wire getters/setters into debugfs attributes; values shown in hex. */
DEFINE_SIMPLE_ATTRIBUTE(status_fops, inj_status_get, inj_status_set, "%llx\n");
DEFINE_SIMPLE_ATTRIBUTE(misc_fops, inj_misc_get, inj_misc_set, "%llx\n");
DEFINE_SIMPLE_ATTRIBUTE(addr_fops, inj_addr_get, inj_addr_set, "%llx\n");
DEFINE_SIMPLE_ATTRIBUTE(synd_fops, inj_synd_get, inj_synd_set, "%llx\n");
97 
98 /* Use the user provided IPID value on a sw injection. */
99 static int inj_ipid_set(void *data, u64 val)
100 {
101 	struct mce *m = (struct mce *)data;
102 
103 	if (cpu_feature_enabled(X86_FEATURE_SMCA)) {
104 		if (inj_type == SW_INJ)
105 			m->ipid = val;
106 	}
107 
108 	return 0;
109 }
110 
111 DEFINE_SIMPLE_ATTRIBUTE(ipid_fops, inj_ipid_get, inj_ipid_set, "%llx\n");
112 
113 static void setup_inj_struct(struct mce *m)
114 {
115 	memset(m, 0, sizeof(struct mce));
116 
117 	m->cpuvendor = boot_cpu_data.x86_vendor;
118 	m->time	     = ktime_get_real_seconds();
119 	m->cpuid     = cpuid_eax(1);
120 	m->microcode = boot_cpu_data.microcode;
121 }
122 
/*
 * Update fake mce registers on current CPU.
 *
 * Copy @m into m->extcpu's per-CPU injectm slot. The memory barriers order
 * the stores so a lock-free reader never sees a partially written record:
 * 'finished' is cleared first and set again only at the very end.
 */
static void inject_mce(struct mce *m)
{
	struct mce *i = &per_cpu(injectm, m->extcpu);

	/* Make sure no one reads partially written injectm */
	i->finished = 0;
	mb();
	m->finished = 0;
	/* First set the fields after finished */
	i->extcpu = m->extcpu;
	mb();
	/* Now write record in order, finished last (except above) */
	memcpy(i, m, sizeof(struct mce));
	/* Finally activate it */
	mb();
	i->finished = 1;
}
141 
142 static void raise_poll(struct mce *m)
143 {
144 	unsigned long flags;
145 	mce_banks_t b;
146 
147 	memset(&b, 0xff, sizeof(mce_banks_t));
148 	local_irq_save(flags);
149 	machine_check_poll(0, &b);
150 	local_irq_restore(flags);
151 	m->finished = 0;
152 }
153 
154 static void raise_exception(struct mce *m, struct pt_regs *pregs)
155 {
156 	struct pt_regs regs;
157 	unsigned long flags;
158 
159 	if (!pregs) {
160 		memset(&regs, 0, sizeof(struct pt_regs));
161 		regs.ip = m->ip;
162 		regs.cs = m->cs;
163 		pregs = &regs;
164 	}
165 	/* do_machine_check() expects interrupts disabled -- at least */
166 	local_irq_save(flags);
167 	do_machine_check(pregs);
168 	local_irq_restore(flags);
169 	m->finished = 0;
170 }
171 
/* CPUs still expected to take part in the current broadcast injection. */
static cpumask_var_t mce_inject_cpumask;
/* Serializes injections raised through the injector notifier chain. */
static DEFINE_MUTEX(mce_inject_mutex);
174 
175 static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
176 {
177 	int cpu = smp_processor_id();
178 	struct mce *m = this_cpu_ptr(&injectm);
179 	if (!cpumask_test_cpu(cpu, mce_inject_cpumask))
180 		return NMI_DONE;
181 	cpumask_clear_cpu(cpu, mce_inject_cpumask);
182 	if (m->inject_flags & MCJ_EXCEPTION)
183 		raise_exception(m, regs);
184 	else if (m->status)
185 		raise_poll(m);
186 	return NMI_HANDLED;
187 }
188 
189 static void mce_irq_ipi(void *info)
190 {
191 	int cpu = smp_processor_id();
192 	struct mce *m = this_cpu_ptr(&injectm);
193 
194 	if (cpumask_test_cpu(cpu, mce_inject_cpumask) &&
195 			m->inject_flags & MCJ_EXCEPTION) {
196 		cpumask_clear_cpu(cpu, mce_inject_cpumask);
197 		raise_exception(m, NULL);
198 	}
199 }
200 
/*
 * Inject mce on current CPU.
 *
 * Depending on the record's flags, either raise a #MC exception (only
 * valid in IRQ/process context) or just poll the banks. Returns 0 on
 * success, -EINVAL for an unsupported injection context.
 */
static int raise_local(void)
{
	struct mce *m = this_cpu_ptr(&injectm);
	int context = MCJ_CTX(m->inject_flags);
	int ret = 0;
	int cpu = m->extcpu;

	if (m->inject_flags & MCJ_EXCEPTION) {
		pr_info("Triggering MCE exception on CPU %d\n", cpu);
		switch (context) {
		case MCJ_CTX_IRQ:
			/*
			 * Could do more to fake interrupts like
			 * calling irq_enter, but the necessary
			 * machinery isn't exported currently.
			 */
			fallthrough;
		case MCJ_CTX_PROCESS:
			raise_exception(m, NULL);
			break;
		default:
			pr_info("Invalid MCE context\n");
			ret = -EINVAL;
		}
		pr_info("MCE exception done on CPU %d\n", cpu);
	} else if (m->status) {
		/* No exception requested: poll the banks instead. */
		pr_info("Starting machine check poll CPU %d\n", cpu);
		raise_poll(m);
		mce_notify_irq();
		pr_info("Machine check poll done on CPU %d\n", cpu);
	} else
		/* Nothing to raise; just retire the record. */
		m->finished = 0;

	return ret;
}
237 
/*
 * Raise the injected error stored via inject_mce(): locally, or broadcast
 * to other CPUs by IRQ or NMI when MCJ_*_BROADCAST is set in the flags.
 */
static void __maybe_unused raise_mce(struct mce *m)
{
	int context = MCJ_CTX(m->inject_flags);

	inject_mce(m);

	/* Random-context records are consumed asynchronously elsewhere. */
	if (context == MCJ_CTX_RANDOM)
		return;

	if (m->inject_flags & (MCJ_IRQ_BROADCAST | MCJ_NMI_BROADCAST)) {
		unsigned long start;
		int cpu;

		cpus_read_lock();
		cpumask_copy(mce_inject_cpumask, cpu_online_mask);
		cpumask_clear_cpu(get_cpu(), mce_inject_cpumask);
		/* Target only CPUs holding a finished random-context record. */
		for_each_online_cpu(cpu) {
			struct mce *mcpu = &per_cpu(injectm, cpu);
			if (!mcpu->finished ||
			    MCJ_CTX(mcpu->inject_flags) != MCJ_CTX_RANDOM)
				cpumask_clear_cpu(cpu, mce_inject_cpumask);
		}
		if (!cpumask_empty(mce_inject_cpumask)) {
			if (m->inject_flags & MCJ_IRQ_BROADCAST) {
				/*
				 * don't wait because mce_irq_ipi is necessary
				 * to be sync with following raise_local
				 */
				preempt_disable();
				smp_call_function_many(mce_inject_cpumask,
					mce_irq_ipi, NULL, 0);
				preempt_enable();
			} else if (m->inject_flags & MCJ_NMI_BROADCAST)
				apic->send_IPI_mask(mce_inject_cpumask,
						NMI_VECTOR);
		}
		/* Wait (bounded to ~2s) for all targeted CPUs to handle it. */
		start = jiffies;
		while (!cpumask_empty(mce_inject_cpumask)) {
			if (!time_before(jiffies, start + 2*HZ)) {
				pr_err("Timeout waiting for mce inject %lx\n",
					*cpumask_bits(mce_inject_cpumask));
				break;
			}
			cpu_relax();
		}
		raise_local();
		put_cpu();
		cpus_read_unlock();
	} else {
		preempt_disable();
		raise_local();
		preempt_enable();
	}
}
292 
293 static int mce_inject_raise(struct notifier_block *nb, unsigned long val,
294 			    void *data)
295 {
296 	struct mce *m = (struct mce *)data;
297 
298 	if (!m)
299 		return NOTIFY_DONE;
300 
301 	mutex_lock(&mce_inject_mutex);
302 	raise_mce(m);
303 	mutex_unlock(&mce_inject_mutex);
304 
305 	return NOTIFY_DONE;
306 }
307 
/* Hooks this module into the MCE injector notifier chain. */
static struct notifier_block inject_nb = {
	.notifier_call  = mce_inject_raise,
};
311 
312 /*
313  * Caller needs to be make sure this cpu doesn't disappear
314  * from under us, i.e.: get_cpu/put_cpu.
315  */
316 static int toggle_hw_mce_inject(unsigned int cpu, bool enable)
317 {
318 	u32 l, h;
319 	int err;
320 
321 	err = rdmsr_on_cpu(cpu, MSR_K7_HWCR, &l, &h);
322 	if (err) {
323 		pr_err("%s: error reading HWCR\n", __func__);
324 		return err;
325 	}
326 
327 	enable ? (l |= BIT(18)) : (l &= ~BIT(18));
328 
329 	err = wrmsr_on_cpu(cpu, MSR_K7_HWCR, l, h);
330 	if (err)
331 		pr_err("%s: error writing HWCR\n", __func__);
332 
333 	return err;
334 }
335 
336 static int __set_inj(const char *buf)
337 {
338 	int i;
339 
340 	for (i = 0; i < N_INJ_TYPES; i++) {
341 		if (!strncmp(flags_options[i], buf, strlen(flags_options[i]))) {
342 			inj_type = i;
343 			return 0;
344 		}
345 	}
346 	return -EINVAL;
347 }
348 
349 static ssize_t flags_read(struct file *filp, char __user *ubuf,
350 			  size_t cnt, loff_t *ppos)
351 {
352 	char buf[MAX_FLAG_OPT_SIZE];
353 	int n;
354 
355 	n = sprintf(buf, "%s\n", flags_options[inj_type]);
356 
357 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
358 }
359 
360 static ssize_t flags_write(struct file *filp, const char __user *ubuf,
361 			   size_t cnt, loff_t *ppos)
362 {
363 	char buf[MAX_FLAG_OPT_SIZE], *__buf;
364 	int err;
365 
366 	if (!cnt || cnt > MAX_FLAG_OPT_SIZE)
367 		return -EINVAL;
368 
369 	if (copy_from_user(&buf, ubuf, cnt))
370 		return -EFAULT;
371 
372 	buf[cnt - 1] = 0;
373 
374 	/* strip whitespace */
375 	__buf = strstrip(buf);
376 
377 	err = __set_inj(__buf);
378 	if (err) {
379 		pr_err("%s: Invalid flags value: %s\n", __func__, __buf);
380 		return err;
381 	}
382 
383 	*ppos += cnt;
384 
385 	return cnt;
386 }
387 
/* fops for the "flags" file: read/write the injection type as text. */
static const struct file_operations flags_fops = {
	.read           = flags_read,
	.write          = flags_write,
	.llseek         = generic_file_llseek,
};
393 
394 /*
395  * On which CPU to inject?
396  */
397 MCE_INJECT_GET(extcpu);
398 
399 static int inj_extcpu_set(void *data, u64 val)
400 {
401 	struct mce *m = (struct mce *)data;
402 
403 	if (val >= nr_cpu_ids || !cpu_online(val)) {
404 		pr_err("%s: Invalid CPU: %llu\n", __func__, val);
405 		return -EINVAL;
406 	}
407 	m->extcpu = val;
408 	return 0;
409 }
410 
411 DEFINE_SIMPLE_ATTRIBUTE(extcpu_fops, inj_extcpu_get, inj_extcpu_set, "%llu\n");
412 
/* Raise a machine check exception (vector 18) on the CPU this runs on. */
static void trigger_mce(void *info)
{
	asm volatile("int $18");
}
417 
/* Raise the AMD deferred-error APIC interrupt on the CPU this runs on. */
static void trigger_dfr_int(void *info)
{
	asm volatile("int %0" :: "i" (DEFERRED_ERROR_VECTOR));
}
422 
/* Raise the MCA threshold APIC interrupt on the CPU this runs on. */
static void trigger_thr_int(void *info)
{
	asm volatile("int %0" :: "i" (THRESHOLD_APIC_VECTOR));
}
427 
428 static u32 get_nbc_for_node(int node_id)
429 {
430 	struct cpuinfo_x86 *c = &boot_cpu_data;
431 	u32 cores_per_node;
432 
433 	cores_per_node = (c->x86_max_cores * smp_num_siblings) / amd_get_nodes_per_socket();
434 
435 	return cores_per_node * node_id;
436 }
437 
438 static void toggle_nb_mca_mst_cpu(u16 nid)
439 {
440 	struct amd_northbridge *nb;
441 	struct pci_dev *F3;
442 	u32 val;
443 	int err;
444 
445 	nb = node_to_amd_nb(nid);
446 	if (!nb)
447 		return;
448 
449 	F3 = nb->misc;
450 	if (!F3)
451 		return;
452 
453 	err = pci_read_config_dword(F3, NBCFG, &val);
454 	if (err) {
455 		pr_err("%s: Error reading F%dx%03x.\n",
456 		       __func__, PCI_FUNC(F3->devfn), NBCFG);
457 		return;
458 	}
459 
460 	if (val & BIT(27))
461 		return;
462 
463 	pr_err("%s: Set D18F3x44[NbMcaToMstCpuEn] which BIOS hasn't done.\n",
464 	       __func__);
465 
466 	val |= BIT(27);
467 	err = pci_write_config_dword(F3, NBCFG, val);
468 	if (err)
469 		pr_err("%s: Error writing F%dx%03x.\n",
470 		       __func__, PCI_FUNC(F3->devfn), NBCFG);
471 }
472 
/*
 * Program the injected values into the MCA MSRs of the CPU this runs on
 * (invoked via smp_call_function_single()). @info points at the shared
 * i_mce record; it is copied locally before any MSR write.
 */
static void prepare_msrs(void *info)
{
	struct mce m = *(struct mce *)info;
	u8 b = m.bank;

	wrmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);

	if (boot_cpu_has(X86_FEATURE_SMCA)) {
		/* Deferred errors live in the SMCA DE{STAT,ADDR} registers. */
		if (m.inject_flags == DFR_INT_INJ) {
			wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(b), m.status);
			wrmsrl(MSR_AMD64_SMCA_MCx_DEADDR(b), m.addr);
		} else {
			wrmsrl(MSR_AMD64_SMCA_MCx_STATUS(b), m.status);
			wrmsrl(MSR_AMD64_SMCA_MCx_ADDR(b), m.addr);
		}

		wrmsrl(MSR_AMD64_SMCA_MCx_MISC(b), m.misc);
		wrmsrl(MSR_AMD64_SMCA_MCx_SYND(b), m.synd);
	} else {
		/* Legacy (pre-SMCA) MCA register layout. */
		wrmsrl(MSR_IA32_MCx_STATUS(b), m.status);
		wrmsrl(MSR_IA32_MCx_ADDR(b), m.addr);
		wrmsrl(MSR_IA32_MCx_MISC(b), m.misc);
	}
}
497 
/*
 * Perform the injection configured through debugfs: for "sw" just hand the
 * record to the decoding chain; otherwise program the MCA MSRs on the
 * target CPU and trigger the matching exception or interrupt vector.
 */
static void do_inject(void)
{
	u64 mcg_status = 0;
	unsigned int cpu = i_mce.extcpu;
	u8 b = i_mce.bank;

	i_mce.tsc = rdtsc_ordered();

	i_mce.status |= MCI_STATUS_VAL;

	/* Mark auxiliary registers valid only when values were supplied. */
	if (i_mce.misc)
		i_mce.status |= MCI_STATUS_MISCV;

	if (i_mce.synd)
		i_mce.status |= MCI_STATUS_SYNDV;

	if (inj_type == SW_INJ) {
		mce_log(&i_mce);
		return;
	}

	/* prep MCE global settings for the injection */
	mcg_status = MCG_STATUS_MCIP | MCG_STATUS_EIPV;

	/* Signal a restartable IP only if the error is not PCC. */
	if (!(i_mce.status & MCI_STATUS_PCC))
		mcg_status |= MCG_STATUS_RIPV;

	/*
	 * Ensure necessary status bits for deferred errors:
	 * - MCx_STATUS[Deferred]: make sure it is a deferred error
	 * - MCx_STATUS[UC] cleared: deferred errors are _not_ UC
	 */
	if (inj_type == DFR_INT_INJ) {
		i_mce.status |= MCI_STATUS_DEFERRED;
		i_mce.status &= ~MCI_STATUS_UC;
	}

	/*
	 * For multi node CPUs, logging and reporting of bank 4 errors happens
	 * only on the node base core. Refer to D18F3x44[NbMcaToMstCpuEn] for
	 * Fam10h and later BKDGs.
	 */
	if (boot_cpu_has(X86_FEATURE_AMD_DCM) &&
	    b == 4 &&
	    boot_cpu_data.x86 < 0x17) {
		toggle_nb_mca_mst_cpu(topology_die_id(cpu));
		cpu = get_nbc_for_node(topology_die_id(cpu));
	}

	cpus_read_lock();
	if (!cpu_online(cpu))
		goto err;

	/* Temporarily allow writes to the (read-only) status MSRs. */
	toggle_hw_mce_inject(cpu, true);

	i_mce.mcgstatus = mcg_status;
	i_mce.inject_flags = inj_type;
	smp_call_function_single(cpu, prepare_msrs, &i_mce, 0);

	toggle_hw_mce_inject(cpu, false);

	/* Fire the vector matching the requested injection type. */
	switch (inj_type) {
	case DFR_INT_INJ:
		smp_call_function_single(cpu, trigger_dfr_int, NULL, 0);
		break;
	case THR_INT_INJ:
		smp_call_function_single(cpu, trigger_thr_int, NULL, 0);
		break;
	default:
		smp_call_function_single(cpu, trigger_mce, NULL, 0);
	}

err:
	cpus_read_unlock();

}
574 
575 /*
576  * This denotes into which bank we're injecting and triggers
577  * the injection, at the same time.
578  */
579 static int inj_bank_set(void *data, u64 val)
580 {
581 	struct mce *m = (struct mce *)data;
582 	u8 n_banks;
583 	u64 cap;
584 
585 	/* Get bank count on target CPU so we can handle non-uniform values. */
586 	rdmsrl_on_cpu(m->extcpu, MSR_IA32_MCG_CAP, &cap);
587 	n_banks = cap & MCG_BANKCNT_MASK;
588 
589 	if (val >= n_banks) {
590 		pr_err("MCA bank %llu non-existent on CPU%d\n", val, m->extcpu);
591 		return -EINVAL;
592 	}
593 
594 	m->bank = val;
595 
596 	/*
597 	 * sw-only injection allows to write arbitrary values into the MCA
598 	 * registers because it tests only the decoding paths.
599 	 */
600 	if (inj_type == SW_INJ)
601 		goto inject;
602 
603 	/*
604 	 * Read IPID value to determine if a bank is populated on the target
605 	 * CPU.
606 	 */
607 	if (cpu_feature_enabled(X86_FEATURE_SMCA)) {
608 		u64 ipid;
609 
610 		if (rdmsrl_on_cpu(m->extcpu, MSR_AMD64_SMCA_MCx_IPID(val), &ipid)) {
611 			pr_err("Error reading IPID on CPU%d\n", m->extcpu);
612 			return -EINVAL;
613 		}
614 
615 		if (!ipid) {
616 			pr_err("Cannot inject into unpopulated bank %llu\n", val);
617 			return -ENODEV;
618 		}
619 	}
620 
621 inject:
622 	do_inject();
623 
624 	/* Reset injection struct */
625 	setup_inj_struct(&i_mce);
626 
627 	return 0;
628 }
629 
/* The "bank" attribute: reading reports the bank, writing injects. */
MCE_INJECT_GET(bank);

DEFINE_SIMPLE_ATTRIBUTE(bank_fops, inj_bank_get, inj_bank_set, "%llu\n");
633 
/* Help text served by the README debugfs file. */
static const char readme_msg[] =
"Description of the files and their usages:\n"
"\n"
"Note1: i refers to the bank number below.\n"
"Note2: See respective BKDGs for the exact bit definitions of the files below\n"
"as they mirror the hardware registers.\n"
"\n"
"status:\t Set MCi_STATUS: the bits in that MSR control the error type and\n"
"\t attributes of the error which caused the MCE.\n"
"\n"
"misc:\t Set MCi_MISC: provide auxiliary info about the error. It is mostly\n"
"\t used for error thresholding purposes and its validity is indicated by\n"
"\t MCi_STATUS[MiscV].\n"
"\n"
"synd:\t Set MCi_SYND: provide syndrome info about the error. Only valid on\n"
"\t Scalable MCA systems, and its validity is indicated by MCi_STATUS[SyndV].\n"
"\n"
"addr:\t Error address value to be written to MCi_ADDR. Log address information\n"
"\t associated with the error.\n"
"\n"
"cpu:\t The CPU to inject the error on.\n"
"\n"
"bank:\t Specify the bank you want to inject the error into: the number of\n"
"\t banks in a processor varies and is family/model-specific, therefore, the\n"
"\t supplied value is sanity-checked. Setting the bank value also triggers the\n"
"\t injection.\n"
"\n"
"flags:\t Injection type to be performed. Writing to this file will trigger a\n"
"\t real machine check, an APIC interrupt or invoke the error decoder routines\n"
"\t for AMD processors.\n"
"\n"
"\t Allowed error injection types:\n"
"\t  - \"sw\": Software error injection. Decode error to a human-readable \n"
"\t    format only. Safe to use.\n"
"\t  - \"hw\": Hardware error injection. Causes the #MC exception handler to \n"
"\t    handle the error. Be warned: might cause system panic if MCi_STATUS[PCC] \n"
"\t    is set. Therefore, consider setting (debugfs_mountpoint)/mce/fake_panic \n"
"\t    before injecting.\n"
"\t  - \"df\": Trigger APIC interrupt for Deferred error. Causes deferred \n"
"\t    error APIC interrupt handler to handle the error if the feature \n"
"\t    is present in hardware. \n"
"\t  - \"th\": Trigger APIC interrupt for Threshold errors. Causes threshold \n"
"\t    APIC interrupt handler to handle the error. \n"
"\n"
"ipid:\t IPID (AMD-specific)\n"
"\n";
680 
/* Copy the README help text above out to userspace. */
static ssize_t
inj_readme_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
					readme_msg, strlen(readme_msg));
}
688 
/* The README file is read-only. */
static const struct file_operations readme_fops = {
	.read		= inj_readme_read,
};
692 
/* Table describing every file created under the mce-inject directory. */
static struct dfs_node {
	char *name;				/* debugfs file name */
	const struct file_operations *fops;	/* its file operations */
	umode_t perm;				/* its permission bits */
} dfs_fls[] = {
	{ .name = "status",	.fops = &status_fops, .perm = S_IRUSR | S_IWUSR },
	{ .name = "misc",	.fops = &misc_fops,   .perm = S_IRUSR | S_IWUSR },
	{ .name = "addr",	.fops = &addr_fops,   .perm = S_IRUSR | S_IWUSR },
	{ .name = "synd",	.fops = &synd_fops,   .perm = S_IRUSR | S_IWUSR },
	{ .name = "ipid",	.fops = &ipid_fops,   .perm = S_IRUSR | S_IWUSR },
	{ .name = "bank",	.fops = &bank_fops,   .perm = S_IRUSR | S_IWUSR },
	{ .name = "flags",	.fops = &flags_fops,  .perm = S_IRUSR | S_IWUSR },
	{ .name = "cpu",	.fops = &extcpu_fops, .perm = S_IRUSR | S_IWUSR },
	{ .name = "README",	.fops = &readme_fops, .perm = S_IRUSR | S_IRGRP | S_IROTH },
};
708 
709 static void __init debugfs_init(void)
710 {
711 	unsigned int i;
712 
713 	dfs_inj = debugfs_create_dir("mce-inject", NULL);
714 
715 	for (i = 0; i < ARRAY_SIZE(dfs_fls); i++)
716 		debugfs_create_file(dfs_fls[i].name, dfs_fls[i].perm, dfs_inj,
717 				    &i_mce, dfs_fls[i].fops);
718 }
719 
/*
 * Module init: allocate the broadcast cpumask, create the debugfs files,
 * and hook into the NMI handler and the MCE injector notifier chain.
 */
static int __init inject_init(void)
{
	if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
		return -ENOMEM;

	debugfs_init();

	register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0, "mce_notify");
	mce_register_injector_chain(&inject_nb);

	setup_inj_struct(&i_mce);

	pr_info("Machine check injector initialized\n");

	return 0;
}
736 
/* Module teardown: unregister everything in reverse order of inject_init(). */
static void __exit inject_exit(void)
{

	mce_unregister_injector_chain(&inject_nb);
	unregister_nmi_handler(NMI_LOCAL, "mce_notify");

	debugfs_remove_recursive(dfs_inj);
	dfs_inj = NULL;

	/* Scrub the table so stale fops pointers can't be reused. */
	memset(&dfs_fls, 0, sizeof(dfs_fls));

	free_cpumask_var(mce_inject_cpumask);
}
750 
751 module_init(inject_init);
752 module_exit(inject_exit);
753 MODULE_LICENSE("GPL");
754