xref: /linux/arch/x86/lib/copy_mc.c (revision 955abe0a1b41de5ba61fe4cd614ebc123084d499)
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-2020 Intel Corporation. All rights reserved. */

#include <linux/jump_label.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/instrumented.h>
#include <linux/string.h>
#include <linux/types.h>

#include <asm/mce.h>

#ifdef CONFIG_X86_MCE
static DEFINE_STATIC_KEY_FALSE(copy_mc_fragile_key);

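/*
 * Incrementing the static key below routes copy_mc_to_kernel() and
 * copy_mc_to_user() to the cautious copy_mc_fragile() implementation.
 */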
void enable_copy_mc_fragile(void)
{
	static_branch_inc(&copy_mc_fragile_key);
}
#define copy_mc_fragile_enabled (static_branch_unlikely(&copy_mc_fragile_key))

/*
 * Similar to copy_user_handle_tail(), probe for the exact write fault
 * point or source exception point.
 */
__visible notrace unsigned long
copy_mc_fragile_handle_tail(char *to, char *from, unsigned len)
{
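	/*
	 * Copy one byte at a time so that a write fault or a machine
	 * check on the source stops at the exact failing byte; the
	 * remaining length is reported back to the caller.
	 */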
	for (; len; --len, to++, from++)
		if (copy_mc_fragile(to, from, 1))
			break;
	return len;
}
#else
/*
 * No point in doing careful copying, or consulting a static key when
 * there is no #MC handler in the CONFIG_X86_MCE=n case.
 */
void enable_copy_mc_fragile(void)
{
}
#define copy_mc_fragile_enabled (0)
#endif

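/*
 * Implemented in copy_mc_64.S: a 'rep movsb' based copy with exception
 * table entries, so it can return the number of bytes not copied when
 * the source triggers a machine check or the destination faults.
 */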
unsigned long copy_mc_enhanced_fast_string(void *dst, const void *src, unsigned len);

/**
 * copy_mc_to_kernel - memory copy that handles source exceptions
 *
 * @dst:	destination address
 * @src:	source address
 * @len:	number of bytes to copy
 *
 * Call into the 'fragile' version on systems that benefit from avoiding
 * corner case poison consumption scenarios, for example accessing
 * poison across 2 cachelines with a single instruction. Almost all
 * other use cases can use copy_mc_enhanced_fast_string() for a fast
 * recoverable copy, or fall back to plain memcpy().
 *
 * Return: 0 for success, or the number of bytes not copied if there was
 * an exception.
 */
unsigned long __must_check copy_mc_to_kernel(void *dst, const void *src, unsigned len)
{
	unsigned long ret;

	if (copy_mc_fragile_enabled) {
		instrument_memcpy_before(dst, src, len);
		ret = copy_mc_fragile(dst, src, len);
		instrument_memcpy_after(dst, src, len, ret);
		return ret;
	}
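	/*
	 * X86_FEATURE_ERMS: 'rep movsb' is efficient, and the assembly
	 * helper can stop early and return the remaining byte count if
	 * the source triggers a machine check or the destination faults.
	 */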
	if (static_cpu_has(X86_FEATURE_ERMS)) {
		instrument_memcpy_before(dst, src, len);
		ret = copy_mc_enhanced_fast_string(dst, src, len);
		instrument_memcpy_after(dst, src, len, ret);
		return ret;
	}
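	/* Neither variant applies: plain memcpy(), always reported as success. */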
	memcpy(dst, src, len);
	return 0;
}
EXPORT_SYMBOL_GPL(copy_mc_to_kernel);

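/*
 * Illustrative sketch (not part of the kernel sources) of how a caller
 * might consume the return value of copy_mc_to_kernel(): copy from
 * possibly-poisoned memory into a bounce buffer and turn a short copy
 * into -EIO. The names "read_from_media", "bounce" and "media_addr"
 * are hypothetical.
 *
 *	static int read_from_media(void *bounce, void *media_addr, size_t n)
 *	{
 *		unsigned long rem = copy_mc_to_kernel(bounce, media_addr, n);
 *
 *		return rem ? -EIO : 0;
 *	}
 */
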
unsigned long __must_check copy_mc_to_user(void __user *dst, const void *src, unsigned len)
{
	unsigned long ret;

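	/*
	 * The __force casts drop the __user annotation for the low-level
	 * helpers, and __uaccess_begin()/__uaccess_end() (STAC/CLAC on
	 * SMAP-capable CPUs) bracket the raw access to user memory.
	 */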
	if (copy_mc_fragile_enabled) {
		instrument_copy_to_user(dst, src, len);
		__uaccess_begin();
		ret = copy_mc_fragile((__force void *)dst, src, len);
		__uaccess_end();
		return ret;
	}

	if (static_cpu_has(X86_FEATURE_ERMS)) {
		instrument_copy_to_user(dst, src, len);
		__uaccess_begin();
		ret = copy_mc_enhanced_fast_string((__force void *)dst, src, len);
		__uaccess_end();
		return ret;
	}

	return copy_user_generic((__force void *)dst, src, len);
}