/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
rep_movs_alternative(void *to, const void *from, unsigned len);

/*
 * Returns the number of bytes left uncopied: 0 means the entire range
 * was transferred.
 */
static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned long len)
{
	stac();
	/*
	 * If the CPU has FSRM (Fast Short REP MOVSB), use 'rep movsb'
	 * directly. Otherwise, call out to rep_movs_alternative.
	 */
	asm volatile(
		"1:\n\t"
		ALTERNATIVE("rep movsb",
			    "call rep_movs_alternative", ALT_NOT(X86_FEATURE_FSRM))
		"2:\n"
		_ASM_EXTABLE_UA(1b, 2b)
		:"+c" (len), "+D" (to), "+S" (from), ASM_CALL_CONSTRAINT
		: : "memory", "rax", "r8", "r9", "r10", "r11");
	clac();
	return len;
}
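
/*
 * A minimal sketch of that contract, assuming a caller that has already
 * validated the user pointer (the buffer names here are hypothetical,
 * not part of this header):
 *
 *	unsigned long left = copy_user_generic(kbuf, ubuf, len);
 *	if (left)
 *		return -EFAULT;	// only (len - left) leading bytes are valid
 */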

static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic(dst, (__force void *)src, size);
}

static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst, src, size);
}
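
/*
 * Illustrative sketch: these raw helpers do no access_ok() check of
 * their own; the generic copy_from_user()/copy_to_user() wrappers
 * perform that check and then bottom out here. A typical (hypothetical)
 * caller looks like:
 *
 *	struct my_params p;			// hypothetical struct
 *
 *	if (copy_from_user(&p, user_ptr, sizeof(p)))
 *		return -EFAULT;			// range check or copy failed
 */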

extern long __copy_user_nocache(void *dst, const void __user *src, unsigned size);
extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
			   size_t len);

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	long ret;

	kasan_check_write(dst, size);
	stac();
	ret = __copy_user_nocache(dst, src, size);
	clac();
	return ret;
}
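
/*
 * Hedged sketch: the _inatomic_nocache variant may be called with page
 * faults disabled, so the caller must be prepared for a short copy. The
 * names below are hypothetical:
 *
 *	pagefault_disable();
 *	left = __copy_from_user_inatomic_nocache(dst, usrc, bytes);
 *	pagefault_enable();
 *	if (left)
 *		slow_path(dst, usrc, bytes);	// fall back where faults are OK
 */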

static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, src, size);
}
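
/*
 * The nocache/flushcache variants are intended for destinations that
 * must not be left dirty in the CPU caches, such as persistent-memory
 * write paths. A hedged sketch (buffer names are hypothetical):
 *
 *	// copy user data into a pmem-backed buffer; the flushcache copy
 *	// ensures the data does not linger dirty in the cache hierarchy
 *	if (__copy_from_user_flushcache(pmem_dst, ubuf, len))
 *		return -EFAULT;
 */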

/*
 * Zero Userspace.
 */

__must_check unsigned long
rep_stos_alternative(void __user *addr, unsigned long len);

static __always_inline __must_check unsigned long __clear_user(void __user *addr, unsigned long size)
{
	might_fault();
	stac();

	/*
	 * No "memory" clobber: the asm only writes to user space, which
	 * is not memory gcc knows about.
	 */
	asm volatile(
		"1:\n\t"
		ALTERNATIVE("rep stosb",
			    "call rep_stos_alternative", ALT_NOT(X86_FEATURE_FSRS))
		"2:\n"
		_ASM_EXTABLE_UA(1b, 2b)
		: "+c" (size), "+D" (addr), ASM_CALL_CONSTRAINT
		: "a" (0));	/* %rax holds the zero that 'rep stosb' stores */

	clac();

	return size;
}

static __always_inline unsigned long clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n))
		return __clear_user(to, n);
	return n;
}
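
/*
 * Usage sketch for clear_user(): zero @n bytes at user address @to after
 * an access_ok() range check. Like the copy helpers, it returns the
 * number of bytes that could NOT be zeroed (the names below are
 * hypothetical):
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */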
#endif /* _ASM_X86_UACCESS_64_H */