xref: /linux/arch/x86/include/asm/uaccess_32.h (revision e8d235d4d8fb8957bae5f6ed4521115203a00d8b)
#ifndef _ASM_X86_UACCESS_32_H
#define _ASM_X86_UACCESS_32_H

/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>

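/*
 * Out-of-line fallbacks for the copy helpers below (used for non-constant
 * or larger sizes); their definitions live in arch/x86/lib/usercopy_32.c.
 */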
unsigned long __must_check __copy_to_user_ll
		(void __user *to, const void *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nozero
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache_nozero
		(void *to, const void __user *from, unsigned long n);

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space pages are pinned,
 * so that we don't take a page fault and sleep.
 *
 * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
 * we return the initial request size (1, 2 or 4), as copy_*_user should do.
 * If a store crosses a page boundary and gets a fault, the x86 will not write
 * anything, so this is accurate.
 */
static __always_inline unsigned long __must_check
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__put_user_size(*(u8 *)from, (u8 __user *)to,
					1, ret, 1);
			return ret;
		case 2:
			__put_user_size(*(u16 *)from, (u16 __user *)to,
					2, ret, 2);
			return ret;
		case 4:
			__put_user_size(*(u32 *)from, (u32 __user *)to,
					4, ret, 4);
			return ret;
		}
	}
	return __copy_to_user_ll(to, from, n);
}

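/*
 * Illustrative sketch (not part of this header, names are made up): a
 * typical atomic-context caller checks the range with access_ok() and
 * pre-faults the destination page beforehand, then falls back to the
 * sleeping variant if the inatomic copy faults:
 *
 *	pagefault_disable();
 *	left = __copy_to_user_inatomic(ubuf, kbuf, len);
 *	pagefault_enable();
 *	if (left)
 *		left = __copy_to_user(ubuf, kbuf, len);
 */
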
/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	return __copy_to_user_inatomic(to, from, n);
}

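/*
 * Illustrative sketch (not part of this header, names are made up): the
 * caller validates the destination range once and treats any non-zero
 * return as -EFAULT:
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, sizeof(val)))
 *		return -EFAULT;
 *	if (__copy_to_user(ubuf, &val, sizeof(val)))
 *		return -EFAULT;
 */
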
static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	/*
	 * Avoid zeroing the tail if the copy fails.
	 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
	 * but as the zeroing behaviour is only significant when n is not
	 * constant, that shouldn't be a problem.
	 */
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			return ret;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			return ret;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			return ret;
		}
	}
	return __copy_from_user_ll_nozero(to, from, n);
}

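/*
 * Illustrative note (not part of this header): unlike __copy_from_user(),
 * a failed __copy_from_user_inatomic() is not guaranteed to zero-pad the
 * uncopied tail, so a non-zero return usually means "retry with the
 * sleeping variant outside the atomic section" rather than "the buffer is
 * zero-filled".
 */
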
/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 *
 * An alternate version - __copy_from_user_inatomic() - may be called from
 * atomic context and will fail rather than sleep.  In this case the
 * uncopied bytes will *NOT* be padded with zeros.  See fs/filemap.h
 * for explanation of why this is needed.
 */
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			return ret;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			return ret;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			return ret;
		}
	}
	return __copy_from_user_ll(to, from, n);
}

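/*
 * Illustrative sketch (not part of this header, names are made up): a
 * typical caller checks the source range once and relies on the
 * zero-padding described above:
 *
 *	if (!access_ok(VERIFY_READ, ubuf, sizeof(val)))
 *		return -EFAULT;
 *	if (__copy_from_user(&val, ubuf, sizeof(val)))
 *		return -EFAULT;
 */

/*
 * Like __copy_from_user(), but the out-of-line path tries to minimise
 * cache pollution by using non-temporal stores for the kernel-side buffer
 * where the CPU supports them.  May sleep.
 */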
static __always_inline unsigned long __copy_from_user_nocache(void *to,
				const void __user *from, unsigned long n)
{
	might_fault();
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			return ret;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			return ret;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			return ret;
		}
	}
	return __copy_from_user_ll_nocache(to, from, n);
}

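/*
 * Atomic, non-caching variant: like __copy_from_user_inatomic() it may be
 * called from atomic context (it fails rather than sleeps) and does not
 * zero the destination tail on a fault; like __copy_from_user_nocache() it
 * tries to avoid cache pollution for the copied data.
 */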
static __always_inline unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_ll_nocache_nozero(to, from, n);
}

#endif /* _ASM_X86_UACCESS_32_H */