xref: /linux/arch/x86/lib/copy_user_64.S (revision c0e297dc61f8d4453e07afbea1fa8d0e67cd4a34)
/*
 * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v2.
 *
 * Functions to copy from and to user space.
 */

#include <linux/linkage.h>
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>

/* Standard copy_to_user with segment limit checking */
ENTRY(_copy_to_user)
	GET_THREAD_INFO(%rax)
	movq %rdi,%rcx
	addq %rdx,%rcx
	jc bad_to_user
	cmpq TI_addr_limit(%rax),%rcx
	ja bad_to_user
	ALTERNATIVE_2 "jmp copy_user_generic_unrolled",		\
		      "jmp copy_user_generic_string",		\
		      X86_FEATURE_REP_GOOD,			\
		      "jmp copy_user_enhanced_fast_string",	\
		      X86_FEATURE_ERMS
ENDPROC(_copy_to_user)
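
/*
 * The check above is, roughly, access_ok() done by hand.  A C sketch,
 * for illustration only (names follow this kernel's thread_info):
 *
 *	if (dst + count < dst)			// addq set CF: wraparound
 *		goto bad_to_user;
 *	if (dst + count > current_thread_info()->addr_limit.seg)
 *		goto bad_to_user;		// past the user segment
 *
 * The ALTERNATIVE_2 block is patched at boot so that exactly one jmp
 * survives: copy_user_generic_unrolled by default, the rep-movsq
 * variant on CPUs with X86_FEATURE_REP_GOOD, and the rep-movsb variant
 * on CPUs with X86_FEATURE_ERMS.
 */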

/* Standard copy_from_user with segment limit checking */
ENTRY(_copy_from_user)
	GET_THREAD_INFO(%rax)
	movq %rsi,%rcx
	addq %rdx,%rcx
	jc bad_from_user
	cmpq TI_addr_limit(%rax),%rcx
	ja bad_from_user
	ALTERNATIVE_2 "jmp copy_user_generic_unrolled",		\
		      "jmp copy_user_generic_string",		\
		      X86_FEATURE_REP_GOOD,			\
		      "jmp copy_user_enhanced_fast_string",	\
		      X86_FEATURE_ERMS
ENDPROC(_copy_from_user)

	.section .fixup,"ax"
	/* must zero dest */
ENTRY(bad_from_user)
bad_from_user:
	movl %edx,%ecx
	xorl %eax,%eax
	rep
	stosb
bad_to_user:
	movl %edx,%eax
	ret
ENDPROC(bad_from_user)
	.previous
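
/*
 * bad_from_user must zero the whole destination before failing: the
 * kernel buffer would otherwise be left holding stale kernel data that
 * could later leak to user space.  "rep stosb" with %eax = 0 clears all
 * %edx bytes, then %edx (= nothing copied) is returned.  bad_to_user
 * can skip the clearing, since its destination is user memory.  A rough
 * C model (illustrative only, not an actual kernel helper):
 *
 *	static unsigned long bad_from_user(void *dst, unsigned long count)
 *	{
 *		memset(dst, 0, count);	// never leak kernel memory
 *		return count;		// count bytes were NOT copied
 *	}
 */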

/*
 * copy_user_generic_unrolled - memory copy with exception handling.
 * This version is for CPUs like P4 that don't have efficient microcode
 * for rep movsq.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
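/*
 * After ALIGN_DESTINATION has byte-copied until %rdi is 8-byte aligned,
 * the copy runs in three phases: 64-byte chunks through eight registers
 * (labels 1-16), then single quadwords (labels 18/19), then single
 * bytes (labels 21/22).  %ecx counts iterations of the current phase;
 * %edx holds the bytes still owed to the later phases.
 */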
ENTRY(copy_user_generic_unrolled)
	ASM_STAC
	cmpl $8,%edx
	jb 20f		/* less than 8 bytes, go to byte copy loop */
	ALIGN_DESTINATION
	movl %edx,%ecx
	andl $63,%edx
	shrl $6,%ecx
	jz 17f
1:	movq (%rsi),%r8
2:	movq 1*8(%rsi),%r9
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
5:	movq %r8,(%rdi)
6:	movq %r9,1*8(%rdi)
7:	movq %r10,2*8(%rdi)
8:	movq %r11,3*8(%rdi)
9:	movq 4*8(%rsi),%r8
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movq %r8,4*8(%rdi)
14:	movq %r9,5*8(%rdi)
15:	movq %r10,6*8(%rdi)
16:	movq %r11,7*8(%rdi)
	leaq 64(%rsi),%rsi
	leaq 64(%rdi),%rdi
	decl %ecx
	jnz 1b
17:	movl %edx,%ecx
	andl $7,%edx
	shrl $3,%ecx
	jz 20f
18:	movq (%rsi),%r8
19:	movq %r8,(%rdi)
	leaq 8(%rsi),%rsi
	leaq 8(%rdi),%rdi
	decl %ecx
	jnz 18b
20:	andl %edx,%edx
	jz 23f
	movl %edx,%ecx
21:	movb (%rsi),%al
22:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz 21b
23:	xor %eax,%eax
	ASM_CLAC
	ret

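/*
 * Fault recovery: the _ASM_EXTABLE entries further down send a fault at
 * any numbered instruction to one of the fixup labels below, which
 * rebuild the outstanding byte count in %edx.  At 30 (64-byte loop),
 * %ecx holds the 64-byte iterations not yet completed, so shll $6
 * converts them to bytes before the tail in %edx is added on.  At 40
 * (quadword loop), %edx += %ecx * 8.  At 50 (byte loop), %ecx already
 * is the byte count.  copy_user_handle_tail then retries the remainder
 * byte by byte and returns how many bytes could not be copied.
 */
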
	.section .fixup,"ax"
30:	shll $6,%ecx
	addl %ecx,%edx
	jmp 60f
40:	leal (%rdx,%rcx,8),%edx
	jmp 60f
50:	movl %ecx,%edx
60:	jmp copy_user_handle_tail /* ecx is zerorest also */
	.previous

	_ASM_EXTABLE(1b,30b)
	_ASM_EXTABLE(2b,30b)
	_ASM_EXTABLE(3b,30b)
	_ASM_EXTABLE(4b,30b)
	_ASM_EXTABLE(5b,30b)
	_ASM_EXTABLE(6b,30b)
	_ASM_EXTABLE(7b,30b)
	_ASM_EXTABLE(8b,30b)
	_ASM_EXTABLE(9b,30b)
	_ASM_EXTABLE(10b,30b)
	_ASM_EXTABLE(11b,30b)
	_ASM_EXTABLE(12b,30b)
	_ASM_EXTABLE(13b,30b)
	_ASM_EXTABLE(14b,30b)
	_ASM_EXTABLE(15b,30b)
	_ASM_EXTABLE(16b,30b)
	_ASM_EXTABLE(18b,40b)
	_ASM_EXTABLE(19b,40b)
	_ASM_EXTABLE(21b,50b)
	_ASM_EXTABLE(22b,50b)
ENDPROC(copy_user_generic_unrolled)

/* Some CPUs run faster using the string copy instructions.
 * This is also a lot simpler. Use them when possible.
 *
 * Only 4GB of copy is supported. This shouldn't be a problem
 * because the kernel normally only writes from/to page sized chunks
 * even if user space passed a longer buffer.
 * Copying more would also be dangerous because both Intel and AMD have
 * errata with rep movsq > 4GB. If someone feels the need to fix this,
 * please keep those errata in mind.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
ENTRY(copy_user_generic_string)
	ASM_STAC
	cmpl $8,%edx
	jb 2f		/* less than 8 bytes, go to byte copy loop */
	ALIGN_DESTINATION
	movl %edx,%ecx
	shrl $3,%ecx
	andl $7,%edx
1:	rep
	movsq
2:	movl %edx,%ecx
3:	rep
	movsb
	xorl %eax,%eax
	ASM_CLAC
	ret

	.section .fixup,"ax"
11:	leal (%rdx,%rcx,8),%ecx
12:	movl %ecx,%edx		/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	_ASM_EXTABLE(1b,11b)
	_ASM_EXTABLE(3b,12b)
ENDPROC(copy_user_generic_string)
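
/*
 * The 4GB cap falls straight out of the register usage: "movl %edx,%ecx"
 * feeds only the low 32 bits of the count to the rep prefix.  On a rep
 * movsq fault, fixup 11 rebuilds the byte count as the quadwords left
 * in %rcx times 8 plus the tail bytes still in %edx; fixup 12 handles
 * the rep movsb tail, where %ecx already counts bytes.
 */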

/*
 * Some CPUs support enhanced REP MOVSB/STOSB instructions (ERMS).
 * It is recommended to use them when the feature is enabled.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
ENTRY(copy_user_enhanced_fast_string)
	ASM_STAC
	movl %edx,%ecx
1:	rep
	movsb
	xorl %eax,%eax
	ASM_CLAC
	ret

	.section .fixup,"ax"
12:	movl %ecx,%edx		/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	_ASM_EXTABLE(1b,12b)
ENDPROC(copy_user_enhanced_fast_string)
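
/*
 * With ERMS the expectation is that the rep movsb microcode itself
 * handles alignment and short lengths efficiently, so this variant
 * needs no ALIGN_DESTINATION and no minimum-size cut-off.  On a fault,
 * rep movsb leaves the bytes still to copy in %ecx, so the fixup only
 * has to move them into %edx for copy_user_handle_tail.
 */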

/*
 * __copy_user_nocache - memory copy with exception handling that
 * avoids polluting the cache: the destination is written with
 * non-temporal movnti stores (the source is still read with ordinary
 * movq loads), which helps when the copied data won't be read again
 * soon.
 */
ENTRY(__copy_user_nocache)
	ASM_STAC
	cmpl $8,%edx
	jb 20f		/* less than 8 bytes, go to byte copy loop */
	ALIGN_DESTINATION
	movl %edx,%ecx
	andl $63,%edx
	shrl $6,%ecx
	jz 17f
1:	movq (%rsi),%r8
2:	movq 1*8(%rsi),%r9
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
5:	movnti %r8,(%rdi)
6:	movnti %r9,1*8(%rdi)
7:	movnti %r10,2*8(%rdi)
8:	movnti %r11,3*8(%rdi)
9:	movq 4*8(%rsi),%r8
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movnti %r8,4*8(%rdi)
14:	movnti %r9,5*8(%rdi)
15:	movnti %r10,6*8(%rdi)
16:	movnti %r11,7*8(%rdi)
	leaq 64(%rsi),%rsi
	leaq 64(%rdi),%rdi
	decl %ecx
	jnz 1b
17:	movl %edx,%ecx
	andl $7,%edx
	shrl $3,%ecx
	jz 20f
18:	movq (%rsi),%r8
19:	movnti %r8,(%rdi)
	leaq 8(%rsi),%rsi
	leaq 8(%rdi),%rdi
	decl %ecx
	jnz 18b
20:	andl %edx,%edx
	jz 23f
	movl %edx,%ecx
21:	movb (%rsi),%al
22:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz 21b
23:	xorl %eax,%eax
	ASM_CLAC
	sfence
	ret

	.section .fixup,"ax"
30:	shll $6,%ecx
	addl %ecx,%edx
	jmp 60f
40:	lea (%rdx,%rcx,8),%rdx
	jmp 60f
50:	movl %ecx,%edx
60:	sfence
	jmp copy_user_handle_tail
	.previous

	_ASM_EXTABLE(1b,30b)
	_ASM_EXTABLE(2b,30b)
	_ASM_EXTABLE(3b,30b)
	_ASM_EXTABLE(4b,30b)
	_ASM_EXTABLE(5b,30b)
	_ASM_EXTABLE(6b,30b)
	_ASM_EXTABLE(7b,30b)
	_ASM_EXTABLE(8b,30b)
	_ASM_EXTABLE(9b,30b)
	_ASM_EXTABLE(10b,30b)
	_ASM_EXTABLE(11b,30b)
	_ASM_EXTABLE(12b,30b)
	_ASM_EXTABLE(13b,30b)
	_ASM_EXTABLE(14b,30b)
	_ASM_EXTABLE(15b,30b)
	_ASM_EXTABLE(16b,30b)
	_ASM_EXTABLE(18b,40b)
	_ASM_EXTABLE(19b,40b)
	_ASM_EXTABLE(21b,50b)
	_ASM_EXTABLE(22b,50b)
ENDPROC(__copy_user_nocache)

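/*
 * movnti stores are weakly ordered, so both the success path and the
 * fault fixups issue sfence before leaving: it ensures all pending
 * non-temporal stores are globally visible before the caller can act
 * on the completed (or partially completed) copy.
 */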