xref: /linux/arch/arm64/lib/copy_from_user.S (revision b85d45947951d23cb22d90caecf4c1eb81342c96)
/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>

/*
 * Copy from user space to a kernel buffer (alignment handled by the hardware)
 *
 * Parameters:
 *	x0 - to
 *	x1 - from
 *	x2 - n
 * Returns:
 *	x0 - bytes not copied
 */
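/*
 * Illustrative only, not part of this file: callers normally go through
 * copy_from_user(), which performs the access_ok() check and treats this
 * routine's non-zero "bytes not copied" return value as a fault. A minimal
 * C-level sketch (kbuf, ubuf and len are hypothetical names):
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */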
ENTRY(__copy_from_user)
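/*
 * With CONFIG_ARM64_PAN the alternative below is patched, on CPUs that
 * have ARM64_HAS_PAN, to clear PSTATE.PAN so that the USER() loads from
 * user space are permitted; PAN is set again before returning.
 */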
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
	    CONFIG_ARM64_PAN)
	add	x5, x1, x2			// upper user buffer boundary
	subs	x2, x2, #16
	b.mi	1f
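	// main loop: copy 16 bytes per iteration while at least 16 remain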
0:
USER(9f, ldp	x3, x4, [x1], #16)
	subs	x2, x2, #16
	stp	x3, x4, [x0], #16
	b.pl	0b
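	// fewer than 16 bytes left: copy any remaining 8-, 4-, 2- and 1-byte tail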
1:	adds	x2, x2, #8
	b.mi	2f
USER(9f, ldr	x3, [x1], #8	)
	sub	x2, x2, #8
	str	x3, [x0], #8
2:	adds	x2, x2, #4
	b.mi	3f
USER(9f, ldr	w3, [x1], #4	)
	sub	x2, x2, #4
	str	w3, [x0], #4
3:	adds	x2, x2, #2
	b.mi	4f
USER(9f, ldrh	w3, [x1], #2	)
	sub	x2, x2, #2
	strh	w3, [x0], #2
4:	adds	x2, x2, #1
	b.mi	5f
USER(9f, ldrb	w3, [x1]	)
	strb	w3, [x0]
5:	mov	x0, #0
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
	    CONFIG_ARM64_PAN)
	ret
ENDPROC(__copy_from_user)

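/*
 * Exception fixup: reached through the exception table when one of the
 * USER() accesses above faults. The remaining destination bytes are
 * zeroed and the count of bytes not copied is returned in x0.
 */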
	.section .fixup,"ax"
	.align	2
9:	sub	x2, x5, x1
	mov	x3, x2
10:	strb	wzr, [x0], #1			// zero remaining buffer space
	subs	x3, x3, #1
	b.ne	10b
	mov	x0, x2				// bytes not copied
	ret
	.previous