/* /linux/arch/riscv/lib/uaccess.S (revision 8e3ed5440b0c305dcd1d5fa7419bd8066d22ef42) */
#include <linux/linkage.h>
#include <linux/export.h>
#include <asm/asm.h>
#include <asm/asm-extable.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/alternative-macros.h>

	.macro fixup op reg addr lbl
100:
	\op \reg, \addr
	_asm_extable	100b, \lbl
	.endm
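
	/*
	 * Illustration only (not assembled): a guarded access such as
	 *
	 *	fixup lb a5, 0(a1), 10f
	 *
	 * expands to
	 *
	 * 100:	lb	a5, 0(a1)
	 *	_asm_extable	100b, 10f
	 *
	 * i.e. the access gets an exception-table entry redirecting a
	 * fault at local label 100 to the fixup code at label 10.
	 */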

SYM_FUNC_START(__asm_copy_to_user)
#ifdef CONFIG_RISCV_ISA_V
	ALTERNATIVE("j fallback_scalar_usercopy", "nop", 0, RISCV_ISA_EXT_v, CONFIG_RISCV_ISA_V)
	REG_L	t0, riscv_v_usercopy_threshold
	bltu	a2, t0, fallback_scalar_usercopy
	tail	enter_vector_usercopy
#endif
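
	/*
	 * Rough C sketch of the dispatch above, assuming the kernel and
	 * the CPU both support V (names mirror the symbols used here):
	 *
	 *	if (n >= riscv_v_usercopy_threshold)
	 *		return enter_vector_usercopy(dst, src, n);
	 *	else fall through to fallback_scalar_usercopy
	 */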
SYM_FUNC_START(fallback_scalar_usercopy)

	/* Enable access to user memory */
	li	t6, SR_SUM
	csrs	CSR_STATUS, t6
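	/*
	 * Setting SR_SUM in CSR_STATUS lets supervisor-mode loads and
	 * stores touch user pages; every exit path below clears it again.
	 */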

	/*
	 * Save the terminal address, which is used to compute the number
	 * of uncopied bytes if a fixup exception occurs.
	 */
	add	t5, a0, a2

	/*
	 * Register allocation for the code below:
	 * a0 - start of uncopied dst
	 * a1 - start of uncopied src
	 * a2 - size
	 * t0 - end of uncopied dst
	 */
	add	t0, a0, a2

	/*
	 * Use byte copy only if the size is too small.
	 * SZREG is 4 on RV32 and 8 on RV64.
	 */
	li	a3, 9*SZREG /* size must exceed the word_copy minimum */
	bltu	a2, a3, .Lbyte_copy_tail
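
	/*
	 * For example, on RV64 (SZREG = 8) the threshold is 9*8 = 72 bytes.
	 * A plausible reading of the 9*SZREG choice: even after an
	 * up-to-(SZREG-1)-byte alignment head, more than one full 8*SZREG
	 * word_copy iteration is still guaranteed to remain.
	 */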

	/*
	 * Copy the first bytes until dst is aligned to a word boundary.
	 * a0 - start of dst
	 * t1 - start of aligned dst
	 */
	addi	t1, a0, SZREG-1
	andi	t1, t1, ~(SZREG-1)
	/* dst is already aligned, skip */
	beq	a0, t1, .Lskip_align_dst
1:
	/* a5 - one byte for copying data */
	fixup lb      a5, 0(a1), 10f
	addi	a1, a1, 1	/* src */
	fixup sb      a5, 0(a0), 10f
	addi	a0, a0, 1	/* dst */
	bltu	a0, t1, 1b	/* t1 - start of aligned dst */
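
	/*
	 * Example of the rounding above, assuming RV64 and a0 = 0x1003:
	 * t1 = (0x1003 + 7) & ~7 = 0x1008, so five bytes are copied one at
	 * a time before dst reaches the word boundary.
	 */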

.Lskip_align_dst:
	/*
	 * Now dst is aligned.
	 * Use shift-copy if src is misaligned;
	 * use word-copy if both src and dst are aligned, because then
	 * no shifting is needed at all.
	 */
	/* a1 - start of src */
	andi	a3, a1, SZREG-1
	bnez	a3, .Lshift_copy

.Lword_copy:
	/*
	 * Both src and dst are aligned: unrolled word copy.
	 *
	 * a0 - start of aligned dst
	 * a1 - start of aligned src
	 * t0 - end of aligned dst
	 */
	addi	t0, t0, -(8*SZREG) /* so the loop cannot overrun */
2:
	fixup REG_L   a4,        0(a1), 10f
	fixup REG_L   a5,    SZREG(a1), 10f
	fixup REG_L   a6,  2*SZREG(a1), 10f
	fixup REG_L   a7,  3*SZREG(a1), 10f
	fixup REG_L   t1,  4*SZREG(a1), 10f
	fixup REG_L   t2,  5*SZREG(a1), 10f
	fixup REG_L   t3,  6*SZREG(a1), 10f
	fixup REG_L   t4,  7*SZREG(a1), 10f
	fixup REG_S   a4,        0(a0), 10f
	fixup REG_S   a5,    SZREG(a0), 10f
	fixup REG_S   a6,  2*SZREG(a0), 10f
	fixup REG_S   a7,  3*SZREG(a0), 10f
	fixup REG_S   t1,  4*SZREG(a0), 10f
	fixup REG_S   t2,  5*SZREG(a0), 10f
	fixup REG_S   t3,  6*SZREG(a0), 10f
	fixup REG_S   t4,  7*SZREG(a0), 10f
	addi	a0, a0, 8*SZREG
	addi	a1, a1, 8*SZREG
	bltu	a0, t0, 2b

	addi	t0, t0, 8*SZREG /* restore the original end of dst */
	j	.Lbyte_copy_tail
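
	/*
	 * Worked example, assuming RV64, dst aligned at 0x1000 and
	 * a2 = 200: t0 = 0x10c8 is lowered to 0x1088, the loop runs with
	 * a0 = 0x1000, 0x1040 and 0x1080 (192 bytes, every access in
	 * bounds), and the remaining 8 bytes go to .Lbyte_copy_tail.
	 */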

.Lshift_copy:

	/*
	 * Word copy with shifting.
	 * For a misaligned src we still perform aligned word loads, but
	 * each stored word combines the value fetched in the previous
	 * iteration with the current one, using shifts.
	 * This is safe because the aligned loads over-read src by less
	 * than a word, and page boundaries are word-aligned, so no extra
	 * page is ever touched.
	 *
	 * a0 - start of aligned dst
	 * a1 - start of src
	 * a3 - a1 & mask:(SZREG-1)
	 * t0 - end of uncopied dst
	 * t1 - end of aligned dst
	 */
	/* Calculate the aligned word boundary for dst */
	andi	t1, t0, ~(SZREG-1)
	/* Round the unaligned src down to an aligned address */
	andi	a1, a1, ~(SZREG-1)

	/*
	 * Calculate shifts
	 * t3 - prev shift
	 * t4 - current shift
	 */
	slli	t3, a3, 3 /* convert the byte offset in a3 to bits */
	li	a5, SZREG*8
	sub	t4, a5, t3
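
	/*
	 * Example, assuming RV64 and a src offset a3 = 3: t3 = 24 and
	 * t4 = 40, so each stored doubleword is
	 * (current word >> 24) | (next word << 40): the upper five bytes
	 * of one aligned load joined to the lower three bytes of the next
	 * (little-endian).
	 */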

	/* Load the first word to combine with the second word */
	fixup REG_L   a5, 0(a1), 10f

3:
	/*
	 * Main shifting copy
	 *
	 * a0 - start of aligned dst
	 * a1 - start of aligned src
	 * t1 - end of aligned dst
	 */

	/* At least one iteration will be executed */
	srl	a4, a5, t3
	fixup REG_L   a5, SZREG(a1), 10f
	addi	a1, a1, SZREG
	sll	a2, a5, t4
	or	a2, a2, a4
	fixup REG_S   a2, 0(a0), 10f
	addi	a0, a0, SZREG
	bltu	a0, t1, 3b

	/* Revert src to its original unaligned value */
	add	a1, a1, a3

.Lbyte_copy_tail:
	/*
	 * Byte-copy anything left.
	 *
	 * a0 - start of remaining dst
	 * a1 - start of remaining src
	 * t0 - end of remaining dst
	 */
	bgeu	a0, t0, .Lout_copy_user  /* check if end of copy */
4:
	fixup lb      a5, 0(a1), 10f
	addi	a1, a1, 1	/* src */
	fixup sb      a5, 0(a0), 10f
	addi	a0, a0, 1	/* dst */
	bltu	a0, t0, 4b	/* t0 - end of dst */

.Lout_copy_user:
	/* Disable access to user memory */
	csrc	CSR_STATUS, t6
	li	a0, 0
	ret

	/* Exception fixup code */
10:
	/* Disable access to user memory */
	csrc	CSR_STATUS, t6
	sub	a0, t5, a0
	ret
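
	/*
	 * Example of the fixup return value: for a 100-byte copy that
	 * faults after 60 bytes, t5 = dst + 100 and a0 = dst + 60, so
	 * t5 - a0 = 40 uncopied bytes is returned, as the
	 * copy_{to,from}_user() convention requires.
	 */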
SYM_FUNC_END(__asm_copy_to_user)
SYM_FUNC_END(fallback_scalar_usercopy)
EXPORT_SYMBOL(__asm_copy_to_user)
SYM_FUNC_ALIAS(__asm_copy_from_user, __asm_copy_to_user)
EXPORT_SYMBOL(__asm_copy_from_user)

SYM_FUNC_START(__clear_user)

	/* Enable access to user memory */
	li	t6, SR_SUM
	csrs	CSR_STATUS, t6

	add	a3, a0, a1
	addi	t0, a0, SZREG-1
	andi	t1, a3, ~(SZREG-1)
	andi	t0, t0, ~(SZREG-1)
	/*
	 * a3: terminal address of the target region
	 * t0: lowest SZREG-aligned address in the target region
	 * t1: highest SZREG-aligned address in the target region
	 */
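
	/*
	 * Example, assuming RV64, a0 = 0x1003 and a1 = 0x26 (38 bytes):
	 * a3 = 0x1029, t0 = 0x1008, t1 = 0x1028. Bytes 0x1003-0x1007 are
	 * cleared one at a time, 0x1008-0x1027 with four doubleword
	 * stores, and the final byte at 0x1028 by the remainder loop.
	 */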
	bgeu	t0, t1, 2f  /* no fully aligned word to store */
	bltu	a0, t0, 4f  /* unaligned head to clear first */
1:
	fixup REG_S, zero, (a0), 11f
	addi	a0, a0, SZREG
	bltu	a0, t1, 1b
2:
	bltu	a0, a3, 5f

3:
	/* Disable access to user memory */
	csrc	CSR_STATUS, t6
	li	a0, 0
	ret
4: /* Edge case: unaligned head */
	fixup sb, zero, (a0), 11f
	addi	a0, a0, 1
	bltu	a0, t0, 4b
	j	1b
5: /* Edge case: trailing remainder */
	fixup sb, zero, (a0), 11f
	addi	a0, a0, 1
	bltu	a0, a3, 5b
	j	3b

	/* Exception fixup code */
11:
	/* Disable access to user memory */
	csrc	CSR_STATUS, t6
	sub	a0, a3, a0
	ret
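
	/*
	 * As in the copy fixup above, the value returned is the number of
	 * bytes left unwritten: a3 holds the terminal address, so a3 - a0
	 * at the fault site is what __clear_user() reports back.
	 */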
SYM_FUNC_END(__clear_user)
EXPORT_SYMBOL(__clear_user)