/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/lib/memset.S
 *
 *  Copyright (C) 1995-2000 Russell King
 *
 *  ASM optimised string functions
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>

	.text
	.align	5

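/*
 * Three entry points share this body: __memset is the plain
 * implementation, mmioset is the alias used by memset_io() for
 * memory-mapped I/O, and memset itself is a weak symbol so that an
 * instrumented build (e.g. KASan) can interpose its own version.
 */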
ENTRY(__memset)
ENTRY(mmioset)
WEAK(memset)
UNWIND( .fnstart         )
	ands	r3, r0, #3		@ 1 unaligned?
	mov	ip, r0			@ preserve r0 as return value
	bne	6f			@ 1
/*
 * we know that the pointer in ip is aligned to a word boundary.
 */
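/*
 * Replicate the fill byte across the whole of r1 (e.g. 0xab becomes
 * 0xabababab) and keep a second copy in r3 for the multi-word stores.
 */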
1:	orr	r1, r1, r1, lsl #8
	orr	r1, r1, r1, lsl #16
	mov	r3, r1
7:	cmp	r2, #16
	blt	4f
UNWIND( .fnend              )

#if ! CALGN(1)+0

/*
 * We need 2 extra registers for this loop - use r8 and the LR
 */
UNWIND( .fnstart            )
UNWIND( .save {r8, lr}      )
	stmfd	sp!, {r8, lr}
	mov	r8, r1			@ two more copies of the fill
	mov	lr, r3			@ pattern, for 16-byte stores

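/*
 * Store 64 bytes per pass.  The stores are conditional on ge, so the
 * final pass, where the subtraction takes r2 negative, writes nothing
 * and falls through to the tail handling below.
 */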
2:	subs	r2, r2, #64
	stmiage	ip!, {r1, r3, r8, lr}	@ 64 bytes at a time.
	stmiage	ip!, {r1, r3, r8, lr}
	stmiage	ip!, {r1, r3, r8, lr}
	stmiage	ip!, {r1, r3, r8, lr}
	bgt	2b
	ldmfdeq	sp!, {r8, pc}		@ Now <64 bytes to go.
/*
 * No need to correct the count; we're only testing bits from now on.
 * The final subtraction of 64 leaves the low six bits of r2 intact,
 * and those are all the tail handling ever looks at.
 */
	tst	r2, #32
	stmiane	ip!, {r1, r3, r8, lr}	@ 32 bytes
	stmiane	ip!, {r1, r3, r8, lr}
	tst	r2, #16
	stmiane	ip!, {r1, r3, r8, lr}	@ 16 bytes
	ldmfd	sp!, {r8, lr}
UNWIND( .fnend              )

#else

/*
 * This version aligns the destination pointer in order to write
 * whole cache lines at once.
 */

UNWIND( .fnstart               )
UNWIND( .save {r4-r8, lr}      )
	stmfd	sp!, {r4-r8, lr}
	mov	r4, r1			@ replicate the fill pattern into
	mov	r5, r3			@ r4-r8 and lr as well, giving
	mov	r6, r1			@ eight registers of it in all
	mov	r7, r3
	mov	r8, r1
	mov	lr, r3

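/*
 * Aligning ip to a 32-byte boundary is only worth the setup cost
 * when there is plenty left to store (more than 96 bytes) and ip
 * is not already cache-line aligned.
 */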
	cmp	r2, #96
	tstgt	ip, #31
	ble	3f

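/*
 * r8 = bytes needed to reach the next 32-byte boundary; it is a
 * multiple of 4 because ip is already word aligned.  Shifting it
 * left by 28 moves its "16", "8" and "4" bits into the carry flag,
 * the sign bit and bit 30, so the three conditional stores below
 * write exactly r8 bytes.
 */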
	and	r8, ip, #31
	rsb	r8, r8, #32
	sub	r2, r2, r8
	movs	r8, r8, lsl #(32 - 4)
	stmiacs	ip!, {r4, r5, r6, r7}	@ 16 bytes if carry set
	stmiami	ip!, {r4, r5}		@ 8 bytes if negative
	tst	r8, #(1 << 30)
	mov	r8, r1			@ restore the fill pattern in r8
	strne	r1, [ip], #4		@ 4 bytes if bit 30 set

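/*
 * Each stm below stores all eight pattern registers: 64 bytes per
 * pass, and whole 32-byte cache lines once ip has been aligned.
 */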
3:	subs	r2, r2, #64
	stmiage	ip!, {r1, r3-r8, lr}
	stmiage	ip!, {r1, r3-r8, lr}
	bgt	3b
	ldmfdeq	sp!, {r4-r8, pc}

	tst	r2, #32
	stmiane	ip!, {r1, r3-r8, lr}	@ 32 bytes
	tst	r2, #16
	stmiane	ip!, {r4-r7}		@ 16 bytes
	ldmfd	sp!, {r4-r8, lr}
UNWIND( .fnend                 )

#endif

UNWIND( .fnstart            )
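/*
 * At most 15 bytes to go: bits 3 and 2 of r2 select an 8-byte and a
 * 4-byte store, and the rest is finished off a byte at a time at 5.
 */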
4:	tst	r2, #8
	stmiane	ip!, {r1, r3}		@ 8 bytes
	tst	r2, #4
	strne	r1, [ip], #4		@ 4 bytes
/*
 * When we get here, we've got less than 4 bytes to set.  We
 * may have an unaligned pointer as well.
 */
5:	tst	r2, #2
	strbne	r1, [ip], #1		@ 2 bytes
	strbne	r1, [ip], #1
	tst	r2, #1
	strbne	r1, [ip], #1		@ final byte
	ret	lr

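/*
 * The destination is unaligned: r3 holds its low two bits (1, 2 or
 * 3), so 4 - r3 single bytes bring ip up to a word boundary before
 * rejoining the aligned path at 1.  Subtracting 4 from r2 first both
 * checks that at least 4 bytes remain (otherwise the byte loop at 5
 * finishes the job) and pre-biases the count for the correction at
 * the end.
 */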
6:	subs	r2, r2, #4		@ 1 do we have enough
	blt	5b			@ 1 bytes to align with?
	cmp	r3, #2			@ 1
	strblt	r1, [ip], #1		@ 1
	strble	r1, [ip], #1		@ 1
	strb	r1, [ip], #1		@ 1
	add	r2, r2, r3		@ 1 (r2 = r2 - (4 - r3))
	b	1b
UNWIND( .fnend   )
ENDPROC(memset)
ENDPROC(mmioset)
ENDPROC(__memset)

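/*
 * __memset32 fills with a 32-bit pattern: it duplicates r1 into r3
 * and falls through into __memset64, which takes a 64-bit pattern
 * split across r1 (low word) and r3 (high word) with the count in
 * r2, and can therefore skip the byte replication at 1 by jumping
 * straight to 7.
 */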
ENTRY(__memset32)
UNWIND( .fnstart         )
	mov	r3, r1			@ copy r1 to r3 and fall into memset64
UNWIND( .fnend   )
ENDPROC(__memset32)
ENTRY(__memset64)
UNWIND( .fnstart         )
	mov	ip, r0			@ preserve r0 as return value
	b	7b			@ jump into the middle of memset
UNWIND( .fnend   )
ENDPROC(__memset64)

148