xref: /linux/arch/powerpc/boot/string.S (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) Paul Mackerras 1997.
 *
 * NOTE: this code runs in 32-bit mode and is packaged as ELF32.
 */

#include "ppc_asm.h"

	.text
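/*
 * Copy the NUL-terminated string at src to dest, including the
 * terminating NUL.  Returns dest.
 *
 * strcpy(dest, src)
 */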
	.globl	strcpy
strcpy:
	addi	r5,r3,-1
	addi	r4,r4,-1
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	stbu	r0,1(r5)
	bne	1b
	blr

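/*
 * Copy at most n bytes of the string at src to dest, stopping after
 * the terminating NUL has been copied.  Unlike the C library strncpy,
 * the remainder of dest is not zero-padded.  Returns dest.
 *
 * strncpy(dest, src, n)
 */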
	.globl	strncpy
strncpy:
	cmpwi	0,r5,0
	beqlr
	mtctr	r5
	addi	r6,r3,-1
	addi	r4,r4,-1
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	stbu	r0,1(r6)
	bdnzf	2,1b		/* dec ctr, branch if ctr != 0 && !cr0.eq */
	blr

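/*
 * Append the NUL-terminated string at src to the end of the
 * NUL-terminated string at dest.  Returns dest.
 *
 * strcat(dest, src)
 */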
	.globl	strcat
strcat:
	addi	r5,r3,-1
	addi	r4,r4,-1
1:	lbzu	r0,1(r5)
	cmpwi	0,r0,0
	bne	1b
	addi	r5,r5,-1
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	stbu	r0,1(r5)
	bne	1b
	blr

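/*
 * Return a pointer to the first occurrence of the byte c in the
 * NUL-terminated string s, or NULL if it does not occur.  Searching
 * for c == 0 returns a pointer to the terminating NUL.
 *
 * strchr(s, c)
 */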
	.globl	strchr
strchr:
	addi	r3,r3,-1
1:	lbzu	r0,1(r3)
	cmpw	0,r0,r4
	beqlr
	cmpwi	0,r0,0
	bne	1b
	li	r3,0
	blr

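/*
 * Compare the NUL-terminated strings s1 and s2.  Returns zero if they
 * are equal, otherwise the difference of the first differing bytes.
 *
 * strcmp(s1, s2)
 */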
	.globl	strcmp
strcmp:
	addi	r5,r3,-1
	addi	r4,r4,-1
1:	lbzu	r3,1(r5)
	cmpwi	1,r3,0
	lbzu	r0,1(r4)
	subf.	r3,r0,r3
	beqlr	1
	beq	1b
	blr

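/*
 * Compare at most n bytes of the strings s1 and s2.  Returns zero if
 * they are equal (or n <= 0), otherwise the difference of the first
 * differing bytes.
 *
 * strncmp(s1, s2, n)
 */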
	.globl	strncmp
strncmp:
	cmpwi	0,r5,0		/* n <= 0: treat the strings as equal */
	ble	2f
	mtctr	r5
	addi	r5,r3,-1
	addi	r4,r4,-1
1:	lbzu	r3,1(r5)
	cmpwi	1,r3,0
	lbzu	r0,1(r4)
	subf.	r3,r0,r3
	beqlr	1
	bdnzt	eq,1b
	blr
2:	li	r3,0
	blr

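/*
 * Return the length of the NUL-terminated string s, not counting the
 * terminating NUL.
 *
 * strlen(s)
 */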
	.globl	strlen
strlen:
	addi	r4,r3,-1
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	bne	1b
	subf	r3,r3,r4
	blr

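/*
 * Fill n bytes at dest with the byte c.  The byte is replicated
 * across a full word so that aligned word stores can be used for the
 * bulk of the fill, with byte stores for the unaligned head and tail.
 * Returns dest.
 *
 * memset(dest, c, n)
 */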
	.globl	memset
memset:
	rlwimi	r4,r4,8,16,23
	rlwimi	r4,r4,16,0,15
	addi	r6,r3,-4
	cmplwi	0,r5,4
	blt	7f
	stwu	r4,4(r6)
	beqlr
	andi.	r0,r6,3
	add	r5,r0,r5
	subf	r6,r0,r6
	rlwinm	r0,r5,32-2,2,31
	mtctr	r0
	bdz	6f
1:	stwu	r4,4(r6)
	bdnz	1b
6:	andi.	r5,r5,3
7:	cmpwi	0,r5,0
	beqlr
	mtctr	r5
	addi	r6,r6,3
8:	stbu	r4,1(r6)
	bdnz	8b
	blr

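/*
 * Copy n bytes from src to dest, handling overlap: if dest is above
 * src the copy is done backwards, otherwise it falls through to
 * memcpy.  Returns dest.
 *
 * memmove(dest, src, n)
 */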
	.globl	memmove
memmove:
	cmplw	0,r3,r4
	bgt	backwards_memcpy
	/* fall through */

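/*
 * Copy n bytes from src to dest, ascending through memory.  Word
 * copies are used when both pointers can be word-aligned, otherwise
 * the copy falls back to byte-by-byte.  Returns dest.
 *
 * memcpy(dest, src, n)
 */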
	.globl	memcpy
memcpy:
	rlwinm.	r7,r5,32-3,3,31		/* r7 = r5 >> 3 */
	addi	r6,r3,-4
	addi	r4,r4,-4
	beq	3f			/* if less than 8 bytes to do */
	andi.	r0,r6,3			/* get dest word aligned */
	mtctr	r7
	bne	5f
	andi.	r0,r4,3			/* check src word aligned too */
	bne	3f
1:	lwz	r7,4(r4)
	lwzu	r8,8(r4)
	stw	r7,4(r6)
	stwu	r8,8(r6)
	bdnz	1b
	andi.	r5,r5,7
2:	cmplwi	0,r5,4
	blt	3f
	lwzu	r0,4(r4)
	addi	r5,r5,-4
	stwu	r0,4(r6)
3:	cmpwi	0,r5,0
	beqlr
	mtctr	r5
	addi	r4,r4,3
	addi	r6,r6,3
4:	lbzu	r0,1(r4)
	stbu	r0,1(r6)
	bdnz	4b
	blr
5:	subfic	r0,r0,4
	cmpw	cr1,r0,r5
	add	r7,r0,r4
	andi.	r7,r7,3			/* will source be word-aligned too? */
	ble	cr1,3b
	bne	3b			/* do byte-by-byte if not */
	mtctr	r0
6:	lbz	r7,4(r4)
	addi	r4,r4,1
	stb	r7,4(r6)
	addi	r6,r6,1
	bdnz	6b
	subf	r5,r0,r5
	rlwinm.	r7,r5,32-3,3,31
	beq	2b
	mtctr	r7
	b	1b

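/*
 * Copy n bytes from src to dest, descending from the end of the
 * buffers, for the overlapping case where dest > src.  Returns dest.
 *
 * backwards_memcpy(dest, src, n)
 */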
	.globl	backwards_memcpy
backwards_memcpy:
	rlwinm.	r7,r5,32-3,3,31		/* r7 = r5 >> 3 */
	add	r6,r3,r5
	add	r4,r4,r5
	beq	3f
	andi.	r0,r6,3
	mtctr	r7
	bne	5f
	andi.	r0,r4,3
	bne	3f
1:	lwz	r7,-4(r4)
	lwzu	r8,-8(r4)
	stw	r7,-4(r6)
	stwu	r8,-8(r6)
	bdnz	1b
	andi.	r5,r5,7
2:	cmplwi	0,r5,4
	blt	3f
	lwzu	r0,-4(r4)
	subi	r5,r5,4
	stwu	r0,-4(r6)
3:	cmpwi	0,r5,0
	beqlr
	mtctr	r5
4:	lbzu	r0,-1(r4)
	stbu	r0,-1(r6)
	bdnz	4b
	blr
5:	cmpw	cr1,r0,r5
	subf	r7,r0,r4
	andi.	r7,r7,3
	ble	cr1,3b
	bne	3b
	mtctr	r0
6:	lbzu	r7,-1(r4)
	stbu	r7,-1(r6)
	bdnz	6b
	subf	r5,r0,r5
	rlwinm.	r7,r5,32-3,3,31
	beq	2b
	mtctr	r7
	b	1b

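/*
 * Return a pointer to the first occurrence of the byte c in the n
 * bytes at s, or NULL if it is not found (or n <= 0).
 *
 * memchr(s, c, n)
 */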
	.globl	memchr
memchr:
	cmpwi	0,r5,0
	ble	2f			/* n <= 0: not found */
	mtctr	r5
	addi	r3,r3,-1
1:	lbzu	r0,1(r3)
	cmpw	0,r0,r4
	beqlr
	bdnz	1b
2:	li	r3,0
	blr

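/*
 * Compare n bytes at s1 and s2.  Returns zero if they are equal
 * (or n <= 0), otherwise the difference of the first differing bytes.
 *
 * memcmp(s1, s2, n)
 */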
	.globl	memcmp
memcmp:
	cmpwi	0,r5,0
	ble	2f
	mtctr	r5
	addi	r6,r3,-1
	addi	r4,r4,-1
1:	lbzu	r3,1(r6)
	lbzu	r0,1(r4)
	subf.	r3,r0,r3
	bdnzt	2,1b
	blr
2:	li	r3,0
	blr

/*
 * Flush the dcache and invalidate the icache for a range of addresses.
 *
 * flush_cache(addr, len)
 */
	.global	flush_cache
flush_cache:
	addi	r4,r4,0x1f	/* len = (len + 0x1f) / 0x20 */
	rlwinm.	r4,r4,27,5,31
	mtctr	r4
	beqlr
1:	dcbf	0,r3
	icbi	0,r3
	addi	r3,r3,0x20
	bdnz	1b
	sync
	isync
	blr