Lines matching "+full:1 +full:-4" in arch/powerpc/lib/copy_32.S (numbers are source line numbers; non-matching lines are omitted)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
3 * Memory copy functions for 32-bit PowerPC.
5 * Copyright (C) 1996-2005 Paul Mackerras.
12 #include <asm/code-patching-asm.h>
16 lwz r7,4(r4); \
20 stw r7,4(r6); \
27 lwz r7,4(r4); \
28 8 ## n ## 1: \
34 8 ## n ## 4: \
35 stw r7,4(r6); \
45 addi r5,r5,-(16 * n); \
47 9 ## n ## 1: \
48 addi r5,r5,-(16 * n); \
51 EX_TABLE(8 ## n ## 1b,9 ## n ## 0b); \
54 EX_TABLE(8 ## n ## 4b,9 ## n ## 1b); \
55 EX_TABLE(8 ## n ## 5b,9 ## n ## 1b); \
56 EX_TABLE(8 ## n ## 6b,9 ## n ## 1b); \
57 EX_TABLE(8 ## n ## 7b,9 ## n ## 1b)
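These EX_TABLE entries pair every load and store inside COPY_16_BYTES_WITHEX with a fixup label: faults in the loads (8n0..8n3) resolve to the read-fault path 9n0, faults in the stores (8n4..8n7) to the write-fault path 9n1. Conceptually the exception table maps a possibly-faulting instruction address to a resume address; a minimal C sketch of that idea (the real powerpc entries are 32-bit relative offsets and the lookup is a binary search over a sorted section, so treat this as illustration only):

    struct exception_table_entry {
            unsigned long insn;   /* address of the instruction that may fault */
            unsigned long fixup;  /* where to resume execution if it does */
    };

    /* sketch of what the page-fault handler does with the table */
    static unsigned long search_extable_sketch(
                    const struct exception_table_entry *tbl,
                    unsigned int num, unsigned long fault_ip)
    {
            for (unsigned int i = 0; i < num; i++)
                    if (tbl[i].insn == fault_ip)
                            return tbl[i].fixup;  /* resume here, don't oops */
            return 0;                             /* no entry: a kernel bug */
    }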
63 CACHELINE_MASK = (L1_CACHE_BYTES-1)
67 rlwinm. r0, r5, 31, 1, 31 /* r0 = r5 >> 1 = number of full words */
68 addi r6, r3, -4
69 beq- 2f
72 1: stwu r4, 4(r6)
73 bdnz 1b
74 2: andi. r0, r5, 1
76 sth r4, 4(r6)
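These lines are the core of memset16(): the halfword count in r5 is halved to a word count, the 16-bit value (already replicated into both halves of r4 by an rlwimi that is not among the matched lines) is stored a word at a time, and an odd trailing halfword is finished with the sth. A C sketch of the same logic, assuming a word-aligned destination for simplicity (the hardware tolerates the misaligned stw the asm may issue):

    #include <stddef.h>
    #include <stdint.h>

    static void *memset16_sketch(uint16_t *dst, uint16_t v, size_t count)
    {
            uint32_t word = ((uint32_t)v << 16) | v;  /* rlwimi replication */
            uint32_t *p = (uint32_t *)dst;
            size_t words = count >> 1;        /* rlwinm. r0, r5, 31, 1, 31 */

            while (words--)                   /* 1: stwu r4, 4(r6) */
                    *p++ = word;
            if (count & 1)                    /* 2: andi. r0, r5, 1 */
                    *(uint16_t *)p = v;       /* sth r4, 4(r6) */
            return dst;
    }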
84 * area is cacheable. -- paulus
91 cmplwi 0,r5,4
110 clrlwi r7,r6,32-LG_CACHELINE_BYTES /* r7 = dest offset within its cacheline */
113 addic. r9,r9,-1 /* total number of complete cachelines */
119 4: stwu r4,4(r6)
120 bdnz 4b
122 li r7,4
126 clrlwi r5,r8,32-LG_CACHELINE_BYTES
127 addi r5,r5,4
132 1: stwu r4,4(r6)
133 bdnz 1b
138 8: stbu r4,1(r6)
145 addi r6,r3,-1
146 9: stbu r4,1(r6)
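The memset matches above (lines 91-146) belong to the fast path the "area is cacheable" comment refers to: store words up to the first cacheline boundary, clear each complete line with dcbz so no line has to be fetched from memory, then drain the tail with word and byte stores. dcbz only produces zeroes, so this path is taken only when the fill value is zero (a test on r4 not among the matches guards it). A sketch of that shape, with dcbz modeled by a hypothetical dcbz_line() helper and an assumed line size:

    #include <stddef.h>
    #include <stdint.h>

    #define CACHELINE_BYTES 32u  /* assumption; really L1_CACHE_BYTES */

    /* stand-in for the dcbz instruction: zero one whole, cacheable line */
    static inline void dcbz_line(void *p)
    {
            __builtin_memset(p, 0, CACHELINE_BYTES);
    }

    /* shape of the memset fast path for a zero fill, dst word-aligned */
    static void memset_zero_sketch(uint8_t *dst, size_t n)
    {
            uintptr_t p = (uintptr_t)dst;

            while ((p & (CACHELINE_BYTES - 1)) && n >= 4) {  /* 4: stwu */
                    *(uint32_t *)p = 0; p += 4; n -= 4;
            }
            while (n >= CACHELINE_BYTES) {                   /* 10: dcbz */
                    dcbz_line((void *)p);
                    p += CACHELINE_BYTES; n -= CACHELINE_BYTES;
            }
            while (n >= 4) { *(uint32_t *)p = 0; p += 4; n -= 4; } /* 1: */
            while (n)      { *(uint8_t *)p = 0; p += 1; n -= 1; } /* 8: */
    }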
157 * -- paulus.
169 1: b generic_memcpy
170 patch_site 1b, patch__memcpy_nocache
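memcpy is entered through "1: b generic_memcpy": during early boot, before the caches are usable, dcbz would corrupt memory, so the branch forces the plain copy. Once the caches are up, boot code patches this site so execution falls through into the dcbz path. patch_instruction_site(), ppc_inst() and PPC_RAW_NOP() are real arch/powerpc interfaces, but the exact call below is a from-memory sketch of the boot-side patching, not a quote of the kernel's call site:

    #include <linux/init.h>
    #include <asm/code-patching.h>  /* patch_instruction_site(), ppc_inst() */
    #include <asm/ppc-opcode.h>     /* PPC_RAW_NOP() */

    /* Sketch: turn "1: b generic_memcpy" into a nop once caches are on,
     * enabling the cacheline-based fast path. */
    static void __init enable_cacheline_memcpy_sketch(void)
    {
            patch_instruction_site(&patch__memcpy_nocache,
                                   ppc_inst(PPC_RAW_NOP()));
    }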
175 cmplw 1,r3,r8
176 crand 0,0,4 /* cr0.lt &= cr1.lt */
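These two lines are the tail of the overlap test: r7/r8 hold dst+len and src+len, cmplw cr0 checks src < dst+len, cmplw cr1 checks dst < src+len, and crand folds both into cr0.lt so a single blt (not among the matched lines) can fall back to generic_memcpy. In C:

    #include <stddef.h>
    #include <stdint.h>

    /* what the cmplw/cmplw/crand sequence computes */
    static int regions_overlap(uintptr_t dst, uintptr_t src, size_t len)
    {
            return src < dst + len &&   /* cmplw 0,r4,r7 -> cr0.lt */
                   dst < src + len;     /* cmplw 1,r3,r8 -> cr1.lt */
    }

If the regions overlap, the dcbz path must be skipped: it zeroes whole destination lines before the source bytes inside them have been read.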
179 addi r4,r4,-4
180 addi r6,r3,-4
187 andi. r8,r0,3 /* get it word-aligned first */
191 70: lbz r9,4(r4) /* do some bytes */
192 addi r4,r4,1
193 addi r6,r6,1
199 72: lwzu r9,4(r4) /* do some words */
200 stwu r9,4(r6)
204 clrlwi r5,r5,32-LG_CACHELINE_BYTES /* r5 = bytes left after full cachelines */
205 li r11,4
229 30: lwzu r0,4(r4)
230 stwu r0,4(r6)
238 40: lbzu r0,1(r4)
239 stbu r0,1(r6)
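Taken together, the memcpy matches from line 179 to line 239 follow one shape: byte-copy until the destination is word-aligned (70:), word-copy up to the first cacheline boundary (72:), then dcbz each destination line and bulk-copy it, and finally drain leftover words (30:) and bytes (40:). A condensed sketch, aligning only the destination as the asm does (the misaligned source accesses it may issue are legal on these CPUs and are hidden behind memcpy here); line size and dcbz helper are assumptions:

    #include <stddef.h>
    #include <stdint.h>

    #define LINE 32u  /* assumption; really L1_CACHE_BYTES */

    static inline void dcbz_line_(void *p) { __builtin_memset(p, 0, LINE); }

    static void memcpy_cacheline_sketch(uint8_t *d, const uint8_t *s, size_t n)
    {
            while (((uintptr_t)d & 3) && n) {               /* 70: bytes */
                    *d++ = *s++; n--;
            }
            while (((uintptr_t)d & (LINE - 1)) && n >= 4) { /* 72: words */
                    __builtin_memcpy(d, s, 4); d += 4; s += 4; n -= 4;
            }
            while (n >= LINE) {        /* dcbz, then COPY_16_BYTES x N */
                    dcbz_line_(d);     /* claim the line without reading RAM */
                    __builtin_memcpy(d, s, LINE);
                    d += LINE; s += LINE; n -= LINE;
            }
            while (n >= 4) {                                /* 30: words */
                    __builtin_memcpy(d, s, 4); d += 4; s += 4; n -= 4;
            }
            while (n--) *d++ = *s++;                        /* 40: bytes */
    }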
249 addi r6,r3,-4
250 addi r4,r4,-4
255 1: lwz r7,4(r4)
257 stw r7,4(r6)
259 bdnz 1b
261 2: cmplwi 0,r5,4
263 lwzu r0,4(r4)
264 addi r5,r5,-4
265 stwu r0,4(r6)
271 4: lbzu r0,1(r4)
272 stbu r0,1(r6)
273 bdnz 4b
275 5: subfic r0,r0,4 /* r0 = 4 - r0 = bytes to word-align dest */
277 6: lbz r7,4(r4)
278 addi r4,r4,1
279 stb r7,4(r6)
280 addi r6,r6,1
283 rlwinm. r7,r5,32-3,3,31 /* r7 = r5 >> 3 */
286 b 1b
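The generic_memcpy matches (lines 249-286) are the cache-agnostic fallback: if at least 8 bytes remain, byte-copy until the destination is word-aligned (5:/6:), move two words per iteration (1:), then mop up one leftover word (2:) and the last bytes (4:). Roughly:

    #include <stddef.h>
    #include <stdint.h>

    static void *generic_memcpy_sketch(void *dst, const void *src, size_t n)
    {
            uint8_t *d = dst;
            const uint8_t *s = src;

            if (n >= 8) {
                    while ((uintptr_t)d & 3) {  /* 5:/6: align dest */
                            *d++ = *s++; n--;
                    }
                    while (n >= 8) {            /* 1: two words per trip */
                            __builtin_memcpy(d, s, 8);
                            d += 8; s += 8; n -= 8;
                    }
            }
            if (n >= 4) {                       /* 2: one leftover word */
                    __builtin_memcpy(d, s, 4);
                    d += 4; s += 4; n -= 4;
            }
            while (n--)                         /* 4: leftover bytes */
                    *d++ = *s++;
            return dst;
    }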
289 rlwinm. r7,r5,32-3,3,31 /* r7 = r5 >> 3 */
296 1: lwz r7,-4(r4)
297 lwzu r8,-8(r4)
298 stw r7,-4(r6)
299 stwu r8,-8(r6)
300 bdnz 1b
302 2: cmplwi 0,r5,4
304 lwzu r0,-4(r4)
305 subi r5,r5,4
306 stwu r0,-4(r6)
310 4: lbzu r0,-1(r4)
311 stbu r0,-1(r6)
312 bdnz 4b
315 6: lbzu r7,-1(r4)
316 stbu r7,-1(r6)
319 rlwinm. r7,r5,32-3,3,31 /* r7 = r5 >> 3 */
322 b 1b
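backwards_memcpy (lines 289-322) mirrors generic_memcpy from the top end down, which is what makes memmove safe when the destination overlaps the source at a higher address: both words of an iteration are loaded before either is stored, so even a small overlap distance cannot clobber unread source data. A sketch that mirrors the load/store order of labels 1:, 2: and 4:

    #include <stddef.h>
    #include <stdint.h>

    static void backwards_memcpy_sketch(uint8_t *dst, const uint8_t *src,
                                        size_t n)
    {
            uint8_t *d = dst + n;
            const uint8_t *s = src + n;

            while (n >= 8) {            /* 1: load both words, then store */
                    uint32_t lo, hi;
                    s -= 8; d -= 8; n -= 8;
                    __builtin_memcpy(&lo, s + 4, 4);  /* lwz  r7,-4(r4) */
                    __builtin_memcpy(&hi, s, 4);      /* lwzu r8,-8(r4) */
                    __builtin_memcpy(d + 4, &lo, 4);  /* stw  r7,-4(r6) */
                    __builtin_memcpy(d, &hi, 4);      /* stwu r8,-8(r6) */
            }
            if (n >= 4) {               /* 2: one leftover word */
                    uint32_t w;
                    s -= 4; d -= 4; n -= 4;
                    __builtin_memcpy(&w, s, 4);
                    __builtin_memcpy(d, &w, 4);
            }
            while (n--)                 /* 4: leftover bytes */
                    *--d = *--s;
    }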
325 addi r4,r4,-4
326 addi r6,r3,-4
333 andi. r8,r0,3 /* get it word-aligned first */
336 70: lbz r9,4(r4) /* do some bytes */
337 71: stb r9,4(r6)
338 addi r4,r4,1
339 addi r6,r6,1
345 72: lwzu r9,4(r4) /* do some words */
346 73: stwu r9,4(r6)
355 clrlwi r5,r5,32-LG_CACHELINE_BYTES
356 li r11,4
360 li r3,4
361 cmpwi r0,1
364 li r7,1
365 #if MAX_COPY_PREFETCH > 1
368 we prefetch 1 cacheline ahead. */
379 #endif /* MAX_COPY_PREFETCH > 1 */
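This #if block chooses the prefetch distance for __copy_tofrom_user's cacheline loop: one line ahead for short copies, up to MAX_COPY_PREFETCH lines ahead (a platform constant from asm/cache.h) for long ones, after which the main loop issues one dcbt per iteration to keep the source that far ahead in the cache. The heuristic in C, using GCC's portable prefetch hint; the constants are assumptions, and the real code also stops prefetching for the final few lines, which this sketch omits:

    #include <stddef.h>
    #include <stdint.h>

    #define LINE              32u  /* assumption; really L1_CACHE_BYTES  */
    #define MAX_COPY_PREFETCH 4    /* assumption; platform value differs */

    static void copy_lines_prefetch_sketch(uint8_t *d, const uint8_t *s,
                                           size_t nlines)
    {
            /* li r7,1 / cmpwi against MAX_COPY_PREFETCH: pick a distance */
            size_t ahead = (nlines > MAX_COPY_PREFETCH) ? MAX_COPY_PREFETCH : 1;

            for (size_t i = 0; i < ahead; i++)  /* warm-up dcbt loop */
                    __builtin_prefetch(s + i * LINE);

            for (size_t i = 0; i < nlines; i++) {
                    __builtin_prefetch(s + (i + ahead) * LINE);  /* dcbt */
                    /* dcbz the destination line, then the unrolled
                     * COPY_16_BYTES_WITHEX(0..7) body */
                    __builtin_memcpy(d, s, LINE);
                    d += LINE; s += LINE;
            }
    }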
391 COPY_16_BYTES_WITHEX(1)
396 COPY_16_BYTES_WITHEX(4)
405 li r3,4
412 30: lwzu r0,4(r4)
413 31: stwu r0,4(r6)
419 40: lbz r0,4(r4)
420 41: stb r0,4(r6)
421 addi r4,r4,1
422 addi r6,r6,1
427 /* read fault, initial single-byte copy */
430 /* write fault, initial single-byte copy */
431 101: li r9,1
439 103: li r9,1
449 COPY_16_BYTES_EXCODE(1)
454 COPY_16_BYTES_EXCODE(4)
467 105: li r9,1
476 109: li r9,1
484 111: li r9,1
489 * r5 + (ctr << r3), and r9 is 0 for read or 1 for write.
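This comment is the key to the whole fixup scheme: every fault path loads r3 with the log2 size of the unit its faulting loop copies (0 for the byte loops, 2 for the word loops, LG_CACHELINE_BYTES for the cacheline loop) and leaves the not-yet-consumed byte count in r5, so one shared epilogue can compute the uncopied total. As arithmetic:

    /* shared fault epilogue (the slw + add): bytes the copy never did */
    static unsigned long bytes_not_copied(unsigned long r5,  /* tail bytes  */
                                          unsigned long ctr, /* trips left  */
                                          unsigned int r3)   /* log2(unit)  */
    {
            return r5 + (ctr << r3);
    }

For example, a fault in the cacheline loop with 3 lines still to go and a 17-byte tail gives 17 + (3 << 5) = 113 uncopied bytes on a 32-byte-line CPU.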
499 130: lbz r0,4(r4)
500 131: stb r0,4(r6)
501 addi r4,r4,1
502 addi r6,r6,1
504 /* then clear out the destination: r3 bytes starting at 4(r6) */
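The final fragment is the read-fault recovery: r9 distinguished read (0) from write (1) faults, and for a read fault the code first retries the remainder one byte at a time (130:/131:, each itself covered by an exception entry), then, per the comment above, zeroes the destination bytes it still could not fill so no stale kernel data survives in the buffer. A sketch, with a hypothetical get_user_byte() standing in for the exception-covered lbz at label 130:

    #include <stddef.h>
    #include <stdint.h>

    /* hypothetical stand-in for the fault-tolerant user-space load */
    static int get_user_byte(uint8_t *v, const uint8_t *uaddr)
    {
            *v = *uaddr;  /* the real thing may fault and return nonzero */
            return 0;
    }

    /* returns the number of bytes that could not be copied (r3) */
    static size_t read_fault_recovery_sketch(uint8_t *dst, const uint8_t *src,
                                             size_t remaining)
    {
            uint8_t b;

            while (remaining && get_user_byte(&b, src) == 0) { /* 130:/131: */
                    *dst++ = b; src++; remaining--;
            }
            __builtin_memset(dst, 0, remaining);  /* clear the destination */
            return remaining;
    }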