xref: /linux/arch/arm/lib/copy_page.S (revision 75bf465f0bc33e9b776a46d6a1b9b990f5fb7c37)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/lib/copy_page.S
 *
 *  Copyright (C) 1995-1999 Russell King
 *
 *  ASM optimised string functions
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>

/*
 * Number of main-loop iterations: each iteration copies
 * 2 * L1_CACHE_BYTES bytes of the PAGE_SZ-byte page.  When the CPU
 * supports prefetch, PLD(-1) drops one iteration here because the
 * final chunk is copied on a separate path that issues no further
 * prefetches (so we never prefetch past the end of the source page).
 */
#define COPY_COUNT (PAGE_SZ / (2 * L1_CACHE_BYTES) PLD( -1 ))

		.text
		.align	5
/*
 * StrongARM optimised copy_page routine
 * now 1.78 bytes/cycle, was 1.60 bytes/cycle (50MHz bus -> 89MB/s)
 * Note that we probably achieve closer to the 100MB/s target with
 * the core clock switching.
 */
/*
 * void copy_page(void *to, void *from)
 *
 * Copy one page (PAGE_SZ bytes) in 16-byte ldmia/stmia bursts,
 * 2 * L1_CACHE_BYTES per loop iteration.
 *
 * In:    r0 = destination page (post-incremented by the stores)
 *        r1 = source page (post-incremented by the loads)
 * Regs:  r2 = remaining loop count
 *        r3, r4, ip, lr = 16-byte copy window (r4/lr saved on entry)
 * Note:  PLD(x) emits x only on CPUs with prefetch support
 *        (macro from asm/assembler.h); loads run one burst ahead of
 *        stores so each stmia has data ready.
 */
ENTRY(copy_page)
		stmfd	sp!, {r4, lr}			@	2
	PLD(	pld	[r1, #0]		)
	PLD(	pld	[r1, #L1_CACHE_BYTES]		)
		mov	r2, #COPY_COUNT			@	1
		ldmia	r1!, {r3, r4, ip, lr}		@	4+1
1:	PLD(	pld	[r1, #2 * L1_CACHE_BYTES])
	PLD(	pld	[r1, #3 * L1_CACHE_BYTES])
2:
	.rept	(2 * L1_CACHE_BYTES / 16 - 1)
		stmia	r0!, {r3, r4, ip, lr}		@	4
		ldmia	r1!, {r3, r4, ip, lr}		@	4
	.endr
		subs	r2, r2, #1			@	1
		stmia	r0!, {r3, r4, ip, lr}		@	4
		ldmiagt	r1!, {r3, r4, ip, lr}		@	4
		bgt	1b				@	1
	PLD(	ldmiaeq r1!, {r3, r4, ip, lr}	)	@ PLD tail: refill window and
	PLD(	beq	2b			)	@ copy last chunk, no prefetch
		ldmfd	sp!, {r4, pc}			@	3
ENDPROC(copy_page)
45