// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/copypage-v4wb.c
 *
 *  Copyright (C) 1995-1999 Russell King
 */
#include <linux/init.h>
#include <linux/highmem.h>

/*
 * ARMv4 optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 *
 * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
 * instruction.  If your processor does not supply this, you have to write your
 * own copy_user_highpage that does the right thing.
 */
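/*
 * For reference, a rough plain-C model of the assembly loop below.  The
 * function name and the two cache helpers are illustrative only (they do
 * not exist in the kernel; they stand in for the mcr instructions), and
 * the model ignores the load/store scheduling the assembly does:
 *
 *	static void v4wb_copy_user_page_model(void *kto, const void *kfrom)
 *	{
 *		u32 *to = kto;
 *		const u32 *from = kfrom;
 *		int chunks = PAGE_SIZE / 64;
 *
 *		do {
 *			int i;
 *			for (i = 0; i < 16; i++) {
 *				if ((i & 7) == 0)
 *					invalidate_dcache_line(to);
 *				*to++ = *from++;
 *			}
 *		} while (--chunks);
 *		drain_write_buffer();
 *	}
 *
 * Each iteration copies one 64-byte chunk; assuming 32-byte D-cache
 * lines, that is two line invalidations per chunk, matching the two mcr
 * instructions in the loop body.
 */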
static void v4wb_copy_user_page(void *kto, const void *kfrom)
{
	int tmp;	/* counts down the 64-byte chunks remaining */

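	/*
	 * 64 bytes per iteration: four 16-byte ldm/stm pairs.  The
	 * trailing ldmiane preloads the first words of the next chunk
	 * before the branch, overlapping the load with the loop
	 * turnaround.
	 */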
	asm volatile ("\
	.syntax unified\n\
	ldmia	%1!, {r3, r4, ip, lr}		@ 4\n\
1:	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
	ldmia	%1!, {r3, r4, ip, lr}		@ 4+1\n\
	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
	ldmia	%1!, {r3, r4, ip, lr}		@ 4\n\
	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
	ldmia	%1!, {r3, r4, ip, lr}		@ 4\n\
	subs	%2, %2, #1			@ 1\n\
	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
	ldmiane	%1!, {r3, r4, ip, lr}		@ 4\n\
	bne	1b				@ 1\n\
	mcr	p15, 0, %1, c7, c10, 4		@ 1   drain WB"
	: "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
	: "2" (PAGE_SIZE / 64)
	: "r3", "r4", "ip", "lr");
}

void v4wb_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kto = kmap_atomic(to);
	kfrom = kmap_atomic(from);
	/*
	 * Write back any dirty lines at the user-space alias of the
	 * source page, so that the reads through the kernel mapping
	 * below see up-to-date data.
	 */
	flush_cache_page(vma, vaddr, page_to_pfn(from));
	v4wb_copy_user_page(kto, kfrom);
	kunmap_atomic(kfrom);
	kunmap_atomic(kto);
}

/*
 * ARMv4 optimised clear_user_page
 *
 * Same story as above.
 */
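/*
 * As above, a rough plain-C model of the loop (illustrative only; the
 * cache helpers do not exist and stand in for the mcr instructions):
 *
 *	u32 *p = kaddr;
 *	int chunks = PAGE_SIZE / 64;
 *
 *	do {
 *		int i;
 *		for (i = 0; i < 16; i++) {
 *			if ((i & 7) == 0)
 *				invalidate_dcache_line(p);
 *			*p++ = 0;
 *		}
 *	} while (--chunks);
 *	drain_write_buffer();
 */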
void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	/* ptr only exists to give the asm a register it may advance */
	void *ptr, *kaddr = kmap_atomic(page);
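	/*
	 * r1 counts down the 64-byte chunks and is zero when the loop
	 * exits, which conveniently supplies the zero operand the
	 * write-buffer drain expects.
	 */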
	asm volatile("\
	mov	r1, %2				@ 1\n\
	mov	r2, #0				@ 1\n\
	mov	r3, #0				@ 1\n\
	mov	ip, #0				@ 1\n\
	mov	lr, #0				@ 1\n\
1:	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	subs	r1, r1, #1			@ 1\n\
	bne	1b				@ 1\n\
	mcr	p15, 0, r1, c7, c10, 4		@ 1   drain WB"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 64)
	: "r1", "r2", "r3", "ip", "lr");
	kunmap_atomic(kaddr);
}

struct cpu_user_fns v4wb_user_fns __initdata = {
	.cpu_clear_user_highpage = v4wb_clear_user_highpage,
	.cpu_copy_user_highpage	= v4wb_copy_user_highpage,
};