// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/copypage-v4mc.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This handles the mini data cache, as found on SA11x0 and XScale
 * processors.  When we copy a user page, we map it in such a way
 * that accesses to this page will not touch the main data cache, but
 * will be cached in the mini data cache.  This prevents us from
 * thrashing the main data cache on page faults.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "mm.h"

#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				  L_PTE_MT_MINICACHE)

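/*
 * Protects the single COPYPAGE_MINICACHE window used below: only one
 * CPU/task at a time may have a source page mapped through it.
 */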
static DEFINE_RAW_SPINLOCK(minicache_lock);

/*
 * ARMv4 mini-dcache optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 *
 * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
 * instruction.  If your processor does not supply this, you have to write your
 * own copy_user_highpage that does the right thing.
 *
 * (A plain-C sketch of this loop follows mc_copy_user_page() below.)
 */
static void mc_copy_user_page(void *from, void *to)
{
	int tmp;

	asm volatile ("\
	.syntax unified\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
1:	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4+1\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	subs	%2, %2, #1			@ 1\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmiane	%0!, {r2, r3, ip, lr}		@ 4\n\
	bne	1b				@ "
	: "+&r" (from), "+&r" (to), "=&r" (tmp)
	: "2" (PAGE_SIZE / 64)
	: "r2", "r3", "ip", "lr");
}
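
/*
 * Illustration only (not compiled): roughly what the assembler above does,
 * in plain C.  dcache_inval_line() is a hypothetical stand-in for the
 * "mcr p15, 0, addr, c7, c6, 1" invalidate; the real loop is hand-scheduled
 * assembler so that loads and stores run as LDM/STM bursts.
 */
#if 0
static void mc_copy_user_page_c(void *from, void *to)
{
	char *src = from, *dst = to;
	int i;

	/* one 32-byte D-cache line per step */
	for (i = 0; i < PAGE_SIZE / 32; i++) {
		dcache_inval_line(dst);		/* hypothetical helper */
		memcpy(dst, src, 32);
		src += 32;
		dst += 32;
	}
}
#endif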
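/*
 * Copy a user highmem page via the mini-cache: flush the source page's
 * kernel alias if it is not already marked PG_dcache_clean, map the source
 * at COPYPAGE_MINICACHE with mini-cache attributes, and stream it to the
 * kmapped destination.
 */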
void v4_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto = kmap_atomic(to);

	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
		__flush_dcache_page(page_mapping_file(from), from);

	raw_spin_lock(&minicache_lock);

	set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));

	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);

	raw_spin_unlock(&minicache_lock);

	kunmap_atomic(kto);
}

/*
 * ARMv4 optimised clear_user_page
 *
 * Same trick as the copy above: invalidate each destination D-cache line
 * just before writing it, so the zeros go straight out through the write
 * buffer rather than aliasing in the Dcache.
 */
void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page);
	asm volatile("\
	mov	r1, %2				@ 1\n\
	mov	r2, #0				@ 1\n\
	mov	r3, #0				@ 1\n\
	mov	ip, #0				@ 1\n\
	mov	lr, #0				@ 1\n\
1:	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	subs	r1, r1, #1			@ 1\n\
	bne	1b				@ 1"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 64)
	: "r1", "r2", "r3", "ip", "lr");
	kunmap_atomic(kaddr);
}
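
/*
 * Illustration only (not compiled): the clear loop in plain C, again using
 * the hypothetical dcache_inval_line() helper sketched above.
 */
#if 0
static void mc_clear_user_page_c(void *addr)
{
	char *p = addr;
	int i;

	for (i = 0; i < PAGE_SIZE / 32; i++) {
		dcache_inval_line(p);		/* hypothetical helper */
		memset(p, 0, 32);		/* one D-cache line of zeros */
		p += 32;
	}
}
#endif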
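/*
 * Consumed at boot only (hence __initdata): when the kernel is built with
 * support for multiple user-page implementations, the matching CPU's
 * proc_info entry points at this table and its methods are copied into the
 * cpu_user vector, through which the generic copy_user_highpage() and
 * clear_user_highpage() calls dispatch (see arch/arm/kernel/setup.c and
 * arch/arm/include/asm/page.h).
 */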
struct cpu_user_fns v4_mc_user_fns __initdata = {
	.cpu_clear_user_highpage = v4_mc_clear_user_highpage,
	.cpu_copy_user_highpage	= v4_mc_copy_user_highpage,
};