// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/copypage-xsc3.c
 *
 *  Copyright (C) 2004 Intel Corp.
 *
 * Adapted for 3rd gen XScale core, no more mini-dcache
 * Author: Matt Gilbert (matthew.m.gilbert@intel.com)
 */
#include <linux/init.h>
#include <linux/highmem.h>

/*
 * General note:
 *  We don't really want write-allocate cache behaviour for these functions
 *  since that will just eat through 8K of the cache.
 */

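/*
 * To that end, the "mcr p15, 0, rX, c7, c6, 1" operations below
 * invalidate each 32-byte destination cache line just before it is
 * completely rewritten, so the stores do not drag the old destination
 * contents through the cache (or write them back) for no benefit.
 */
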
/*
 * XSC3 optimised copy_user_highpage
 *
 * The source page may have some clean entries in the cache already, but we
 * can safely ignore them - break_cow() will flush them out of the cache
 * if we eventually end up using our copied page.
 */
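/*
 * The loop below copies the page in 64-byte chunks: eight ldrd/strd
 * pairs per chunk, with pld keeping the source prefetched a couple of
 * cache lines ahead of the reads.  The final chunk is copied via the
 * "2:" path so that we never preload past the end of the source page.
 */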
static void xsc3_mc_copy_user_page(void *kto, const void *kfrom)
{
	int tmp;

	asm volatile ("\
.arch xscale					\n\
	pld	[%1, #0]			\n\
	pld	[%1, #32]			\n\
1:	pld	[%1, #64]			\n\
	pld	[%1, #96]			\n\
						\n\
2:	ldrd	r2, r3, [%1], #8		\n\
	ldrd	r4, r5, [%1], #8		\n\
	mcr	p15, 0, %0, c7, c6, 1		@ invalidate\n\
	strd	r2, r3, [%0], #8		\n\
	ldrd	r2, r3, [%1], #8		\n\
	strd	r4, r5, [%0], #8		\n\
	ldrd	r4, r5, [%1], #8		\n\
	strd	r2, r3, [%0], #8		\n\
	strd	r4, r5, [%0], #8		\n\
	ldrd	r2, r3, [%1], #8		\n\
	ldrd	r4, r5, [%1], #8		\n\
	mcr	p15, 0, %0, c7, c6, 1		@ invalidate\n\
	strd	r2, r3, [%0], #8		\n\
	ldrd	r2, r3, [%1], #8		\n\
	subs	%2, %2, #1			\n\
	strd	r4, r5, [%0], #8		\n\
	ldrd	r4, r5, [%1], #8		\n\
	strd	r2, r3, [%0], #8		\n\
	strd	r4, r5, [%0], #8		\n\
	bgt	1b				\n\
	beq	2b				"
	: "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
	: "2" (PAGE_SIZE / 64 - 1)
	: "r2", "r3", "r4", "r5");
}
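
/*
 * Loop accounting, as a worked example for 4 KiB pages: %2 starts at
 * PAGE_SIZE / 64 - 1 = 63, so 63 chunks go through the preloading "1:"
 * path and the final chunk through "beq 2b", copying 64 * 64 = 4096
 * bytes in total; semantically this is just copy_page()/memcpy() of
 * one page.
 */
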
void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kto = kmap_atomic(to);
	kfrom = kmap_atomic(from);
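	/*
	 * Flush any dirty user-space alias of the source page first:
	 * with a virtually indexed L1 cache, data written through the
	 * user mapping would otherwise not be visible through the
	 * kernel mapping we read below.
	 */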
	flush_cache_page(vma, vaddr, page_to_pfn(from));
	xsc3_mc_copy_user_page(kto, kfrom);
	kunmap_atomic(kfrom);
	kunmap_atomic(kto);
}

/*
 * XSC3 optimised clear_user_highpage
 */
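/*
 * Each 32-byte destination cache line is invalidated and then zeroed
 * with four strd stores of a zeroed r2/r3 pair; r1 counts down the
 * PAGE_SIZE / 32 lines.  Semantically this is just
 * memset(kaddr, 0, PAGE_SIZE).
 */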
void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page);
	asm volatile ("\
.arch xscale					\n\
	mov	r1, %2				\n\
	mov	r2, #0				\n\
	mov	r3, #0				\n\
1:	mcr	p15, 0, %0, c7, c6, 1		@ invalidate line\n\
	strd	r2, r3, [%0], #8		\n\
	strd	r2, r3, [%0], #8		\n\
	strd	r2, r3, [%0], #8		\n\
	strd	r2, r3, [%0], #8		\n\
	subs	r1, r1, #1			\n\
	bne	1b"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 32)
	: "r1", "r2", "r3");
	kunmap_atomic(kaddr);
}

struct cpu_user_fns xsc3_mc_user_fns __initdata = {
	.cpu_clear_user_highpage = xsc3_mc_clear_user_highpage,
	.cpu_copy_user_highpage	= xsc3_mc_copy_user_highpage,
};
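
/*
 * Nothing calls this table directly.  On kernels built for multiple
 * CPU types, the boot-time processor setup copies the cpu_user_fns
 * selected for the running CPU into the global cpu_user structure,
 * through which copy_user_highpage() and clear_user_highpage()
 * dispatch (see <asm/page.h>).
 */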