xref: /linux/arch/arm64/mm/flush.c (revision 0773e3a851c8afd46cefb0cbf8d0977d454d899e)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/flush.c
 *
 * Copyright (C) 1995-2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/libnvdimm.h>	/* arch_wb_cache_pmem()/arch_invalidate_pmem() prototypes */
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/cache.h>
#include <asm/tlbflush.h>

void sync_icache_aliases(unsigned long start, unsigned long end)
{
	if (icache_is_aliasing()) {
		dcache_clean_pou(start, end);
		icache_inval_all_pou();
	} else {
		/*
		 * Don't issue kick_all_cpus_sync() after I-cache invalidation
		 * for user mappings.
		 */
		caches_clean_inval_pou(start, end);
	}
}
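
/*
 * Usage sketch (illustration only, not compiled): after the kernel writes
 * instructions through its own alias of a page that user space will
 * execute, the D-side updates must be made visible to the I-side before
 * the new instructions can be fetched.  The "insns" and "len" names below
 * are hypothetical.
 */
#if 0
	void *kaddr = page_address(page);

	memcpy(kaddr, insns, len);
	sync_icache_aliases((unsigned long)kaddr, (unsigned long)kaddr + len);
#endif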

static void flush_ptrace_access(struct vm_area_struct *vma, unsigned long start,
				unsigned long end)
{
	if (vma->vm_flags & VM_EXEC)
		sync_icache_aliases(start, end);
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user space"
 * model to handle this.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
	memcpy(dst, src, len);
	flush_ptrace_access(vma, (unsigned long)dst, (unsigned long)dst + len);
}
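
/*
 * Caller sketch (illustration only, not compiled): the generic ptrace
 * write path in mm/memory.c does roughly the following, which is what
 * gives flush_ptrace_access() its chance to sync the I-cache for
 * executable mappings.  Variable names here are illustrative.
 */
#if 0
	void *maddr = kmap_local_page(page);

	copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
	kunmap_local(maddr);
#endif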

void __sync_icache_dcache(pte_t pte)
{
	struct page *page = pte_page(pte);

	/*
	 * HugeTLB pages are always fully mapped, so setting only the head
	 * page's PG_dcache_clean flag is enough.
	 */
	if (PageHuge(page))
		page = compound_head(page);

	if (!test_bit(PG_dcache_clean, &page->flags)) {
		sync_icache_aliases((unsigned long)page_address(page),
				    (unsigned long)page_address(page) +
					    page_size(page));
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL_GPL(__sync_icache_dcache);
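
/*
 * Call-site sketch (illustration only, not compiled): the arm64
 * set_pte_at() path in <asm/pgtable.h> defers this maintenance until a
 * user-executable mapping is actually installed, roughly as below, so
 * kernel-only writes never pay for I-cache synchronisation.
 */
#if 0
	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte);
#endif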

/*
 * This function is called when a page has been modified by the kernel.
 * Mark it as dirty so that the caches are flushed when it is later mapped
 * executable into user space (see __sync_icache_dcache).
 */
void flush_dcache_page(struct page *page)
{
	/*
	 * HugeTLB pages are always fully mapped and only the head page has
	 * PG_dcache_clean set (see the comment in __sync_icache_dcache()).
	 */
	if (PageHuge(page))
		page = compound_head(page);

	if (test_bit(PG_dcache_clean, &page->flags))
		clear_bit(PG_dcache_clean, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);
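
/*
 * Usage sketch (illustration only, not compiled): code that writes into a
 * page through a kernel mapping calls flush_dcache_page() so that a later
 * executable user mapping triggers __sync_icache_dcache().  The "data"
 * and "len" names below are hypothetical.
 */
#if 0
	void *kaddr = kmap_local_page(page);

	memcpy(kaddr, data, len);
	kunmap_local(kaddr);
	flush_dcache_page(page);	/* page needs I-cache maintenance again */
#endif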

/*
 * Additional functions defined in assembly.
 */
EXPORT_SYMBOL(caches_clean_inval_pou);

#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size)
{
	/* Ensure order against any prior non-cacheable writes */
	dmb(osh);
	dcache_clean_pop((unsigned long)addr, (unsigned long)addr + size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

void arch_invalidate_pmem(void *addr, size_t size)
{
	dcache_inval_poc((unsigned long)addr, (unsigned long)addr + size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
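
/*
 * Usage sketch (illustration only, not compiled): a persistent-memory
 * consumer could pair the two helpers as below; "pmem_addr", "src" and
 * "len" are hypothetical.  Real callers reach these functions through the
 * ARCH_HAS_PMEM_API contract in the pmem/dax code.
 */
#if 0
	/* Make CPU stores durable: clean to the point of persistence. */
	memcpy(pmem_addr, src, len);
	arch_wb_cache_pmem(pmem_addr, len);

	/*
	 * Drop potentially stale clean lines before re-reading media that
	 * changed underneath the CPU, e.g. after an error range was cleared.
	 */
	arch_invalidate_pmem(pmem_addr, len);
#endif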
#endif