xref: /linux/arch/arm64/include/asm/tlbflush.h (revision 3932b9ca55b0be314a36d3e84faff3e823c081f5)
/*
 * Based on arch/arm/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <asm/cputype.h>

extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);

extern struct cpu_tlb_fns cpu_tlb;

/*
 *	TLB Management
 *	==============
 *
 *	The TLB maintenance operations below are implemented as inline
 *	functions in this header.
 *
 *	The TLB specific code is expected to perform whatever tests it needs
 *	to determine if it should invalidate the TLB for each call.  Start
 *	addresses are inclusive and end addresses are exclusive; it is safe to
 *	round these addresses down.
 *
 *	flush_tlb_all()
 *
 *		Invalidate the entire TLB.
 *
 *	flush_tlb_mm(mm)
 *
 *		Invalidate all TLB entries in a particular address space.
 *		- mm	- mm_struct describing address space
 *
 *	flush_tlb_range(vma,start,end)
 *
 *		Invalidate a range of TLB entries in the specified address
 *		space.
 *		- vma	- vm_area_struct describing address range
 *		- start - start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 *
 *	flush_tlb_page(vma,vaddr)
 *
 *		Invalidate the specified page in the specified address space.
 *		- vma	- vm_area_struct describing address space
 *		- vaddr - virtual address (may not be aligned)
 *
 *	flush_tlb_kernel_range(start,end)
 *
 *		Invalidate the TLB entries covering the specified range of
 *		kernel virtual addresses.
 *		- start - start address (inclusive, may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 */
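/*
 * The inline helpers below share a common barrier pattern: a DSB ISHST
 * before the TLBI makes prior page table updates visible to the table
 * walkers, a DSB ISH afterwards waits for the invalidation to complete
 * across the Inner Shareable domain, and an ISB is added where subsequent
 * instructions must observe the new mapping (the full flush and kernel
 * range flush).
 *
 * Illustrative usage only (not additional API defined by this header): a
 * caller that has just updated a single user PTE would typically do
 *
 *	set_pte_at(vma->vm_mm, addr, ptep, pte);
 *	flush_tlb_page(vma, addr);
 *
 * while changes to a block of kernel mappings are followed by
 *
 *	flush_tlb_kernel_range(start, end);
 */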
static inline void flush_tlb_all(void)
{
	dsb(ishst);
	asm("tlbi	vmalle1is");
	dsb(ish);
	isb();
}

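/*
 * Invalidate all entries for the mm's ASID.  The ASID is placed in bits
 * [63:48] of the TLBI ASIDE1IS argument; the Inner Shareable variant
 * broadcasts the invalidation to all CPUs.
 */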
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long asid = (unsigned long)ASID(mm) << 48;

	dsb(ishst);
	asm("tlbi	aside1is, %0" : : "r" (asid));
	dsb(ish);
}

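/*
 * Invalidate a single user page.  TLBI VAE1IS takes VA[55:12] in bits
 * [43:0] of its argument and the ASID in bits [63:48], hence the
 * "uaddr >> 12" below.
 */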
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
	unsigned long addr = uaddr >> 12 |
		((unsigned long)ASID(vma->vm_mm) << 48);

	dsb(ishst);
	asm("tlbi	vae1is, %0" : : "r" (addr));
	dsb(ish);
}

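/*
 * Invalidate a user address range one page at a time.  start and end are
 * converted to the TLBI encoding (VA >> 12 with the ASID in the top bits),
 * so the loop stride of 1 << (PAGE_SHIFT - 12) advances by exactly one
 * page per iteration regardless of the configured page size.
 */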
static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end)
{
	unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
	unsigned long addr;

	start = asid | (start >> 12);
	end = asid | (end >> 12);

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		asm("tlbi vae1is, %0" : : "r"(addr));
	dsb(ish);
}

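/*
 * Invalidate a kernel address range.  TLBI VAAE1IS matches the VA for any
 * ASID (kernel mappings are global); the trailing ISB ensures the updated
 * mapping is visible to instructions executed after the flush.
 */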
static inline void __flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start >>= 12;
	end >>= 12;

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		asm("tlbi vaae1is, %0" : : "r"(addr));
	dsb(ish);
	isb();
}

/*
 * MAX_TLB_RANGE caps the size of a range that is flushed page by page
 * (1024 pages, i.e. 4MB with 4KB pages); larger ranges fall back to a
 * full flush.  This is meant to avoid soft lock-ups on large TLB flushing
 * ranges and is not necessarily a performance improvement.
 */
#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)

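/*
 * Once a range exceeds MAX_TLB_RANGE, flush_tlb_range() falls back to a
 * whole-ASID flush (flush_tlb_mm()) and flush_tlb_kernel_range() to a
 * full flush (flush_tlb_all()).
 */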
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if ((end - start) <= MAX_TLB_RANGE)
		__flush_tlb_range(vma, start, end);
	else
		flush_tlb_mm(vma->vm_mm);
}

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if ((end - start) <= MAX_TLB_RANGE)
		__flush_tlb_kernel_range(start, end);
	else
		flush_tlb_all();
}

/*
 * On AArch64, cache coherency for user mappings is handled by set_pte_at(),
 * so update_mmu_cache() only needs to publish the page table update.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * set_pte() does not have a DSB for user mappings, so make sure that
	 * the page table write is visible.
	 */
	dsb(ishst);
}

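/* No additional work is required for PMD-level (huge page) mappings. */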
#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#endif	/* __ASSEMBLY__ */

#endif	/* __ASM_TLBFLUSH_H */