/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H

#define MMU_NO_CONTEXT	~0UL

#include <linux/mm_types.h>
#include <linux/mmu_notifier.h>
#include <asm/book3s/64/tlbflush-hash.h>
#include <asm/book3s/64/tlbflush-radix.h>

/*
 * TLB flush actions. Used as argument to the tlbiel_all()
 * implementations (radix__tlbiel_all()/hash__tlbiel_all()).
 */
enum {
	TLB_INVAL_SCOPE_GLOBAL = 0,	/* invalidate all TLBs */
	TLB_INVAL_SCOPE_LPID = 1,	/* invalidate TLBs for current LPID */
};
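
/*
 * Illustration (a sketch, not part of the original interface docs): the
 * scope value selects how much of the local TLB gets invalidated, e.g.
 *
 *	radix__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);	// flush everything
 *	radix__tlbiel_all(TLB_INVAL_SCOPE_LPID);	// current LPID only
 */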

static inline void tlbiel_all(void)
{
	/*
	 * This is used for host machine check and bootup.
	 *
	 * This uses early_radix_enabled() (and the implementations use
	 * early_cpu_has_feature() etc.) because those work early in boot,
	 * and this is the machine check path, which is not performance
	 * critical.
	 */
	if (early_radix_enabled())
		radix__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
	else
		hash__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
}
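
/*
 * Usage sketch (illustrative caller, not the actual machine check
 * code): a handler that must assume the local TLB may hold corrupted
 * entries can simply do:
 *
 *	tlbiel_all();	// scrub all of this CPU's translations
 */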

static inline void tlbiel_all_lpid(bool radix)
{
	/*
	 * This is used for guest machine check.
	 */
	if (radix)
		radix__tlbiel_all(TLB_INVAL_SCOPE_LPID);
	else
		hash__tlbiel_all(TLB_INVAL_SCOPE_LPID);
}
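
/*
 * Usage sketch (hypothetical handler; kvm_is_radix() is KVM's real
 * predicate but the surrounding code is illustrative only): on a
 * machine check taken while a guest was running, the host can scrub
 * the guest's translations with:
 *
 *	tlbiel_all_lpid(kvm_is_radix(kvm));
 */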

#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
				       unsigned long start, unsigned long end)
{
	if (radix_enabled())
		radix__flush_pmd_tlb_range(vma, start, end);
}

#define __HAVE_ARCH_FLUSH_PUD_TLB_RANGE
static inline void flush_pud_tlb_range(struct vm_area_struct *vma,
				       unsigned long start, unsigned long end)
{
	if (radix_enabled())
		radix__flush_pud_tlb_range(vma, start, end);
}

#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
					   unsigned long start,
					   unsigned long end)
{
	if (radix_enabled())
		radix__flush_hugetlb_tlb_range(vma, start, end);
}
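
/*
 * Note on the three range-flush wrappers above: only radix needs an
 * explicit flush here. Under the hash MMU, translations are
 * invalidated from the hash page table update paths themselves
 * (batched and flushed by the machinery in tlbflush-hash.h), so the
 * hash case is intentionally a no-op.
 */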

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (radix_enabled())
		radix__flush_tlb_range(vma, start, end);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	if (radix_enabled())
		radix__flush_tlb_kernel_range(start, end);
}

static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		radix__local_flush_tlb_mm(mm);
}

static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
	if (radix_enabled())
		radix__local_flush_tlb_page(vma, vmaddr);
}

static inline void local_flush_tlb_page_psize(struct mm_struct *mm,
					      unsigned long vmaddr, int psize)
{
	if (radix_enabled())
		radix__local_flush_tlb_page_psize(mm, vmaddr, psize);
}

static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (radix_enabled())
		radix__tlb_flush(tlb);
	else
		hash__tlb_flush(tlb);
}

#ifdef CONFIG_SMP
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		radix__flush_tlb_mm(mm);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	if (radix_enabled())
		radix__flush_tlb_page(vma, vmaddr);
}
#else
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr)	local_flush_tlb_page(vma, addr)
#endif /* CONFIG_SMP */

#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
						unsigned long address,
						pte_t *ptep)
{
	/*
	 * Book3S 64 does not require spurious fault flushes because the PTE
	 * must be re-fetched in case of an access permission problem. So the
	 * only reason for a spurious fault should be concurrent modification
	 * to the PTE, in which case the PTE will eventually be re-fetched by
	 * the MMU when it attempts the access again.
	 *
	 * See: Power ISA Version 3.1B, 6.10.1.2 Modifying a Translation Table
	 * Entry, Setting a Reference or Change Bit or Upgrading Access
	 * Authority (PTE Subject to Atomic Hardware Updates):
	 *
	 * "If the only change being made to a valid PTE that is subject to
	 *  atomic hardware updates is to set the Reference or Change bit to
	 *  1 or to upgrade access authority, a simpler sequence suffices
	 *  because the translation hardware will refetch the PTE if an
	 *  access is attempted for which the only problems were reference
	 *  and/or change bits needing to be set or insufficient access
	 *  authority."
	 *
	 * The nest MMU in POWER9 does not perform this PTE re-fetch, but
	 * it avoids the spurious fault problem by flushing the TLB before
	 * upgrading PTE permissions, see radix__ptep_set_access_flags.
	 */
}

static inline bool __pte_protnone(unsigned long pte)
{
	return (pte & (pgprot_val(PAGE_NONE) | _PAGE_RWX)) == pgprot_val(PAGE_NONE);
}
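
/*
 * Commentary (a reading of the PTE encoding): PAGE_NONE on Book3S 64
 * is encoded with _PAGE_PRIVILEGED set and all of _PAGE_RWX clear, so
 * a PROT_NONE user PTE (e.g. from NUMA hinting) looks "privileged".
 * __pte_protnone() recognises that encoding so the VM_WARN_ON_ONCE()
 * checks below do not misfire on such PTEs.
 */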

static inline bool __pte_flags_need_flush(unsigned long oldval,
					  unsigned long newval)
{
	unsigned long delta = oldval ^ newval;

	/*
	 * The return value of this function doesn't matter for hash:
	 * ptep_modify_prot_start() does a pte_update(), which does or
	 * schedules any necessary hash table update and flush.
	 */
	if (!radix_enabled())
		return true;

	/*
	 * We do not expect kernel mappings, non-PTEs, or not-present PTEs.
	 */
	VM_WARN_ON_ONCE(!__pte_protnone(oldval) && oldval & _PAGE_PRIVILEGED);
	VM_WARN_ON_ONCE(!__pte_protnone(newval) && newval & _PAGE_PRIVILEGED);
	VM_WARN_ON_ONCE(!(oldval & _PAGE_PTE));
	VM_WARN_ON_ONCE(!(newval & _PAGE_PTE));
	VM_WARN_ON_ONCE(!(oldval & _PAGE_PRESENT));
	VM_WARN_ON_ONCE(!(newval & _PAGE_PRESENT));

	/*
	 * Must flush on any change except READ, WRITE, EXEC, DIRTY, ACCESSED.
	 *
	 * In theory, some changed software bits could be tolerated; in
	 * practice those should rarely if ever matter.
	 */

	if (delta & ~(_PAGE_RWX | _PAGE_DIRTY | _PAGE_ACCESSED))
		return true;

	/*
	 * If any of the above was present in old but cleared in new, flush.
	 * With the exception of _PAGE_ACCESSED, don't worry about flushing
	 * if that was cleared (see the comment in ptep_clear_flush_young()).
	 */
	if ((delta & ~_PAGE_ACCESSED) & oldval)
		return true;

	return false;
}
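
/*
 * Worked example of the logic above (radix, hypothetical PTE values):
 * write-protecting a page clears _PAGE_WRITE, so delta contains
 * _PAGE_WRITE and that bit was set in oldval -> flush required.
 * Upgrading a read-only PTE to read-write sets a bit that was clear in
 * oldval, so (delta & ~_PAGE_ACCESSED) & oldval is 0 -> no flush; the
 * MMU re-fetches the PTE when the stale read-only entry faults.
 */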

static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
{
	return __pte_flags_need_flush(pte_val(oldpte), pte_val(newpte));
}
#define pte_needs_flush pte_needs_flush

static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
{
	return __pte_flags_need_flush(pmd_val(oldpmd), pmd_val(newpmd));
}
#define huge_pmd_needs_flush huge_pmd_needs_flush
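
/*
 * These helpers let generic mm code (for example mprotect's change
 * protection path) skip a TLB flush when the old and new PTE/PMD
 * values prove one is unnecessary.
 */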

extern bool tlbie_capable;
extern bool tlbie_enabled;

static inline bool cputlb_use_tlbie(void)
{
	return tlbie_enabled;
}
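
/*
 * Background sketch: tlbie is the broadcast form of the invalidation
 * instruction (its effect is visible to all CPUs), while tlbiel only
 * affects the executing CPU. tlbie_capable reports whether broadcast
 * invalidation is usable at all on this system; tlbie_enabled
 * additionally allows it to be switched off at runtime, in which case
 * flushes fall back to tlbiel plus IPIs.
 */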

#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */