xref: /linux/arch/alpha/include/asm/tlbflush.h (revision f7af616c632ee2ac3af0876fe33bf9e0232e665a)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ALPHA_TLBFLUSH_H
3 #define _ALPHA_TLBFLUSH_H
4 
5 #include <linux/mm.h>
6 #include <linux/sched.h>
7 #include <asm/compiler.h>
8 
9 #ifndef __EXTERN_INLINE
10 #define __EXTERN_INLINE extern inline
11 #define __MMU_EXTERN_INLINE
12 #endif
13 
14 extern void __load_new_mm_context(struct mm_struct *);
15 
16 
17 /* Use a few helper functions to hide the ugly broken ASN
18    numbers on early Alphas (ev4 and ev45).  */
19 
__EXTERN_INLINE void
ev4_flush_tlb_current(struct mm_struct *mm)
{
	/* EV4/EV45 have broken ASN support, so we cannot rely on ASN
	   tagging to distinguish stale entries: allocate a fresh mm
	   context and then wipe every process-mapped TLB entry
	   (tbiap = TB Invalidate All Process).  */
	__load_new_mm_context(mm);
	tbiap();
}
26 
__EXTERN_INLINE void
ev5_flush_tlb_current(struct mm_struct *mm)
{
	/* EV5+ have working ASNs: switching to a newly allocated context
	   (new ASN) implicitly retires the old entries, so no explicit
	   TLB invalidate is needed.  */
	__load_new_mm_context(mm);
}
32 
33 /* Flush just one page in the current TLB set.  We need to be very
34    careful about the icache here, there is no way to invalidate a
35    specific icache page.  */
36 
37 __EXTERN_INLINE void
38 ev4_flush_tlb_current_page(struct mm_struct * mm,
39 			   struct vm_area_struct *vma,
40 			   unsigned long addr)
41 {
42 	int tbi_flag = 2;
43 	if (vma->vm_flags & VM_EXEC) {
44 		__load_new_mm_context(mm);
45 		tbi_flag = 3;
46 	}
47 	tbi(tbi_flag, addr);
48 }
49 
50 __EXTERN_INLINE void
51 ev5_flush_tlb_current_page(struct mm_struct * mm,
52 			   struct vm_area_struct *vma,
53 			   unsigned long addr)
54 {
55 	if (vma->vm_flags & VM_EXEC)
56 		__load_new_mm_context(mm);
57 	else
58 		tbi(2, addr);
59 }
60 
61 
62 #ifdef CONFIG_ALPHA_GENERIC
63 # define flush_tlb_current		alpha_mv.mv_flush_tlb_current
64 # define flush_tlb_current_page		alpha_mv.mv_flush_tlb_current_page
65 #else
66 # ifdef CONFIG_ALPHA_EV4
67 #  define flush_tlb_current		ev4_flush_tlb_current
68 #  define flush_tlb_current_page	ev4_flush_tlb_current_page
69 # else
70 #  define flush_tlb_current		ev5_flush_tlb_current
71 #  define flush_tlb_current_page	ev5_flush_tlb_current_page
72 # endif
73 #endif
74 
75 #ifdef __MMU_EXTERN_INLINE
76 #undef __EXTERN_INLINE
77 #undef __MMU_EXTERN_INLINE
78 #endif
79 
/* Flush current user mapping.  */
static inline void
flush_tlb(void)
{
	/* Flush the mm currently active on this CPU; resolves to the
	   ev4/ev5 variant (or the machine-vector hook on generic
	   kernels) chosen above.  */
	flush_tlb_current(current->active_mm);
}
86 
87 /* Flush someone else's user mapping.  */
88 static inline void
89 flush_tlb_other(struct mm_struct *mm)
90 {
91 	unsigned long *mmc = &mm->context[smp_processor_id()];
92 	/* Check it's not zero first to avoid cacheline ping pong
93 	   when possible.  */
94 	if (*mmc) *mmc = 0;
95 }
96 
97 #ifndef CONFIG_SMP
/* Flush everything (kernel mapping may also have changed
   due to vmalloc/vfree).  */
static inline void flush_tlb_all(void)
{
	/* tbia = TB Invalidate All: drops every entry, including
	   global (kernel) translations.  */
	tbia();
}
104 
105 /* Flush a specified user mapping.  */
106 static inline void
107 flush_tlb_mm(struct mm_struct *mm)
108 {
109 	if (mm == current->active_mm)
110 		flush_tlb_current(mm);
111 	else
112 		flush_tlb_other(mm);
113 }
114 
115 /* Page-granular tlb flush.  */
116 static inline void
117 flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
118 {
119 	struct mm_struct *mm = vma->vm_mm;
120 
121 	if (mm == current->active_mm)
122 		flush_tlb_current_page(mm, vma, addr);
123 	else
124 		flush_tlb_other(mm);
125 }
126 
/* Flush a specified range of user mapping.  On the Alpha we flush
   the whole user tlb.  */
static inline void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		unsigned long end)
{
	/* start/end are deliberately ignored: per-page invalidation of
	   a range is not worth it here, so drop the whole mm's user
	   TLB instead.  */
	flush_tlb_mm(vma->vm_mm);
}
135 
136 #else /* CONFIG_SMP */
137 
138 extern void flush_tlb_all(void);
139 extern void flush_tlb_mm(struct mm_struct *);
140 extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
141 extern void flush_tlb_range(struct vm_area_struct *, unsigned long,
142 			    unsigned long);
143 
144 #endif /* CONFIG_SMP */
145 
146 static inline void flush_tlb_kernel_range(unsigned long start,
147 					unsigned long end)
148 {
149 	flush_tlb_all();
150 }
151 
152 #endif /* _ALPHA_TLBFLUSH_H */
153