xref: /linux/arch/alpha/include/asm/tlbflush.h (revision 69050f8d6d075dc01af7a5f2f550a8067510366f)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ALPHA_TLBFLUSH_H
3 #define _ALPHA_TLBFLUSH_H
4 
5 #include <linux/mm.h>
6 #include <linux/sched.h>
7 #include <asm/compiler.h>
8 
9 #ifndef __EXTERN_INLINE
10 #define __EXTERN_INLINE extern inline
11 #define __MMU_EXTERN_INLINE
12 #endif
13 
14 extern void __load_new_mm_context(struct mm_struct *);
15 
16 
/* Flush the entire user TLB for the currently-active mm on an EV5-class
   CPU.  Implemented by loading a fresh mm context via
   __load_new_mm_context() rather than invalidating individual entries
   (presumably this allocates a new ASN — confirm in the out-of-line
   definition).  */
17 __EXTERN_INLINE void
18 ev5_flush_tlb_current(struct mm_struct *mm)
19 {
20 	__load_new_mm_context(mm);
21 }
22 
23 /* Flush just one page in the current TLB set.  We need to be very
24    careful about the icache here, there is no way to invalidate a
25    specific icache page.  */
26 
27 __EXTERN_INLINE void
28 ev5_flush_tlb_current_page(struct mm_struct * mm,
29 			   struct vm_area_struct *vma,
30 			   unsigned long addr)
31 {
	/* Executable mapping: a single icache page cannot be invalidated
	   (see comment above), so fall back to reloading the whole mm
	   context.  */
32 	if (vma->vm_flags & VM_EXEC)
33 		__load_new_mm_context(mm);
	/* Data-only mapping: invalidate just the TLB entry for addr.
	   NOTE(review): tbi type 2 appears to be the data-side
	   single-entry invalidate (TBISD) — confirm against the Alpha
	   PALcode reference.  */
34 	else
35 		tbi(2, addr);
36 }
37 
38 
39 #define flush_tlb_current	ev5_flush_tlb_current
40 #define flush_tlb_current_page	ev5_flush_tlb_current_page
41 
42 #ifdef __MMU_EXTERN_INLINE
43 #undef __EXTERN_INLINE
44 #undef __MMU_EXTERN_INLINE
45 #endif
46 
47 /* Flush current user mapping.  */
48 static inline void
49 flush_tlb(void)
50 {
	/* active_mm (rather than mm) so this also does something sane
	   when called from a kernel thread that has borrowed an mm —
	   presumably; verify against callers.  */
51 	flush_tlb_current(current->active_mm);
52 }
53 
54 /* Flush someone else's user mapping.  Zeroing this CPU's slot in
55    mm->context means the mm no longer has a valid context here, so it
56    presumably picks up a fresh context (and thus a clean TLB) the
57    next time it is activated on this CPU — confirm against
58    __load_new_mm_context().  */
56 static inline void
57 flush_tlb_other(struct mm_struct *mm)
58 {
59 	unsigned long *mmc = &mm->context[smp_processor_id()];
60 	/* Check it's not zero first to avoid cacheline ping pong
61 	   when possible.  */
62 
	/* READ_ONCE/WRITE_ONCE: the slot may be written concurrently by
	   other CPUs; avoid torn/duplicated accesses.  */
63 	if (READ_ONCE(*mmc))
64 		WRITE_ONCE(*mmc, 0);
65 }
65 
66 #ifndef CONFIG_SMP
67 /* Flush everything (kernel mapping may also have changed
68    due to vmalloc/vfree).  */
69 static inline void flush_tlb_all(void)
70 {
	/* tbia = translation-buffer invalidate all (PALcode op): drops
	   every TLB entry, kernel and user.  */
71 	tbia();
72 }
73 
74 /* Flush a specified user mapping.  If the mm is the one running on
75    this CPU, flush the hardware TLB directly; otherwise just
76    invalidate the mm's per-CPU context so it gets flushed lazily on
77    its next activation.  */
76 static inline void
77 flush_tlb_mm(struct mm_struct *mm)
78 {
79 	if (mm == current->active_mm)
80 		flush_tlb_current(mm);
81 	else
82 		flush_tlb_other(mm);
83 }
83 
84 /* Page-granular tlb flush.  Same split as flush_tlb_mm: flush the
85    live TLB only when the vma's mm is active on this CPU, otherwise
86    defer via the per-CPU context.  */
86 static inline void
87 flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
88 {
89 	struct mm_struct *mm = vma->vm_mm;
90 
91 	if (mm == current->active_mm)
92 		flush_tlb_current_page(mm, vma, addr);
93 	else
94 		flush_tlb_other(mm);
95 }
95 
96 /* Flush a specified range of user mapping.  On the Alpha we flush
97    the whole user tlb.  */
98 static inline void
99 flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
100 		unsigned long end)
101 {
	/* start/end deliberately unused: per the comment above, a range
	   flush is implemented as a full-mm flush on Alpha.  */
102 	flush_tlb_mm(vma->vm_mm);
103 }
104 
105 #else /* CONFIG_SMP */
106 
107 extern void flush_tlb_all(void);
108 extern void flush_tlb_mm(struct mm_struct *);
109 extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
110 extern void flush_tlb_range(struct vm_area_struct *, unsigned long,
111 			    unsigned long);
112 
113 #endif /* CONFIG_SMP */
114 
/* Flush kernel-address TLB entries in [start, end).  There is no
   finer-grained kernel-range primitive here, so this just flushes
   everything; the range arguments are deliberately ignored.  */
115 static inline void flush_tlb_kernel_range(unsigned long start,
116 					unsigned long end)
117 {
118 	flush_tlb_all();
119 }
120 
121 #endif /* _ALPHA_TLBFLUSH_H */
122