xref: /linux/arch/alpha/include/asm/tlbflush.h (revision 621cde16e49b3ecf7d59a8106a20aaebfb4a59a9)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ALPHA_TLBFLUSH_H
3 #define _ALPHA_TLBFLUSH_H
4 
5 #include <linux/mm.h>
6 #include <linux/sched.h>
7 #include <asm/compiler.h>
8 
9 #ifndef __EXTERN_INLINE
10 #define __EXTERN_INLINE extern inline
11 #define __MMU_EXTERN_INLINE
12 #endif
13 
14 extern void __load_new_mm_context(struct mm_struct *);
15 
16 
/* Flush this CPU's view of the current mm's user TLB entries.
   Delegates to __load_new_mm_context(), which (presumably — defined
   in arch code elsewhere) gives the mm a fresh context/ASN rather
   than invalidating entries one by one.  */
__EXTERN_INLINE void
ev5_flush_tlb_current(struct mm_struct *mm)
{
	__load_new_mm_context(mm);
}
22 
23 /* Flush just one page in the current TLB set.  We need to be very
24    careful about the icache here, there is no way to invalidate a
25    specific icache page.  */
26 
27 __EXTERN_INLINE void
ev5_flush_tlb_current_page(struct mm_struct * mm,struct vm_area_struct * vma,unsigned long addr)28 ev5_flush_tlb_current_page(struct mm_struct * mm,
29 			   struct vm_area_struct *vma,
30 			   unsigned long addr)
31 {
32 	if (vma->vm_flags & VM_EXEC)
33 		__load_new_mm_context(mm);
34 	else
35 		tbi(2, addr);
36 }
37 
38 
39 #define flush_tlb_current	ev5_flush_tlb_current
40 #define flush_tlb_current_page	ev5_flush_tlb_current_page
41 
42 #ifdef __MMU_EXTERN_INLINE
43 #undef __EXTERN_INLINE
44 #undef __MMU_EXTERN_INLINE
45 #endif
46 
/* Flush the current task's user mapping on this CPU.  Uses
   current->active_mm (not current->mm) so it also covers kernel
   threads that have borrowed a user mm.  */
static inline void
flush_tlb(void)
{
	flush_tlb_current(current->active_mm);
}
53 
54 /* Flush someone else's user mapping.  */
55 static inline void
flush_tlb_other(struct mm_struct * mm)56 flush_tlb_other(struct mm_struct *mm)
57 {
58 	unsigned long *mmc = &mm->context[smp_processor_id()];
59 	/* Check it's not zero first to avoid cacheline ping pong
60 	   when possible.  */
61 	if (*mmc) *mmc = 0;
62 }
63 
64 #ifndef CONFIG_SMP
/* Flush everything (kernel mapping may also have changed
   due to vmalloc/vfree).  tbia invalidates the entire
   translation buffer on this CPU.  */
static inline void flush_tlb_all(void)
{
	tbia();
}
71 
72 /* Flush a specified user mapping.  */
73 static inline void
flush_tlb_mm(struct mm_struct * mm)74 flush_tlb_mm(struct mm_struct *mm)
75 {
76 	if (mm == current->active_mm)
77 		flush_tlb_current(mm);
78 	else
79 		flush_tlb_other(mm);
80 }
81 
82 /* Page-granular tlb flush.  */
83 static inline void
flush_tlb_page(struct vm_area_struct * vma,unsigned long addr)84 flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
85 {
86 	struct mm_struct *mm = vma->vm_mm;
87 
88 	if (mm == current->active_mm)
89 		flush_tlb_current_page(mm, vma, addr);
90 	else
91 		flush_tlb_other(mm);
92 }
93 
/* Flush a specified range of user mapping.  On the Alpha we flush
   the whole user tlb — there is no ranged invalidate, so the
   start/end arguments are intentionally unused.  */
static inline void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
102 
103 #else /* CONFIG_SMP */
104 
105 extern void flush_tlb_all(void);
106 extern void flush_tlb_mm(struct mm_struct *);
107 extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
108 extern void flush_tlb_range(struct vm_area_struct *, unsigned long,
109 			    unsigned long);
110 
111 #endif /* CONFIG_SMP */
112 
/* Flush kernel TLB entries for [start, end).  There is no ranged
   invalidate on Alpha, so fall back to flushing everything; the
   arguments are unused for that reason.  */
static inline void flush_tlb_kernel_range(unsigned long start,
					unsigned long end)
{
	flush_tlb_all();
}
118 
119 #endif /* _ALPHA_TLBFLUSH_H */
120