/* arch/s390/include/asm/tlbflush.h */
#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
	asm volatile("ptlb" : : : "memory");
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
	/* Global TLB flush for the mm */
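	/*
	 * IDTE is emitted via .insn so that older assemblers cope.
	 * A plausible reading of the operands: bit 0x800 (2048) in the
	 * first register requests the clearing-by-ASCE operation, and
	 * the ASCE to flush is passed in the second register.
	 */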
	asm volatile(
		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
		: : "a" (2048), "a" (asce) : "cc");
}

#ifdef CONFIG_SMP
void smp_ptlb_all(void);

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
	unsigned int dummy = 0;

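	/*
	 * The compare-and-swap itself is a dummy; the architectural
	 * side effect of CSP is a purge of the TLBs on all CPUs.
	 */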
	csp(&dummy, 0, 0);
}

/*
 * Flush TLB entries for a specific mm on all CPUs (in case gmap is
 * used this involves multiple ASCEs!).
 */
static inline void __tlb_flush_full(struct mm_struct *mm)
{
	preempt_disable();
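	/*
	 * A nonzero flush_count tells the context switch code that a
	 * flush is in progress; finish_arch_post_lock_switch waits for
	 * it to drop to zero before setting the mm_cpumask bit, so an
	 * attaching CPU cannot miss the flush (see asm/mmu_context.h).
	 */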
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		/* Local TLB flush */
		__tlb_flush_local();
	} else {
		/* Global TLB flush */
		__tlb_flush_global();
		/* Reset TLB flush mask */
		cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
	}
	atomic_dec(&mm->context.flush_count);
	preempt_enable();
}

static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	unsigned long gmap_asce;

	/*
	 * If the machine has IDTE, we prefer a per-mm flush on all
	 * CPUs over a local flush, even if the mm only ran on the
	 * local CPU.
	 */
	preempt_disable();
	atomic_inc(&mm->context.flush_count);
	gmap_asce = READ_ONCE(mm->context.gmap_asce);
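	/*
	 * Reading of the sentinel values below: gmap_asce is 0 when no
	 * gmap is attached, and -1UL when the gmap ASCEs cannot be
	 * tracked individually; in the latter case only a full flush
	 * is safe.
	 */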
	if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
		if (gmap_asce)
			__tlb_flush_idte(gmap_asce);
		__tlb_flush_idte(mm->context.asce);
	} else {
		__tlb_flush_full(mm);
	}
	/* Reset TLB flush mask */
	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
	atomic_dec(&mm->context.flush_count);
	preempt_enable();
}

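/*
 * Flush TLB entries for the kernel ASCE, falling back to a global
 * flush when IDTE is not available.
 */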
static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(init_mm.context.asce);
	else
		__tlb_flush_global();
}
#else
#define __tlb_flush_global()	__tlb_flush_local()
#define __tlb_flush_full(mm)	__tlb_flush_local()

/*
 * Flush TLB entries for a specific mm; without SMP a local flush
 * is sufficient.
 */
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	__tlb_flush_local();
}

static inline void __tlb_flush_kernel(void)
{
	__tlb_flush_local();
}
#endif

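/*
 * Flush the mm only if a flush was requested via the flush_mm flag
 * (set by the lazy pte flush helpers), then clear the flag.
 */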
static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
	if (mm->context.flush_mm) {
		__tlb_flush_mm(mm);
		mm->context.flush_mm = 0;
	}
}

/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes' TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush.
 */
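/*
 * A minimal sketch of the deferred flush pattern described above
 * (illustrative only, not the exact mm/ call sequence):
 *
 *	ptep_set_wrprotect(mm, addr, ptep);  (may only set
 *	                                      mm->context.flush_mm)
 *	...                                  (further pte updates)
 *	flush_tlb_mm(mm);                    (pending flush happens here)
 */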
#define flush_tlb()				do { } while (0)
#define flush_tlb_all()				do { } while (0)
#define flush_tlb_page(vma, addr)		do { } while (0)

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__tlb_flush_mm_lazy(mm);
}

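/*
 * There is no cheap ranged flush on s390, so flush_tlb_range simply
 * flushes the whole mm (lazily) instead of individual pages.
 */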
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__tlb_flush_mm_lazy(vma->vm_mm);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	__tlb_flush_kernel();
}

#endif /* _S390_TLBFLUSH_H */