xref: /linux/arch/s390/include/asm/tlbflush.h (revision 4fd18fc38757217c746aa063ba9e4729814dc737)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
	asm volatile("ptlb" : : : "memory");
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
	unsigned long opt;

	opt = IDTE_PTOA;
	if (MACHINE_HAS_TLB_GUEST)
		opt |= IDTE_GUEST_ASCE;
	/* Global TLB flush for the mm */
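	/* The .insn below encodes IDTE (invalidate DAT table entry, opcode 0xb98e) */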
	asm volatile(
		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
		: : "a" (opt), "a" (asce) : "cc");
}

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
	unsigned int dummy = 0;

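	/* compare and swap and purge (csp) flushes the TLBs on all CPUs */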
	csp(&dummy, 0, 0);
}

/*
 * Flush TLB entries for a specific mm on all CPUs (if a gmap is in use
 * this involves multiple ASCEs!).
 */
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	unsigned long gmap_asce;

	/*
	 * If the machine has IDTE we prefer to do a per-mm flush
	 * on all CPUs instead of a local flush, even if the mm
	 * only ran on the local CPU.
	 */
	preempt_disable();
	atomic_inc(&mm->context.flush_count);
	/* Reset TLB flush mask */
	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
	barrier();
	gmap_asce = READ_ONCE(mm->context.gmap_asce);
	if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
		if (gmap_asce)
			__tlb_flush_idte(gmap_asce);
		__tlb_flush_idte(mm->context.asce);
	} else {
		/* Global TLB flush */
		__tlb_flush_global();
	}
	atomic_dec(&mm->context.flush_count);
	preempt_enable();
}

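/*
 * Flush the TLB entries of the kernel address space (init_mm) on all CPUs.
 */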
static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(init_mm.context.asce);
	else
		__tlb_flush_global();
}

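/*
 * Flush the mm's TLB entries only if a flush has been deferred,
 * i.e. mm->context.flush_mm is set.
 */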
static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
	spin_lock(&mm->context.lock);
	if (mm->context.flush_mm) {
		mm->context.flush_mm = 0;
		__tlb_flush_mm(mm);
	}
	spin_unlock(&mm->context.lock);
}

/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes' TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context's TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush.
 */
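
/*
 * Illustrative sketch only (variable names are made up): a fork-time
 * caller such as copy_page_range() write-protects the source ptes with
 * ptep_set_wrprotect() and relies on a later flush_tlb_mm() to perform
 * the deferred flush:
 *
 *	ptep_set_wrprotect(src_mm, addr, src_pte);
 *	...
 *	flush_tlb_mm(src_mm);
 */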
#define flush_tlb()				do { } while (0)
#define flush_tlb_all()				do { } while (0)
#define flush_tlb_page(vma, addr)		do { } while (0)

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__tlb_flush_mm_lazy(mm);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__tlb_flush_mm_lazy(vma->vm_mm);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	__tlb_flush_kernel();
}

#endif /* _S390_TLBFLUSH_H */