/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/cpufeature.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/machine.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
	asm volatile("ptlb" : : : "memory");
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
	unsigned long opt;

	opt = IDTE_PTOA;
	if (machine_has_tlb_guest())
		opt |= IDTE_GUEST_ASCE;
	/* Global TLB flush for the mm */
	asm volatile("idte 0,%1,%0" : : "a" (opt), "a" (asce) : "cc");
}

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
	unsigned long dummy = 0;

	cspg(&dummy, 0, 0);
}

/*
 * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
 * this implies multiple ASCEs!).
 */
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	unsigned long gmap_asce;

	preempt_disable();
	atomic_inc(&mm->context.flush_count);
	/* Reset TLB flush mask */
	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
	barrier();
	gmap_asce = READ_ONCE(mm->context.gmap_asce);
	if (gmap_asce != -1UL) {
		if (gmap_asce)
			__tlb_flush_idte(gmap_asce);
		__tlb_flush_idte(mm->context.asce);
	} else {
		/* Global TLB flush (gmap_asce == -1UL: ASCEs unknown) */
		__tlb_flush_global();
	}
	atomic_dec(&mm->context.flush_count);
	preempt_enable();
}

/*
 * Flush TLB entries for the kernel address space (init_mm) on all CPUs.
 */
static inline void __tlb_flush_kernel(void)
{
	__tlb_flush_idte(init_mm.context.asce);
}

/*
 * Deferred (lazy) flush: flush only if a flush request was recorded
 * in mm->context.flush_mm.
 */
static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
	spin_lock(&mm->context.lock);
	if (mm->context.flush_mm) {
		mm->context.flush_mm = 0;
		__tlb_flush_mm(mm);
	}
	spin_unlock(&mm->context.lock);
}

/*
 * TLB flushing:
 *  flush_tlb_all() - flushes all processes' TLBs
 *  flush_tlb_mm(mm) - flushes the TLBs of the specified mm context
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation, and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush.
 */
#define flush_tlb_all()				do { } while (0)
#define flush_tlb_page(vma, addr)		do { } while (0)

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__tlb_flush_mm_lazy(mm);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__tlb_flush_mm_lazy(vma->vm_mm);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	__tlb_flush_kernel();
}

#endif /* _S390_TLBFLUSH_H */
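
/*
 * Illustrative sketch (not part of the header, compiled out via #if 0):
 * how the deferred-flush protocol described above is meant to be driven.
 * A page-table helper records a pending flush in mm->context.flush_mm
 * instead of flushing after every PTE update; the flush_tlb_mm() call at
 * the end of the batched update then performs one combined flush through
 * __tlb_flush_mm_lazy(). The function name below is hypothetical and the
 * body is simplified: the real helpers only defer the flush when the
 * flush_count/cpu_attach_mask bookkeeping shows it is safe to do so.
 */
#if 0
static inline void example_batched_update(struct mm_struct *mm)
{
	/* ... invalidate a batch of PTEs without flushing each one ... */
	mm->context.flush_mm = 1;	/* record that a flush is pending */
	/* ... more PTE updates in the same batch ... */
	flush_tlb_mm(mm);		/* one flush covers the whole batch */
}
#endif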