#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/special_insns.h>

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

/* Reloading CR3 flushes all non-global TLB entries. */
static inline void __native_flush_tlb(void)
{
	native_write_cr3(native_read_cr3());
}

static inline void __native_flush_tlb_global(void)
{
	unsigned long flags;
	unsigned long cr4;

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts. (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	cr4 = native_read_cr4();
	/* clear PGE: toggling CR4.PGE flushes everything, global entries included */
	native_write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);

	raw_local_irq_restore(flags);
}

/* Invalidate the TLB entry for a single page. */
static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

static inline void __flush_tlb_all(void)
{
	/*
	 * A plain CR3 reload does not flush global pages; PGE-capable
	 * CPUs need the CR4.PGE toggle to really flush everything.
	 */
	if (cpu_has_pge)
		__flush_tlb_global();
	else
		__flush_tlb();
}

static inline void __flush_tlb_one(unsigned long addr)
{
	/* Fall back to a full flush on ancient CPUs without INVLPG. */
	if (cpu_has_invlpg)
		__flush_tlb_single(addr);
	else
		__flush_tlb();
}

/* "end" value meaning: flush the whole address space */
#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other CPUs
 *
 * ...but the i386 has somewhat limited TLB flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
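
/*
 * Usage sketch (illustrative, not part of this header; addr, ptep and
 * pte below are hypothetical): a caller that has just rewritten a
 * single PTE flushes the stale translation for that one address:
 *
 *	set_pte_at(vma->vm_mm, addr, ptep, pte);
 *	flush_tlb_page(vma, addr);
 *
 * On UP this is a local INVLPG (or a full CR3 reload on pre-i486
 * CPUs); on SMP, flush_tlb_page() also IPIs the other CPUs running
 * this mm, via flush_tlb_others().
 */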

#ifndef CONFIG_SMP

#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb_all()
#define local_flush_tlb() __flush_tlb()

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb();
}

static inline void flush_tlb_mm_range(struct mm_struct *mm,
		unsigned long start, unsigned long end, unsigned long vmflag)
{
	if (mm == current->active_mm)
		__flush_tlb();
}

static inline void native_flush_tlb_others(const struct cpumask *cpumask,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
}

static inline void reset_lazy_tlbstate(void)
{
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}

#else /* SMP */

#include <asm/smp.h>

#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)	\
		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

#define flush_tlb()	flush_tlb_current_task()

void native_flush_tlb_others(const struct cpumask *cpumask,
			     struct mm_struct *mm,
			     unsigned long start, unsigned long end);

#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

struct tlb_state {
	struct mm_struct *active_mm;
	int state;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

static inline void reset_lazy_tlbstate(void)
{
	this_cpu_write(cpu_tlbstate.state, 0);
	this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
}

#endif /* SMP */

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, mm, start, end)	\
	native_flush_tlb_others(mask, mm, start, end)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */
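
/*
 * Usage sketch (illustrative, appended for documentation; the caller
 * shown is hypothetical): kernel-address flushes take raw addresses
 * rather than a vma, e.g. after tearing down a vmalloc-space mapping:
 *
 *	unmap_kernel_range_noflush(addr, size);
 *	flush_tlb_kernel_range(addr, addr + size);
 */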