#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
#include <linux/debugfs.h>

/*
 * Smarter SMP flushing macros.
 *        c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway.)
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 * More scalable flush, from Andi Kleen
 *
 * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

struct flush_tlb_info {
        struct mm_struct *flush_mm;
        unsigned long flush_start;
        unsigned long flush_end;
};

/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
        struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);

        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
                BUG();
        if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
                cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
                load_cr3(swapper_pg_dir);
                /*
                 * This gets called in the idle path where RCU
                 * functions differently.  Tracing normally
                 * uses RCU, so we have to call the tracepoint
                 * specially here.
                 */
                trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
        }
}
EXPORT_SYMBOL_GPL(leave_mm);

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) set cpu_tlbstate to TLBSTATE_OK
 *        Now the tlb flush IPI handler flush_tlb_func won't call leave_mm
 *        if cpu0 was in lazy tlb mode.
 * 1a2) update cpu active_mm
 *        Now cpu0 accepts tlb flushes for the new mm.
 * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
 *        Now the other cpus will send tlb flush ipis.
 * 1a4) change cr3.
 * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *        Stop ipi delivery for the old mm.  This is not synchronized with
 *        the other cpus, but flush_tlb_func ignores flush ipis for the
 *        wrong mm, so in the worst case we perform a superfluous tlb flush.
 * 1b) thread switch without mm change
 *        cpu active_mm is correct, cpu0 already handles flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *        Atomically set the bit [other cpus will start sending flush ipis],
 *        and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, so there are
 * no write/read ordering problems.
 */

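/*
 * For reference only, a rough sketch of how switch_mm() (in
 * asm/mmu_context.h, not this file) implements the 1a) sequence above,
 * where prev/next are the outgoing/incoming mm and cpu is
 * smp_processor_id(); the exact code differs between kernel versions:
 *
 *        1a1:  this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 *        1a2:  this_cpu_write(cpu_tlbstate.active_mm, next);
 *        1a3:  cpumask_set_cpu(cpu, mm_cpumask(next));
 *        1a4:  load_cr3(next->pgd);
 *        1a5:  cpumask_clear_cpu(cpu, mm_cpumask(prev));
 */
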
/*
 * TLB flush function:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
static void flush_tlb_func(void *info)
{
        struct flush_tlb_info *f = info;

        inc_irq_stat(irq_tlb_count);

        if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
                return;
        if (!f->flush_end)
                f->flush_end = f->flush_start + PAGE_SIZE;

        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
                if (f->flush_end == TLB_FLUSH_ALL) {
                        local_flush_tlb();
                        trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
                } else {
                        unsigned long addr;
                        unsigned long nr_pages =
                                (f->flush_end - f->flush_start) / PAGE_SIZE;

                        addr = f->flush_start;
                        while (addr < f->flush_end) {
                                __flush_tlb_single(addr);
                                addr += PAGE_SIZE;
                        }
                        trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);
                }
        } else
                leave_mm(smp_processor_id());
}

void native_flush_tlb_others(const struct cpumask *cpumask,
                             struct mm_struct *mm, unsigned long start,
                             unsigned long end)
{
        struct flush_tlb_info info;

        info.flush_mm = mm;
        info.flush_start = start;
        info.flush_end = end;

        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
        trace_tlb_flush(TLB_REMOTE_SEND_IPI, end - start);
        if (is_uv_system()) {
                unsigned int cpu;

                cpu = smp_processor_id();
                cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
                if (cpumask)
                        smp_call_function_many(cpumask, flush_tlb_func,
                                               &info, 1);
                return;
        }
        smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}

void flush_tlb_current_task(void)
{
        struct mm_struct *mm = current->mm;

        preempt_disable();

        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);

        /* This is an implicit full barrier that synchronizes with switch_mm. */
        local_flush_tlb();

        trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
        preempt_enable();
}

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,000 ns.
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

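/*
 * Worked example of the trade-off above (numbers are rough): unmapping
 * 32 4k pages stays under the default ceiling and costs roughly
 * 32 * 100 ns of INVLPG work, while unmapping 64 pages exceeds it and
 * falls back to a single full TLB flush (plus the refill misses that
 * follow) instead.
 */
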
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                        unsigned long end, unsigned long vmflag)
{
        unsigned long addr;
        /* do a global flush by default */
        unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

        preempt_disable();
        if (current->active_mm != mm) {
                /* Synchronize with switch_mm. */
                smp_mb();

                goto out;
        }

        if (!current->mm) {
                leave_mm(smp_processor_id());

                /* Synchronize with switch_mm. */
                smp_mb();

                goto out;
        }

        if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
                base_pages_to_flush = (end - start) >> PAGE_SHIFT;

        /*
         * Both branches below are implicit full barriers (MOV to CR or
         * INVLPG) that synchronize with switch_mm.
         */
        if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
                base_pages_to_flush = TLB_FLUSH_ALL;
                count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
                local_flush_tlb();
        } else {
                /* flush the range one page at a time with 'invlpg' */
                for (addr = start; addr < end; addr += PAGE_SIZE) {
                        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
                        __flush_tlb_single(addr);
                }
        }
        trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
out:
        if (base_pages_to_flush == TLB_FLUSH_ALL) {
                start = 0UL;
                end = TLB_FLUSH_ALL;
        }
        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, start, end);
        preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();

        if (current->active_mm == mm) {
                if (current->mm) {
                        /*
                         * Implicit full barrier (INVLPG) that synchronizes
                         * with switch_mm.
                         */
                        __flush_tlb_one(start);
                } else {
                        leave_mm(smp_processor_id());

                        /* Synchronize with switch_mm. */
                        smp_mb();
                }
        }

        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

        preempt_enable();
}

static void do_flush_tlb_all(void *info)
{
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
        __flush_tlb_all();
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
                leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
        on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
        struct flush_tlb_info *f = info;
        unsigned long addr;

        /* flush the range one page at a time with 'invlpg' */
        for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
                __flush_tlb_single(addr);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        /* Balance with user space task flushes; be a bit conservative. */
        if (end == TLB_FLUSH_ALL ||
            (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
                on_each_cpu(do_flush_tlb_all, NULL, 1);
        } else {
                struct flush_tlb_info info;

                info.flush_start = start;
                info.flush_end = end;
                on_each_cpu(do_kernel_range_flush, &info, 1);
        }
}

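/*
 * debugfs knob for tuning tlb_single_page_flush_ceiling at runtime.
 * With debugfs mounted in the usual place this shows up as
 * /sys/kernel/debug/x86/tlb_single_page_flush_ceiling (arch_debugfs_dir
 * is the "x86" directory under the debugfs mount).  Writing 0 makes
 * flush_tlb_mm_range() always take the full-flush path; a very large
 * value makes it always use per-page INVLPGs.
 */
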
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
                                  size_t count, loff_t *ppos)
{
        char buf[32];
        unsigned int len;

        len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling);
        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
                const char __user *user_buf, size_t count, loff_t *ppos)
{
        char buf[32];
        ssize_t len;
        int ceiling;

        len = min(count, sizeof(buf) - 1);
        if (copy_from_user(buf, user_buf, len))
                return -EFAULT;

        buf[len] = '\0';
        if (kstrtoint(buf, 0, &ceiling))
                return -EINVAL;

        if (ceiling < 0)
                return -EINVAL;

        tlb_single_page_flush_ceiling = ceiling;
        return count;
}

static const struct file_operations fops_tlbflush = {
        .read = tlbflush_read_file,
        .write = tlbflush_write_file,
        .llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
        debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
                            arch_debugfs_dir, NULL, &fops_tlbflush);
        return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);
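
/*
 * Example of reading and adjusting the knob above from user space
 * (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *        # cat /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *        33
 *        # echo 64 > /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *
 * See Documentation/x86/tlb.txt for how to evaluate a different ceiling.
 */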