/* SPDX-License-Identifier: GPL-2.0-or-later */
/* include/asm-generic/tlb.h
 *
 *	Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * Blindly accessing user memory from NMI context can be dangerous
 * if we're in the middle of switching the current user task or switching
 * the loaded mm.
 */
#ifndef nmi_uaccess_okay
# define nmi_uaccess_okay() true
#endif

#ifdef CONFIG_MMU

/*
 * Generic MMU-gather implementation.
 *
 * The mmu_gather data structure is used by the mm code to implement the
 * correct and efficient ordering of freeing pages and TLB invalidations.
 *
 * This correct ordering is:
 *
 *  1) unhook page
 *  2) TLB invalidate page
 *  3) free page
 *
 * That is, we must never free a page before we have ensured there are no live
 * translations left to it. Otherwise it might be possible to observe (or
 * worse, change) the page content after it has been reused.
 *
 * The mmu_gather API consists of:
 *
 *  - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather
 *
 *    Finish in particular will issue a (final) TLB invalidate and free
 *    all (remaining) queued pages.
 *
 *  - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
 *
 *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
 *    there are large holes between the VMAs.
 *
 *  - tlb_remove_table()
 *
 *    tlb_remove_table() is the basic primitive to free page-table directories
 *    (__p*_free_tlb()).  In its most primitive form it is an alias for
 *    tlb_remove_page() below, for when page directories are pages and have no
 *    additional constraints.
 *
 *    See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
 *
 *  - tlb_remove_page() / __tlb_remove_page()
 *  - tlb_remove_page_size() / __tlb_remove_page_size()
 *
 *    __tlb_remove_page_size() is the basic primitive that queues a page for
 *    freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
 *    boolean indicating if the queue is (now) full and a call to
 *    tlb_flush_mmu() is required.
 *
 *    tlb_remove_page() and tlb_remove_page_size() imply the call to
 *    tlb_flush_mmu() when required and have no return value.
 *
 *  - tlb_change_page_size()
 *
 *    call before __tlb_remove_page*() to set the current page-size; implies a
 *    possible tlb_flush_mmu() call.
 *
 *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
 *
 *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
 *                              related state, like the range)
 *
 *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
 *                      whatever pages are still batched.
 *
 *  - mmu_gather::fullmm
 *
 *    A flag set by tlb_gather_mmu() to indicate we're going to free
 *    the entire mm; this allows a number of optimizations.
 *
 *    - We can ignore tlb_{start,end}_vma(); because we don't
 *      care about ranges. Everything will be shot down.
 *
 *    - (RISC) architectures that use ASIDs can cycle to a new ASID
 *      and delay the invalidation until ASID space runs out.
 *
 *  - mmu_gather::need_flush_all
 *
 *    A flag that can be set by the arch code if it wants to force
 *    flush the entire TLB irrespective of the range. For instance
 *    x86-PAE needs this when changing top-level entries.
 *
 * And allows the architecture to provide and implement tlb_flush():
 *
 * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
 * use of:
 *
 *  - mmu_gather::start / mmu_gather::end
 *
 *    which provide the range that needs to be flushed to cover the pages to
 *    be freed.
 *
 *  - mmu_gather::freed_tables
 *
 *    set when we freed page table pages
 *
 *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
 *
 *    returns the smallest TLB entry size unmapped in this range.
 *
 * If an architecture does not provide tlb_flush() a default implementation
 * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
 * specified, in which case we'll default to flush_tlb_mm().
 *
 * Additionally there are a few opt-in features:
 *
 *  MMU_GATHER_PAGE_SIZE
 *
 *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
 *  changes the size and provides mmu_gather::page_size to tlb_flush().
 *
 *  This might be useful if your architecture has size-specific TLB
 *  invalidation instructions.
 *
 *  MMU_GATHER_TABLE_FREE
 *
 *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
 *  for page directories (__p*_free_tlb()).
 *
 *  Useful if your architecture has non-page page directories.
 *
 *  When used, an architecture is expected to provide __tlb_remove_table()
 *  which does the actual freeing of these pages.
 *
 *  MMU_GATHER_RCU_TABLE_FREE
 *
 *  Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
 *  comment below).
 *
 *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
 *  and therefore doesn't naturally serialize with software page-table walkers.
 *
 *  MMU_GATHER_NO_RANGE
 *
 *  Use this if your architecture lacks an efficient flush_tlb_range().
 *
 *  MMU_GATHER_NO_GATHER
 *
 *  If the option is set, the mmu_gather will not track individual pages for
 *  delayed page free anymore. A platform that enables the option needs to
 *  provide its own implementation of the __tlb_remove_page_size() function to
 *  free pages.
 *
 *  This is useful if your architecture already flushes TLB entries in the
 *  various ptep_get_and_clear() functions.
 */
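
/*
 * To illustrate the ordering above, a caller unmapping part of an address
 * space is expected to drive the API roughly as follows. This is a hedged
 * sketch, not code lifted from mm/memory.c: the exact tlb_gather_mmu() /
 * tlb_finish_mmu() prototypes differ between kernel versions, and
 * for_each_pte_in_range() is a placeholder for the real page-table walk.
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, vma->vm_mm, start, end);
 *	tlb_start_vma(&tlb, vma);
 *	for_each_pte_in_range(ptep, addr, start, end) {
 *		pte = ptep_get_and_clear(vma->vm_mm, addr, ptep); // 1) unhook page
 *		tlb_remove_tlb_entry(&tlb, ptep, addr);           // track the range
 *		tlb_remove_page(&tlb, pte_page(pte));             // queue for 3) free
 *	}
 *	tlb_end_vma(&tlb, vma);           // 2) TLB invalidate (default tlb_end_vma())
 *	tlb_finish_mmu(&tlb, start, end); // final invalidate, then 3) free queued pages
 */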

#ifdef CONFIG_MMU_GATHER_TABLE_FREE

struct mmu_table_batch {
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
        struct rcu_head         rcu;
#endif
        unsigned int            nr;
        void                    *tables[0];
};

#define MAX_TABLE_BATCH         \
        ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

/*
 * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based
 * page directories and we can use the normal page batching to free them.
 */
#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */
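
/*
 * For example, an architecture whose PTE tables are ordinary pages might
 * route its table frees through tlb_remove_table() roughly like this. This
 * is an illustrative sketch only; a real __pte_free_tlb() also has to undo
 * whatever constructor state the architecture set up for the table page:
 *
 *	#define __pte_free_tlb(tlb, pte, address)	\
 *	do {						\
 *		pgtable_pte_page_dtor(pte);		\
 *		tlb_remove_table((tlb), (pte));		\
 *	} while (0)
 *
 * With MMU_GATHER_TABLE_FREE the table ends up queued in a struct
 * mmu_table_batch and is eventually handed to __tlb_remove_table(); without
 * it, the very same call simply degenerates to tlb_remove_page().
 */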

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
/*
 * This allows an architecture that does not use the linux page-tables for
 * hardware to skip the TLBI when freeing page tables.
 */
#ifndef tlb_needs_table_invalidate
#define tlb_needs_table_invalidate() (true)
#endif

#else

#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */


#ifndef CONFIG_MMU_GATHER_NO_GATHER
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE       8

struct mmu_gather_batch {
        struct mmu_gather_batch *next;
        unsigned int            nr;
        unsigned int            max;
        struct page             *pages[0];
};

#define MAX_GATHER_BATCH        \
        ((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce a risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT  (10000UL/MAX_GATHER_BATCH)

extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
                                   int page_size);
#endif

/*
 * struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
        struct mm_struct        *mm;

#ifdef CONFIG_MMU_GATHER_TABLE_FREE
        struct mmu_table_batch  *batch;
#endif

        unsigned long           start;
        unsigned long           end;
        /*
         * we are in the middle of an operation to clear
         * a full mm and can make some optimizations
         */
        unsigned int            fullmm : 1;

        /*
         * we have performed an operation which
         * requires a complete flush of the tlb
         */
        unsigned int            need_flush_all : 1;

        /*
         * we have removed page directories
         */
        unsigned int            freed_tables : 1;

        /*
         * at which levels have we cleared entries?
         */
        unsigned int            cleared_ptes : 1;
        unsigned int            cleared_pmds : 1;
        unsigned int            cleared_puds : 1;
        unsigned int            cleared_p4ds : 1;

        /*
         * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
         */
        unsigned int            vma_exec : 1;
        unsigned int            vma_huge : 1;

        unsigned int            batch_count;

#ifndef CONFIG_MMU_GATHER_NO_GATHER
        struct mmu_gather_batch *active;
        struct mmu_gather_batch local;
        struct page             *__pages[MMU_GATHER_BUNDLE];

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
        unsigned int page_size;
#endif
#endif
};

void tlb_flush_mmu(struct mmu_gather *tlb);

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
                                      unsigned long address,
                                      unsigned int range_size)
{
        tlb->start = min(tlb->start, address);
        tlb->end = max(tlb->end, address + range_size);
}

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
        if (tlb->fullmm) {
                tlb->start = tlb->end = ~0;
        } else {
                tlb->start = TASK_SIZE;
                tlb->end = 0;
        }
        tlb->freed_tables = 0;
        tlb->cleared_ptes = 0;
        tlb->cleared_pmds = 0;
        tlb->cleared_puds = 0;
        tlb->cleared_p4ds = 0;
        /*
         * Do not reset mmu_gather::vma_* fields here, we do not
         * call into tlb_start_vma() again to set them if there is an
         * intermediate flush.
         */
}
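
/*
 * To make the bookkeeping above concrete: an architecture that provides its
 * own tlb_flush() typically keys off these fields. The sketch below is a
 * hypothetical example, loosely modelled on what range-flushing
 * architectures do; arch_flush_tlb_range() stands in for whatever
 * invalidation primitive the architecture actually has, and
 * tlb_get_unmap_size() is defined further down in this file.
 *
 *	static inline void tlb_flush(struct mmu_gather *tlb)
 *	{
 *		if (tlb->fullmm || tlb->need_flush_all) {
 *			flush_tlb_mm(tlb->mm);
 *			return;
 *		}
 *
 *		if (tlb->end) {
 *			// Walk-cache entries must also go when tables were freed.
 *			bool last_level = !tlb->freed_tables;
 *			unsigned long stride = tlb_get_unmap_size(tlb);
 *
 *			arch_flush_tlb_range(tlb->mm, tlb->start, tlb->end,
 *					     stride, last_level);
 *		}
 *	}
 */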

#ifdef CONFIG_MMU_GATHER_NO_RANGE

#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
#endif

/*
 * When an architecture does not have efficient means of range flushing TLBs
 * there is no point in doing intermediate flushes on tlb_end_vma() to keep the
 * range small. We equally don't have to worry about page granularity or other
 * things.
 *
 * All we need to do is issue a full flush for any !0 range.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
        if (tlb->end)
                flush_tlb_mm(tlb->mm);
}

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#define tlb_end_vma tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#else /* CONFIG_MMU_GATHER_NO_RANGE */

#ifndef tlb_flush

#if defined(tlb_start_vma) || defined(tlb_end_vma)
#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
#endif

/*
 * When an architecture does not provide its own tlb_flush() implementation
 * but does have a reasonably efficient flush_tlb_range() implementation
 * use that.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
        if (tlb->fullmm || tlb->need_flush_all) {
                flush_tlb_mm(tlb->mm);
        } else if (tlb->end) {
                struct vm_area_struct vma = {
                        .vm_mm = tlb->mm,
                        .vm_flags = (tlb->vma_exec ? VM_EXEC : 0) |
                                    (tlb->vma_huge ? VM_HUGETLB : 0),
                };

                flush_tlb_range(&vma, tlb->start, tlb->end);
        }
}

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        /*
         * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
         * mips-4k) flush only large pages.
         *
         * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
         * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
         * range.
         *
         * We rely on tlb_end_vma() to issue a flush, such that when we reset
         * these values the batch is empty.
         */
        tlb->vma_huge = !!(vma->vm_flags & VM_HUGETLB);
        tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
}

#else

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#endif

#endif /* CONFIG_MMU_GATHER_NO_RANGE */

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
        /*
         * Anything calling __tlb_adjust_range() also sets at least one of
         * these bits.
         */
        if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
              tlb->cleared_puds || tlb->cleared_p4ds))
                return;

        tlb_flush(tlb);
        mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
        __tlb_reset_range(tlb);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
                                        struct page *page, int page_size)
{
        if (__tlb_remove_page_size(tlb, page, page_size))
                tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

/* tlb_remove_page
 *      Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *      required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}
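
/*
 * The split between the __ and non-__ page removal variants matters when the
 * caller wants to drop a lock before the (possibly expensive) flush. A hedged
 * sketch of that pattern, with example_ptl purely hypothetical:
 *
 *	spin_lock(example_ptl);
 *	...
 *	if (__tlb_remove_page(tlb, page)) {
 *		// Batch is full: flush and free outside the lock.
 *		spin_unlock(example_ptl);
 *		tlb_flush_mmu(tlb);
 *		spin_lock(example_ptl);
 *	}
 *	...
 *	spin_unlock(example_ptl);
 *
 * Callers that don't need such control simply use tlb_remove_page(), which
 * flushes internally once the batch fills up.
 */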

static inline void tlb_change_page_size(struct mmu_gather *tlb,
                                        unsigned int page_size)
{
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
        if (tlb->page_size && tlb->page_size != page_size) {
                if (!tlb->fullmm && !tlb->need_flush_all)
                        tlb_flush_mmu(tlb);
        }

        tlb->page_size = page_size;
#endif
}

static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
        if (tlb->cleared_ptes)
                return PAGE_SHIFT;
        if (tlb->cleared_pmds)
                return PMD_SHIFT;
        if (tlb->cleared_puds)
                return PUD_SHIFT;
        if (tlb->cleared_p4ds)
                return P4D_SHIFT;

        return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
        return 1UL << tlb_get_unmap_shift(tlb);
}

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush. When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
#ifndef tlb_start_vma
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (tlb->fullmm)
                return;

        tlb_update_vma_flags(tlb, vma);
        flush_cache_range(vma, vma->vm_start, vma->vm_end);
}
#endif

#ifndef tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (tlb->fullmm)
                return;

        /*
         * Do a TLB flush and reset the range at VMA boundaries; this avoids
         * the ranges growing with the unused space between consecutive VMAs,
         * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
         * this.
         */
        tlb_flush_mmu_tlbonly(tlb);
}
#endif

#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate. This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)                \
        do {                                                    \
                __tlb_adjust_range(tlb, address, PAGE_SIZE);    \
                tlb->cleared_ptes = 1;                          \
                __tlb_remove_tlb_entry(tlb, ptep, address);     \
        } while (0)

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)        \
        do {                                                    \
                unsigned long _sz = huge_page_size(h);          \
                __tlb_adjust_range(tlb, address, _sz);          \
                if (_sz == PMD_SIZE)                            \
                        tlb->cleared_pmds = 1;                  \
                else if (_sz == PUD_SIZE)                       \
                        tlb->cleared_puds = 1;                  \
                __tlb_remove_tlb_entry(tlb, ptep, address);     \
        } while (0)

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)                    \
        do {                                                            \
                __tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE);       \
                tlb->cleared_pmds = 1;                                  \
                __tlb_remove_pmd_tlb_entry(tlb, pmdp, address);         \
        } while (0)

/**
 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)                    \
        do {                                                            \
                __tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE);       \
                tlb->cleared_puds = 1;                                  \
                __tlb_remove_pud_tlb_entry(tlb, pudp, address);         \
        } while (0)
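
/*
 * For instance, zapping a PMD-mapped huge page combines the entry and page
 * primitives along these lines. This is only a sketch of what the THP code
 * does; the surrounding locking is omitted and the exact
 * pmdp_huge_get_and_clear_full() prototype varies between kernel versions:
 *
 *	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
 *	orig_pmd = pmdp_huge_get_and_clear_full(..., addr, pmdp, tlb->fullmm);
 *	tlb_remove_pmd_tlb_entry(tlb, pmdp, addr);
 *	if (pmd_present(orig_pmd) && !is_huge_zero_pmd(orig_pmd))
 *		tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
 */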

/*
 * For things like page table caches (i.e. caching addresses "inside" the
 * page tables, like x86 does), for legacy reasons, flushing an
 * individual page had better flush the page table caches behind it. This
 * is definitely how x86 works, for example. And if you have an
 * architected non-legacy page table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally
 * explicit flushing for that, likely *separate* from a regular TLB entry
 * flush, and thus you'd need more than just some range expansion.
 *
 * So if we ever find an architecture
 * that would want something that odd, I think it is up to that
 * architecture to do its own odd thing, not cause pain for others
 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
 *
 * For now w.r.t. the page table cache, mark the range_size as PAGE_SIZE
 */

#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)                        \
        do {                                                    \
                __tlb_adjust_range(tlb, address, PAGE_SIZE);    \
                tlb->freed_tables = 1;                          \
                tlb->cleared_pmds = 1;                          \
                __pte_free_tlb(tlb, ptep, address);             \
        } while (0)
#endif

#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address)                        \
        do {                                                    \
                __tlb_adjust_range(tlb, address, PAGE_SIZE);    \
                tlb->freed_tables = 1;                          \
                tlb->cleared_puds = 1;                          \
                __pmd_free_tlb(tlb, pmdp, address);             \
        } while (0)
#endif

#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address)                        \
        do {                                                    \
                __tlb_adjust_range(tlb, address, PAGE_SIZE);    \
                tlb->freed_tables = 1;                          \
                tlb->cleared_p4ds = 1;                          \
                __pud_free_tlb(tlb, pudp, address);             \
        } while (0)
#endif

#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address)                        \
        do {                                                    \
                __tlb_adjust_range(tlb, address, PAGE_SIZE);    \
                tlb->freed_tables = 1;                          \
                __p4d_free_tlb(tlb, pudp, address);             \
        } while (0)
#endif

#endif /* CONFIG_MMU */

#endif /* _ASM_GENERIC__TLB_H */