1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /* include/asm-generic/tlb.h
4 * Generic TLB shootdown code
32 * Generic MMU-gather implementation.
34 * The mmu_gather data structure is used by the mm code to implement the
35 * correct and efficient ordering of freeing pages and TLB invalidations.
40 * 2) TLB invalidate page
49 * - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_finish_mmu()
53 * Finish in particular will issue a (final) TLB invalidate and free
54 * all (remaining) queued pages.
56 * - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
61 * - tlb_remove_table()
63 * tlb_remove_table() is the basic primitive to free page-table directories
70 * - tlb_remove_page() / __tlb_remove_page()
71 * - tlb_remove_page_size() / __tlb_remove_page_size()
72 * - __tlb_remove_folio_pages()
87 * - tlb_change_page_size()
89 * call before __tlb_remove_page*() to set the current page-size; implies a
90 * possible tlb_flush_mmu() call.
92 * - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
94 * tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
95 * related state like the range)
97 * tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
98 * whatever pages are still batched.
100 * - mmu_gather::fullmm
102 * A flag set by tlb_gather_mmu_fullmm() to indicate we're going to free
103 * the entire mm; this allows a number of optimizations.
105 * - We can ignore tlb_{start,end}_vma(); because we don't
106 * care about ranges. Everything will be shot down.
108 * - (RISC) architectures that use ASIDs can cycle to a new ASID
109 * and delay the invalidation until ASID space runs out.
111 * - mmu_gather::need_flush_all
113 * A flag that can be set by the arch code if it wants to force
114 * flush the entire TLB irrespective of the range. For instance
115 * x86-PAE needs this when changing top-level entries.
122 * - mmu_gather::start / mmu_gather::end
124 * which provides the range that needs to be flushed to cover the pages to
125 * be freed.
127 * - mmu_gather::freed_tables
129 * set when we freed page table pages
131 * - tlb_get_unmap_shift() / tlb_get_unmap_size()
133 * returns the smallest TLB entry size unmapped in this range.
139 * Additionally there are a few opt-in features:
141 * MMU_GATHER_PAGE_SIZE
143 * This ensures we call tlb_flush() every time tlb_change_page_size() actually
144 * changes the size and provides mmu_gather::page_size to tlb_flush().
146 * This might be useful if your architecture has size specific TLB
147 * invalidation instructions.
149 * MMU_GATHER_TABLE_FREE
151 * This provides tlb_remove_table(), to be used instead of tlb_remove_page()
152 * for page directories (__p*_free_tlb()).
154 * Useful if your architecture has non-page page directories.
160 * MMU_GATHER_RCU_TABLE_FREE
162 * Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
163 * comment below).
165 * Useful if your architecture doesn't use IPIs for remote TLB invalidates
166 * and therefore doesn't naturally serialize with software page-table walkers.
194 * This is useful if your architecture already flushes TLB entries in the
195 * __pte_free_tlb() functions (this is mostly true for simple TLB
196 * architectures).
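Putting the pieces above together: a condensed, illustrative sketch of how a caller drives the basic API. The helper name example_zap_one() is hypothetical, and real unmap code walks whole ranges under the page-table lock and handles rmap, dirty bits and accounting, all of which is omitted here:

#include <linux/mm.h>
#include <asm/tlb.h>

static void example_zap_one(struct vm_area_struct *vma, pte_t *ptep,
			    unsigned long addr)
{
	struct mmu_gather tlb;
	struct page *page;
	pte_t pteval;

	tlb_gather_mmu(&tlb, vma->vm_mm);	/* start a gather for this mm */
	tlb_start_vma(&tlb, vma);		/* per-VMA setup (flags, cache flush) */

	/* 1) unhook the page (assumes a present, "normal" mapping) */
	pteval = ptep_get_and_clear(vma->vm_mm, addr, ptep);
	tlb_remove_tlb_entry(&tlb, ptep, addr); /* remember it for 2) TLB invalidate */

	/* queue the page so that 3) free happens only after the invalidate */
	page = vm_normal_page(vma, addr, pteval);
	if (page && __tlb_remove_page(&tlb, page, false))
		tlb_flush_mmu(&tlb);		/* batch full: flush TLB, then free */

	tlb_end_vma(&tlb, vma);			/* may flush at the VMA boundary */
	tlb_finish_mmu(&tlb);			/* final TLB invalidate + free queued pages */
}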
209 ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
220 extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
224 static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page);
229 static inline void tlb_remove_table(struct mmu_gather *tlb, void *table)
234 tlb_remove_page(tlb, page);
240 * This allows an architecture that does not use the linux page-tables for
241 * hardware to skip the TLBI when freeing page tables.
262 * If we can't allocate a page to make a big batch of page pointers
263 * to work on, then just handle a few from the on-stack structure.
275 ((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
278 * Limit the maximum number of mmu_gather batches to reduce a risk of soft
279 * lockups for non-preemptible kernels on huge machines when a lot of memory
280 * is zapped during unmapping.
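For a sense of scale: on a 64-bit configuration with 4 KiB pages, struct mmu_gather_batch is 16 bytes (a next pointer plus two unsigned ints), so MAX_GATHER_BATCH works out to (4096 - 16) / 8 = 510 page pointers per batch, and the batch-count cap above limits a single gather to roughly 10,000 queued pages before the caller is made to flush. These figures are illustrative; the exact values depend on PAGE_SIZE and the struct layout.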
285 extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
287 bool __tlb_remove_folio_pages(struct mmu_gather *tlb, struct page *page,
292 * This both sets 'delayed_rmap', and returns true. It would be an inline
293 * function, except we define it before the 'struct mmu_gather'.
295 #define tlb_delay_rmap(tlb) (((tlb)->delayed_rmap = 1), true)
296 extern void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma);
302 * We have a no-op version of the rmap removal that doesn't
309 #define tlb_delay_rmap(tlb) (false)
310 static inline void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
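A sketch of the intended calling pattern, loosely modelled on the pte-zapping path (the helper is hypothetical; locking and the handling of a full page batch are elided):

static void example_remove_mapped_page(struct mmu_gather *tlb,
					struct vm_area_struct *vma,
					pte_t *ptep, unsigned long addr,
					struct page *page)
{
	/* Ask for the rmap teardown to happen only after the TLB flush. */
	bool delay_rmap = tlb_delay_rmap(tlb);

	tlb_remove_tlb_entry(tlb, ptep, addr);
	__tlb_remove_page(tlb, page, delay_rmap); /* a full batch would need a flush here */

	/*
	 * Before the page-table lock is dropped: invalidate the TLB, then
	 * let the gather perform the rmap removals it was asked to delay.
	 */
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_rmaps(tlb, vma);
}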
333 * we have performed an operation which
334 * requires a complete flush of the tlb
376 void tlb_flush_mmu(struct mmu_gather *tlb);
378 static inline void __tlb_adjust_range(struct mmu_gather *tlb,
382 tlb->start = min(tlb->start, address);
383 tlb->end = max(tlb->end, address + range_size);
386 static inline void __tlb_reset_range(struct mmu_gather *tlb)
388 if (tlb->fullmm) {
389 tlb->start = tlb->end = ~0;
390 } else {
391 tlb->start = TASK_SIZE;
392 tlb->end = 0;
393 }
394 tlb->freed_tables = 0;
395 tlb->cleared_ptes = 0;
396 tlb->cleared_pmds = 0;
397 tlb->cleared_puds = 0;
398 tlb->cleared_p4ds = 0;
420 static inline void tlb_flush(struct mmu_gather *tlb)
422 if (tlb->end)
423 flush_tlb_mm(tlb->mm);
434 static inline void tlb_flush(struct mmu_gather *tlb)
436 if (tlb->fullmm || tlb->need_flush_all) {
437 flush_tlb_mm(tlb->mm);
438 } else if (tlb->end) {
439 struct vm_area_struct vma = {
440 .vm_mm = tlb->mm,
441 .vm_flags = (tlb->vma_exec ? VM_EXEC : 0) |
442 (tlb->vma_huge ? VM_HUGETLB : 0),
443 };
445 flush_tlb_range(&vma, tlb->start, tlb->end);
453 tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
456 * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
457 * mips-4k) flush only large pages.
459 * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
466 tlb->vma_huge = is_vm_hugetlb_page(vma);
467 tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
468 tlb->vma_pfn = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
471 static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
474 * Anything calling __tlb_adjust_range() also sets at least one of
475 * these bits.
476 */
477 if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
478 tlb->cleared_puds || tlb->cleared_p4ds))
479 return;
481 tlb_flush(tlb);
482 __tlb_reset_range(tlb);
485 static inline void tlb_remove_page_size(struct mmu_gather *tlb,
488 if (__tlb_remove_page_size(tlb, page, false, page_size))
489 tlb_flush_mmu(tlb);
492 static __always_inline bool __tlb_remove_page(struct mmu_gather *tlb,
495 return __tlb_remove_page_size(tlb, page, delay_rmap, PAGE_SIZE);
502 static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
504 return tlb_remove_page_size(tlb, page, PAGE_SIZE);
507 static inline void tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
509 tlb_remove_table(tlb, pt);
512 /* Like tlb_remove_ptdesc, but for page-like page directories. */
513 static inline void tlb_remove_page_ptdesc(struct mmu_gather *tlb, struct ptdesc *pt)
515 tlb_remove_page(tlb, ptdesc_page(pt));
518 static inline void tlb_change_page_size(struct mmu_gather *tlb,
522 if (tlb->page_size && tlb->page_size != page_size) {
523 if (!tlb->fullmm && !tlb->need_flush_all)
524 tlb_flush_mmu(tlb);
525 }
527 tlb->page_size = page_size;
531 static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
533 if (tlb->cleared_ptes)
534 return PAGE_SHIFT;
535 if (tlb->cleared_pmds)
536 return PMD_SHIFT;
537 if (tlb->cleared_puds)
538 return PUD_SHIFT;
539 if (tlb->cleared_p4ds)
540 return P4D_SHIFT;
545 static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
547 return 1UL << tlb_get_unmap_shift(tlb);
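To make the hooks above concrete, here is a sketch of what an architecture-provided tlb_flush() might look like. my_arch_flush_asid() and my_arch_flush_va_range() are made-up primitives standing in for whatever the architecture really offers; actual per-arch implementations differ substantially:

/* Hypothetical <asm/tlb.h> fragment; illustrative only. */
extern void my_arch_flush_asid(struct mm_struct *mm);
extern void my_arch_flush_va_range(struct mm_struct *mm, unsigned long start,
				   unsigned long end, unsigned long stride);

static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all || tlb->freed_tables) {
		/* Page tables were freed: also drop any page-walk caches. */
		my_arch_flush_asid(tlb->mm);
		return;
	}

	if (tlb->end)	/* only if a range was actually accumulated */
		my_arch_flush_va_range(tlb->mm, tlb->start, tlb->end,
				       tlb_get_unmap_size(tlb));
}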
551 * In the case of tlb vma handling, we can optimise these away in the
552 * case where we're doing a full MM flush. When we're doing a munmap,
553 * the vmas are adjusted to only cover the region to be torn down.
555 static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
557 if (tlb->fullmm)
558 return;
560 tlb_update_vma_flags(tlb, vma);
562 flush_cache_range(vma, vma->vm_start, vma->vm_end);
566 static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
568 if (tlb->fullmm)
569 return;
572 * VM_PFNMAP is more fragile because the core mm will not track the
573 * page mapcount -- there might not be page-frames for these PFNs after
574 * all. Force flush TLBs for such ranges to avoid munmap() vs
575 * unmap_mapping_range() races.
577 if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
579 * Do a TLB flush and reset the range at VMA boundaries; this avoids
580 * the ranges growing with the unused space between consecutive VMAs.
582 tlb_flush_mmu_tlbonly(tlb);
587 * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
588 * and set corresponding cleared_*.
590 static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
593 __tlb_adjust_range(tlb, address, size);
594 tlb->cleared_ptes = 1;
597 static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
600 __tlb_adjust_range(tlb, address, size);
601 tlb->cleared_pmds = 1;
604 static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
607 __tlb_adjust_range(tlb, address, size);
608 tlb->cleared_puds = 1;
611 static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
614 __tlb_adjust_range(tlb, address, size);
615 tlb->cleared_p4ds = 1;
619 static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
625 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
627 * Record the fact that pte's were really unmapped by updating the range,
628 * so we can later optimise away the tlb invalidate. This helps when
629 * userspace is unmapping already-unmapped pages, which happens quite a lot.
631 #define tlb_remove_tlb_entry(tlb, ptep, address) \
633 tlb_flush_pte_range(tlb, address, PAGE_SIZE); \
634 __tlb_remove_tlb_entry(tlb, ptep, address); \
638 * tlb_remove_tlb_entries - remember unmapping of multiple consecutive ptes for
639 * later tlb invalidation.
644 static inline void tlb_remove_tlb_entries(struct mmu_gather *tlb,
645 pte_t *ptep, unsigned int nr, unsigned long address)
646 {
647 tlb_flush_pte_range(tlb, address, PAGE_SIZE * nr);
648 for (;;) {
649 __tlb_remove_tlb_entry(tlb, ptep, address);
650 if (--nr == 0)
651 break;
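When several PTEs map consecutive pages of one large folio, the batched variants pair up naturally. A hedged sketch (hypothetical helper; the real batched zap code additionally handles dirty/accessed bits, rmap and accounting):

static void example_zap_folio_ptes(struct mmu_gather *tlb, struct page *page,
				   pte_t *ptep, unsigned int nr,
				   unsigned long addr)
{
	unsigned int i;

	/* 'page' is the first of nr consecutive pages of one large folio. */
	for (i = 0; i < nr; i++)
		ptep_get_and_clear(tlb->mm, addr + i * PAGE_SIZE, ptep + i);

	/* Record all nr PTEs for the later TLB invalidate in one call... */
	tlb_remove_tlb_entries(tlb, ptep, nr, addr);

	/* ...and queue all nr pages for deferred freeing in one call. */
	if (__tlb_remove_folio_pages(tlb, page, nr, false))
		tlb_flush_mmu(tlb);	/* page batch is full: flush and free now */
}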
657 #define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
658 do { \
659 unsigned long _sz = huge_page_size(h); \
660 if (_sz >= P4D_SIZE) \
661 tlb_flush_p4d_range(tlb, address, _sz); \
662 else if (_sz >= PUD_SIZE) \
663 tlb_flush_pud_range(tlb, address, _sz); \
664 else if (_sz >= PMD_SIZE) \
665 tlb_flush_pmd_range(tlb, address, _sz); \
666 else \
667 tlb_flush_pte_range(tlb, address, _sz); \
668 __tlb_remove_tlb_entry(tlb, ptep, address); \
672 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
676 #define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
679 #define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \
681 tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE); \
682 __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
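For transparent huge pages the same pattern moves one level up. A rough sketch modelled on a huge-PMD zap (hypothetical helper; requires THP, and locking plus folio bookkeeping are omitted); note the tlb_change_page_size() call described near the top of this file:

static void example_zap_huge_pmd(struct mmu_gather *tlb,
				 struct vm_area_struct *vma,
				 pmd_t *pmdp, unsigned long addr)
{
	pmd_t orig_pmd;

	/* Tell the gather we are now operating on PMD-sized entries. */
	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);

	orig_pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
	tlb_remove_pmd_tlb_entry(tlb, pmdp, addr);	/* record for the TLB flush */

	if (pmd_present(orig_pmd))
		tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
}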
686 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
690 #define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
693 #define tlb_remove_pud_tlb_entry(tlb, pudp, address) \
695 tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE); \
696 __tlb_remove_pud_tlb_entry(tlb, pudp, address); \
704 * architected non-legacy page table cache (which I'm not aware of
705 * anybody actually doing), you're going to have some architecturally
706 * explicit flushing for that, likely *separate* from a regular TLB entry
707 * flush, and thus you'd need more than just some range expansion..
718 #define pte_free_tlb(tlb, ptep, address) \
720 tlb_flush_pmd_range(tlb, address, PAGE_SIZE); \
721 tlb->freed_tables = 1; \
722 __pte_free_tlb(tlb, ptep, address); \
727 #define pmd_free_tlb(tlb, pmdp, address) \
729 tlb_flush_pud_range(tlb, address, PAGE_SIZE); \
730 tlb->freed_tables = 1; \
731 __pmd_free_tlb(tlb, pmdp, address); \
736 #define pud_free_tlb(tlb, pudp, address) \
738 tlb_flush_p4d_range(tlb, address, PAGE_SIZE); \
739 tlb->freed_tables = 1; \
740 __pud_free_tlb(tlb, pudp, address); \
745 #define p4d_free_tlb(tlb, pudp, address) \
747 __tlb_adjust_range(tlb, address, PAGE_SIZE); \
748 tlb->freed_tables = 1; \
749 __p4d_free_tlb(tlb, pudp, address); \
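The __p*_free_tlb() hooks wrapped above are supplied by the architecture. A minimal sketch of one, assuming pgtable_t is a struct page pointer (as on most architectures); real implementations also run the page-table destructor for accounting and may use tlb_remove_table() instead:

/* Hypothetical arch pgalloc fragment; illustrative only. */
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				  unsigned long addr)
{
	/*
	 * Hand the no-longer-referenced PTE page to the gather: it is queued
	 * and freed only after the TLB (and any page-walk caches) have been
	 * invalidated; pte_free_tlb() above already set tlb->freed_tables.
	 */
	tlb_remove_page_ptdesc(tlb, page_ptdesc(pte));
}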