/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/tlb.h
 *
 * Copyright (C) 2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_TLB_H
#define __ASM_TLB_H

#include <linux/pagemap.h>
#include <linux/swap.h>

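/*
 * Called by the generic tlb_remove_table() machinery once it is safe to
 * free a page-table page (i.e. no CPU can still be walking it).
 * free_page_and_swap_cache() releases the page, removing it from the
 * swap cache first if necessary.
 */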
static inline void __tlb_remove_table(void *_table)
{
	free_page_and_swap_cache((struct page *)_table);
}

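/*
 * Tell <asm-generic/tlb.h> that this architecture supplies its own
 * tlb_flush(): the #define suppresses the generic fallback, and the
 * forward declaration lets the generic header reference tlb_flush()
 * before the definition below is seen.
 */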
#define tlb_flush tlb_flush
static void tlb_flush(struct mmu_gather *tlb);

#include <asm-generic/tlb.h>

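/*
 * For context: the hooks in this file are driven by the generic
 * mmu_gather code. A rough sketch of the core-kernel flow when
 * unmapping a range (helper names from mm/memory.c and mm/mmu_gather.c,
 * as of this revision):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	unmap_page_range(&tlb, vma, start, end, NULL);
 *	tlb_finish_mmu(&tlb, start, end);
 *
 * tlb_finish_mmu() performs the TLB invalidation via tlb_flush() below,
 * then frees the pages and page-table pages that were gathered.
 */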
static inline void tlb_flush(struct mmu_gather *tlb)
{
	struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
	bool last_level = !tlb->freed_tables;
	unsigned long stride = tlb_get_unmap_size(tlb);

	/*
	 * If we're tearing down the address space then we only care about
	 * invalidating the walk-cache, since the ASID allocator won't
	 * reallocate our ASID without invalidating the entire TLB.
	 */
	if (tlb->fullmm) {
		if (!last_level)
			flush_tlb_mm(tlb->mm);
		return;
	}

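	/*
	 * Partial unmap: invalidate only the range being torn down. When
	 * no page-table pages were freed we can use a last-level-only
	 * invalidation, which leaves the walk-cache intact, and the stride
	 * matches the granule of the unmapped entries so huge mappings
	 * need fewer TLBI operations.
	 */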
	__flush_tlb_range(&vma, tlb->start, tlb->end, stride, last_level);
}

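/*
 * Free a PTE table. pgtable_pte_page_dtor() undoes pgtable_pte_page_ctor()
 * (releasing the split page-table lock and the page-table accounting)
 * before the page is queued for deferred freeing.
 */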
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				  unsigned long addr)
{
	pgtable_pte_page_dtor(pte);
	tlb_remove_table(tlb, pte);
}

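/*
 * A PMD table only exists as a separate page when there are more than
 * two page-table levels; with two levels the PMD is folded into the PGD
 * and there is no extra page to free. pgtable_pmd_page_dtor() undoes the
 * constructor (split PMD locks) before the deferred free.
 */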
#if CONFIG_PGTABLE_LEVELS > 2
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
				  unsigned long addr)
{
	struct page *page = virt_to_page(pmdp);

	pgtable_pmd_page_dtor(page);
	tlb_remove_table(tlb, page);
}
#endif

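/*
 * Likewise, PUD tables only exist with more than three levels. There is
 * no constructor/destructor pair at this level (no split page-table
 * lock), so the page goes straight to tlb_remove_table().
 */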
#if CONFIG_PGTABLE_LEVELS > 3
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
				  unsigned long addr)
{
	tlb_remove_table(tlb, virt_to_page(pudp));
}
#endif

#endif