/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_TLBFLUSH_H
#define _ASM_RISCV_TLBFLUSH_H

#include <linux/mm_types.h>
#include <asm/smp.h>
#include <asm/errata_list.h>

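/*
 * Sentinel values: a range size of FLUSH_TLB_MAX_SIZE is treated as
 * "flush everything", and FLUSH_TLB_NO_ASID requests a flush that is
 * not restricted to a particular ASID.
 */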
#define FLUSH_TLB_MAX_SIZE      ((unsigned long)-1)
#define FLUSH_TLB_NO_ASID       ((unsigned long)-1)

#ifdef CONFIG_MMU
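/* Mask of the hardware-supported ASID bits, set up by the ASID allocator. */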
extern unsigned long asid_mask;

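/* Flush all entries from the local TLB */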
static inline void local_flush_tlb_all(void)
{
	__asm__ __volatile__ ("sfence.vma" : : : "memory");
}

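/*
 * ALT_FLUSH_TLB_PAGE() (see errata_list.h) is an ALTERNATIVE() hook:
 * on parts whose page-granular SFENCE.VMA is affected by an erratum
 * (e.g. SiFive CIP-1200) it patches in a full flush in place of the
 * single-page flush below.
 */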
/* Flush one page from local TLB */
static inline void local_flush_tlb_page(unsigned long addr)
{
	ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
}
#else /* CONFIG_MMU */
#define local_flush_tlb_all()			do { } while (0)
#define local_flush_tlb_page(addr)		do { } while (0)
#endif /* CONFIG_MMU */

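/*
 * The SMP variants below are implemented in arch/riscv/mm/tlbflush.c
 * and broadcast the flush to other harts, via SBI remote fences or
 * IPIs as available.
 */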
#if defined(CONFIG_SMP) && defined(CONFIG_MMU)
void flush_tlb_all(void);
void flush_tlb_mm(struct mm_struct *mm);
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			unsigned long end, unsigned int page_size);
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end);
void flush_tlb_kernel_range(unsigned long start, unsigned long end);
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end);
#endif

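/*
 * Batched TLB flushing: these hooks let the generic reclaim code queue
 * up unmap flushes and issue them in a single batch rather than once
 * per page.
 */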
bool arch_tlbbatch_should_defer(struct mm_struct *mm);
void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
			       struct mm_struct *mm,
			       unsigned long uaddr);
void arch_flush_tlb_batched_pending(struct mm_struct *mm);
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);

#else /* CONFIG_SMP && CONFIG_MMU */

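/*
 * UP (or !MMU) fallbacks: every flush stays on the local hart, and
 * range flushes conservatively fall back to local_flush_tlb_all().
 */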
#define flush_tlb_all() local_flush_tlb_all()
#define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)

static inline void flush_tlb_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	local_flush_tlb_all();
}

/* Flush a range of kernel pages */
static inline void flush_tlb_kernel_range(unsigned long start,
	unsigned long end)
{
	local_flush_tlb_all();
}

#define flush_tlb_mm(mm) flush_tlb_all()
#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
#endif /* !CONFIG_SMP || !CONFIG_MMU */

#endif /* _ASM_RISCV_TLBFLUSH_H */