xref: /linux/arch/riscv/include/asm/tlbflush.h (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
4  * Copyright (C) 2012 Regents of the University of California
5  */
6 
7 #ifndef _ASM_RISCV_TLBFLUSH_H
8 #define _ASM_RISCV_TLBFLUSH_H
9 
10 #include <linux/mm_types.h>
11 #include <asm/smp.h>
12 #include <asm/errata_list.h>
13 
/*
 * Sentinel values, both (unsigned long)-1:
 * FLUSH_TLB_MAX_SIZE — presumably a range size meaning "flush everything"
 * rather than iterating per page; confirm against arch/riscv/mm/tlbflush.c.
 * FLUSH_TLB_NO_ASID  — "no specific ASID": the local_* helpers below fall
 * back to non-ASID sfence.vma forms when they see this value.
 */
#define FLUSH_TLB_MAX_SIZE      ((unsigned long)-1)
#define FLUSH_TLB_NO_ASID       ((unsigned long)-1)
16 
17 #ifdef CONFIG_MMU
/* Flush every TLB entry (all addresses, all ASIDs) on the local hart. */
static inline void local_flush_tlb_all(void)
{
	/*
	 * sfence.vma with no address/ASID operands orders all prior page-table
	 * updates and invalidates all cached translations on this hart; the
	 * "memory" clobber keeps the compiler from reordering accesses past it.
	 */
	__asm__ __volatile__ ("sfence.vma" : : : "memory");
}
22 
23 static inline void local_flush_tlb_all_asid(unsigned long asid)
24 {
25 	if (asid != FLUSH_TLB_NO_ASID)
26 		ALT_SFENCE_VMA_ASID(asid);
27 	else
28 		local_flush_tlb_all();
29 }
30 
/* Flush one page from local TLB */
static inline void local_flush_tlb_page(unsigned long addr)
{
	/*
	 * Errata-aware wrapper (asm/errata_list.h) around sfence.vma with an
	 * address operand: drops translations for this vaddr under any ASID.
	 */
	ALT_SFENCE_VMA_ADDR(addr);
}
36 
37 static inline void local_flush_tlb_page_asid(unsigned long addr,
38 					     unsigned long asid)
39 {
40 	if (asid != FLUSH_TLB_NO_ASID)
41 		ALT_SFENCE_VMA_ADDR_ASID(addr, asid);
42 	else
43 		local_flush_tlb_page(addr);
44 }
45 
/*
 * Cross-hart (SMP) flush primitives; implementations live outside this
 * header (arch TLB code).  The local_* inline helpers above act on the
 * current hart only.
 */
void flush_tlb_all(void);
void flush_tlb_mm(struct mm_struct *mm);
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			unsigned long end, unsigned int page_size);
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end);
void flush_tlb_kernel_range(unsigned long start, unsigned long end);
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* Tell the generic THP code this arch provides its own PMD-range flush. */
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end);
#endif

/*
 * Hooks for the generic batched/deferred TLB-flush machinery used by
 * memory reclaim (struct arch_tlbflush_unmap_batch).
 */
bool arch_tlbbatch_should_defer(struct mm_struct *mm);
void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
			       struct mm_struct *mm,
			       unsigned long uaddr);
void arch_flush_tlb_batched_pending(struct mm_struct *mm);
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);

/*
 * NOTE(review): presumably the page count above which implementations
 * prefer one full flush over per-page sfence.vma — confirm the tuning
 * in arch/riscv/mm/tlbflush.c.
 */
extern unsigned long tlb_flush_all_threshold;
#else /* CONFIG_MMU */
/* Without an MMU there is no TLB to maintain; make the flush a no-op. */
#define local_flush_tlb_all()			do { } while (0)
#endif /* CONFIG_MMU */
72 
73 #endif /* _ASM_RISCV_TLBFLUSH_H */
74