xref: /linux/arch/sparc/include/asm/tlbflush_64.h (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
#ifndef _SPARC64_TLBFLUSH_H
#define _SPARC64_TLBFLUSH_H

#include <asm/mmu_context.h>

/* TSB flush operations. */

#define TLB_BATCH_NR	192

/*
 * Per-cpu batch of user virtual addresses whose TLB (and TSB) entries are
 * flushed lazily: filled while lazy MMU mode is active, drained by
 * flush_tlb_pending().
 */
struct tlb_batch {
	struct mm_struct *mm;			/* address space the batch belongs to */
	unsigned long tlb_nr;			/* number of entries queued in vaddrs[] */
	unsigned long active;			/* non-zero while in lazy MMU mode */
	unsigned long vaddrs[TLB_BATCH_NR];	/* pending virtual addresses */
};
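
/*
 * Illustrative sketch only (hypothetical helper name; the real batching
 * logic lives in arch/sparc/mm/tlb.c): queue one user virtual address for a
 * deferred flush, and drain the whole batch once it fills up.
 *
 *	static void queue_pending_flush(struct tlb_batch *tb,
 *					struct mm_struct *mm,
 *					unsigned long vaddr)
 *	{
 *		unsigned long nr = tb->tlb_nr;
 *
 *		if (nr == 0)
 *			tb->mm = mm;
 *		tb->vaddrs[nr] = vaddr;
 *		tb->tlb_nr = ++nr;
 *		if (nr >= TLB_BATCH_NR)
 *			flush_tlb_pending();
 *	}
 */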

void flush_tsb_kernel_range(unsigned long start, unsigned long end);
void flush_tsb_user(struct tlb_batch *tb);
void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);

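/*
 * Background note (a summary, not text from the original header): the TSB,
 * the Translation Storage Buffer, is sparc64's memory-resident cache of
 * translations that the TLB-miss handlers search before walking the page
 * tables, so stale TSB entries must be invalidated alongside the TLB
 * entries; that is what the flush_tsb_*() helpers above do.
 */
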
/* TLB flush operations. */

/*
 * These generic entry points are intentionally empty on sparc64: user TLB
 * entries are flushed through the batched lazy-MMU machinery and
 * flush_tlb_pending() rather than eagerly from these hooks.
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end);

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

void flush_tlb_pending(void);
void arch_enter_lazy_mmu_mode(void);
void arch_leave_lazy_mmu_mode(void);
#define arch_flush_lazy_mmu_mode()      do {} while (0)

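/*
 * Illustrative sketch (a hypothetical caller, not code from this header):
 * generic mm code brackets a run of page table updates with the lazy MMU
 * hooks; each update of a previously valid pte may queue its address in the
 * per-cpu tlb_batch, and arch_leave_lazy_mmu_mode() drains whatever is
 * queued with a single flush_tlb_pending() instead of one flush per pte.
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (addr = start; addr < end; addr += PAGE_SIZE, ptep++)
 *		set_pte_at(mm, addr, ptep, pte);
 *	arch_leave_lazy_mmu_mode();
 */
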
/* Local cpu only.  */
void __flush_tlb_all(void);
void __flush_tlb_page(unsigned long context, unsigned long vaddr);
void __flush_tlb_kernel_range(unsigned long start, unsigned long end);

#ifndef CONFIG_SMP

static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
	__flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
}

#else /* CONFIG_SMP */

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);

#define global_flush_tlb_page(mm, vaddr) \
	smp_flush_tlb_page(mm, vaddr)

#endif /* ! CONFIG_SMP */
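
/*
 * Illustrative usage sketch (hypothetical call site, not code from this
 * header): a caller that needs one user page flushed system-wide uses
 * global_flush_tlb_page(); on UP it collapses to a local __flush_tlb_page()
 * keyed by the mm's hardware context bits, while on SMP it cross-calls the
 * other cpus through smp_flush_tlb_page().
 *
 *	global_flush_tlb_page(vma->vm_mm, address & PAGE_MASK);
 */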

#endif /* _SPARC64_TLBFLUSH_H */