#ifndef _SPARC64_TLBFLUSH_H
#define _SPARC64_TLBFLUSH_H

#include <asm/mmu_context.h>

/* TSB flush operations. */

#define TLB_BATCH_NR	192

/*
 * Per-cpu queue of deferred user TLB flushes, drained by
 * flush_tlb_pending().
 */
struct tlb_batch {
	unsigned int hugepage_shift;	/* page shift of the queued entries */
	struct mm_struct *mm;		/* address space the batch belongs to */
	unsigned long tlb_nr;		/* number of queued addresses */
	unsigned long active;		/* nonzero while in lazy MMU mode */
	unsigned long vaddrs[TLB_BATCH_NR];	/* queued virtual addresses */
};

void flush_tsb_kernel_range(unsigned long start, unsigned long end);
void flush_tsb_user(struct tlb_batch *tb);
void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
			 unsigned int hugepage_shift);

/* TLB flush operations. */

/*
 * These are intentionally empty: on sparc64, user TLB invalidation is
 * driven from the page table update paths via the tlb_batch machinery
 * above, not from these generic hooks.
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end);

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

void flush_tlb_pending(void);
void arch_enter_lazy_mmu_mode(void);
void arch_leave_lazy_mmu_mode(void);
#define arch_flush_lazy_mmu_mode()	do {} while (0)

/* Local cpu only. */
void __flush_tlb_all(void);
void __flush_tlb_page(unsigned long context, unsigned long vaddr);
void __flush_tlb_kernel_range(unsigned long start, unsigned long end);

#ifndef CONFIG_SMP

static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
	__flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
}

#else /* CONFIG_SMP */

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);

#define global_flush_tlb_page(mm, vaddr) \
	smp_flush_tlb_page(mm, vaddr)

#endif /* ! CONFIG_SMP */

#endif /* _SPARC64_TLBFLUSH_H */
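
/*
 * Illustrative usage sketch, not part of this header: the generic mm
 * layer brackets a run of page table updates with the lazy MMU hooks
 * declared above, letting the per-cpu tlb_batch coalesce up to
 * TLB_BATCH_NR address flushes into a single flush_tlb_pending() call.
 * The set_pte_at() arguments below are schematic, shown only to
 * indicate the pattern:
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (addr = start; addr < end; addr += PAGE_SIZE)
 *		set_pte_at(mm, addr, ptep, pte);   (queues addr in tlb_batch)
 *	arch_leave_lazy_mmu_mode();                (drains via flush_tlb_pending)
 */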