/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_TLBFLUSH_H
#define _ASM_POWERPC_BOOK3S_32_TLBFLUSH_H

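/* Context id value used to mark an mm with no MMU context allocated */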
#define MMU_NO_CONTEXT      (0)
/*
 * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
 */
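/*
 * Hash-MMU back-ends, implemented out of line.  The wrappers below
 * only call them when the CPU has a hash page table, i.e. when
 * MMU_FTR_HPTE_TABLE is set.
 */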
void hash__flush_tlb_mm(struct mm_struct *mm);
void hash__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
void hash__flush_range(struct mm_struct *mm, unsigned long start, unsigned long end);

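/*
 * _tlbie() invalidates the TLB entry for a single effective address.
 * The UP version is a trivial inline tlbie; on SMP an out-of-line
 * version is used so that tlbie broadcasts can be serialized between
 * CPUs.
 */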
#ifdef CONFIG_SMP
void _tlbie(unsigned long address);
#else
static inline void _tlbie(unsigned long address)
{
	asm volatile ("tlbie %0; sync" : : "r" (address) : "memory");
}
#endif
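/* _tlbia() invalidates the entire TLB */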
void _tlbia(void);

/*
 * Called at the end of a mmu_gather operation to make sure the
 * TLB flush is completely done.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	/* 603 needs to flush the whole TLB here since it doesn't use a hash table. */
	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
		_tlbia();
}

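/*
 * Flush a range of addresses in the given address space.  With a hash
 * table the range is handed to the hash code; without one a single
 * page is dropped with tlbie and anything larger flushes the whole
 * TLB.
 */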
static inline void flush_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
		hash__flush_range(mm, start, end);
	else if (end - start <= PAGE_SIZE)
		_tlbie(start);
	else
		_tlbia();
}

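/*
 * Flush all translations for an address space.  Without a hash table
 * this simply flushes the whole TLB.
 */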
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
		hash__flush_tlb_mm(mm);
	else
		_tlbia();
}

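/* Flush the translation for a single page of an address space */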
static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
		hash__flush_tlb_page(vma, vmaddr);
	else
		_tlbie(vmaddr);
}

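/* Flush a user address range; just a wrapper around flush_range() */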
static inline void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	flush_range(vma->vm_mm, start, end);
}

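/* Flush a kernel address range, i.e. a range in init_mm */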
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_range(&init_mm, start, end);
}

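/*
 * The local_ variants are not implemented separately on book3s/32;
 * they simply call the corresponding global flushes.
 */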
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
	flush_tlb_page(vma, vmaddr);
}

static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	flush_tlb_mm(mm);
}

#endif /* _ASM_POWERPC_BOOK3S_32_TLBFLUSH_H */