xref: /linux/arch/powerpc/include/asm/tlb.h (revision 98c45f51f767bfdd71d773cceaceb403352e51ae)
1 /*
2  *	TLB shootdown specifics for powerpc
3  *
4  * Copyright (C) 2002 Anton Blanchard, IBM Corp.
5  * Copyright (C) 2002 Paul Mackerras, IBM Corp.
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License
9  * as published by the Free Software Foundation; either version
10  * 2 of the License, or (at your option) any later version.
11  */
12 #ifndef _ASM_POWERPC_TLB_H
13 #define _ASM_POWERPC_TLB_H
14 #ifdef __KERNEL__
15 
16 #ifndef __powerpc64__
17 #include <asm/pgtable.h>
18 #endif
19 #include <asm/pgalloc.h>
20 #ifndef __powerpc64__
21 #include <asm/page.h>
22 #include <asm/mmu.h>
23 #endif
24 
25 #include <linux/pagemap.h>
26 
/*
 * powerpc needs no per-VMA setup/teardown during a gather, so the generic
 * mmu_gather's VMA hooks are defined away to no-ops.
 */
#define tlb_start_vma(tlb, vma)	do { } while (0)
#define tlb_end_vma(tlb, vma)	do { } while (0)
/*
 * Defining these names to themselves tells asm-generic/tlb.h that this
 * architecture supplies its own implementations (below) instead of the
 * generic fallbacks.
 */
#define __tlb_remove_tlb_entry	__tlb_remove_tlb_entry
#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
31 
32 extern void tlb_flush(struct mmu_gather *tlb);
33 
34 /* Get the generic bits... */
35 #include <asm-generic/tlb.h>
36 
37 extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
38 			     unsigned long address);
39 
/*
 * Per-PTE hook called by the generic mmu_gather when a PTE is torn down.
 *
 * On 32-bit hash-MMU platforms (CONFIG_PPC_STD_MMU_32), a PTE that has a
 * corresponding hash-table entry (_PAGE_HASHPTE set) gets that entry
 * flushed here via flush_hash_entry().  On all other configurations this
 * is a no-op.
 */
static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					  unsigned long address)
{
#ifdef CONFIG_PPC_STD_MMU_32
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(tlb->mm, ptep, address);
#endif
}
48 
49 static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
50 						     unsigned int page_size)
51 {
52 	if (tlb->fullmm)
53 		return;
54 
55 	if (!tlb->page_size)
56 		tlb->page_size = page_size;
57 	else if (tlb->page_size != page_size) {
58 		tlb_flush_mmu(tlb);
59 		/*
60 		 * update the page size after flush for the new
61 		 * mmu_gather.
62 		 */
63 		tlb->page_size = page_size;
64 	}
65 }
66 
67 #ifdef CONFIG_SMP
/*
 * Return non-zero when every CPU that has used this mm is a hardware
 * sibling of the current CPU (i.e. the mm is confined to this core).
 */
static inline int mm_is_core_local(struct mm_struct *mm)
{
	const struct cpumask *core_siblings;

	core_siblings = topology_sibling_cpumask(smp_processor_id());
	return cpumask_subset(mm_cpumask(mm), core_siblings);
}
73 
74 #ifdef CONFIG_PPC_BOOK3S_64
75 static inline int mm_is_thread_local(struct mm_struct *mm)
76 {
77 	if (atomic_read(&mm->context.active_cpus) > 1)
78 		return false;
79 	return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
80 }
/*
 * Shrink the mm back to single-CPU scope: mark one active CPU and reduce
 * the cpumask to just the current CPU.  Only legal when no coprocessor
 * holds a context reference and we are running on the mm itself — both
 * preconditions are asserted with WARN_ON below.
 */
static inline void mm_reset_thread_local(struct mm_struct *mm)
{
	WARN_ON(atomic_read(&mm->context.copros) > 0);
	/*
	 * It's possible for mm_access to take a reference on mm_users to
	 * access the remote mm from another thread, but it's not allowed
	 * to set mm_cpumask, so mm_users may be > 1 here.
	 */
	WARN_ON(current->mm != mm);
	/* Set the active-CPU count before rewriting the cpumask. */
	atomic_set(&mm->context.active_cpus, 1);
	cpumask_clear(mm_cpumask(mm));
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}
94 #else /* CONFIG_PPC_BOOK3S_64 */
/*
 * Without Book3S-64 active_cpus tracking, the mm counts as thread-local
 * exactly when its cpumask contains the current CPU and nothing else.
 */
static inline int mm_is_thread_local(struct mm_struct *mm)
{
	return cpumask_equal(cpumask_of(smp_processor_id()),
			     mm_cpumask(mm));
}
100 #endif /* !CONFIG_PPC_BOOK3S_64 */
101 
102 #else /* CONFIG_SMP */
/* UP build: a single CPU means every mm is trivially core-local. */
static inline int mm_is_core_local(struct mm_struct *mm)
{
	return 1;
}
107 
/* UP build: a single CPU means every mm is trivially thread-local. */
static inline int mm_is_thread_local(struct mm_struct *mm)
{
	return 1;
}
112 #endif
113 
114 #endif /* __KERNEL__ */
115 #endif /* __ASM_POWERPC_TLB_H */
116