/*
 *  linux/arch/arm/kernel/smp_tlb.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/preempt.h>
#include <linux/smp.h>

#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

/**********************************************************************/

/*
 * TLB operations
 */
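/*
 * Arguments for the cross-CPU TLB flush handlers below. The cross-call
 * interface only passes a single void * to the remote handler, so the
 * vma and address range are bundled into this struct.
 */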
struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};

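/*
 * Cross-call handlers. Each performs the purely local variant of the
 * requested maintenance operation on the CPU it runs on; they are only
 * used when tlb_ops_need_broadcast() reports that the hardware does not
 * broadcast TLB/branch-predictor maintenance operations itself.
 */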
static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *arg)
{
	struct mm_struct *mm = (struct mm_struct *)arg;

	local_flush_tlb_mm(mm);
}

static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_kernel_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_page(ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_bp_all(void *ignored)
{
	local_flush_bp_all();
}

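/*
 * Cortex-A15 erratum 798181 workaround: TLB maintenance operations
 * broadcast by one CPU may not be correctly observed by the other CPUs
 * on affected parts. The code below issues a dummy inner-shareable TLB
 * invalidate plus DSB on the invalidating CPU
 * (dummy_flush_tlb_a15_erratum()) and then cross-calls the other CPUs
 * so that each executes a DMB, forcing the maintenance to take effect
 * there.
 */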
#ifdef CONFIG_ARM_ERRATA_798181
static int erratum_a15_798181(void)
{
	unsigned int midr = read_cpuid_id();

	/* Cortex-A15 r0p0..r3p2 affected */
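	/*
	 * The mask 0xff0ffff0 keeps the implementer, architecture and
	 * primary part number fields of the MIDR (0x41/0xf/0xc0f, i.e.
	 * ARM Cortex-A15) while ignoring variant and revision; the upper
	 * bound 0x413fc0f2 then rejects anything newer than r3p2.
	 */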
	if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
		return 0;
	return 1;
}
#else
static int erratum_a15_798181(void)
{
	return 0;
}
#endif

static void ipi_flush_tlb_a15_erratum(void *arg)
{
	dmb();
}

static void broadcast_tlb_a15_erratum(void)
{
	if (!erratum_a15_798181())
		return;

	dummy_flush_tlb_a15_erratum();
	smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum,
			       NULL, 1);
}

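/*
 * mm-scoped variant of the workaround: rather than interrupting every
 * online CPU, only cross-call the CPUs whose currently active ASID
 * matches the mm being invalidated, since only they can be using stale
 * translations for it.
 */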
static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
{
	int cpu;
	cpumask_t mask = { CPU_BITS_NONE };

	if (!erratum_a15_798181())
		return;

	dummy_flush_tlb_a15_erratum();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		/*
		 * We only need to send an IPI if the other CPUs are running
		 * the same ASID as the one being invalidated. There is no
		 * need for locking around the active_asids check since the
		 * switch_mm() function has at least one dmb() (as required by
		 * this workaround) in case a context switch happens on
		 * another CPU after the condition below.
		 */
		if (atomic64_read(&mm->context.id) ==
		    atomic64_read(&per_cpu(active_asids, cpu)))
			cpumask_set_cpu(cpu, &mask);
	}
	smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
}

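/*
 * Public flush_tlb_*() entry points. When the hardware does not
 * broadcast TLB maintenance (tlb_ops_need_broadcast()), the operation is
 * cross-called to the other CPUs; otherwise the local operation is
 * sufficient. In either case the Cortex-A15 erratum broadcast follows.
 */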
void flush_tlb_all(void)
{
	if (tlb_ops_need_broadcast())
		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
	else
		local_flush_tlb_all();
	broadcast_tlb_a15_erratum();
}

void flush_tlb_mm(struct mm_struct *mm)
{
	if (tlb_ops_need_broadcast())
		on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
	else
		local_flush_tlb_mm(mm);
	broadcast_tlb_mm_a15_erratum(mm);
}

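/*
 * User-space flushes only need to reach the CPUs recorded in
 * mm_cpumask(), i.e. those that may have run the mm; the kernel-address
 * flushes below go to every online CPU.
 */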
void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_vma = vma;
		ta.ta_start = uaddr;
		on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page,
					&ta, 1);
	} else
		local_flush_tlb_page(vma, uaddr);
	broadcast_tlb_mm_a15_erratum(vma->vm_mm);
}

void flush_tlb_kernel_page(unsigned long kaddr)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_start = kaddr;
		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
	} else
		local_flush_tlb_kernel_page(kaddr);
	broadcast_tlb_a15_erratum();
}

void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_vma = vma;
		ta.ta_start = start;
		ta.ta_end = end;
		on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range,
					&ta, 1);
	} else
		local_flush_tlb_range(vma, start, end);
	broadcast_tlb_mm_a15_erratum(vma->vm_mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_start = start;
		ta.ta_end = end;
		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
	} else
		local_flush_tlb_kernel_range(start, end);
	broadcast_tlb_a15_erratum();
}

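/*
 * Invalidate the branch predictor on all CPUs, cross-calling when the
 * maintenance operation is not broadcast by the hardware.
 */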
void flush_bp_all(void)
{
	if (tlb_ops_need_broadcast())
		on_each_cpu(ipi_flush_bp_all, NULL, 1);
	else
		local_flush_bp_all();
}