xref: /linux/arch/openrisc/kernel/smp.c (revision 69050f8d6d075dc01af7a5f2f550a8067510366f)
1 /*
2  * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
3  * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
4  *
5  * Based on arm64 and arc implementations
6  * Copyright (C) 2013 ARM Ltd.
7  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
8  *
9  * This file is licensed under the terms of the GNU General Public License
10  * version 2.  This program is licensed "as is" without any warranty of any
11  * kind, whether express or implied.
12  */
13 
14 #include <linux/smp.h>
15 #include <linux/cpu.h>
16 #include <linux/interrupt.h>
17 #include <linux/sched.h>
18 #include <linux/sched/mm.h>
19 #include <linux/irq.h>
20 #include <linux/of.h>
21 #include <asm/cpuinfo.h>
22 #include <asm/mmu_context.h>
23 #include <asm/tlbflush.h>
24 #include <asm/cacheflush.h>
25 #include <asm/time.h>
26 
asmlinkage __init void secondary_start_kernel(void);

/* IRQ number used for cross-CPU IPIs; set once by set_smp_cross_call(). */
static unsigned int ipi_irq __ro_after_init;
/* Platform hook that raises an IPI on every CPU in the given mask. */
static void (*smp_cross_call)(const struct cpumask *, unsigned int);

/*
 * Handshake variables for secondary bring-up. secondary_release holds the
 * CPU number allowed to proceed (-1 = none); secondary_thread_info is the
 * idle task's thread_info for the CPU being started. Both are written by
 * the boot CPU and presumably consumed by the assembly startup path —
 * defined here, read elsewhere.
 */
unsigned long secondary_release = -1;
struct thread_info *secondary_thread_info;

/* Message types carried in an IPI; dispatched by handle_IPI(). */
enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
};

/* Serialises the boot-CPU side of secondary bring-up in boot_secondary(). */
static DEFINE_SPINLOCK(boot_lock);
43 
/*
 * Enable the per-cpu IPI IRQ on the calling CPU. Warns and bails out if
 * no IPI IRQ has been registered yet via set_smp_cross_call().
 */
static void or1k_ipi_enable(void)
{
	if (WARN_ON_ONCE(!ipi_irq))
		return;

	enable_percpu_irq(ipi_irq, 0);
}
51 
/*
 * Release one secondary CPU from its wait loop and kick it with a wakeup
 * IPI. Called from __cpu_up() on the boot CPU. @idle is unused here;
 * __cpu_up() has already stashed its thread_info for the startup code.
 */
static void boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/* Publish which CPU may proceed, then wake it. */
	secondary_release = cpu;
	smp_cross_call(cpumask_of(cpu), IPI_WAKEUP);

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);
}
69 
70 void __init smp_init_cpus(void)
71 {
72 	struct device_node *cpu;
73 	u32 cpu_id;
74 
75 	for_each_of_cpu_node(cpu) {
76 		cpu_id = of_get_cpu_hwid(cpu, 0);
77 		if (cpu_id < NR_CPUS)
78 			set_cpu_possible(cpu_id, true);
79 	}
80 }
81 
82 void __init smp_prepare_cpus(unsigned int max_cpus)
83 {
84 	unsigned int cpu;
85 
86 	/*
87 	 * Initialise the present map, which describes the set of CPUs
88 	 * actually populated at the present time.
89 	 */
90 	for_each_possible_cpu(cpu) {
91 		if (cpu < max_cpus)
92 			set_cpu_present(cpu, true);
93 	}
94 }
95 
/* Hook required by the SMP core; nothing to do once all CPUs are up. */
void __init smp_cpus_done(unsigned int max_cpus)
{
}
99 
/* Signalled by the secondary in secondary_start_kernel(); waited on in __cpu_up(). */
static DECLARE_COMPLETION(cpu_running);
101 
102 int __cpu_up(unsigned int cpu, struct task_struct *idle)
103 {
104 	if (smp_cross_call == NULL) {
105 		pr_warn("CPU%u: failed to start, IPI controller missing",
106 			cpu);
107 		return -EIO;
108 	}
109 
110 	secondary_thread_info = task_thread_info(idle);
111 	current_pgd[cpu] = init_mm.pgd;
112 
113 	boot_secondary(cpu, idle);
114 	if (!wait_for_completion_timeout(&cpu_running,
115 					msecs_to_jiffies(1000))) {
116 		pr_crit("CPU%u: failed to start\n", cpu);
117 		return -EIO;
118 	}
119 	synchronise_count_master(cpu);
120 
121 	return 0;
122 }
123 
/*
 * C entry point for a secondary CPU, reached from the assembly startup
 * path once boot_secondary() has released this CPU. Order below is
 * deliberate: complete(&cpu_running) before going online, IPIs enabled
 * before set_cpu_online(), interrupts enabled last.
 */
asmlinkage __init void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();
	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	pr_info("CPU%u: Booted secondary processor\n", cpu);

	setup_cpuinfo();
	openrisc_clockevent_init();

	notify_cpu_starting(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue
	 */
	complete(&cpu_running);

	/* Sync cycle counters with the master, then start taking IPIs. */
	synchronise_count_slave(cpu);
	or1k_ipi_enable();
	set_cpu_online(cpu, true);

	local_irq_enable();
	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
158 
159 void handle_IPI(unsigned int ipi_msg)
160 {
161 	unsigned int cpu = smp_processor_id();
162 
163 	switch (ipi_msg) {
164 	case IPI_WAKEUP:
165 		break;
166 
167 	case IPI_RESCHEDULE:
168 		scheduler_ipi();
169 		break;
170 
171 	case IPI_CALL_FUNC:
172 		generic_smp_call_function_interrupt();
173 		break;
174 
175 	case IPI_CALL_FUNC_SINGLE:
176 		generic_smp_call_function_single_interrupt();
177 		break;
178 
179 	default:
180 		WARN(1, "CPU%u: Unknown IPI message 0x%x\n", cpu, ipi_msg);
181 		break;
182 	}
183 }
184 
/* Arch hook for smp_send_reschedule(): poke @cpu with a reschedule IPI. */
void arch_smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
189 
/*
 * Take the calling CPU offline and park it. Invoked via smp_send_stop()
 * through smp_call_function() on every other CPU.
 */
static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	/* CPU Doze: enter low power mode if a power-management unit exists */
	if (mfspr(SPR_UPR) & SPR_UPR_PMP)
		mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME);
	/* If that didn't work, infinite loop */
	while (1)
		;
}
203 
/*
 * Ask all other CPUs to park themselves via stop_this_cpu(); the final
 * argument 0 means we do not wait for them to acknowledge.
 */
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}
208 
/*
 * Register the platform's cross-call (IPI) function @fn and its per-cpu
 * IRQ @irq. May only be called once; repeat calls WARN and are ignored.
 */
void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int),
			       unsigned int irq)
{
	if (WARN_ON(ipi_irq))
		return;

	smp_cross_call = fn;

	ipi_irq = irq;

	/* Enable IPIs for the boot CPU immediately */
	or1k_ipi_enable();
}
222 
/* Smp-core hook: IPI a single CPU to run its queued function call. */
void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
227 
/* Smp-core hook: IPI every CPU in @mask to run its queued function calls. */
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}
232 
/* TLB flush operations - each runs on one CPU, dispatched via IPI. */
static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *info)
{
	/* info is the mm_struct handed to on_each_cpu_mask(); no cast needed. */
	local_flush_tlb_mm(info);
}
245 
/*
 * Flush the TLB entries for @mm on the CPUs in @cmask. If the local CPU
 * is the only one in the mask, flush directly; otherwise IPI the whole
 * mask (local CPU included) via on_each_cpu_mask().
 */
static void smp_flush_tlb_mm(struct cpumask *cmask, struct mm_struct *mm)
{
	unsigned int cpuid;

	if (cpumask_empty(cmask))
		return;

	/* get_cpu() pins us to this CPU so the mask test stays valid. */
	cpuid = get_cpu();

	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
		/* local cpu is the only cpu present in cpumask */
		local_flush_tlb_mm(mm);
	} else {
		on_each_cpu_mask(cmask, ipi_flush_tlb_mm, mm, 1);
	}
	put_cpu();
}
263 
/* Address pair carried to the TLB range/page IPI callbacks. */
struct flush_tlb_data {
	unsigned long addr1;	/* range start (or the single page address) */
	unsigned long addr2;	/* range end */
};
268 
269 static inline void ipi_flush_tlb_page(void *info)
270 {
271 	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
272 
273 	local_flush_tlb_page(NULL, fd->addr1);
274 }
275 
276 static inline void ipi_flush_tlb_range(void *info)
277 {
278 	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
279 
280 	local_flush_tlb_range(NULL, fd->addr1, fd->addr2);
281 }
282 
/*
 * Flush the TLB for [start, end) on the CPUs in @cmask. Local-only masks
 * flush directly; otherwise an IPI carries the range. Ranges of at most
 * one page use the cheaper single-page flush.
 */
static void smp_flush_tlb_range(const struct cpumask *cmask, unsigned long start,
				unsigned long end)
{
	unsigned int cpuid;

	if (cpumask_empty(cmask))
		return;

	/* get_cpu() pins us to this CPU so the mask test stays valid. */
	cpuid = get_cpu();

	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
		/* local cpu is the only cpu present in cpumask */
		if ((end - start) <= PAGE_SIZE)
			local_flush_tlb_page(NULL, start);
		else
			local_flush_tlb_range(NULL, start, end);
	} else {
		struct flush_tlb_data fd;

		/*
		 * fd lives on our stack; on_each_cpu_mask(..., 1) waits for
		 * completion, so it cannot be used after we return.
		 */
		fd.addr1 = start;
		fd.addr2 = end;

		if ((end - start) <= PAGE_SIZE)
			on_each_cpu_mask(cmask, ipi_flush_tlb_page, &fd, 1);
		else
			on_each_cpu_mask(cmask, ipi_flush_tlb_range, &fd, 1);
	}
	put_cpu();
}
312 
/* Flush the entire TLB on every online CPU. */
void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}
317 
/* Flush all TLB entries for @mm on the CPUs recorded in its cpumask. */
void flush_tlb_mm(struct mm_struct *mm)
{
	smp_flush_tlb_mm(mm_cpumask(mm), mm);
}
322 
/* Flush the TLB entry for one page of @vma's mm on all CPUs using it. */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	smp_flush_tlb_range(mm_cpumask(vma->vm_mm), uaddr, uaddr + PAGE_SIZE);
}
327 
/*
 * Flush a virtual address range. A NULL @vma means the range is not tied
 * to a particular mm, so broadcast to every online CPU instead.
 */
void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	const struct cpumask *cmask = vma ? mm_cpumask(vma->vm_mm)
					  : cpu_online_mask;
	smp_flush_tlb_range(cmask, start, end);
}
335 
/* IPI callback: invalidate the instruction cache for one page locally. */
static void ipi_icache_page_inv(void *arg)
{
	local_icache_page_inv(arg);
}
343 
/* Invalidate the instruction cache for @page on every online CPU. */
void smp_icache_page_inv(struct page *page)
{
	on_each_cpu(ipi_icache_page_inv, page, 1);
}
EXPORT_SYMBOL(smp_icache_page_inv);
349