// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * RajeshwarR: Dec 11, 2007
 *   -- Added support for Inter Processor Interrupts
 *
 * Vineetg: Nov 1st, 2007
 *    -- Initial Write (Borrowed heavily from ARM)
 */

#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/reboot.h>
#include <linux/irqdomain.h>
#include <linux/export.h>
#include <linux/of_fdt.h>

#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mach_desc.h>

#ifndef CONFIG_ARC_HAS_LLSC
arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
EXPORT_SYMBOL_GPL(smp_atomic_ops_lock);
#endif

struct plat_smp_ops __weak plat_smp_ops;
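
/*
 * Illustrative sketch (hypothetical names, not part of this file): a
 * multi-core IP driver overrides the __weak instance above by providing a
 * strong definition with whichever hooks it supports, roughly:
 *
 *	struct plat_smp_ops plat_smp_ops = {
 *		.info		= "MyConnect",
 *		.init_early_smp	= my_early_smp_init,
 *		.cpu_kick	= my_cpu_kick,
 *		.ipi_send	= my_ipi_send,
 *		.ipi_clear	= my_ipi_clear,
 *	};
 *
 * arch/arc/kernel/mcip.c does this for the ARConnect/MCIP block.
 */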

/* XXX: per cpu? Only needed once in early secondary boot */
struct task_struct *secondary_idle_tsk;

/* Called from start_kernel */
void __init smp_prepare_boot_cpu(void)
{
}

static int __init arc_get_cpu_map(const char *name, struct cpumask *cpumask)
{
	unsigned long dt_root = of_get_flat_dt_root();
	const char *buf;

	buf = of_get_flat_dt_prop(dt_root, name, NULL);
	if (!buf)
		return -EINVAL;

	if (cpulist_parse(buf, cpumask))
		return -EINVAL;

	return 0;
}
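
/*
 * Illustrative DT snippet (assumed layout): the property lives in the root
 * node and is parsed with cpulist_parse(), so it takes the usual cpu-list
 * syntax, e.g.
 *
 *	/ {
 *		possible-cpus = "0-1,3";
 *	};
 */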

/*
 * Read from DeviceTree and setup cpu possible mask. If there is no
 * "possible-cpus" property in DeviceTree pretend all [0..NR_CPUS-1] exist.
 */
static void __init arc_init_cpu_possible(void)
{
	struct cpumask cpumask;

	if (arc_get_cpu_map("possible-cpus", &cpumask)) {
		pr_warn("Failed to get possible-cpus from dtb, pretending all %u cpus exist\n",
			NR_CPUS);

		cpumask_setall(&cpumask);
	}

	if (!cpumask_test_cpu(0, &cpumask))
		panic("Master cpu (cpu[0]) is missing from cpu possible mask!");

	init_cpu_possible(&cpumask);
}

/*
 * Called from setup_arch() before calling setup_processor()
 *
 * - Initialise the CPU possible map early - this describes the CPUs
 *   which may be present or become present in the system.
 * - Call the early smp init hook. This can initialize a specific multi-core
 *   IP which is, say, common to several platforms (hence not part of the
 *   platform specific init_early() hook)
 */
void __init smp_init_cpus(void)
{
	arc_init_cpu_possible();

	if (plat_smp_ops.init_early_smp)
		plat_smp_ops.init_early_smp();
}

/* Called from init() => process 1 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/*
	 * If the platform didn't set the present map already, do it now.
	 * The boot cpu is set to present already by init/main.c
	 */
	if (num_present_cpus() <= 1)
		init_cpu_present(cpu_possible_mask);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/*
 * Default smp boot helper for the Run-on-reset case, where all cores start
 * off together. Non-masters need to wait for the Master to start running.
 * This is implemented using a flag in memory, which non-masters spin-wait
 * on. The Master sets it to the cpu-id of the core to "ungate" it.
 */
static volatile int wake_flag;
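
/*
 * Note: the not-yet-running secondary polls @wake_flag very early, possibly
 * before its caches have been brought up. Hence on ARCv2 (the #else branch
 * below) the flag is accessed with uncached loads/stores, presumably so that
 * the master and the secondary are guaranteed to observe the same memory;
 * plain accesses suffice on ARCompact.
 */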
#ifdef CONFIG_ISA_ARCOMPACT

#define __boot_read(f)		f
#define __boot_write(f, v)	f = v

#else

#define __boot_read(f)		arc_read_uncached_32(&f)
#define __boot_write(f, v)	arc_write_uncached_32(&f, v)

#endif

static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
{
	BUG_ON(cpu == 0);

	__boot_write(wake_flag, cpu);
}

void arc_platform_smp_wait_to_boot(int cpu)
{
	/* for halt-on-reset, we've waited already */
	if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
		return;

	while (__boot_read(wake_flag) != cpu)
		;

	__boot_write(wake_flag, 0);
}

const char *arc_platform_smp_cpuinfo(void)
{
	return plat_smp_ops.info ? : "";
}

/*
 * The very first "C" code executed by a secondary
 * Called from asm stub in head.S
 * "current"/R25 already set up by low level boot code
 */
void start_kernel_secondary(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/* MMU, Caches, Vector Table, Interrupts etc */
	setup_processor();

	mmget(mm);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	/* Some SMP H/w setup - for each cpu */
	if (plat_smp_ops.init_per_cpu)
		plat_smp_ops.init_per_cpu(cpu);

	if (machine_desc->init_per_cpu)
		machine_desc->init_per_cpu(cpu);

	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);

	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/*
 * Called from kernel_init() -> smp_init() - for each CPU
 *
 * At this point the secondary processor is "HALT"ed: either it booted but
 * was parked in head.S, or it was configured to halt-on-reset.
 * So it needs to be woken up, the essential requirements being where to
 * run from (PC) and the stack (SP).
 */
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	unsigned long wait_till;

	secondary_idle_tsk = idle;

	pr_info("Idle Task [%d] %p\n", cpu, idle);
	pr_info("Trying to bring up CPU%u ...\n", cpu);

	if (plat_smp_ops.cpu_kick)
		plat_smp_ops.cpu_kick(cpu,
				(unsigned long)first_lines_of_secondary);
	else
		arc_default_smp_cpu_kick(cpu, (unsigned long)NULL);

	/* wait for 1 sec after kicking the secondary */
	wait_till = jiffies + HZ;
	while (time_before(jiffies, wait_till)) {
		if (cpu_online(cpu))
			break;
	}

	if (!cpu_online(cpu)) {
		pr_info("Timeout: CPU%u FAILED to come up !!!\n", cpu);
		return -1;
	}

	secondary_idle_tsk = NULL;

	return 0;
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

/*****************************************************************************/
/*              Inter Processor Interrupt Handling                           */
/*****************************************************************************/

enum ipi_msg_type {
	IPI_EMPTY = 0,
	IPI_RESCHEDULE = 1,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};

/*
 * In arches with an IRQ for each msg type (above), the receiver can use the
 * IRQ-id to figure out which msg was sent. For those that don't (ARC has a
 * single dedicated IPI IRQ), the msg type needs to be conveyed via per-cpu
 * data.
 */

static DEFINE_PER_CPU(unsigned long, ipi_data);
static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
{
	unsigned long __percpu *ipi_data_ptr = per_cpu_ptr(&ipi_data, cpu);
	unsigned long old, new;
	unsigned long flags;

	pr_debug("%d Sending msg [%d] to %d\n", smp_processor_id(), msg, cpu);

	local_irq_save(flags);

	/*
	 * Atomically write the new msg bit (in case others are writing too),
	 * and read back the old value
	 */
	do {
		new = old = READ_ONCE(*ipi_data_ptr);
		new |= 1U << msg;
	} while (cmpxchg(ipi_data_ptr, old, new) != old);

	/*
	 * Call the platform specific IPI kick function, but avoid it if
	 * possible: only do so if there's no msg already pending from other
	 * concurrent sender(s). Otherwise the receiver will see this msg as
	 * well when it takes the IPI corresponding to that msg. This holds
	 * even if it is already in the IPI handler, because !@old means it
	 * has not yet dequeued the msg(s), so the @new msg can be a
	 * free-loader.
	 */
	if (plat_smp_ops.ipi_send && !old)
		plat_smp_ops.ipi_send(cpu);

	local_irq_restore(flags);
}
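
/*
 * Note: the cmpxchg() loop above amounts to an atomic fetch-OR of the msg
 * bit. A conceptual equivalent (illustrative only, not a drop-in, since
 * @ipi_data is a plain unsigned long rather than an atomic_long_t):
 *
 *	old = atomic_long_fetch_or(BIT(msg), ipi_data_ptr);
 */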

static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg)
{
	unsigned int cpu;

	for_each_cpu(cpu, callmap)
		ipi_send_msg_one(cpu, msg);
}

void smp_send_reschedule(int cpu)
{
	ipi_send_msg_one(cpu, IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	struct cpumask targets;

	cpumask_copy(&targets, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &targets);
	ipi_send_msg(&targets, IPI_CPU_STOP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	ipi_send_msg_one(cpu, IPI_CALL_FUNC);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	ipi_send_msg(mask, IPI_CALL_FUNC);
}

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(void)
{
	machine_halt();
}

static inline int __do_IPI(unsigned long msg)
{
	int rc = 0;

	switch (msg) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CPU_STOP:
		ipi_cpu_stop();
		break;

	default:
		rc = 1;
	}

	return rc;
}

/*
 * arch-common ISR to handle inter-processor interrupts
 * Has hooks for platform specific IPI
 */
irqreturn_t do_IPI(int irq, void *dev_id)
{
	unsigned long pending;
	unsigned long __maybe_unused copy;

	pr_debug("IPI [%ld] received on cpu %d\n",
		 *this_cpu_ptr(&ipi_data), smp_processor_id());

	if (plat_smp_ops.ipi_clear)
		plat_smp_ops.ipi_clear(irq);

	/*
	 * "dequeue" the msg corresponding to this IPI (and possibly other
	 * piggybacked msg from elided IPIs: see ipi_send_msg_one() above)
	 */
	copy = pending = xchg(this_cpu_ptr(&ipi_data), 0);

	do {
		unsigned long msg = __ffs(pending);
		int rc;

		rc = __do_IPI(msg);
		if (rc)
			pr_info("IPI with bogus msg %ld in %ld\n", msg, copy);
		pending &= ~(1U << msg);
	} while (pending);

	return IRQ_HANDLED;
}

/*
 * API called by platform code to hook up the arch-common ISR to their IPI IRQ
 *
 * Note: If the IPI is provided by the platform (vs. say the ARC MCIP), its
 * intc setup/map function needs to call irq_set_percpu_devid() for the IPI
 * IRQ, otherwise request_percpu_irq() below will fail
 */
static DEFINE_PER_CPU(int, ipi_dev);

int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq)
{
	int *dev = per_cpu_ptr(&ipi_dev, cpu);
	unsigned int virq = irq_find_mapping(NULL, hwirq);

	if (!virq)
		panic("Cannot find virq for root domain and hwirq=%lu", hwirq);

	/* Boot cpu calls request, all cpus call enable */
	if (!cpu) {
		int rc;

		rc = request_percpu_irq(virq, do_IPI, "IPI Interrupt", dev);
		if (rc)
			panic("Percpu IRQ request failed for %u\n", virq);
	}

	enable_percpu_irq(virq, 0);

	return 0;
}
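
/*
 * Illustrative sketch (hypothetical driver, not part of this file) of the
 * contract above: the platform intc marks its IPI hwirq as per-cpu in its
 * irq_domain map hook, and each cpu then runs smp_ipi_irq_setup().
 * PLAT_IPI_HWIRQ and plat_irq_chip are made-up names:
 *
 *	static int plat_intc_map(struct irq_domain *d, unsigned int virq,
 *				 irq_hw_number_t hw)
 *	{
 *		if (hw == PLAT_IPI_HWIRQ) {
 *			irq_set_percpu_devid(virq);
 *			irq_set_chip_and_handler(virq, &plat_irq_chip,
 *						 handle_percpu_devid_irq);
 *		} else {
 *			irq_set_chip_and_handler(virq, &plat_irq_chip,
 *						 handle_level_irq);
 *		}
 *		return 0;
 *	}
 *
 *	// later, once per cpu:
 *	smp_ipi_irq_setup(smp_processor_id(), PLAT_IPI_HWIRQ);
 */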
422