/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>

#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
#include <asm/smp_plat.h>
#include <asm/mach/arch.h>

/*
 * As of 2.5, kernels no longer have an init_tasks structure, so we
 * need some other way of telling a new secondary core where to place
 * its SVC stack.
 */
struct secondary_data secondary_data;

/*
 * Controls which core is the next to come out of the secondary
 * boot "holding pen".
 */
volatile int __cpuinitdata pen_release = -1;

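/*
 * IPI numbering starts at 2; the two lowest software-generated
 * interrupt numbers are left unused here, presumably reserved for
 * platform-specific purposes such as waking a CPU out of a
 * low-power state.
 */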
enum ipi_msg_type {
	IPI_TIMER = 2,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};

static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops;

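/*
 * A minimal usage sketch (hypothetical foo_* names): a platform
 * installs its SMP operations once at boot, e.g.
 *
 *	static struct smp_operations foo_smp_ops __initdata = {
 *		.smp_init_cpus		= foo_smp_init_cpus,
 *		.smp_prepare_cpus	= foo_smp_prepare_cpus,
 *		.smp_secondary_init	= foo_secondary_init,
 *		.smp_boot_secondary	= foo_boot_secondary,
 *	};
 *
 *	smp_set_ops(&foo_smp_ops);
 *
 * typically from the machine descriptor's init code.
 */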
void __init smp_set_ops(struct smp_operations *ops)
{
	if (ops)
		smp_ops = *ops;
}

int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	secondary_data.pgdir = virt_to_phys(idmap_pgd);
	secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
						 msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;
	secondary_data.pgdir = 0;

	return ret;
}

/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
	if (smp_ops.smp_init_cpus)
		smp_ops.smp_init_cpus();
}

static void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	if (smp_ops.smp_prepare_cpus)
		smp_ops.smp_prepare_cpus(max_cpus);
}

static void __cpuinit platform_secondary_init(unsigned int cpu)
{
	if (smp_ops.smp_secondary_init)
		smp_ops.smp_secondary_init(cpu);
}

int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (smp_ops.smp_boot_secondary)
		return smp_ops.smp_boot_secondary(cpu, idle);
	return -ENOSYS;
}

#ifdef CONFIG_HOTPLUG_CPU
static void percpu_timer_stop(void);

static int platform_cpu_kill(unsigned int cpu)
{
	if (smp_ops.cpu_kill)
		return smp_ops.cpu_kill(cpu);
	return 1;
}

static void platform_cpu_die(unsigned int cpu)
{
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);
}

static int platform_cpu_disable(unsigned int cpu)
{
	if (smp_ops.cpu_disable)
		return smp_ops.cpu_disable(cpu);

	/*
	 * By default, allow disabling all CPUs except the first one,
	 * since this is special on a lot of platforms, e.g. because
	 * of clock tick interrupts.
	 */
	return cpu == 0 ? -EPERM : 0;
}
/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpuinit __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	percpu_timer_stop();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

static DECLARE_COMPLETION(cpu_died);

/*
 * Called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or times out.
 */
void __cpuinit __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);

	if (!platform_cpu_kill(cpu))
		pr_err("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();
	mb();

	/* Tell __cpu_die() that this CPU is now safe to dispose of */
	RCU_NONIDLE(complete(&cpu_died));

	/*
	 * actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.
	 */
	platform_cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
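	/*
	 * Note: THREAD_SIZE - 8 matches THREAD_START_SP, the initial
	 * SVC stack offset that __cpu_up() hands to a freshly booted
	 * secondary.
	 */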
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;

	store_cpu_topology(cpuid);
}

static void percpu_timer_setup(void);

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	cpu_switch_mm(mm->pgd, mm);
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	pr_info("CPU%u: Booted secondary processor\n", cpu);

	cpu_init();
	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	platform_secondary_init(cpu);

	notify_cpu_starting(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue - which happens after __cpu_up returns.
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	/*
	 * Set up the percpu timer for this CPU.
	 */
	percpu_timer_setup();

	local_irq_enable();
	local_fiq_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_idle();
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

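	/*
	 * BogoMIPS = loops_per_jiffy * HZ / 500000; dividing by
	 * 500000/HZ and 5000/HZ yields the integer part and the two
	 * decimal places using integer arithmetic only.
	 */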
	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

void __init smp_prepare_boot_cpu(void)
{
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Enable the local timer or broadcast device for the
		 * boot CPU, but only if we have more than one CPU.
		 */
		percpu_timer_setup();

		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time. A platform should
		 * re-initialize the map in platform_smp_prepare_cpus() if
		 * present != possible (e.g. physical hotplug).
		 */
		init_cpu_present(cpu_possible_mask);

		/*
		 * Initialise the SCU if there is more than one CPU
		 * and let them know where to start.
		 */
		platform_smp_prepare_cpus(max_cpus);
	}
}

static void (*smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	smp_cross_call = fn;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

static const char *ipi_types[NR_IPI] = {
#define S(x,s)	[x - IPI_TIMER] = s
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
};

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_present_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

static void ipi_timer(void)
{
	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
	evt->event_handler(evt);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#else
#define smp_timer_broadcast	NULL
#endif

static void broadcast_timer_set_mode(enum clock_event_mode mode,
	struct clock_event_device *evt)
{
}

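/*
 * Register a dummy clockevent for this CPU: it never fires by itself,
 * so the ticks it handles actually arrive via the broadcast mechanism
 * (IPI_TIMER -> ipi_timer()).
 */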
static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
{
	evt->name	= "dummy_timer";
	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
			  CLOCK_EVT_FEAT_PERIODIC |
			  CLOCK_EVT_FEAT_DUMMY;
	evt->rating	= 400;
	evt->mult	= 1;
	evt->set_mode	= broadcast_timer_set_mode;

	clockevents_register_device(evt);
}

static struct local_timer_ops *lt_ops;

#ifdef CONFIG_LOCAL_TIMERS
int local_timer_register(struct local_timer_ops *ops)
{
	if (!is_smp() || !setup_max_cpus)
		return -ENXIO;

	if (lt_ops)
		return -EBUSY;

	lt_ops = ops;
	return 0;
}
#endif

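/*
 * A minimal registration sketch (hypothetical foo_* names): a per-cpu
 * timer driver would typically do
 *
 *	static struct local_timer_ops foo_lt_ops __cpuinitdata = {
 *		.setup	= foo_timer_setup,
 *		.stop	= foo_timer_stop,
 *	};
 *
 *	err = local_timer_register(&foo_lt_ops);
 *
 * before the secondaries come up; percpu_timer_setup() below prefers
 * the registered ops and falls back to the dummy broadcast-fed device
 * if there are none or setup fails.
 */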
static void __cpuinit percpu_timer_setup(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	evt->cpumask = cpumask_of(cpu);
	evt->broadcast = smp_timer_broadcast;

	if (!lt_ops || lt_ops->setup(evt))
		broadcast_timer_setup(evt);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The generic clock events code purposely does not stop the local timer
 * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
 * manually here.
 */
static void percpu_timer_stop(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	if (lt_ops)
		lt_ops->stop(evt);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		printk(KERN_CRIT "CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	handle_IPI(ipinr, regs);
}

void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

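	/* only IPIs from IPI_TIMER upwards have a slot in the stats table */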
	if (ipinr >= IPI_TIMER && ipinr < IPI_TIMER + NR_IPI)
		__inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]);

	switch (ipinr) {
	case IPI_TIMER:
		irq_enter();
		ipi_timer();
		irq_exit();
		break;

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CALL_FUNC_SINGLE:
		irq_enter();
		generic_smp_call_function_single_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

	default:
		printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
		       cpu, ipinr);
		break;
	}
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_HOTPLUG_CPU
static void smp_kill_cpus(cpumask_t *mask)
{
	unsigned int cpu;
	for_each_cpu(cpu, mask)
		platform_cpu_kill(cpu);
}
#else
static void smp_kill_cpus(cpumask_t *mask) { }
#endif

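/*
 * Stop all other CPUs: send them IPI_CPU_STOP, wait up to one second
 * for them to take themselves offline, then let the platform kill off
 * the CPUs we signalled.
 */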
void smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warning("SMP: failed to stop secondary CPUs\n");

	smp_kill_cpus(&mask);
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

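/*
 * Rescale loops_per_jiffy on frequency transitions so that udelay()
 * stays calibrated: the first notification records a reference
 * (loops_per_jiffy, frequency) pair, and later transitions scale from
 * that reference rather than compounding rounding error.
 */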
static int cpufreq_callback(struct notifier_block *nb,
					unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	int cpu = freq->cpu;

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (!per_cpu(l_p_j_ref, cpu)) {
		per_cpu(l_p_j_ref, cpu) =
			per_cpu(cpu_data, cpu).loops_per_jiffy;
		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
		if (!global_l_p_j_ref) {
			global_l_p_j_ref = loops_per_jiffy;
			global_l_p_j_ref_freq = freq->old;
		}
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
						global_l_p_j_ref_freq,
						freq->new);
		per_cpu(cpu_data, cpu).loops_per_jiffy =
			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
					per_cpu(l_p_j_ref_freq, cpu),
					freq->new);
	}
	return NOTIFY_OK;
}

static struct notifier_block cpufreq_notifier = {
	.notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
						CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif