/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>

#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>

/*
 * As of 2.5, kernels no longer have an init_tasks structure, so we
 * need some other way of telling a new secondary core where to place
 * its SVC stack.
 */
struct secondary_data secondary_data;

/*
 * Control for which core is the next to come out of the secondary
 * boot "holding pen".
 */
volatile int __cpuinitdata pen_release = -1;
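
/*
 * Example (hedged sketch): platform boot code typically releases one
 * secondary at a time by writing its CPU number here and pushing the
 * value out to main memory, while the secondary spins in its holding
 * pen until it sees its own number.  write_pen_release() below is a
 * hypothetical name; real versions live in each platform's platsmp.c.
 *
 *	static void write_pen_release(int val)
 *	{
 *		pen_release = val;
 *		smp_wmb();
 *		__cpuc_flush_dcache_area((void *)&pen_release,
 *					 sizeof(pen_release));
 *		outer_clean_range(__pa(&pen_release),
 *				  __pa(&pen_release + 1));
 *	}
 */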

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};

static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops;

void __init smp_set_ops(struct smp_operations *ops)
{
	if (ops)
		smp_ops = *ops;
}
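
/*
 * Example (hedged sketch): a platform supplies its SMP callbacks through
 * its machine descriptor, and smp_set_ops() is called for it during
 * setup_arch().  The my_* names below are hypothetical:
 *
 *	struct smp_operations my_smp_ops __initdata = {
 *		.smp_init_cpus		= my_smp_init_cpus,
 *		.smp_prepare_cpus	= my_smp_prepare_cpus,
 *		.smp_secondary_init	= my_secondary_init,
 *		.smp_boot_secondary	= my_boot_secondary,
 *	};
 *
 *	DT_MACHINE_START(MYMACH, "My Machine")
 *		.smp	= smp_ops(my_smp_ops),
 *	MACHINE_END
 */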

int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	secondary_data.pgdir = virt_to_phys(idmap_pgd);
	secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
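
	/*
	 * The secondary comes up with its MMU and caches off, so make
	 * sure these values are visible in main memory: clean them out
	 * of the inner data cache and the outer (e.g. L2) cache.
	 */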
	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;
	secondary_data.pgdir = 0;

	return ret;
}

/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
	if (smp_ops.smp_init_cpus)
		smp_ops.smp_init_cpus();
}

static void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	if (smp_ops.smp_prepare_cpus)
		smp_ops.smp_prepare_cpus(max_cpus);
}

static void __cpuinit platform_secondary_init(unsigned int cpu)
{
	if (smp_ops.smp_secondary_init)
		smp_ops.smp_secondary_init(cpu);
}

int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (smp_ops.smp_boot_secondary)
		return smp_ops.smp_boot_secondary(cpu, idle);
	return -ENOSYS;
}

#ifdef CONFIG_HOTPLUG_CPU
static void percpu_timer_stop(void);

static int platform_cpu_kill(unsigned int cpu)
{
	if (smp_ops.cpu_kill)
		return smp_ops.cpu_kill(cpu);
	return 1;
}

static void platform_cpu_die(unsigned int cpu)
{
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);
}

static int platform_cpu_disable(unsigned int cpu)
{
	if (smp_ops.cpu_disable)
		return smp_ops.cpu_disable(cpu);

	/*
	 * By default, allow disabling all CPUs except the first one,
	 * since this is special on a lot of platforms, e.g. because
	 * of clock tick interrupts.
	 */
	return cpu == 0 ? -EPERM : 0;
}

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpuinit __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	percpu_timer_stop();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 *
	 * Caches are flushed to the Level of Unification Inner Shareable
	 * to write-back dirty lines to unified caches shared by all CPUs.
	 */
	flush_cache_louis();
	local_flush_tlb_all();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

static DECLARE_COMPLETION(cpu_died);

/*
 * Called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or times out.
 */
void __cpuinit __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);

	if (!platform_cpu_kill(cpu))
		printk("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();
	mb();

	/* Tell __cpu_die() that this CPU is now safe to dispose of */
	RCU_NONIDLE(complete(&cpu_died));

	/*
	 * actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.
	 */
	platform_cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
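	/*
	 * Note: THREAD_SIZE - 8 is THREAD_START_SP, the same initial
	 * stack pointer a secondary is handed via secondary_data.stack.
	 */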
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;

	store_cpu_topology(cpuid);
}

static void percpu_timer_setup(void);

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	/*
	 * The identity mapping is uncached (strongly ordered), so
	 * switch away from it before attempting any exclusive accesses.
	 */
	cpu_switch_mm(mm->pgd, mm);
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	cpu = smp_processor_id();
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	printk("CPU%u: Booted secondary processor\n", cpu);

	cpu_init();
	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	platform_secondary_init(cpu);

	notify_cpu_starting(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue - which happens after __cpu_up returns.
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	/*
	 * Set up the percpu timer for this CPU.
	 */
	percpu_timer_setup();

	local_irq_enable();
	local_fiq_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_idle();
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

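	/*
	 * loops_per_jiffy is calibrated such that BogoMIPS equals
	 * lpj * HZ / 500000; the divisions below print the summed value
	 * with two decimal places using integer arithmetic only.
	 */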
	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);

	hyp_mode_check();
}

void __init smp_prepare_boot_cpu(void)
{
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * Are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Enable the local timer or broadcast device for the
		 * boot CPU, but only if we have more than one CPU.
		 */
		percpu_timer_setup();

		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time. A platform should
		 * re-initialize the map in platform_smp_prepare_cpus() if
		 * present != possible (e.g. physical hotplug).
		 */
		init_cpu_present(cpu_possible_mask);

		/*
		 * Initialise the SCU if there is more than one CPU
		 * and let them know where to start.
		 */
		platform_smp_prepare_cpus(max_cpus);
	}
}

static void (*smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	smp_cross_call = fn;
}
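
/*
 * GIC-based platforms typically install their IPI trigger from
 * smp_init_cpus(), e.g.:
 *
 *	set_smp_cross_call(gic_raise_softirq);
 */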

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

static const char *ipi_types[NR_IPI] = {
#define S(x,s)	[x] = s
	S(IPI_WAKEUP, "CPU wakeup interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
};

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_present_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

static void ipi_timer(void)
{
	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
	evt->event_handler(evt);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#else
#define smp_timer_broadcast	NULL
#endif

static void broadcast_timer_set_mode(enum clock_event_mode mode,
	struct clock_event_device *evt)
{
}

static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
{
	evt->name	= "dummy_timer";
	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
			  CLOCK_EVT_FEAT_PERIODIC |
			  CLOCK_EVT_FEAT_DUMMY;
	evt->rating	= 400;
	evt->mult	= 1;
	evt->set_mode	= broadcast_timer_set_mode;

	clockevents_register_device(evt);
}
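
/*
 * The dummy device never programs real hardware: set_mode is a no-op
 * and CLOCK_EVT_FEAT_DUMMY marks it as a placeholder.  Its ticks are
 * delivered by IPI_TIMER broadcast (see smp_timer_broadcast() above)
 * from a CPU that does have a working timer.
 */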

static struct local_timer_ops *lt_ops;

#ifdef CONFIG_LOCAL_TIMERS
int local_timer_register(struct local_timer_ops *ops)
{
	if (!is_smp() || !setup_max_cpus)
		return -ENXIO;

	if (lt_ops)
		return -EBUSY;

	lt_ops = ops;
	return 0;
}
#endif
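
/*
 * Example (hedged sketch): a local timer driver registers its ops once
 * at boot.  The my_* names are hypothetical; the in-tree user of this
 * interface is the TWD driver (arch/arm/kernel/smp_twd.c).
 *
 *	static struct local_timer_ops my_lt_ops __cpuinitdata = {
 *		.setup	= my_timer_setup,	// program the per-CPU timer
 *		.stop	= my_timer_stop,	// quiesce it on CPU hotplug
 *	};
 *
 *	err = local_timer_register(&my_lt_ops);
 */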

static void __cpuinit percpu_timer_setup(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	evt->cpumask = cpumask_of(cpu);
	evt->broadcast = smp_timer_broadcast;

	if (!lt_ops || lt_ops->setup(evt))
		broadcast_timer_setup(evt);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The generic clock events code purposely does not stop the local timer
 * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
 * manually here.
 */
static void percpu_timer_stop(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	if (lt_ops)
		lt_ops->stop(evt);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		printk(KERN_CRIT "CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	handle_IPI(ipinr, regs);
}

void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (ipinr < NR_IPI)
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);

	switch (ipinr) {
	case IPI_WAKEUP:
		break;

	case IPI_TIMER:
		irq_enter();
		ipi_timer();
		irq_exit();
		break;

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CALL_FUNC_SINGLE:
		irq_enter();
		generic_smp_call_function_single_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

	default:
		printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
		       cpu, ipinr);
		break;
	}
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_HOTPLUG_CPU
static void smp_kill_cpus(cpumask_t *mask)
{
	unsigned int cpu;
	for_each_cpu(cpu, mask)
		platform_cpu_kill(cpu);
}
#else
static void smp_kill_cpus(cpumask_t *mask) { }
#endif

void smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warning("SMP: failed to stop secondary CPUs\n");

	smp_kill_cpus(&mask);
}

/*
 * Changing the profiling timer rate is not supported here.
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

static int cpufreq_callback(struct notifier_block *nb,
					unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	int cpu = freq->cpu;

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (!per_cpu(l_p_j_ref, cpu)) {
		per_cpu(l_p_j_ref, cpu) =
			per_cpu(cpu_data, cpu).loops_per_jiffy;
		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
		if (!global_l_p_j_ref) {
			global_l_p_j_ref = loops_per_jiffy;
			global_l_p_j_ref_freq = freq->old;
		}
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
						global_l_p_j_ref_freq,
						freq->new);
		per_cpu(cpu_data, cpu).loops_per_jiffy =
			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
					per_cpu(l_p_j_ref_freq, cpu),
					freq->new);
	}
	return NOTIFY_OK;
}
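
/*
 * cpufreq_scale(ref, ref_freq, new_freq) evaluates to roughly
 * ref * new_freq / ref_freq, so loops_per_jiffy scales linearly with
 * clock frequency from the reference values captured above.  Scaling
 * happens before a frequency increase and after a decrease, so udelay()
 * never under-delays while a transition is in flight.
 */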

static struct notifier_block cpufreq_notifier = {
	.notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif