xref: /linux/arch/arm/kernel/smp.c (revision 2c1ba398ac9da3305815f6ae8e95ae2b9fd3b5ff)
/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/ftrace.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>

#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>

/*
 * As from 2.5, kernels no longer have an init_tasks structure, so we
 * need some other way of telling a new secondary core where to place
 * its SVC stack.
 */
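/*
 * secondary_data is read by the secondary core's assembly boot path
 * (head.S) before its MMU is enabled, which is why __cpu_up() below
 * must clean it out to RAM before releasing the new core.
 */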
struct secondary_data secondary_data;

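/*
 * The enumerators double as the IPI numbers handed to smp_cross_call().
 * IPI_TIMER is the lowest value on purpose: ipi_types[] and the IPI
 * statistics below index with (x - IPI_TIMER).
 */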
enum ipi_msg_type {
	IPI_TIMER = 2,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};

int __cpuinit __cpu_up(unsigned int cpu)
{
	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
	struct task_struct *idle = ci->idle;
	pgd_t *pgd;
	int ret;

	/*
	 * Spawn a new process manually, if not already done.
	 * Grab a pointer to its task struct so we can mess with it
	 */
	if (!idle) {
		idle = fork_idle(cpu);
		if (IS_ERR(idle)) {
			printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
			return PTR_ERR(idle);
		}
		ci->idle = idle;
	} else {
		/*
		 * Since this idle thread is being re-used, call
		 * init_idle() to reinitialize the thread structure.
		 */
		init_idle(idle, cpu);
	}

	/*
	 * Allocate initial page tables to allow the new CPU to
	 * enable the MMU safely.  This essentially means a set
	 * of our "standard" page tables, with the addition of
	 * a 1:1 mapping for the physical address of the kernel.
	 */
	pgd = pgd_alloc(&init_mm);
	if (!pgd)
		return -ENOMEM;

	if (PHYS_OFFSET != PAGE_OFFSET) {
#ifndef CONFIG_HOTPLUG_CPU
		identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end));
#endif
		identity_mapping_add(pgd, __pa(_stext), __pa(_etext));
		identity_mapping_add(pgd, __pa(_sdata), __pa(_edata));
	}

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	secondary_data.pgdir = virt_to_phys(pgd);
	secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
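	/*
	 * The new core comes up with its MMU and caches off and fetches
	 * secondary_data straight from RAM, so push the structure out of
	 * the L1 data cache and any outer (e.g. L2) cache first.
	 */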
	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		unsigned long timeout;

		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		timeout = jiffies + HZ;
		while (time_before(jiffies, timeout)) {
			if (cpu_online(cpu))
				break;

			udelay(10);
			barrier();
		}

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;
	secondary_data.pgdir = 0;

	if (PHYS_OFFSET != PAGE_OFFSET) {
#ifndef CONFIG_HOTPLUG_CPU
		identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end));
#endif
		identity_mapping_del(pgd, __pa(_stext), __pa(_etext));
		identity_mapping_del(pgd, __pa(_sdata), __pa(_edata));
	}

	pgd_free(&init_mm, pgd);

	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
static void percpu_timer_stop(void);

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	struct task_struct *p;
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	percpu_timer_stop();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();

	read_lock(&tasklist_lock);
	for_each_process(p) {
		if (p->mm)
			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
	}
	read_unlock(&tasklist_lock);

	return 0;
}

static DECLARE_COMPLETION(cpu_died);

/*
 * Called on the thread which is asking for a CPU to be shut down;
 * waits until shutdown has completed, or times out.
 */
void __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);

	if (!platform_cpu_kill(cpu))
		printk(KERN_ERR "CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();
	mb();

	/* Tell __cpu_die() that this CPU is now safe to dispose of */
	complete(&cpu_died);

	/*
	 * actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.
	 */
	platform_cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
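	/*
	 * Reset sp to the top of this CPU's idle thread stack (the same
	 * THREAD_SIZE - 8 offset that __cpu_up() published via
	 * secondary_data.stack), clear the frame pointer, and branch to
	 * secondary_start_kernel as if the boot CPU had just released us.
	 */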
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
}

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	printk(KERN_INFO "CPU%u: Booted secondary processor\n", cpu);

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	cpu_switch_mm(mm->pgd, mm);
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	cpu_init();
	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	platform_secondary_init(cpu);

	/*
	 * Run the CPU-starting notifiers, then enable local
	 * interrupts and FIQs.
	 */
	notify_cpu_starting(cpu);
	local_irq_enable();
	local_fiq_enable();

	/*
	 * Set up the percpu timer for this CPU.
	 */
	percpu_timer_setup();

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue.
	 */
	set_cpu_online(cpu, true);
	while (!cpu_active(cpu))
		cpu_relax();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_idle();
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	per_cpu(cpu_data, cpu).idle = current;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Enable the local timer or broadcast device for the
		 * boot CPU, but only if we have more than one CPU.
		 */
		percpu_timer_setup();

		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time. A platform should
		 * re-initialize the map in platform_smp_prepare_cpus() if
		 * present != possible (e.g. physical hotplug).
		 */
		init_cpu_present(&cpu_possible_map);

		/*
		 * Initialise the SCU if there is more than one CPU
		 * and let them know where to start.
		 */
		platform_smp_prepare_cpus(max_cpus);
	}
}

static void (*smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	smp_cross_call = fn;
}
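
/*
 * A platform wires up its IPI-raising routine during SMP bring-up.
 * For example, a GIC-based platform of this vintage would typically do
 * (assuming the usual GIC helper):
 *
 *	set_smp_cross_call(gic_raise_softirq);
 */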

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
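
/*
 * These two hooks are invoked by the generic kernel/smp.c code rather
 * than called directly.  For instance (some_fn being a hypothetical
 * callback, for illustration only):
 *
 *	smp_call_function_single(1, some_fn, NULL, 1);
 *
 * ends up here and raises IPI_CALL_FUNC_SINGLE on CPU 1.
 */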

static const char *ipi_types[NR_IPI] = {
#define S(x,s)	[x - IPI_TIMER] = s
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
};

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_present_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

#ifdef CONFIG_LOCAL_TIMERS
	sum += __get_irq_stat(cpu, local_timer_irqs);
#endif

	return sum;
}

/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

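/*
 * IPI_TIMER arrives via the exception path in do_IPI() rather than the
 * normal interrupt entry, so bracket the clockevent handler with
 * irq_enter()/irq_exit() to get correct in-interrupt accounting.
 */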
static void ipi_timer(void)
{
	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
	irq_enter();
	evt->event_handler(evt);
	irq_exit();
}

#ifdef CONFIG_LOCAL_TIMERS
asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	int cpu = smp_processor_id();

	if (local_timer_ack()) {
		__inc_irq_stat(cpu, local_timer_irqs);
		ipi_timer();
	}

	set_irq_regs(old_regs);
}

void show_local_irqs(struct seq_file *p, int prec)
{
	unsigned int cpu;

	seq_printf(p, "%*s: ", prec, "LOC");

	for_each_present_cpu(cpu)
		seq_printf(p, "%10u ", __get_irq_stat(cpu, local_timer_irqs));

	seq_printf(p, " Local timer interrupts\n");
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#else
#define smp_timer_broadcast	NULL
#endif
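
/*
 * percpu_timer_setup() below points evt->broadcast at
 * smp_timer_broadcast; the tick-broadcast core uses it to raise
 * IPI_TIMER on remote CPUs, each of which then runs its clockevent
 * handler from ipi_timer() above.
 */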

static void broadcast_timer_set_mode(enum clock_event_mode mode,
	struct clock_event_device *evt)
{
}
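
/*
 * The "dummy" clockevent does no hardware programming at all
 * (set_mode above is a no-op); it is registered only when no real
 * per-CPU timer exists, and its events are delivered via the
 * IPI_TIMER broadcast instead.
 */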

static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
{
	evt->name	= "dummy_timer";
	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
			  CLOCK_EVT_FEAT_PERIODIC |
			  CLOCK_EVT_FEAT_DUMMY;
	evt->rating	= 400;
	evt->mult	= 1;
	evt->set_mode	= broadcast_timer_set_mode;

	clockevents_register_device(evt);
}

void __cpuinit percpu_timer_setup(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	evt->cpumask = cpumask_of(cpu);
	evt->broadcast = smp_timer_broadcast;

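	/*
	 * Prefer real per-CPU timer hardware; local_timer_setup()
	 * returns non-zero when none is available, in which case we
	 * fall back to the IPI-driven dummy device.
	 */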
	if (local_timer_setup(evt))
		broadcast_timer_setup(evt);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The generic clock events code purposely does not stop the local timer
 * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
 * manually here.
 */
static void percpu_timer_stop(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
}
#endif

static DEFINE_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		spin_lock(&stop_lock);
		printk(KERN_CRIT "CPU%u: stopping\n", cpu);
		dump_stack();
		spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (ipinr >= IPI_TIMER && ipinr < IPI_TIMER + NR_IPI)
		__inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]);

	switch (ipinr) {
	case IPI_TIMER:
		ipi_timer();
		break;

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CALL_FUNC_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;

	case IPI_CPU_STOP:
		ipi_cpu_stop(cpu);
		break;

	default:
		printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
		       cpu, ipinr);
		break;
	}
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

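/*
 * Ask all other online CPUs to stop via IPI_CPU_STOP, then poll for up
 * to one second (USEC_PER_SEC iterations of udelay(1)) for them to drop
 * out of the online mask.
 */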
void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask = cpu_online_map;
		cpu_clear(smp_processor_id(), mask);

		smp_cross_call(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warning("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}