xref: /linux/arch/sh/kernel/smp.c (revision 5148fa52a12fa1b97c730b2fe321f2aad7ea041c)
/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2010 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

struct plat_smp_ops *mp_ops = NULL;

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

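/*
 * Platform code supplies a struct plat_smp_ops and registers it early in
 * boot via register_smp_ops().  A minimal sketch (the my_* hook names are
 * hypothetical; only the members actually dereferenced in this file are
 * shown):
 *
 *	static struct plat_smp_ops my_smp_ops = {
 *		.prepare_cpus	= my_prepare_cpus,
 *		.start_cpu	= my_start_cpu,
 *		.send_ipi	= my_send_ipi,
 *		.cpu_disable	= my_cpu_disable,
 *	};
 *
 *	register_smp_ops(&my_smp_ops);
 */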
void __cpuinit register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

static inline void __cpuinit smp_store_cpu_info(unsigned int cpu)
{
	struct sh_cpuinfo *c = cpu_data + cpu;

	memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));

	c->loops_per_jiffy = loops_per_jiffy;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu = smp_processor_id();

	init_new_context(current, &init_mm);
	current_thread_info()->cpu = cpu;
	mp_ops->prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
}

void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	__cpu_number_map[0] = cpu;
	__cpu_logical_map[0] = cpu;

	set_cpu_online(cpu, true);
	set_cpu_possible(cpu, true);

	per_cpu(cpu_state, cpu) = CPU_ONLINE;
}

#ifdef CONFIG_HOTPLUG_CPU
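/*
 * Wait for the dying CPU to mark itself CPU_DEAD, polling for up to
 * roughly one second (10 x 100ms) before giving up.
 */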
void native_cpu_die(unsigned int cpu)
{
	unsigned int i;

	for (i = 0; i < 10; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			if (system_state == SYSTEM_RUNNING)
				pr_info("CPU %u is now offline\n", cpu);

			return;
		}

		msleep(100);
	}

	pr_err("CPU %u didn't die...\n", cpu);
}

int native_cpu_disable(unsigned int cpu)
{
	return cpu == 0 ? -EPERM : 0;
}

void play_dead_common(void)
{
	idle_task_exit();
	irq_ctx_exit(raw_smp_processor_id());
	mb();

	__get_cpu_var(cpu_state) = CPU_DEAD;
	local_irq_disable();
}

void native_play_dead(void)
{
	play_dead_common();
}

int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	struct task_struct *p;
	int ret;

	ret = mp_ops->cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	local_timer_stop(cpu);

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();

	read_lock(&tasklist_lock);
	for_each_process(p)
		if (p->mm)
			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
	read_unlock(&tasklist_lock);

	return 0;
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(unsigned int cpu)
{
	return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}

void native_play_dead(void)
{
	BUG();
}
#endif

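/*
 * C entry point for secondary CPUs, reached via the startup code set up
 * in head.S: switch to init_mm, do the per-CPU trap and timer setup,
 * calibrate the delay loop, and finally mark the CPU online before
 * dropping into the idle loop.
 */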
asmlinkage void __cpuinit start_secondary(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	enable_mmu();
	atomic_inc(&mm->mm_count);
	atomic_inc(&mm->mm_users);
	current->active_mm = mm;
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	per_cpu_trap_init();

	preempt_disable();

	notify_cpu_starting(cpu);

	local_irq_enable();

	/* Enable local timers */
	local_timer_setup(cpu);
	calibrate_delay();

	smp_store_cpu_info(cpu);

	set_cpu_online(cpu, true);
	per_cpu(cpu_state, cpu) = CPU_ONLINE;

	cpu_idle();
}

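/*
 * Parameter block shared with the secondary CPU startup code in head.S;
 * __cpu_up() fills it in before kicking the new CPU.
 */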
extern struct {
	unsigned long sp;
	unsigned long bss_start;
	unsigned long bss_end;
	void *start_kernel_fn;
	void *cpu_init_fn;
	void *thread_info;
} stack_start;

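/*
 * Bring up a single secondary CPU: hand it the idle task's stack and
 * thread_info via stack_start, start it through mp_ops->start_cpu(),
 * then wait up to one second for it to mark itself online.
 */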
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tsk)
{
	unsigned long timeout;

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* Fill in data in head.S for secondary cpus */
	stack_start.sp = tsk->thread.sp;
	stack_start.thread_info = tsk->stack;
	stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
	stack_start.start_kernel_fn = start_secondary;

	flush_icache_range((unsigned long)&stack_start,
			   (unsigned long)&stack_start + sizeof(stack_start));
	wmb();

	mp_ops->start_cpu(cpu, (unsigned long)_stext);

	timeout = jiffies + HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu_online(cpu))
			break;

		udelay(10);
		barrier();
	}

	if (cpu_online(cpu))
		return 0;

	return -ENOENT;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int cpu;

	for_each_online_cpu(cpu)
		bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

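/*
 * Arch-level IPI senders.  Apart from smp_send_stop(), which goes through
 * smp_call_function(), these route directly through the platform's
 * mp_ops->send_ipi() hook.
 */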
void smp_send_reschedule(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, 0, 0);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
}

void arch_send_call_function_single_ipi(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}

void smp_timer_broadcast(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
}

static void ipi_timer(void)
{
	irq_enter();
	local_timer_interrupt();
	irq_exit();
}

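/*
 * Demultiplex an incoming IPI: the platform interrupt code calls this with
 * the message type so the appropriate generic handler can run.
 */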
void smp_message_recv(unsigned int msg)
{
	switch (msg) {
	case SMP_MSG_FUNCTION:
		generic_smp_call_function_interrupt();
		break;
	case SMP_MSG_RESCHEDULE:
		scheduler_ipi();
		break;
	case SMP_MSG_FUNCTION_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;
	case SMP_MSG_TIMER:
		ipi_timer();
		break;
	default:
		printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
		       smp_processor_id(), __func__, msg);
		break;
	}
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, 0, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * contexts on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, inter-cpu interrupts have to be sent.
 * Another case where inter-cpu interrupts are required is when the target
 * mm might be active on another cpu (e.g. debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process, etc.).
 * Kanoj 07/00.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

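/*
 * Argument block handed to the TLB flush IPI handlers below; not every
 * handler uses every field.
 */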
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
	    (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
	local_flush_tlb_one(fd->addr1, fd->addr2);
}

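/*
 * Flush a single (ASID, vaddr) translation on all online CPUs: remote
 * ones via IPI, the local one directly.
 */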
void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
	struct flush_tlb_data fd;

	fd.addr1 = asid;
	fd.addr2 = vaddr;

	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
	local_flush_tlb_one(asid, vaddr);
}
475