xref: /linux/arch/mips/kernel/smp.c (revision 6e8331ac6973435b1e7604c30f2ad394035b46e1)
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>

#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */

cpumask_t phys_cpu_present_map;		/* Bitmask of available CPUs */
volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */
cpumask_t cpu_online_map;		/* Bitmask of currently online CPUs */
int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

EXPORT_SYMBOL(phys_cpu_present_map);
EXPORT_SYMBOL(cpu_online_map);

static void smp_tune_scheduling(void)
{
	struct cache_desc *cd = &current_cpu_data.scache;
	unsigned long cachesize;       /* bytes */
	unsigned long cpu_khz;

	/*
	 * Crude estimate until we actually measure ...
	 */
	cpu_khz = loops_per_jiffy * 2 * HZ / 1000;

	/*
	 * Rough estimation for SMP scheduling, this is the number of
	 * cycles it takes for a fully memory-limited process to flush
	 * the SMP-local cache.
	 *
	 * (For a P5 this pretty much means we will choose another idle
	 *  CPU almost always at wakeup time (this is due to the small
	 *  L1 cache), on PIIs it's around 50-100 usecs, depending on
	 *  the cache size)
	 */
	if (!cpu_khz)
		return;

	cachesize = cd->linesz * cd->sets * cd->ways;	/* currently unused */
}
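
/*
 * Worked example of the two estimates above (illustrative numbers,
 * not measured on any particular board): with loops_per_jiffy ==
 * 2500000 and HZ == 100,
 *
 *	cpu_khz   = 2500000 * 2 * 100 / 1000 = 500000	(about 500 MHz)
 *
 * and for a secondary cache with 32-byte lines, 512 sets and 4 ways,
 *
 *	cachesize = 32 * 512 * 4 = 65536 bytes		(64 kB)
 */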

extern void __init calibrate_delay(void);
extern ATTRIB_NORET void cpu_idle(void);

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

#ifdef CONFIG_MIPS_MT_SMTC
	/* Only do cpu_probe for first TC of CPU */
	if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
#endif /* CONFIG_MIPS_MT_SMTC */
	cpu_probe();
	cpu_report();
	per_cpu_trap_init();
	prom_init_secondary();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	prom_smp_finish();

	cpu_set(cpu, cpu_callin_map);

	cpu_idle();
}
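
/*
 * Note on the bring-up handshake: setting our bit in cpu_callin_map
 * above is what releases the boot CPU from its wait loop in
 * __cpu_up() below; only the boot CPU then marks this CPU online.
 */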

DEFINE_SPINLOCK(smp_call_lock);

struct call_data_struct *call_data;
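
/*
 * struct call_data_struct is defined by <asm/smp.h>.  From the way it
 * is used below it carries at least the following fields (sketch only;
 * the exact layout is whatever the header declares):
 *
 *	struct call_data_struct {
 *		void (*func)(void *);	   function to run on other CPUs
 *		void *info;		   opaque argument passed to func
 *		atomic_t started;	   CPUs that have picked up the call
 *		atomic_t finished;	   CPUs done executing (wait case)
 *		int wait;		   does the caller spin for completion?
 *	};
 */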

/*
 * Run a function on all other CPUs.
 *  <func>      The function to run. This must be fast and non-blocking.
 *  <info>      An arbitrary pointer to pass to the function.
 *  <retry>     If true, keep retrying until ready.
 *  <wait>      If true, wait until function has completed on other CPUs.
 *  [RETURNS]   0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>,
 * are executing it, or have already executed it.
 *
 * You must not call this function with interrupts disabled, from a
 * hardware interrupt handler, or from a bottom half handler:
 *
 * CPU A                               CPU B
 * Disable interrupts
 *                                     smp_call_function()
 *                                     Take call_lock
 *                                     Send IPIs
 *                                     Wait for all cpus to acknowledge IPI
 *                                     CPU A has not responded, spin waiting
 *                                     for cpu A to respond, holding call_lock
 * smp_call_function()
 * Spin waiting for call_lock
 * Deadlock                            Deadlock
 */
int smp_call_function(void (*func) (void *info), void *info, int retry,
		      int wait)
{
	struct call_data_struct data;
	int i, cpus = num_online_cpus() - 1;
	int cpu = smp_processor_id();

	/*
	 * Can die spectacularly if this CPU isn't yet marked online
	 */
	BUG_ON(!cpu_online(cpu));

	if (!cpus)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&smp_call_lock);
	call_data = &data;
	mb();

	/* Send a message to all other CPUs and wait for them to respond */
	for_each_online_cpu(i)
		if (i != cpu)
			core_send_ipi(i, SMP_CALL_FUNCTION);

	/* Wait for response */
	/* FIXME: lock-up detection, backtrace on lock-up */
	while (atomic_read(&data.started) != cpus)
		barrier();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			barrier();
	call_data = NULL;
	spin_unlock(&smp_call_lock);

	return 0;
}
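
/*
 * Usage sketch (hypothetical caller, per the rules in the comment
 * above): call from process context with interrupts enabled; the
 * handler runs from IPI context on every other online CPU, so it must
 * be fast, non-blocking and must not call smp_call_function() itself.
 *
 *	static atomic_t acks = ATOMIC_INIT(0);
 *
 *	static void count_ack(void *info)
 *	{
 *		atomic_inc((atomic_t *)info);
 *	}
 *
 *	...
 *	smp_call_function(count_ack, &acks, 1, 1);
 *	with wait == 1, acks now equals num_online_cpus() - 1
 */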

void smp_call_function_interrupt(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function.
	 */
	mb();
	atomic_inc(&call_data->started);

	/*
	 * At this point the info structure may be out of scope unless wait==1.
	 */
	irq_enter();
	(*func)(info);
	irq_exit();

	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
}

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_enable();	/* May need to service _machine_restart IPI */
	for (;;);		/* Wait if available. */
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	prom_cpus_done();
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	smp_tune_scheduling();
	plat_prepare_cpus(max_cpus);
#ifndef CONFIG_HOTPLUG_CPU
	cpu_present_map = cpu_possible_map;
#endif
}

/* preload SMP state for boot cpu */
void __devinit smp_prepare_boot_cpu(void)
{
	/*
	 * This assumes that bootup is always handled by the processor
	 * with the logical and physical number 0.
	 */
	__cpu_number_map[0] = 0;
	__cpu_logical_map[0] = 0;
	cpu_set(0, phys_cpu_present_map);
	cpu_set(0, cpu_online_map);
	cpu_set(0, cpu_callin_map);
}
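
/*
 * Illustration of the two maps (hypothetical numbering): if a platform
 * brings up a second core whose physical id is 3, its bring-up code
 * would record
 *
 *	__cpu_number_map[3] = 1;	physical 3 -> logical 1
 *	__cpu_logical_map[1] = 3;	logical 1 -> physical 3
 *
 * The boot CPU above is simply the identity case of this mapping.
 */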

/*
 * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
 * and keep control until "cpu_online(cpu)" is set.  Note: cpu is
 * physical, not logical.
 */
int __devinit __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;

	/*
	 * Processor goes to start_secondary(), sets online flag
	 * The following code is purely to make sure
	 * Linux can schedule processes on this slave.
	 */
	idle = fork_idle(cpu);
	if (IS_ERR(idle))
		panic("Fork failed for CPU %d", cpu);

	prom_boot_secondary(cpu, idle);

	/*
	 * Trust is futile.  We should really have timeouts ...
	 */
	while (!cpu_isset(cpu, cpu_callin_map))
		udelay(100);

	cpu_set(cpu, cpu_online_map);

	return 0;
}
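
/*
 * A bounded variant of the call-in wait above, in the spirit of the
 * "we should really have timeouts" remark (sketch only; the 10000
 * iteration budget, roughly one second, is a made-up number):
 *
 *	int timeout = 10000;
 *
 *	while (!cpu_isset(cpu, cpu_callin_map)) {
 *		if (--timeout < 0)
 *			return -EIO;	secondary never called in
 *		udelay(100);
 *	}
 */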

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
#ifndef CONFIG_MIPS_MT_SMTC
	smp_call_function(func, info, 1, 1);
#endif
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * contexts on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

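/*
 * The "invalidate the context on every other cpu" step used by the
 * single-threaded fast path below is open-coded three times
 * (flush_tlb_mm, flush_tlb_range, flush_tlb_page).  A helper along
 * these lines would factor it out (sketch; drop_other_mm_contexts is
 * a hypothetical name, not part of this file):
 *
 *	static inline void drop_other_mm_contexts(struct mm_struct *mm)
 *	{
 *		int i;
 *
 *		for (i = 0; i < num_online_cpus(); i++)
 *			if (smp_processor_id() != i)
 *				cpu_context(i, mm) = 0;
 *	}
 */
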
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, (void *)mm);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_on_other_tlbs(flush_tlb_range_ipi, (void *)&fd);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
	    (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_on_other_tlbs(flush_tlb_page_ipi, (void *)&fd);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static int __init topology_init(void)
{
	int cpu;
	int ret;

	for_each_present_cpu(cpu) {
		ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu);
		if (ret)
			printk(KERN_WARNING "topology_init: register_cpu %d "
			       "failed (%d)\n", cpu, ret);
	}

	return 0;
}

subsys_initcall(topology_init);

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);