xref: /linux/arch/alpha/kernel/smp.c (revision de2fe5e07d58424bc286fff3fd3c1b0bf933cd58)
1 /*
2  *	linux/arch/alpha/kernel/smp.c
3  *
4  *      2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
5  *            Renamed modified smp_call_function to smp_call_function_on_cpu()
6  *            Created a function that conforms to the old calling convention
7  *            of smp_call_function().
8  *
9  *            This is helpful for DCPI.
10  *
11  */
12 
13 #include <linux/errno.h>
14 #include <linux/kernel.h>
15 #include <linux/kernel_stat.h>
16 #include <linux/module.h>
17 #include <linux/sched.h>
18 #include <linux/mm.h>
19 #include <linux/threads.h>
20 #include <linux/smp.h>
21 #include <linux/smp_lock.h>
22 #include <linux/interrupt.h>
23 #include <linux/init.h>
24 #include <linux/delay.h>
25 #include <linux/spinlock.h>
26 #include <linux/irq.h>
27 #include <linux/cache.h>
28 #include <linux/profile.h>
29 #include <linux/bitops.h>
30 
31 #include <asm/hwrpb.h>
32 #include <asm/ptrace.h>
33 #include <asm/atomic.h>
34 
35 #include <asm/io.h>
36 #include <asm/irq.h>
37 #include <asm/pgtable.h>
38 #include <asm/pgalloc.h>
39 #include <asm/mmu_context.h>
40 #include <asm/tlbflush.h>
41 
42 #include "proto.h"
43 #include "irq_impl.h"
44 
45 
46 #define DEBUG_SMP 0
47 #if DEBUG_SMP
48 #define DBGS(args)	printk args
49 #else
50 #define DBGS(args)
51 #endif
52 
53 /* A collection of per-processor data.  */
54 struct cpuinfo_alpha cpu_data[NR_CPUS];
55 
56 /* A collection of single bit ipi messages.  */
57 static struct {
58 	unsigned long bits ____cacheline_aligned;
59 } ipi_data[NR_CPUS] __cacheline_aligned;
60 
61 enum ipi_message_type {
62 	IPI_RESCHEDULE,
63 	IPI_CALL_FUNC,
64 	IPI_CPU_STOP,
65 };
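
/*
 * A sketch of the one-bit-per-message protocol implemented further down
 * by send_ipi_message() and handle_ipi() (descriptive only):
 *
 *	sender:    set_bit(operation, &ipi_data[cpu].bits);
 *	           wripir(cpu);		ring the target's doorbell
 *	receiver:  ops = xchg(&ipi_data[cpu].bits, 0);
 *	           handle every bit set in ops
 *
 * Keeping each bits word ____cacheline_aligned avoids false sharing
 * between the senders and the owning CPU.
 */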
66 
67 /* Secondary startup handshake: -1 = hold off, 0 = may calibrate, 1 = alive.  */
68 static int smp_secondary_alive __initdata = 0;
69 
70 /* Which cpu ids came online.  */
71 cpumask_t cpu_present_mask;
72 cpumask_t cpu_online_map;
73 
74 EXPORT_SYMBOL(cpu_online_map);
75 
76 int smp_num_probed;		/* Internal processor count */
77 int smp_num_cpus = 1;		/* Number that came online.  */
78 
79 extern void calibrate_delay(void);
80 
81 
82 
83 /*
84  * Called by both the boot processor and the secondaries to move global
85  * data into per-processor storage.
86  */
87 static inline void __init
88 smp_store_cpu_info(int cpuid)
89 {
90 	cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
91 	cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
92 	cpu_data[cpuid].need_new_asn = 0;
93 	cpu_data[cpuid].asn_lock = 0;
94 }
95 
96 /*
97  * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
98  */
99 static inline void __init
100 smp_setup_percpu_timer(int cpuid)
101 {
102 	cpu_data[cpuid].prof_counter = 1;
103 	cpu_data[cpuid].prof_multiplier = 1;
104 }
105 
106 static void __init
107 wait_boot_cpu_to_stop(int cpuid)
108 {
109 	unsigned long stop = jiffies + 10*HZ;
110 
111 	while (time_before(jiffies, stop)) {
112 	        if (!smp_secondary_alive)
113 			return;
114 		barrier();
115 	}
116 
117 	printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
118 	for (;;)
119 		barrier();
120 }
121 
122 /*
123  * Where secondaries begin a life of C.
124  */
125 void __init
126 smp_callin(void)
127 {
128 	int cpuid = hard_smp_processor_id();
129 
130 	if (cpu_test_and_set(cpuid, cpu_online_map)) {
131 		printk("??, cpu 0x%x already present??\n", cpuid);
132 		BUG();
133 	}
134 
135 	/* Turn on machine checks.  */
136 	wrmces(7);
137 
138 	/* Set trap vectors.  */
139 	trap_init();
140 
141 	/* Set interrupt vector.  */
142 	wrent(entInt, 0);
143 
144 	/* Get our local ticker going. */
145 	smp_setup_percpu_timer(cpuid);
146 
147 	/* Call platform-specific callin, if specified */
148 	if (alpha_mv.smp_callin) alpha_mv.smp_callin();
149 
150 	/* All kernel threads share the same mm context.  */
151 	atomic_inc(&init_mm.mm_count);
152 	current->active_mm = &init_mm;
153 
154 	/* Must have completely accurate bogos.  */
155 	local_irq_enable();
156 
157 	/* Wait for the boot CPU to stop, with irqs enabled, before
158 	   running calibrate_delay. */
159 	wait_boot_cpu_to_stop(cpuid);
160 	mb();
161 	calibrate_delay();
162 
163 	smp_store_cpu_info(cpuid);
164 	/* Allow master to continue only after we've written loops_per_jiffy.  */
165 	wmb();
166 	smp_secondary_alive = 1;
167 
168 	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
169 	      cpuid, current, current->active_mm));
170 
171 	/* Do nothing.  */
172 	cpu_idle();
173 }
174 
175 /* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
176 static int __init
177 wait_for_txrdy (unsigned long cpumask)
178 {
179 	unsigned long timeout;
180 
181 	if (!(hwrpb->txrdy & cpumask))
182 		return 0;
183 
184 	timeout = jiffies + 10*HZ;
185 	while (time_before(jiffies, timeout)) {
186 		if (!(hwrpb->txrdy & cpumask))
187 			return 0;
188 		udelay(10);
189 		barrier();
190 	}
191 
192 	return -1;
193 }
194 
195 /*
196  * Send a message to a secondary's console.  "START" is one such
197  * interesting message.  ;-)
198  */
199 static void __init
200 send_secondary_console_msg(char *str, int cpuid)
201 {
202 	struct percpu_struct *cpu;
203 	register char *cp1, *cp2;
204 	unsigned long cpumask;
205 	size_t len;
206 
207 	cpu = (struct percpu_struct *)
208 		((char*)hwrpb
209 		 + hwrpb->processor_offset
210 		 + cpuid * hwrpb->processor_size);
211 
212 	cpumask = (1UL << cpuid);
213 	if (wait_for_txrdy(cpumask))
214 		goto timeout;
215 
216 	cp2 = str;
217 	len = strlen(cp2);
218 	*(unsigned int *)&cpu->ipc_buffer[0] = len;
219 	cp1 = (char *) &cpu->ipc_buffer[1];
220 	memcpy(cp1, cp2, len);
221 
222 	/* atomic test and set */
223 	wmb();
224 	set_bit(cpuid, &hwrpb->rxrdy);
225 
226 	if (wait_for_txrdy(cpumask))
227 		goto timeout;
228 	return;
229 
230  timeout:
231 	printk("Processor %x not ready\n", cpuid);
232 }
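
/*
 * A note on the console mailbox usage above (inferred from the code, not
 * from the SRM documentation): send_secondary_console_msg() stores the
 * message length in ipc_buffer[0] and the text from ipc_buffer[1] onward,
 * then sets the target's bit in hwrpb->rxrdy to tell that CPU's console a
 * message is pending; wait_for_txrdy() brackets this to make sure the
 * console is not mid-transmission.  recv_secondary_console_msg() below
 * handles traffic in the other direction, for CPUs whose bit is set in
 * hwrpb->txrdy.
 */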
233 
234 /*
235  * A secondary console wants to send a message.  Receive it.
236  */
237 static void
238 recv_secondary_console_msg(void)
239 {
240 	int mycpu, i, cnt;
241 	unsigned long txrdy = hwrpb->txrdy;
242 	char *cp1, *cp2, buf[80];
243 	struct percpu_struct *cpu;
244 
245 	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));
246 
247 	mycpu = hard_smp_processor_id();
248 
249 	for (i = 0; i < NR_CPUS; i++) {
250 		if (!(txrdy & (1UL << i)))
251 			continue;
252 
253 		DBGS(("recv_secondary_console_msg: "
254 		      "TXRDY contains CPU %d.\n", i));
255 
256 		cpu = (struct percpu_struct *)
257 		  ((char*)hwrpb
258 		   + hwrpb->processor_offset
259 		   + i * hwrpb->processor_size);
260 
261  		DBGS(("recv_secondary_console_msg: on %d from %d"
262 		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
263 		      mycpu, i, cpu->halt_reason, cpu->flags));
264 
265 		cnt = cpu->ipc_buffer[0] >> 32;
266 		if (cnt <= 0 || cnt >= 80)
267 			strcpy(buf, "<<< BOGUS MSG >>>");
268 		else {
269 			cp1 = (char *) &cpu->ipc_buffer[11];
270 			cp2 = buf;
271 			strcpy(cp2, cp1);
272 
273 			while ((cp2 = strchr(cp2, '\r')) != 0) {
274 				*cp2 = ' ';
275 				if (cp2[1] == '\n')
276 					cp2[1] = ' ';
277 			}
278 		}
279 
280 		DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
281 		      "message is '%s'\n", mycpu, buf));
282 	}
283 
284 	hwrpb->txrdy = 0;
285 }
286 
287 /*
288  * Convince the console to have a secondary cpu begin execution.
289  */
290 static int __init
291 secondary_cpu_start(int cpuid, struct task_struct *idle)
292 {
293 	struct percpu_struct *cpu;
294 	struct pcb_struct *hwpcb, *ipcb;
295 	unsigned long timeout;
296 
297 	cpu = (struct percpu_struct *)
298 		((char*)hwrpb
299 		 + hwrpb->processor_offset
300 		 + cpuid * hwrpb->processor_size);
301 	hwpcb = (struct pcb_struct *) cpu->hwpcb;
302 	ipcb = &task_thread_info(idle)->pcb;
303 
304 	/* Initialize the CPU's HWPCB to something just good enough for
305 	   us to get started.  Immediately after starting, we'll swpctx
306 	   to the target idle task's pcb.  Reuse the stack in the mean
307 	   time.  Precalculate the target PCBB.  */
308 	hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
309 	hwpcb->usp = 0;
310 	hwpcb->ptbr = ipcb->ptbr;
311 	hwpcb->pcc = 0;
312 	hwpcb->asn = 0;
313 	hwpcb->unique = virt_to_phys(ipcb);
314 	hwpcb->flags = ipcb->flags;
315 	hwpcb->res1 = hwpcb->res2 = 0;
316 
317 #if 0
318 	DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
319 	      hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
320 #endif
321 	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
322 	      cpuid, idle->state, ipcb->flags));
323 
324 	/* Setup HWRPB fields that SRM uses to activate secondary CPU */
325 	hwrpb->CPU_restart = __smp_callin;
326 	hwrpb->CPU_restart_data = (unsigned long) __smp_callin;
327 
328 	/* Recalculate and update the HWRPB checksum */
329 	hwrpb_update_checksum(hwrpb);
330 
331 	/*
332 	 * Send a "start" command to the specified processor.
333 	 */
334 
335 	/* SRM III 3.4.1.3 */
336 	cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
337 	cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
338 	wmb();
339 
340 	send_secondary_console_msg("START\r\n", cpuid);
341 
342 	/* Wait 10 seconds for an ACK from the console.  */
343 	timeout = jiffies + 10*HZ;
344 	while (time_before(jiffies, timeout)) {
345 		if (cpu->flags & 1)
346 			goto started;
347 		udelay(10);
348 		barrier();
349 	}
350 	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
351 	return -1;
352 
353  started:
354 	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
355 	return 0;
356 }
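
/*
 * In outline, the start protocol above is: point the HWRPB CPU_restart
 * vector at __smp_callin, recompute the HWRPB checksum, mark the target's
 * per-CPU slot Context Valid and Restart Capable, clear Bootstrap In
 * Progress, and ask the target's console to "START".  The console setting
 * bit 0 of cpu->flags again is treated as the ACK, after which the
 * secondary should be on its way into __smp_callin and smp_callin().
 */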
357 
358 /*
359  * Bring one cpu online.
360  */
361 static int __init
362 smp_boot_one_cpu(int cpuid)
363 {
364 	struct task_struct *idle;
365 	unsigned long timeout;
366 
367 	/* Cook up an idler for this guy.  Note that the address we
368 	   give to kernel_thread is irrelevant -- it's going to start
369 	   where HWRPB.CPU_restart says to start.  But this gets all
370 	   the other task-y sort of data structures set up like we
371 	   wish.  We can't use kernel_thread since we must avoid
372 	   rescheduling the child.  */
373 	idle = fork_idle(cpuid);
374 	if (IS_ERR(idle))
375 		panic("failed fork for CPU %d", cpuid);
376 
377 	DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
378 	      cpuid, idle->state, idle->flags));
379 
380 	/* Signal the secondary to wait a moment.  */
381 	smp_secondary_alive = -1;
382 
383 	/* Whirrr, whirrr, whirrrrrrrrr... */
384 	if (secondary_cpu_start(cpuid, idle))
385 		return -1;
386 
387 	/* Notify the secondary CPU it can run calibrate_delay.  */
388 	mb();
389 	smp_secondary_alive = 0;
390 
391 	/* We've been acked by the console; wait one second for
392 	   the task to start up for real.  */
393 	timeout = jiffies + 1*HZ;
394 	while (time_before(jiffies, timeout)) {
395 		if (smp_secondary_alive == 1)
396 			goto alive;
397 		udelay(10);
398 		barrier();
399 	}
400 
401 	/* We failed to boot the CPU.  */
402 
403 	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
404 	return -1;
405 
406  alive:
407 	/* Another "Red Snapper". */
408 	return 0;
409 }
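
/*
 * The boot handshake implemented by smp_boot_one_cpu() above together
 * with wait_boot_cpu_to_stop() and smp_callin() earlier in this file:
 *
 *	boot CPU:   smp_secondary_alive = -1;	hold the secondary
 *	            secondary_cpu_start(...);
 *	            smp_secondary_alive = 0;	let it calibrate
 *	            wait for smp_secondary_alive == 1
 *	secondary:  spin until smp_secondary_alive == 0
 *	            calibrate_delay(); smp_store_cpu_info();
 *	            smp_secondary_alive = 1;
 *
 * The mb()/wmb() calls around the assignments order the flag against
 * loops_per_jiffy and the rest of the per-cpu data.
 */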
410 
411 /*
412  * Called from setup_arch.  Detect an SMP system and which processors
413  * are present.
414  */
415 void __init
416 setup_smp(void)
417 {
418 	struct percpu_struct *cpubase, *cpu;
419 	unsigned long i;
420 
421 	if (boot_cpuid != 0) {
422 		printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
423 		       boot_cpuid);
424 	}
425 
426 	if (hwrpb->nr_processors > 1) {
427 		int boot_cpu_palrev;
428 
429 		DBGS(("setup_smp: nr_processors %ld\n",
430 		      hwrpb->nr_processors));
431 
432 		cpubase = (struct percpu_struct *)
433 			((char*)hwrpb + hwrpb->processor_offset);
434 		boot_cpu_palrev = cpubase->pal_revision;
435 
436 		for (i = 0; i < hwrpb->nr_processors; i++) {
437 			cpu = (struct percpu_struct *)
438 				((char *)cpubase + i*hwrpb->processor_size);
439 			if ((cpu->flags & 0x1cc) == 0x1cc) {
440 				smp_num_probed++;
441 				/* Assume here that "whami" == index */
442 				cpu_set(i, cpu_possible_map);
443 				cpu->pal_revision = boot_cpu_palrev;
444 			}
445 
446 			DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
447 			      i, cpu->flags, cpu->type));
448 			DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
449 			      i, cpu->pal_revision));
450 		}
451 	} else {
452 		smp_num_probed = 1;
453 		cpu_set(boot_cpuid, cpu_possible_map);
454 	}
455 	cpu_present_mask = cpumask_of_cpu(boot_cpuid);
456 
457 	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_possible_map = %lx\n",
458 	       smp_num_probed, cpu_possible_map.bits[0]);
459 }
460 
461 /*
462  * Called by smp_init to prepare the secondaries.
463  */
464 void __init
465 smp_prepare_cpus(unsigned int max_cpus)
466 {
467 	/* Take care of some initial bookkeeping.  */
468 	memset(ipi_data, 0, sizeof(ipi_data));
469 
470 	current_thread_info()->cpu = boot_cpuid;
471 
472 	smp_store_cpu_info(boot_cpuid);
473 	smp_setup_percpu_timer(boot_cpuid);
474 
475 	/* Nothing to do on a UP box, or when told not to.  */
476 	if (smp_num_probed == 1 || max_cpus == 0) {
477 		cpu_present_mask = cpumask_of_cpu(boot_cpuid);
478 		printk(KERN_INFO "SMP mode deactivated.\n");
479 		return;
480 	}
481 
482 	printk(KERN_INFO "SMP starting up secondaries.\n");
483 
484 	smp_num_cpus = smp_num_probed;
485 }
486 
487 void __devinit
488 smp_prepare_boot_cpu(void)
489 {
490 	/*
491 	 * Mark the boot cpu (current cpu) as both present and online
492 	 */
493 	cpu_set(smp_processor_id(), cpu_present_mask);
494 	cpu_set(smp_processor_id(), cpu_online_map);
495 }
496 
497 int __devinit
498 __cpu_up(unsigned int cpu)
499 {
500 	smp_boot_one_cpu(cpu);
501 
502 	return cpu_online(cpu) ? 0 : -ENOSYS;
503 }
504 
505 void __init
506 smp_cpus_done(unsigned int max_cpus)
507 {
508 	int cpu;
509 	unsigned long bogosum = 0;
510 
511 	for(cpu = 0; cpu < NR_CPUS; cpu++)
512 		if (cpu_online(cpu))
513 			bogosum += cpu_data[cpu].loops_per_jiffy;
514 
515 	printk(KERN_INFO "SMP: Total of %d processors activated "
516 	       "(%lu.%02lu BogoMIPS).\n",
517 	       num_online_cpus(),
518 	       (bogosum + 2500) / (500000/HZ),
519 	       ((bogosum + 2500) / (5000/HZ)) % 100);
520 }
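
/*
 * The arithmetic above: a BogoMIPS rating is loops_per_jiffy * HZ / 500000,
 * so bogosum / (500000/HZ) is the integer part of the combined rating and
 * (bogosum / (5000/HZ)) % 100 supplies the two fractional digits; the
 * +2500 is a rounding fudge (half of 500000/HZ when HZ is 100).
 */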
521 
522 
523 void
524 smp_percpu_timer_interrupt(struct pt_regs *regs)
525 {
526 	int cpu = smp_processor_id();
527 	unsigned long user = user_mode(regs);
528 	struct cpuinfo_alpha *data = &cpu_data[cpu];
529 
530 	/* Record kernel PC.  */
531 	profile_tick(CPU_PROFILING, regs);
532 
533 	if (!--data->prof_counter) {
534 		/* We need to make like a normal interrupt -- otherwise
535 		   timer interrupts ignore the global interrupt lock,
536 		   which would be a Bad Thing.  */
537 		irq_enter();
538 
539 		update_process_times(user);
540 
541 		data->prof_counter = data->prof_multiplier;
542 
543 		irq_exit();
544 	}
545 }
546 
547 int __init
548 setup_profiling_timer(unsigned int multiplier)
549 {
550 	return -EINVAL;
551 }
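
/*
 * prof_counter/prof_multiplier form a simple divider for the per-cpu
 * timer: update_process_times() runs once every prof_multiplier ticks.
 * Since setup_profiling_timer() rejects every multiplier with -EINVAL,
 * the divider stays at 1 and every tick is accounted.
 */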
552 
553 
554 static void
555 send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
556 {
557 	int i;
558 
559 	mb();
560 	for_each_cpu_mask(i, to_whom)
561 		set_bit(operation, &ipi_data[i].bits);
562 
563 	mb();
564 	for_each_cpu_mask(i, to_whom)
565 		wripir(i);
566 }
567 
568 /* Structure and data for smp_call_function.  This is designed to
569    minimize static memory requirements.  Plus it looks cleaner.  */
570 
571 struct smp_call_struct {
572 	void (*func) (void *info);
573 	void *info;
574 	long wait;
575 	atomic_t unstarted_count;
576 	atomic_t unfinished_count;
577 };
578 
579 static struct smp_call_struct *smp_call_function_data;
580 
581 /* Atomically drop data into a shared pointer.  The pointer is free if
582    it is initially locked.  If retry, spin until free.  */
583 
584 static int
585 pointer_lock (void *lock, void *data, int retry)
586 {
587 	void *old, *tmp;
588 
589 	mb();
590  again:
591 	/* Compare and swap with zero.  */
592 	asm volatile (
593 	"1:	ldq_l	%0,%1\n"
594 	"	mov	%3,%2\n"
595 	"	bne	%0,2f\n"
596 	"	stq_c	%2,%1\n"
597 	"	beq	%2,1b\n"
598 	"2:"
599 	: "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
600 	: "r"(data)
601 	: "memory");
602 
603 	if (old == 0)
604 		return 0;
605 	if (! retry)
606 		return -EBUSY;
607 
608 	while (*(void **)lock)
609 		barrier();
610 	goto again;
611 }
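
/*
 * The inline assembly above is a load-locked/store-conditional
 * compare-and-swap against zero; in rough C it amounts to
 *
 *	do {
 *		old = load_locked(lock);		ldq_l
 *		if (old != 0)
 *			break;				already owned, give up
 *	} while (!store_conditional(lock, data));	stq_c
 *
 * where load_locked/store_conditional are stand-ins for ldq_l/stq_c.
 * A zero result from stq_c means the reservation was lost and the
 * sequence must retry, hence the "beq %2,1b".
 */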
612 
613 void
614 handle_ipi(struct pt_regs *regs)
615 {
616 	int this_cpu = smp_processor_id();
617 	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
618 	unsigned long ops;
619 
620 #if 0
621 	DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
622 	      this_cpu, *pending_ipis, regs->pc));
623 #endif
624 
625 	mb();	/* Order interrupt and bit testing. */
626 	while ((ops = xchg(pending_ipis, 0)) != 0) {
627 	  mb();	/* Order bit clearing and data access. */
628 	  do {
629 		unsigned long which;
630 
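		/* Peel off the lowest-numbered pending operation: ops & -ops
		   isolates the lowest set bit and __ffs() converts it to an
		   ipi_message_type value.  */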
631 		which = ops & -ops;
632 		ops &= ~which;
633 		which = __ffs(which);
634 
635 		switch (which) {
636 		case IPI_RESCHEDULE:
637 			/* Reschedule callback.  Everything to be done
638 			   is done by the interrupt return path.  */
639 			break;
640 
641 		case IPI_CALL_FUNC:
642 		    {
643 			struct smp_call_struct *data;
644 			void (*func)(void *info);
645 			void *info;
646 			int wait;
647 
648 			data = smp_call_function_data;
649 			func = data->func;
650 			info = data->info;
651 			wait = data->wait;
652 
653 			/* Notify the sending CPU that the data has been
654 			   received, and execution is about to begin.  */
655 			mb();
656 			atomic_dec (&data->unstarted_count);
657 
658 			/* At this point the structure may be gone unless
659 			   wait is true.  */
660 			(*func)(info);
661 
662 			/* Notify the sending CPU that the task is done.  */
663 			mb();
664 			if (wait) atomic_dec (&data->unfinished_count);
665 			break;
666 		    }
667 
668 		case IPI_CPU_STOP:
669 			halt();
670 
671 		default:
672 			printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
673 			       this_cpu, which);
674 			break;
675 		}
676 	  } while (ops);
677 
678 	  mb();	/* Order data access and bit testing. */
679 	}
680 
681 	cpu_data[this_cpu].ipi_count++;
682 
683 	if (hwrpb->txrdy)
684 		recv_secondary_console_msg();
685 }
686 
687 void
688 smp_send_reschedule(int cpu)
689 {
690 #ifdef DEBUG_IPI_MSG
691 	if (cpu == hard_smp_processor_id())
692 		printk(KERN_WARNING
693 		       "smp_send_reschedule: Sending IPI to self.\n");
694 #endif
695 	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
696 }
697 
698 void
699 smp_send_stop(void)
700 {
701 	cpumask_t to_whom = cpu_possible_map;
702 	cpu_clear(smp_processor_id(), to_whom);
703 #ifdef DEBUG_IPI_MSG
704 	if (hard_smp_processor_id() != boot_cpu_id)
705 		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
706 #endif
707 	send_ipi_message(to_whom, IPI_CPU_STOP);
708 }
709 
710 /*
711  * Run a function on all other CPUs.
712  *  <func>	The function to run. This must be fast and non-blocking.
713  *  <info>	An arbitrary pointer to pass to the function.
714  *  <retry>	If true, keep retrying until ready.
715  *  <wait>	If true, wait until function has completed on other CPUs.
716  *  [RETURNS]   0 on success, else a negative status code.
717  *
718  * Does not return until remote CPUs are nearly ready to execute <func>,
719  * are executing it, or have already executed it.
720  * You must not call this function with disabled interrupts or from a
721  * hardware interrupt handler or from a bottom half handler.
722  */
723 
724 int
725 smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
726 			  int wait, cpumask_t to_whom)
727 {
728 	struct smp_call_struct data;
729 	unsigned long timeout;
730 	int num_cpus_to_call;
731 
732 	/* Can deadlock when called with interrupts disabled */
733 	WARN_ON(irqs_disabled());
734 
735 	data.func = func;
736 	data.info = info;
737 	data.wait = wait;
738 
739 	cpu_clear(smp_processor_id(), to_whom);
740 	num_cpus_to_call = cpus_weight(to_whom);
741 
742 	atomic_set(&data.unstarted_count, num_cpus_to_call);
743 	atomic_set(&data.unfinished_count, num_cpus_to_call);
744 
745 	/* Acquire the smp_call_function_data mutex.  */
746 	if (pointer_lock(&smp_call_function_data, &data, retry))
747 		return -EBUSY;
748 
749 	/* Send a message to the requested CPUs.  */
750 	send_ipi_message(to_whom, IPI_CALL_FUNC);
751 
752 	/* Wait for a minimal response.  */
753 	timeout = jiffies + HZ;
754 	while (atomic_read (&data.unstarted_count) > 0
755 	       && time_before (jiffies, timeout))
756 		barrier();
757 
758 	/* If there's no response yet, log a message but allow a longer
759 	 * timeout period -- if we get a response this time, log
760 	 * a message saying when we got it.
761 	 */
762 	if (atomic_read(&data.unstarted_count) > 0) {
763 		long start_time = jiffies;
764 		printk(KERN_ERR "%s: initial timeout -- trying long wait\n",
765 		       __FUNCTION__);
766 		timeout = jiffies + 30 * HZ;
767 		while (atomic_read(&data.unstarted_count) > 0
768 		       && time_before(jiffies, timeout))
769 			barrier();
770 		if (atomic_read(&data.unstarted_count) <= 0) {
771 			long delta = jiffies - start_time;
772 			printk(KERN_ERR
773 			       "%s: response %ld.%ld seconds into long wait\n",
774 			       __FUNCTION__, delta / HZ,
775 			       (100 * (delta - ((delta / HZ) * HZ))) / HZ);
776 		}
777 	}
778 
779 	/* We either got one or timed out -- clear the lock. */
780 	mb();
781 	smp_call_function_data = NULL;
782 
783 	/*
784 	 * If after both the initial and long timeout periods we still don't
785 	 * have a response, something is very wrong...
786 	 */
787 	BUG_ON(atomic_read (&data.unstarted_count) > 0);
788 
789 	/* Wait for a complete response, if needed.  */
790 	if (wait) {
791 		while (atomic_read (&data.unfinished_count) > 0)
792 			barrier();
793 	}
794 
795 	return 0;
796 }
797 
798 int
799 smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
800 {
801 	return smp_call_function_on_cpu (func, info, retry, wait,
802 					 cpu_online_map);
803 }
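
/*
 * Typical usage, as in smp_imb() and flush_tlb_mm() below: the handler
 * must be fast, non-blocking, and must not itself call smp_call_function.
 * For example (ipi_do_something is a made-up name; real callers in this
 * file use ipi_imb, ipi_flush_tlb_mm, ...):
 *
 *	static void ipi_do_something(void *arg) { ... }
 *
 *	if (smp_call_function(ipi_do_something, arg, 1, 1))
 *		printk(KERN_CRIT "do_something: timed out\n");
 */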
804 
805 static void
806 ipi_imb(void *ignored)
807 {
808 	imb();
809 }
810 
811 void
812 smp_imb(void)
813 {
814 	/* Must wait for other processors to flush their icache before continuing. */
815 	if (on_each_cpu(ipi_imb, NULL, 1, 1))
816 		printk(KERN_CRIT "smp_imb: timed out\n");
817 }
818 
819 static void
820 ipi_flush_tlb_all(void *ignored)
821 {
822 	tbia();
823 }
824 
825 void
826 flush_tlb_all(void)
827 {
828 	/* Although we don't have any data to pass, we do want to
829 	   synchronize with the other processors.  */
830 	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) {
831 		printk(KERN_CRIT "flush_tlb_all: timed out\n");
832 	}
833 }
834 
835 #define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
836 
837 static void
838 ipi_flush_tlb_mm(void *x)
839 {
840 	struct mm_struct *mm = (struct mm_struct *) x;
841 	if (mm == current->active_mm && !asn_locked())
842 		flush_tlb_current(mm);
843 	else
844 		flush_tlb_other(mm);
845 }
846 
847 void
848 flush_tlb_mm(struct mm_struct *mm)
849 {
850 	preempt_disable();
851 
852 	if (mm == current->active_mm) {
853 		flush_tlb_current(mm);
854 		if (atomic_read(&mm->mm_users) <= 1) {
855 			int cpu, this_cpu = smp_processor_id();
856 			for (cpu = 0; cpu < NR_CPUS; cpu++) {
857 				if (!cpu_online(cpu) || cpu == this_cpu)
858 					continue;
859 				if (mm->context[cpu])
860 					mm->context[cpu] = 0;
861 			}
862 			preempt_enable();
863 			return;
864 		}
865 	}
866 
867 	if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
868 		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
869 	}
870 
871 	preempt_enable();
872 }
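
/*
 * The single-user shortcut above (repeated in flush_tlb_page() and
 * flush_icache_user_range()): if mm_users <= 1, no other CPU can be
 * running this mm, so rather than sending an IPI we zero mm->context[cpu]
 * for every other online CPU; the next time such a CPU activates this mm
 * it must allocate a fresh ASN, which makes any stale TLB entries for the
 * old ASN unreachable.
 */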
873 
874 struct flush_tlb_page_struct {
875 	struct vm_area_struct *vma;
876 	struct mm_struct *mm;
877 	unsigned long addr;
878 };
879 
880 static void
881 ipi_flush_tlb_page(void *x)
882 {
883 	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
884 	struct mm_struct * mm = data->mm;
885 
886 	if (mm == current->active_mm && !asn_locked())
887 		flush_tlb_current_page(mm, data->vma, data->addr);
888 	else
889 		flush_tlb_other(mm);
890 }
891 
892 void
893 flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
894 {
895 	struct flush_tlb_page_struct data;
896 	struct mm_struct *mm = vma->vm_mm;
897 
898 	preempt_disable();
899 
900 	if (mm == current->active_mm) {
901 		flush_tlb_current_page(mm, vma, addr);
902 		if (atomic_read(&mm->mm_users) <= 1) {
903 			int cpu, this_cpu = smp_processor_id();
904 			for (cpu = 0; cpu < NR_CPUS; cpu++) {
905 				if (!cpu_online(cpu) || cpu == this_cpu)
906 					continue;
907 				if (mm->context[cpu])
908 					mm->context[cpu] = 0;
909 			}
910 			preempt_enable();
911 			return;
912 		}
913 	}
914 
915 	data.vma = vma;
916 	data.mm = mm;
917 	data.addr = addr;
918 
919 	if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
920 		printk(KERN_CRIT "flush_tlb_page: timed out\n");
921 	}
922 
923 	preempt_enable();
924 }
925 
926 void
927 flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
928 {
929 	/* On the Alpha we always flush the whole user tlb.  */
930 	flush_tlb_mm(vma->vm_mm);
931 }
932 
933 static void
934 ipi_flush_icache_page(void *x)
935 {
936 	struct mm_struct *mm = (struct mm_struct *) x;
937 	if (mm == current->active_mm && !asn_locked())
938 		__load_new_mm_context(mm);
939 	else
940 		flush_tlb_other(mm);
941 }
942 
943 void
944 flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
945 			unsigned long addr, int len)
946 {
947 	struct mm_struct *mm = vma->vm_mm;
948 
949 	if ((vma->vm_flags & VM_EXEC) == 0)
950 		return;
951 
952 	preempt_disable();
953 
954 	if (mm == current->active_mm) {
955 		__load_new_mm_context(mm);
956 		if (atomic_read(&mm->mm_users) <= 1) {
957 			int cpu, this_cpu = smp_processor_id();
958 			for (cpu = 0; cpu < NR_CPUS; cpu++) {
959 				if (!cpu_online(cpu) || cpu == this_cpu)
960 					continue;
961 				if (mm->context[cpu])
962 					mm->context[cpu] = 0;
963 			}
964 			preempt_enable();
965 			return;
966 		}
967 	}
968 
969 	if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
970 		printk(KERN_CRIT "flush_icache_page: timed out\n");
971 	}
972 
973 	preempt_enable();
974 }
975