xref: /linux/arch/alpha/kernel/smp.c (revision f3d9478b2ce468c3115b02ecae7e975990697f15)
1 /*
2  *	linux/arch/alpha/kernel/smp.c
3  *
4  *      2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
5  *            Renamed modified smp_call_function to smp_call_function_on_cpu()
6  *            Created a function that conforms to the old calling convention
7  *            of smp_call_function().
8  *
9  *            This is helpful for DCPI.
10  *
11  */
12 
13 #include <linux/errno.h>
14 #include <linux/kernel.h>
15 #include <linux/kernel_stat.h>
16 #include <linux/module.h>
17 #include <linux/sched.h>
18 #include <linux/mm.h>
19 #include <linux/threads.h>
20 #include <linux/smp.h>
21 #include <linux/smp_lock.h>
22 #include <linux/interrupt.h>
23 #include <linux/init.h>
24 #include <linux/delay.h>
25 #include <linux/spinlock.h>
26 #include <linux/irq.h>
27 #include <linux/cache.h>
28 #include <linux/profile.h>
29 #include <linux/bitops.h>
30 
31 #include <asm/hwrpb.h>
32 #include <asm/ptrace.h>
33 #include <asm/atomic.h>
34 
35 #include <asm/io.h>
36 #include <asm/irq.h>
37 #include <asm/pgtable.h>
38 #include <asm/pgalloc.h>
39 #include <asm/mmu_context.h>
40 #include <asm/tlbflush.h>
41 
42 #include "proto.h"
43 #include "irq_impl.h"
44 
45 
46 #define DEBUG_SMP 0
47 #if DEBUG_SMP
48 #define DBGS(args)	printk args
49 #else
50 #define DBGS(args)
51 #endif
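/* Note the doubled parentheses at every call site: the whole printk
   argument list travels as a single macro argument, so the calls compile
   away cleanly when DEBUG_SMP is 0, e.g. DBGS(("booted cpu %d\n", cpuid)); */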
52 
53 /* A collection of per-processor data.  */
54 struct cpuinfo_alpha cpu_data[NR_CPUS];
55 
56 /* A collection of single bit ipi messages.  */
57 static struct {
58 	unsigned long bits ____cacheline_aligned;
59 } ipi_data[NR_CPUS] __cacheline_aligned;
60 
61 enum ipi_message_type {
62 	IPI_RESCHEDULE,
63 	IPI_CALL_FUNC,
64 	IPI_CPU_STOP,
65 };
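/* Each ipi_message_type above is a bit position in ipi_data[cpu].bits:
   send_ipi_message() sets the bit and raises an interprocessor interrupt
   via wripir(), and handle_ipi() on the target atomically swaps the word
   to zero and services every bit that was set.  */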
66 
67 /* Secondary boot handshake: -1 = wait, 0 = go ahead, 1 = secondary is alive.  */
68 static int smp_secondary_alive __initdata = 0;
69 
70 /* Which cpu ids came online.  */
71 cpumask_t cpu_online_map;
72 
73 EXPORT_SYMBOL(cpu_online_map);
74 
75 int smp_num_probed;		/* Internal processor count */
76 int smp_num_cpus = 1;		/* Number that came online.  */
77 
78 extern void calibrate_delay(void);
79 
80 
81 
82 /*
83  * Called by both the boot cpu and the secondaries to move global data into
84  *  per-processor storage.
85  */
86 static inline void __init
87 smp_store_cpu_info(int cpuid)
88 {
89 	cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
90 	cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
91 	cpu_data[cpuid].need_new_asn = 0;
92 	cpu_data[cpuid].asn_lock = 0;
93 }
94 
95 /*
96  * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
97  */
98 static inline void __init
99 smp_setup_percpu_timer(int cpuid)
100 {
101 	cpu_data[cpuid].prof_counter = 1;
102 	cpu_data[cpuid].prof_multiplier = 1;
103 }
104 
105 static void __init
106 wait_boot_cpu_to_stop(int cpuid)
107 {
108 	unsigned long stop = jiffies + 10*HZ;
109 
110 	while (time_before(jiffies, stop)) {
111 	        if (!smp_secondary_alive)
112 			return;
113 		barrier();
114 	}
115 
116 	printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
117 	for (;;)
118 		barrier();
119 }
120 
121 /*
122  * Where secondaries begin a life of C.
123  */
124 void __init
125 smp_callin(void)
126 {
127 	int cpuid = hard_smp_processor_id();
128 
129 	if (cpu_test_and_set(cpuid, cpu_online_map)) {
130 		printk("??, cpu 0x%x already present??\n", cpuid);
131 		BUG();
132 	}
133 
134 	/* Turn on machine checks.  */
135 	wrmces(7);
136 
137 	/* Set trap vectors.  */
138 	trap_init();
139 
140 	/* Set interrupt vector.  */
141 	wrent(entInt, 0);
142 
143 	/* Get our local ticker going. */
144 	smp_setup_percpu_timer(cpuid);
145 
146 	/* Call platform-specific callin, if specified */
147 	if (alpha_mv.smp_callin) alpha_mv.smp_callin();
148 
149 	/* All kernel threads share the same mm context.  */
150 	atomic_inc(&init_mm.mm_count);
151 	current->active_mm = &init_mm;
152 
153 	/* Must have completely accurate bogos.  */
154 	local_irq_enable();
155 
156 	/* Wait, with irqs enabled, for the boot CPU to stop before running
157 	   calibrate_delay. */
158 	wait_boot_cpu_to_stop(cpuid);
159 	mb();
160 	calibrate_delay();
161 
162 	smp_store_cpu_info(cpuid);
163 	/* Allow the master to continue only after we've written loops_per_jiffy.  */
164 	wmb();
165 	smp_secondary_alive = 1;
166 
167 	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
168 	      cpuid, current, current->active_mm));
169 
170 	/* Do nothing.  */
171 	cpu_idle();
172 }
173 
174 /* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
175 static int __init
176 wait_for_txrdy (unsigned long cpumask)
177 {
178 	unsigned long timeout;
179 
180 	if (!(hwrpb->txrdy & cpumask))
181 		return 0;
182 
183 	timeout = jiffies + 10*HZ;
184 	while (time_before(jiffies, timeout)) {
185 		if (!(hwrpb->txrdy & cpumask))
186 			return 0;
187 		udelay(10);
188 		barrier();
189 	}
190 
191 	return -1;
192 }
193 
194 /*
195  * Send a message to a secondary's console.  "START" is one such
196  * interesting message.  ;-)
197  */
198 static void __init
199 send_secondary_console_msg(char *str, int cpuid)
200 {
201 	struct percpu_struct *cpu;
202 	register char *cp1, *cp2;
203 	unsigned long cpumask;
204 	size_t len;
205 
206 	cpu = (struct percpu_struct *)
207 		((char*)hwrpb
208 		 + hwrpb->processor_offset
209 		 + cpuid * hwrpb->processor_size);
210 
211 	cpumask = (1UL << cpuid);
212 	if (wait_for_txrdy(cpumask))
213 		goto timeout;
214 
215 	cp2 = str;
216 	len = strlen(cp2);
217 	*(unsigned int *)&cpu->ipc_buffer[0] = len;
218 	cp1 = (char *) &cpu->ipc_buffer[1];
219 	memcpy(cp1, cp2, len);
220 
221 	/* Atomic set, ordered after the message write above.  */
222 	wmb();
223 	set_bit(cpuid, &hwrpb->rxrdy);
224 
225 	if (wait_for_txrdy(cpumask))
226 		goto timeout;
227 	return;
228 
229  timeout:
230 	printk("Processor %x not ready\n", cpuid);
231 }
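/* The exchange above goes through the HWRPB inter-console mailbox: the
   message length is written into ipc_buffer[0], the text into
   ipc_buffer[1..], and setting the target's bit in hwrpb->rxrdy tells its
   SRM console that a message is waiting for it.  */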
232 
233 /*
234  * A secondary console wants to send a message.  Receive it.
235  */
236 static void
237 recv_secondary_console_msg(void)
238 {
239 	int mycpu, i, cnt;
240 	unsigned long txrdy = hwrpb->txrdy;
241 	char *cp1, *cp2, buf[80];
242 	struct percpu_struct *cpu;
243 
244 	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));
245 
246 	mycpu = hard_smp_processor_id();
247 
248 	for (i = 0; i < NR_CPUS; i++) {
249 		if (!(txrdy & (1UL << i)))
250 			continue;
251 
252 		DBGS(("recv_secondary_console_msg: "
253 		      "TXRDY contains CPU %d.\n", i));
254 
255 		cpu = (struct percpu_struct *)
256 		  ((char*)hwrpb
257 		   + hwrpb->processor_offset
258 		   + i * hwrpb->processor_size);
259 
260  		DBGS(("recv_secondary_console_msg: on %d from %d"
261 		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
262 		      mycpu, i, cpu->halt_reason, cpu->flags));
263 
264 		cnt = cpu->ipc_buffer[0] >> 32;
265 		if (cnt <= 0 || cnt >= 80)
266 			strcpy(buf, "<<< BOGUS MSG >>>");
267 		else {
268 			cp1 = (char *) &cpu->ipc_buffer[11];
269 			cp2 = buf;
270 			strcpy(cp2, cp1);
271 
272 			while ((cp2 = strchr(cp2, '\r')) != 0) {
273 				*cp2 = ' ';
274 				if (cp2[1] == '\n')
275 					cp2[1] = ' ';
276 			}
277 		}
278 
279 		DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
280 		      "message is '%s'\n", mycpu, buf));
281 	}
282 
283 	hwrpb->txrdy = 0;
284 }
285 
286 /*
287  * Convince the console to have a secondary cpu begin execution.
288  */
289 static int __init
290 secondary_cpu_start(int cpuid, struct task_struct *idle)
291 {
292 	struct percpu_struct *cpu;
293 	struct pcb_struct *hwpcb, *ipcb;
294 	unsigned long timeout;
295 
296 	cpu = (struct percpu_struct *)
297 		((char*)hwrpb
298 		 + hwrpb->processor_offset
299 		 + cpuid * hwrpb->processor_size);
300 	hwpcb = (struct pcb_struct *) cpu->hwpcb;
301 	ipcb = &task_thread_info(idle)->pcb;
302 
303 	/* Initialize the CPU's HWPCB to something just good enough for
304 	   us to get started.  Immediately after starting, we'll swpctx
305 	   to the target idle task's pcb.  Reuse the stack in the mean
306 	   time.  Precalculate the target PCBB.  */
307 	hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
308 	hwpcb->usp = 0;
309 	hwpcb->ptbr = ipcb->ptbr;
310 	hwpcb->pcc = 0;
311 	hwpcb->asn = 0;
312 	hwpcb->unique = virt_to_phys(ipcb);
313 	hwpcb->flags = ipcb->flags;
314 	hwpcb->res1 = hwpcb->res2 = 0;
315 
316 #if 0
317 	DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
318 	      hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
319 #endif
320 	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
321 	      cpuid, idle->state, ipcb->flags));
322 
323 	/* Set up the HWRPB fields that SRM uses to activate the secondary CPU.  */
324 	hwrpb->CPU_restart = __smp_callin;
325 	hwrpb->CPU_restart_data = (unsigned long) __smp_callin;
326 
327 	/* Recalculate and update the HWRPB checksum */
328 	hwrpb_update_checksum(hwrpb);
329 
330 	/*
331 	 * Send a "start" command to the specified processor.
332 	 */
333 
334 	/* SRM III 3.4.1.3 */
335 	cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
336 	cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
337 	wmb();
338 
339 	send_secondary_console_msg("START\r\n", cpuid);
340 
341 	/* Wait 10 seconds for an ACK from the console.  */
342 	timeout = jiffies + 10*HZ;
343 	while (time_before(jiffies, timeout)) {
344 		if (cpu->flags & 1)
345 			goto started;
346 		udelay(10);
347 		barrier();
348 	}
349 	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
350 	return -1;
351 
352  started:
353 	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
354 	return 0;
355 }
356 
357 /*
358  * Bring one cpu online.
359  */
360 static int __init
361 smp_boot_one_cpu(int cpuid)
362 {
363 	struct task_struct *idle;
364 	unsigned long timeout;
365 
366 	/* Cook up an idler for this guy.  Note that the address we
367 	   give to kernel_thread is irrelevant -- it's going to start
368 	   where HWRPB.CPU_restart says to start.  But this gets all
369 	   the other task-y sort of data structures set up like we
370 	   wish.  We can't use kernel_thread since we must avoid
371 	   rescheduling the child.  */
372 	idle = fork_idle(cpuid);
373 	if (IS_ERR(idle))
374 		panic("failed fork for CPU %d", cpuid);
375 
376 	DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
377 	      cpuid, idle->state, idle->flags));
378 
379 	/* Signal the secondary to wait a moment.  */
380 	smp_secondary_alive = -1;
381 
382 	/* Whirrr, whirrr, whirrrrrrrrr... */
383 	if (secondary_cpu_start(cpuid, idle))
384 		return -1;
385 
386 	/* Notify the secondary CPU it can run calibrate_delay.  */
387 	mb();
388 	smp_secondary_alive = 0;
389 
390 	/* We've been acked by the console; wait one second for
391 	   the task to start up for real.  */
392 	timeout = jiffies + 1*HZ;
393 	while (time_before(jiffies, timeout)) {
394 		if (smp_secondary_alive == 1)
395 			goto alive;
396 		udelay(10);
397 		barrier();
398 	}
399 
400 	/* We failed to boot the CPU.  */
401 
402 	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
403 	return -1;
404 
405  alive:
406 	/* Another "Red Snapper". */
407 	return 0;
408 }
409 
410 /*
411  * Called from setup_arch.  Detect an SMP system and which processors
412  * are present.
413  */
414 void __init
415 setup_smp(void)
416 {
417 	struct percpu_struct *cpubase, *cpu;
418 	unsigned long i;
419 
420 	if (boot_cpuid != 0) {
421 		printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
422 		       boot_cpuid);
423 	}
424 
425 	if (hwrpb->nr_processors > 1) {
426 		int boot_cpu_palrev;
427 
428 		DBGS(("setup_smp: nr_processors %ld\n",
429 		      hwrpb->nr_processors));
430 
431 		cpubase = (struct percpu_struct *)
432 			((char*)hwrpb + hwrpb->processor_offset);
433 		boot_cpu_palrev = cpubase->pal_revision;
434 
435 		for (i = 0; i < hwrpb->nr_processors; i++) {
436 			cpu = (struct percpu_struct *)
437 				((char *)cpubase + i*hwrpb->processor_size);
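			/* 0x1cc selects the SRM per-CPU status bits (presumably
			   processor-available/present plus the PALcode valid,
			   memory-valid and loaded bits); only fully usable CPUs
			   are marked present.  */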
438 			if ((cpu->flags & 0x1cc) == 0x1cc) {
439 				smp_num_probed++;
440 				/* Assume here that "whami" == index */
441 				cpu_set(i, cpu_present_map);
442 				cpu->pal_revision = boot_cpu_palrev;
443 			}
444 
445 			DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
446 			      i, cpu->flags, cpu->type));
447 			DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
448 			      i, cpu->pal_revision));
449 		}
450 	} else {
451 		smp_num_probed = 1;
452 	}
453 
454 	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n",
455 	       smp_num_probed, cpu_present_map.bits[0]);
456 }
457 
458 /*
459  * Called by smp_init to prepare the secondaries.
460  */
461 void __init
462 smp_prepare_cpus(unsigned int max_cpus)
463 {
464 	/* Take care of some initial bookkeeping.  */
465 	memset(ipi_data, 0, sizeof(ipi_data));
466 
467 	current_thread_info()->cpu = boot_cpuid;
468 
469 	smp_store_cpu_info(boot_cpuid);
470 	smp_setup_percpu_timer(boot_cpuid);
471 
472 	/* Nothing to do on a UP box, or when told not to.  */
473 	if (smp_num_probed == 1 || max_cpus == 0) {
474 		cpu_present_map = cpumask_of_cpu(boot_cpuid);
475 		printk(KERN_INFO "SMP mode deactivated.\n");
476 		return;
477 	}
478 
479 	printk(KERN_INFO "SMP starting up secondaries.\n");
480 
481 	smp_num_cpus = smp_num_probed;
482 }
483 
484 void __devinit
485 smp_prepare_boot_cpu(void)
486 {
487 }
488 
489 int __devinit
490 __cpu_up(unsigned int cpu)
491 {
492 	smp_boot_one_cpu(cpu);
493 
494 	return cpu_online(cpu) ? 0 : -ENOSYS;
495 }
496 
497 void __init
498 smp_cpus_done(unsigned int max_cpus)
499 {
500 	int cpu;
501 	unsigned long bogosum = 0;
502 
503 	for(cpu = 0; cpu < NR_CPUS; cpu++)
504 		if (cpu_online(cpu))
505 			bogosum += cpu_data[cpu].loops_per_jiffy;
506 
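	/* loops_per_jiffy * HZ / 500000 is a CPU's BogoMIPS rating, so the
	   summed value divided by 500000/HZ gives the integer part below and
	   divided by 5000/HZ (mod 100) the two fractional digits; the +2500
	   is a small rounding fudge.  */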
507 	printk(KERN_INFO "SMP: Total of %d processors activated "
508 	       "(%lu.%02lu BogoMIPS).\n",
509 	       num_online_cpus(),
510 	       (bogosum + 2500) / (500000/HZ),
511 	       ((bogosum + 2500) / (5000/HZ)) % 100);
512 }
513 
514 
515 void
516 smp_percpu_timer_interrupt(struct pt_regs *regs)
517 {
518 	int cpu = smp_processor_id();
519 	unsigned long user = user_mode(regs);
520 	struct cpuinfo_alpha *data = &cpu_data[cpu];
521 
522 	/* Record kernel PC.  */
523 	profile_tick(CPU_PROFILING, regs);
524 
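	/* prof_counter counts ticks until the next call to
	   update_process_times(); prof_multiplier is its reload value.  Both
	   stay at 1, since setup_profiling_timer() below rejects any other
	   multiplier.  */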
525 	if (!--data->prof_counter) {
526 		/* We need to make like a normal interrupt -- otherwise
527 		   timer interrupts ignore the global interrupt lock,
528 		   which would be a Bad Thing.  */
529 		irq_enter();
530 
531 		update_process_times(user);
532 
533 		data->prof_counter = data->prof_multiplier;
534 
535 		irq_exit();
536 	}
537 }
538 
539 int __init
540 setup_profiling_timer(unsigned int multiplier)
541 {
542 	return -EINVAL;
543 }
544 
545 
546 static void
547 send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
548 {
549 	int i;
550 
551 	mb();
552 	for_each_cpu_mask(i, to_whom)
553 		set_bit(operation, &ipi_data[i].bits);
554 
555 	mb();
556 	for_each_cpu_mask(i, to_whom)
557 		wripir(i);
558 }
559 
560 /* Structure and data for smp_call_function.  This is designed to
561    minimize static memory requirements.  Plus it looks cleaner.  */
562 
563 struct smp_call_struct {
564 	void (*func) (void *info);
565 	void *info;
566 	long wait;
567 	atomic_t unstarted_count;
568 	atomic_t unfinished_count;
569 };
570 
571 static struct smp_call_struct *smp_call_function_data;
572 
573 /* Atomically drop data into a shared pointer.  The drop succeeds only if
574    the pointer is initially free (NULL); if retry is set, spin until it is.  */
575 
576 static int
577 pointer_lock (void *lock, void *data, int retry)
578 {
579 	void *old, *tmp;
580 
581 	mb();
582  again:
583 	/* Compare and swap with zero.  */
584 	asm volatile (
585 	"1:	ldq_l	%0,%1\n"
586 	"	mov	%3,%2\n"
587 	"	bne	%0,2f\n"
588 	"	stq_c	%2,%1\n"
589 	"	beq	%2,1b\n"
590 	"2:"
591 	: "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
592 	: "r"(data)
593 	: "memory");
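	/* The asm above is an LL/SC compare-and-swap against zero: ldq_l
	   loads the current pointer, bne bails out if it is already non-NULL,
	   and stq_c tries to store DATA, retrying if the store-conditional
	   lost its reservation.  Roughly, as a sketch in terms of the
	   generic cmpxchg() helper:

		if (cmpxchg((void **)lock, NULL, data) == NULL)
			return 0;
	 */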
594 
595 	if (old == 0)
596 		return 0;
597 	if (! retry)
598 		return -EBUSY;
599 
600 	while (*(void **)lock)
601 		barrier();
602 	goto again;
603 }
604 
605 void
606 handle_ipi(struct pt_regs *regs)
607 {
608 	int this_cpu = smp_processor_id();
609 	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
610 	unsigned long ops;
611 
612 #if 0
613 	DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
614 	      this_cpu, *pending_ipis, regs->pc));
615 #endif
616 
617 	mb();	/* Order interrupt and bit testing. */
618 	while ((ops = xchg(pending_ipis, 0)) != 0) {
619 	  mb();	/* Order bit clearing and data access. */
620 	  do {
621 		unsigned long which;
622 
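		/* Peel off the lowest pending bit: ops & -ops isolates it and
		   __ffs() turns the lone bit into its index, e.g. a pending
		   mask of 0b101 is serviced as IPI_RESCHEDULE and then
		   IPI_CPU_STOP.  */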
623 		which = ops & -ops;
624 		ops &= ~which;
625 		which = __ffs(which);
626 
627 		switch (which) {
628 		case IPI_RESCHEDULE:
629 			/* Reschedule callback.  Everything to be done
630 			   is done by the interrupt return path.  */
631 			break;
632 
633 		case IPI_CALL_FUNC:
634 		    {
635 			struct smp_call_struct *data;
636 			void (*func)(void *info);
637 			void *info;
638 			int wait;
639 
640 			data = smp_call_function_data;
641 			func = data->func;
642 			info = data->info;
643 			wait = data->wait;
644 
645 			/* Notify the sending CPU that the data has been
646 			   received, and execution is about to begin.  */
647 			mb();
648 			atomic_dec (&data->unstarted_count);
649 
650 			/* At this point the structure may be gone unless
651 			   wait is true.  */
652 			(*func)(info);
653 
654 			/* Notify the sending CPU that the task is done.  */
655 			mb();
656 			if (wait) atomic_dec (&data->unfinished_count);
657 			break;
658 		    }
659 
660 		case IPI_CPU_STOP:
661 			halt();
662 
663 		default:
664 			printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
665 			       this_cpu, which);
666 			break;
667 		}
668 	  } while (ops);
669 
670 	  mb();	/* Order data access and bit testing. */
671 	}
672 
673 	cpu_data[this_cpu].ipi_count++;
674 
675 	if (hwrpb->txrdy)
676 		recv_secondary_console_msg();
677 }
678 
679 void
680 smp_send_reschedule(int cpu)
681 {
682 #ifdef DEBUG_IPI_MSG
683 	if (cpu == hard_smp_processor_id())
684 		printk(KERN_WARNING
685 		       "smp_send_reschedule: Sending IPI to self.\n");
686 #endif
687 	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
688 }
689 
690 void
691 smp_send_stop(void)
692 {
693 	cpumask_t to_whom = cpu_possible_map;
694 	cpu_clear(smp_processor_id(), to_whom);
695 #ifdef DEBUG_IPI_MSG
696 	if (hard_smp_processor_id() != boot_cpu_id)
697 		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
698 #endif
699 	send_ipi_message(to_whom, IPI_CPU_STOP);
700 }
701 
702 /*
703  * Run a function on all other CPUs.
704  *  <func>	The function to run. This must be fast and non-blocking.
705  *  <info>	An arbitrary pointer to pass to the function.
706  *  <retry>	If true, keep retrying until ready.
707  *  <wait>	If true, wait until function has completed on other CPUs.
708  *  [RETURNS]   0 on success, else a negative status code.
709  *
710  * Does not return until remote CPUs are nearly ready to execute <func>
711  * or are executing it, or have already executed it.
712  * You must not call this function with disabled interrupts or from a
713  * hardware interrupt handler or from a bottom half handler.
714  */
715 
716 int
717 smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
718 			  int wait, cpumask_t to_whom)
719 {
720 	struct smp_call_struct data;
721 	unsigned long timeout;
722 	int num_cpus_to_call;
723 
724 	/* Can deadlock when called with interrupts disabled */
725 	WARN_ON(irqs_disabled());
726 
727 	data.func = func;
728 	data.info = info;
729 	data.wait = wait;
730 
731 	cpu_clear(smp_processor_id(), to_whom);
732 	num_cpus_to_call = cpus_weight(to_whom);
733 
734 	atomic_set(&data.unstarted_count, num_cpus_to_call);
735 	atomic_set(&data.unfinished_count, num_cpus_to_call);
736 
737 	/* Acquire the smp_call_function_data mutex.  */
738 	if (pointer_lock(&smp_call_function_data, &data, retry))
739 		return -EBUSY;
740 
741 	/* Send a message to the requested CPUs.  */
742 	send_ipi_message(to_whom, IPI_CALL_FUNC);
743 
744 	/* Wait for a minimal response.  */
745 	timeout = jiffies + HZ;
746 	while (atomic_read (&data.unstarted_count) > 0
747 	       && time_before (jiffies, timeout))
748 		barrier();
749 
750 	/* If there's no response yet, log a message but allow a longer
751 	 * timeout period -- if we get a response this time, log
752 	 * a message saying when we got it.
753 	 */
754 	if (atomic_read(&data.unstarted_count) > 0) {
755 		long start_time = jiffies;
756 		printk(KERN_ERR "%s: initial timeout -- trying long wait\n",
757 		       __FUNCTION__);
758 		timeout = jiffies + 30 * HZ;
759 		while (atomic_read(&data.unstarted_count) > 0
760 		       && time_before(jiffies, timeout))
761 			barrier();
762 		if (atomic_read(&data.unstarted_count) <= 0) {
763 			long delta = jiffies - start_time;
764 			printk(KERN_ERR
765 			       "%s: response %ld.%ld seconds into long wait\n",
766 			       __FUNCTION__, delta / HZ,
767 			       (100 * (delta - ((delta / HZ) * HZ))) / HZ);
768 		}
769 	}
770 
771 	/* We either got one or timed out -- clear the lock. */
772 	mb();
773 	smp_call_function_data = NULL;
774 
775 	/*
776 	 * If after both the initial and long timeout periods we still don't
777 	 * have a response, something is very wrong...
778 	 */
779 	BUG_ON(atomic_read (&data.unstarted_count) > 0);
780 
781 	/* Wait for a complete response, if needed.  */
782 	if (wait) {
783 		while (atomic_read (&data.unfinished_count) > 0)
784 			barrier();
785 	}
786 
787 	return 0;
788 }
789 
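/* The old smp_call_function() calling convention (see the 2001 note at the
   top of the file): broadcast to every online CPU except ourselves.  */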
790 int
791 smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
792 {
793 	return smp_call_function_on_cpu (func, info, retry, wait,
794 					 cpu_online_map);
795 }
796 
797 static void
798 ipi_imb(void *ignored)
799 {
800 	imb();
801 }
802 
803 void
804 smp_imb(void)
805 {
806 	/* Must wait for the other processors to flush their icaches before continuing.  */
807 	if (on_each_cpu(ipi_imb, NULL, 1, 1))
808 		printk(KERN_CRIT "smp_imb: timed out\n");
809 }
810 
811 static void
812 ipi_flush_tlb_all(void *ignored)
813 {
814 	tbia();
815 }
816 
817 void
818 flush_tlb_all(void)
819 {
820 	/* Although we don't have any data to pass, we do want to
821 	   synchronize with the other processors.  */
822 	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) {
823 		printk(KERN_CRIT "flush_tlb_all: timed out\n");
824 	}
825 }
826 
827 #define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
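/* asn_lock is set by the mm-switching code while this CPU's ASN (address
   space number) bookkeeping is in flux; the IPI flush handlers below then
   fall back to flush_tlb_other() rather than touch the live context.  */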
828 
829 static void
830 ipi_flush_tlb_mm(void *x)
831 {
832 	struct mm_struct *mm = (struct mm_struct *) x;
833 	if (mm == current->active_mm && !asn_locked())
834 		flush_tlb_current(mm);
835 	else
836 		flush_tlb_other(mm);
837 }
838 
839 void
840 flush_tlb_mm(struct mm_struct *mm)
841 {
842 	preempt_disable();
843 
844 	if (mm == current->active_mm) {
845 		flush_tlb_current(mm);
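		/* If we are the only user of this mm, no other CPU can have it
		   active; zeroing their saved context forces a new ASN (and so
		   a fresh TLB context) if the mm ever runs there, so no IPI is
		   needed.  */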
846 		if (atomic_read(&mm->mm_users) <= 1) {
847 			int cpu, this_cpu = smp_processor_id();
848 			for (cpu = 0; cpu < NR_CPUS; cpu++) {
849 				if (!cpu_online(cpu) || cpu == this_cpu)
850 					continue;
851 				if (mm->context[cpu])
852 					mm->context[cpu] = 0;
853 			}
854 			preempt_enable();
855 			return;
856 		}
857 	}
858 
859 	if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
860 		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
861 	}
862 
863 	preempt_enable();
864 }
865 
866 struct flush_tlb_page_struct {
867 	struct vm_area_struct *vma;
868 	struct mm_struct *mm;
869 	unsigned long addr;
870 };
871 
872 static void
873 ipi_flush_tlb_page(void *x)
874 {
875 	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
876 	struct mm_struct * mm = data->mm;
877 
878 	if (mm == current->active_mm && !asn_locked())
879 		flush_tlb_current_page(mm, data->vma, data->addr);
880 	else
881 		flush_tlb_other(mm);
882 }
883 
884 void
885 flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
886 {
887 	struct flush_tlb_page_struct data;
888 	struct mm_struct *mm = vma->vm_mm;
889 
890 	preempt_disable();
891 
892 	if (mm == current->active_mm) {
893 		flush_tlb_current_page(mm, vma, addr);
894 		if (atomic_read(&mm->mm_users) <= 1) {
895 			int cpu, this_cpu = smp_processor_id();
896 			for (cpu = 0; cpu < NR_CPUS; cpu++) {
897 				if (!cpu_online(cpu) || cpu == this_cpu)
898 					continue;
899 				if (mm->context[cpu])
900 					mm->context[cpu] = 0;
901 			}
902 			preempt_enable();
903 			return;
904 		}
905 	}
906 
907 	data.vma = vma;
908 	data.mm = mm;
909 	data.addr = addr;
910 
911 	if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
912 		printk(KERN_CRIT "flush_tlb_page: timed out\n");
913 	}
914 
915 	preempt_enable();
916 }
917 
918 void
919 flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
920 {
921 	/* On the Alpha we always flush the whole user tlb.  */
922 	flush_tlb_mm(vma->vm_mm);
923 }
924 
925 static void
926 ipi_flush_icache_page(void *x)
927 {
928 	struct mm_struct *mm = (struct mm_struct *) x;
929 	if (mm == current->active_mm && !asn_locked())
930 		__load_new_mm_context(mm);
931 	else
932 		flush_tlb_other(mm);
933 }
934 
935 void
936 flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
937 			unsigned long addr, int len)
938 {
939 	struct mm_struct *mm = vma->vm_mm;
940 
941 	if ((vma->vm_flags & VM_EXEC) == 0)
942 		return;
943 
944 	preempt_disable();
945 
946 	if (mm == current->active_mm) {
947 		__load_new_mm_context(mm);
948 		if (atomic_read(&mm->mm_users) <= 1) {
949 			int cpu, this_cpu = smp_processor_id();
950 			for (cpu = 0; cpu < NR_CPUS; cpu++) {
951 				if (!cpu_online(cpu) || cpu == this_cpu)
952 					continue;
953 				if (mm->context[cpu])
954 					mm->context[cpu] = 0;
955 			}
956 			preempt_enable();
957 			return;
958 		}
959 	}
960 
961 	if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
962 		printk(KERN_CRIT "flush_icache_page: timed out\n");
963 	}
964 
965 	preempt_enable();
966 }
967