xref: /linux/arch/alpha/kernel/smp.c (revision 7ec7fb394298c212c30e063c57e0aa895efe9439)
1 /*
2  *	linux/arch/alpha/kernel/smp.c
3  *
4  *      2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
5  *            Renamed the modified smp_call_function to smp_call_function_on_cpu().
6  *            Created a function that conforms to the old calling convention
7  *            of smp_call_function().
8  *
9  *            This is helpful for DCPI.
10  *
11  */
12 
13 #include <linux/errno.h>
14 #include <linux/kernel.h>
15 #include <linux/kernel_stat.h>
16 #include <linux/module.h>
17 #include <linux/sched.h>
18 #include <linux/mm.h>
19 #include <linux/err.h>
20 #include <linux/threads.h>
21 #include <linux/smp.h>
22 #include <linux/interrupt.h>
23 #include <linux/init.h>
24 #include <linux/delay.h>
25 #include <linux/spinlock.h>
26 #include <linux/irq.h>
27 #include <linux/cache.h>
28 #include <linux/profile.h>
29 #include <linux/bitops.h>
30 #include <linux/cpu.h>
31 
32 #include <asm/hwrpb.h>
33 #include <asm/ptrace.h>
34 #include <asm/atomic.h>
35 
36 #include <asm/io.h>
37 #include <asm/irq.h>
38 #include <asm/pgtable.h>
39 #include <asm/pgalloc.h>
40 #include <asm/mmu_context.h>
41 #include <asm/tlbflush.h>
42 
43 #include "proto.h"
44 #include "irq_impl.h"
45 
46 
47 #define DEBUG_SMP 0
48 #if DEBUG_SMP
49 #define DBGS(args)	printk args
50 #else
51 #define DBGS(args)
52 #endif
53 
54 /* A collection of per-processor data.  */
55 struct cpuinfo_alpha cpu_data[NR_CPUS];
56 EXPORT_SYMBOL(cpu_data);
57 
58 /* A collection of single bit ipi messages.  */
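/* Each CPU's word is padded to its own cache line so that senders and
   the receiving CPU do not false-share.  */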
59 static struct {
60 	unsigned long bits ____cacheline_aligned;
61 } ipi_data[NR_CPUS] __cacheline_aligned;
62 
63 enum ipi_message_type {
64 	IPI_RESCHEDULE,
65 	IPI_CALL_FUNC,
66 	IPI_CALL_FUNC_SINGLE,
67 	IPI_CPU_STOP,
68 };
69 
70 /* -1 tells a secondary to wait, 0 lets it proceed, and the secondary
   sets it to 1 once it is alive.  */
71 static int smp_secondary_alive __devinitdata = 0;
72 
73 int smp_num_probed;		/* Internal processor count */
74 int smp_num_cpus = 1;		/* Number that came online.  */
75 EXPORT_SYMBOL(smp_num_cpus);
76 
77 /*
78  * Called by both boot and secondaries to move global data into
79  *  per-processor storage.
80  */
81 static inline void __init
82 smp_store_cpu_info(int cpuid)
83 {
84 	cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
85 	cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
86 	cpu_data[cpuid].need_new_asn = 0;
87 	cpu_data[cpuid].asn_lock = 0;
88 }
89 
90 /*
91  * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
92  */
93 static inline void __init
94 smp_setup_percpu_timer(int cpuid)
95 {
96 	cpu_data[cpuid].prof_counter = 1;
97 	cpu_data[cpuid].prof_multiplier = 1;
98 }
99 
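/* Spin until the boot CPU signals (smp_secondary_alive == 0) that it is
   safe for us to run calibrate_delay.  */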
100 static void __init
101 wait_boot_cpu_to_stop(int cpuid)
102 {
103 	unsigned long stop = jiffies + 10*HZ;
104 
105 	while (time_before(jiffies, stop)) {
106 	        if (!smp_secondary_alive)
107 			return;
108 		barrier();
109 	}
110 
111 	printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
112 	for (;;)
113 		barrier();
114 }
115 
116 /*
117  * Where secondaries begin a life of C.
118  */
119 void __cpuinit
120 smp_callin(void)
121 {
122 	int cpuid = hard_smp_processor_id();
123 
124 	if (cpu_test_and_set(cpuid, cpu_online_map)) {
125 		printk("??, cpu 0x%x already present??\n", cpuid);
126 		BUG();
127 	}
128 
129 	/* Turn on machine checks.  */
130 	wrmces(7);
131 
132 	/* Set trap vectors.  */
133 	trap_init();
134 
135 	/* Set interrupt vector.  */
136 	wrent(entInt, 0);
137 
138 	/* Get our local ticker going. */
139 	smp_setup_percpu_timer(cpuid);
140 
141 	/* Call platform-specific callin, if specified */
142 	if (alpha_mv.smp_callin) alpha_mv.smp_callin();
143 
144 	/* All kernel threads share the same mm context.  */
145 	atomic_inc(&init_mm.mm_count);
146 	current->active_mm = &init_mm;
147 
148 	/* inform the notifiers about the new cpu */
149 	notify_cpu_starting(cpuid);
150 
151 	/* Interrupts must be on so the delay calibration below sees timer ticks.  */
152 	local_irq_enable();
153 
154 	/* Wait for the boot CPU to stop, with irqs enabled, before
155 	   running calibrate_delay. */
156 	wait_boot_cpu_to_stop(cpuid);
157 	mb();
158 	calibrate_delay();
159 
160 	smp_store_cpu_info(cpuid);
161 	/* Allow the master to continue only after we have written loops_per_jiffy.  */
162 	wmb();
163 	smp_secondary_alive = 1;
164 
165 	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
166 	      cpuid, current, current->active_mm));
167 
168 	/* Enter the idle loop; we never return from here.  */
169 	cpu_idle();
170 }
171 
172 /* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
173 static int __devinit
174 wait_for_txrdy (unsigned long cpumask)
175 {
176 	unsigned long timeout;
177 
178 	if (!(hwrpb->txrdy & cpumask))
179 		return 0;
180 
181 	timeout = jiffies + 10*HZ;
182 	while (time_before(jiffies, timeout)) {
183 		if (!(hwrpb->txrdy & cpumask))
184 			return 0;
185 		udelay(10);
186 		barrier();
187 	}
188 
189 	return -1;
190 }
191 
192 /*
193  * Send a message to a secondary's console.  "START" is one such
194  * interesting message.  ;-)
195  */
196 static void __cpuinit
197 send_secondary_console_msg(char *str, int cpuid)
198 {
199 	struct percpu_struct *cpu;
200 	register char *cp1, *cp2;
201 	unsigned long cpumask;
202 	size_t len;
203 
204 	cpu = (struct percpu_struct *)
205 		((char*)hwrpb
206 		 + hwrpb->processor_offset
207 		 + cpuid * hwrpb->processor_size);
208 
209 	cpumask = (1UL << cpuid);
210 	if (wait_for_txrdy(cpumask))
211 		goto timeout;
212 
213 	cp2 = str;
214 	len = strlen(cp2);
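	/* The console IPC buffer holds the message length in its first
	   slot and the message text starting at the second.  */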
215 	*(unsigned int *)&cpu->ipc_buffer[0] = len;
216 	cp1 = (char *) &cpu->ipc_buffer[1];
217 	memcpy(cp1, cp2, len);
218 
219 	/* Make the message visible before atomically setting the target's rxrdy bit.  */
220 	wmb();
221 	set_bit(cpuid, &hwrpb->rxrdy);
222 
223 	if (wait_for_txrdy(cpumask))
224 		goto timeout;
225 	return;
226 
227  timeout:
228 	printk("Processor %x not ready\n", cpuid);
229 }
230 
231 /*
232  * A secondary console wants to send a message.  Receive it.
233  */
234 static void
235 recv_secondary_console_msg(void)
236 {
237 	int mycpu, i, cnt;
238 	unsigned long txrdy = hwrpb->txrdy;
239 	char *cp1, *cp2, buf[80];
240 	struct percpu_struct *cpu;
241 
242 	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));
243 
244 	mycpu = hard_smp_processor_id();
245 
246 	for (i = 0; i < NR_CPUS; i++) {
247 		if (!(txrdy & (1UL << i)))
248 			continue;
249 
250 		DBGS(("recv_secondary_console_msg: "
251 		      "TXRDY contains CPU %d.\n", i));
252 
253 		cpu = (struct percpu_struct *)
254 		  ((char*)hwrpb
255 		   + hwrpb->processor_offset
256 		   + i * hwrpb->processor_size);
257 
258  		DBGS(("recv_secondary_console_msg: on %d from %d"
259 		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
260 		      mycpu, i, cpu->halt_reason, cpu->flags));
261 
262 		cnt = cpu->ipc_buffer[0] >> 32;
263 		if (cnt <= 0 || cnt >= 80)
264 			strcpy(buf, "<<< BOGUS MSG >>>");
265 		else {
266 			cp1 = (char *) &cpu->ipc_buffer[11];
267 			cp2 = buf;
268 			strcpy(cp2, cp1);
269 
270 			while ((cp2 = strchr(cp2, '\r')) != 0) {
271 				*cp2 = ' ';
272 				if (cp2[1] == '\n')
273 					cp2[1] = ' ';
274 			}
275 		}
276 
277 		DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
278 		      "message is '%s'\n", mycpu, buf));
279 	}
280 
281 	hwrpb->txrdy = 0;
282 }
283 
284 /*
285  * Convince the console to have a secondary cpu begin execution.
286  */
287 static int __cpuinit
288 secondary_cpu_start(int cpuid, struct task_struct *idle)
289 {
290 	struct percpu_struct *cpu;
291 	struct pcb_struct *hwpcb, *ipcb;
292 	unsigned long timeout;
293 
294 	cpu = (struct percpu_struct *)
295 		((char*)hwrpb
296 		 + hwrpb->processor_offset
297 		 + cpuid * hwrpb->processor_size);
298 	hwpcb = (struct pcb_struct *) cpu->hwpcb;
299 	ipcb = &task_thread_info(idle)->pcb;
300 
301 	/* Initialize the CPU's HWPCB to something just good enough for
302 	   us to get started.  Immediately after starting, we'll swpctx
303 	   to the target idle task's pcb.  Reuse the stack in the mean
304 	   time.  Precalculate the target PCBB.  */
305 	hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
306 	hwpcb->usp = 0;
307 	hwpcb->ptbr = ipcb->ptbr;
308 	hwpcb->pcc = 0;
309 	hwpcb->asn = 0;
310 	hwpcb->unique = virt_to_phys(ipcb);
311 	hwpcb->flags = ipcb->flags;
312 	hwpcb->res1 = hwpcb->res2 = 0;
313 
314 #if 0
315 	DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
316 	      hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
317 #endif
318 	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
319 	      cpuid, idle->state, ipcb->flags));
320 
321 	/* Set up the HWRPB fields that SRM uses to activate the secondary CPU.  */
322 	hwrpb->CPU_restart = __smp_callin;
323 	hwrpb->CPU_restart_data = (unsigned long) __smp_callin;
324 
325 	/* Recalculate and update the HWRPB checksum */
326 	hwrpb_update_checksum(hwrpb);
327 
328 	/*
329 	 * Send a "start" command to the specified processor.
330 	 */
331 
332 	/* SRM III 3.4.1.3 */
333 	cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
334 	cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
335 	wmb();
336 
337 	send_secondary_console_msg("START\r\n", cpuid);
338 
339 	/* Wait 10 seconds for an ACK from the console.  */
340 	timeout = jiffies + 10*HZ;
341 	while (time_before(jiffies, timeout)) {
342 		if (cpu->flags & 1)
343 			goto started;
344 		udelay(10);
345 		barrier();
346 	}
347 	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
348 	return -1;
349 
350  started:
351 	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
352 	return 0;
353 }
354 
355 /*
356  * Bring one cpu online.
357  */
358 static int __cpuinit
359 smp_boot_one_cpu(int cpuid)
360 {
361 	struct task_struct *idle;
362 	unsigned long timeout;
363 
364 	/* Cook up an idle task for this CPU.  Note that the entry
365 	   point is irrelevant -- it's going to start where
366 	   HWRPB.CPU_restart says to start.  But this gets all the
367 	   other task-y sort of data structures set up like we wish.
368 	   We use fork_idle rather than kernel_thread since we must
369 	   avoid rescheduling the child.  */
370 	idle = fork_idle(cpuid);
371 	if (IS_ERR(idle))
372 		panic("failed fork for CPU %d", cpuid);
373 
374 	DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
375 	      cpuid, idle->state, idle->flags));
376 
377 	/* Signal the secondary to wait a moment.  */
378 	smp_secondary_alive = -1;
379 
380 	/* Whirrr, whirrr, whirrrrrrrrr... */
381 	if (secondary_cpu_start(cpuid, idle))
382 		return -1;
383 
384 	/* Notify the secondary CPU it can run calibrate_delay.  */
385 	mb();
386 	smp_secondary_alive = 0;
387 
388 	/* We've been acked by the console; wait one second for
389 	   the task to start up for real.  */
390 	timeout = jiffies + 1*HZ;
391 	while (time_before(jiffies, timeout)) {
392 		if (smp_secondary_alive == 1)
393 			goto alive;
394 		udelay(10);
395 		barrier();
396 	}
397 
398 	/* We failed to boot the CPU.  */
399 
400 	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
401 	return -1;
402 
403  alive:
404 	/* Another "Red Snapper". */
405 	return 0;
406 }
407 
408 /*
409  * Called from setup_arch.  Detect an SMP system and which processors
410  * are present.
411  */
412 void __init
413 setup_smp(void)
414 {
415 	struct percpu_struct *cpubase, *cpu;
416 	unsigned long i;
417 
418 	if (boot_cpuid != 0) {
419 		printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
420 		       boot_cpuid);
421 	}
422 
423 	if (hwrpb->nr_processors > 1) {
424 		int boot_cpu_palrev;
425 
426 		DBGS(("setup_smp: nr_processors %ld\n",
427 		      hwrpb->nr_processors));
428 
429 		cpubase = (struct percpu_struct *)
430 			((char*)hwrpb + hwrpb->processor_offset);
431 		boot_cpu_palrev = cpubase->pal_revision;
432 
433 		for (i = 0; i < hwrpb->nr_processors; i++) {
434 			cpu = (struct percpu_struct *)
435 				((char *)cpubase + i*hwrpb->processor_size);
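			/* 0x1cc covers the per-CPU HWRPB flag bits for
			   "processor present/available" and "PALcode
			   valid/loaded"; only CPUs with all of them set
			   are usable.  */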
436 			if ((cpu->flags & 0x1cc) == 0x1cc) {
437 				smp_num_probed++;
438 				cpu_set(i, cpu_possible_map);
439 				cpu_set(i, cpu_present_map);
440 				cpu->pal_revision = boot_cpu_palrev;
441 			}
442 
443 			DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
444 			      i, cpu->flags, cpu->type));
445 			DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
446 			      i, cpu->pal_revision));
447 		}
448 	} else {
449 		smp_num_probed = 1;
450 	}
451 
452 	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n",
453 	       smp_num_probed, cpu_present_map.bits[0]);
454 }
455 
456 /*
457  * Called by smp_init to prepare the secondaries.
458  */
459 void __init
460 smp_prepare_cpus(unsigned int max_cpus)
461 {
462 	/* Take care of some initial bookkeeping.  */
463 	memset(ipi_data, 0, sizeof(ipi_data));
464 
465 	current_thread_info()->cpu = boot_cpuid;
466 
467 	smp_store_cpu_info(boot_cpuid);
468 	smp_setup_percpu_timer(boot_cpuid);
469 
470 	/* Nothing to do on a UP box, or when told not to.  */
471 	if (smp_num_probed == 1 || max_cpus == 0) {
472 		cpu_possible_map = cpumask_of_cpu(boot_cpuid);
473 		cpu_present_map = cpumask_of_cpu(boot_cpuid);
474 		printk(KERN_INFO "SMP mode deactivated.\n");
475 		return;
476 	}
477 
478 	printk(KERN_INFO "SMP starting up secondaries.\n");
479 
480 	smp_num_cpus = smp_num_probed;
481 }
482 
483 void __devinit
484 smp_prepare_boot_cpu(void)
485 {
486 }
487 
488 int __cpuinit
489 __cpu_up(unsigned int cpu)
490 {
491 	smp_boot_one_cpu(cpu);
492 
493 	return cpu_online(cpu) ? 0 : -ENOSYS;
494 }
495 
496 void __init
497 smp_cpus_done(unsigned int max_cpus)
498 {
499 	int cpu;
500 	unsigned long bogosum = 0;
501 
502 	for (cpu = 0; cpu < NR_CPUS; cpu++)
503 		if (cpu_online(cpu))
504 			bogosum += cpu_data[cpu].loops_per_jiffy;
505 
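	/* BogoMIPS = loops_per_jiffy * HZ / 500000; print the summed
	   value rounded to two decimal places.  */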
506 	printk(KERN_INFO "SMP: Total of %d processors activated "
507 	       "(%lu.%02lu BogoMIPS).\n",
508 	       num_online_cpus(),
509 	       (bogosum + 2500) / (500000/HZ),
510 	       ((bogosum + 2500) / (5000/HZ)) % 100);
511 }
512 
513 
514 void
515 smp_percpu_timer_interrupt(struct pt_regs *regs)
516 {
517 	struct pt_regs *old_regs;
518 	int cpu = smp_processor_id();
519 	unsigned long user = user_mode(regs);
520 	struct cpuinfo_alpha *data = &cpu_data[cpu];
521 
522 	old_regs = set_irq_regs(regs);
523 
524 	/* Record kernel PC.  */
525 	profile_tick(CPU_PROFILING);
526 
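	/* prof_counter divides the tick rate: process-time accounting
	   runs only when it hits zero, and it is then reloaded from
	   prof_multiplier.  */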
527 	if (!--data->prof_counter) {
528 		/* We need to make like a normal interrupt -- otherwise
529 		   timer interrupts ignore the global interrupt lock,
530 		   which would be a Bad Thing.  */
531 		irq_enter();
532 
533 		update_process_times(user);
534 
535 		data->prof_counter = data->prof_multiplier;
536 
537 		irq_exit();
538 	}
539 	set_irq_regs(old_regs);
540 }
541 
542 int
543 setup_profiling_timer(unsigned int multiplier)
544 {
545 	return -EINVAL;
546 }
547 
548 
549 static void
550 send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
551 {
552 	int i;
553 
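	/* The first barrier orders the caller's data against the IPI
	   bits we set; the second orders those bits against the
	   interrupt raised by wripir().  */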
554 	mb();
555 	for_each_cpu_mask(i, to_whom)
556 		set_bit(operation, &ipi_data[i].bits);
557 
558 	mb();
559 	for_each_cpu_mask(i, to_whom)
560 		wripir(i);
561 }
562 
563 void
564 handle_ipi(struct pt_regs *regs)
565 {
566 	int this_cpu = smp_processor_id();
567 	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
568 	unsigned long ops;
569 
570 #if 0
571 	DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
572 	      this_cpu, *pending_ipis, regs->pc));
573 #endif
574 
575 	mb();	/* Order interrupt and bit testing. */
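	/* xchg() atomically fetches and clears the whole pending mask;
	   any IPI that arrives while we work is picked up on the next
	   pass of this outer loop.  */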
576 	while ((ops = xchg(pending_ipis, 0)) != 0) {
577 	  mb();	/* Order bit clearing and data access. */
578 	  do {
579 		unsigned long which;
580 
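		/* ops & -ops isolates the lowest pending bit; clear it
		   from ops and turn it into a message number with
		   __ffs().  */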
581 		which = ops & -ops;
582 		ops &= ~which;
583 		which = __ffs(which);
584 
585 		switch (which) {
586 		case IPI_RESCHEDULE:
587 			/* Reschedule callback.  Everything to be done
588 			   is done by the interrupt return path.  */
589 			break;
590 
591 		case IPI_CALL_FUNC:
592 			generic_smp_call_function_interrupt();
593 			break;
594 
595 		case IPI_CALL_FUNC_SINGLE:
596 			generic_smp_call_function_single_interrupt();
597 			break;
598 
599 		case IPI_CPU_STOP:
600 			halt();
601 
602 		default:
603 			printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
604 			       this_cpu, which);
605 			break;
606 		}
607 	  } while (ops);
608 
609 	  mb();	/* Order data access and bit testing. */
610 	}
611 
612 	cpu_data[this_cpu].ipi_count++;
613 
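	/* A secondary's console may have raised this interrupt to pass
	   us a message; check txrdy and drain it.  */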
614 	if (hwrpb->txrdy)
615 		recv_secondary_console_msg();
616 }
617 
618 void
619 smp_send_reschedule(int cpu)
620 {
621 #ifdef DEBUG_IPI_MSG
622 	if (cpu == hard_smp_processor_id())
623 		printk(KERN_WARNING
624 		       "smp_send_reschedule: Sending IPI to self.\n");
625 #endif
626 	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
627 }
628 
629 void
630 smp_send_stop(void)
631 {
632 	cpumask_t to_whom = cpu_possible_map;
633 	cpu_clear(smp_processor_id(), to_whom);
634 #ifdef DEBUG_IPI_MSG
635 	if (hard_smp_processor_id() != boot_cpu_id)
636 		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
637 #endif
638 	send_ipi_message(to_whom, IPI_CPU_STOP);
639 }
640 
641 void arch_send_call_function_ipi(cpumask_t mask)
642 {
643 	send_ipi_message(mask, IPI_CALL_FUNC);
644 }
645 
646 void arch_send_call_function_single_ipi(int cpu)
647 {
648 	send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
649 }
650 
651 static void
652 ipi_imb(void *ignored)
653 {
654 	imb();
655 }
656 
657 void
658 smp_imb(void)
659 {
660 	/* Must wait for the other processors to flush their icaches before continuing. */
661 	if (on_each_cpu(ipi_imb, NULL, 1))
662 		printk(KERN_CRIT "smp_imb: timed out\n");
663 }
664 EXPORT_SYMBOL(smp_imb);
665 
666 static void
667 ipi_flush_tlb_all(void *ignored)
668 {
669 	tbia();
670 }
671 
672 void
673 flush_tlb_all(void)
674 {
675 	/* Although we don't have any data to pass, we do want to
676 	   synchronize with the other processors.  */
677 	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1)) {
678 		printk(KERN_CRIT "flush_tlb_all: timed out\n");
679 	}
680 }
681 
682 #define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
683 
684 static void
685 ipi_flush_tlb_mm(void *x)
686 {
687 	struct mm_struct *mm = (struct mm_struct *) x;
688 	if (mm == current->active_mm && !asn_locked())
689 		flush_tlb_current(mm);
690 	else
691 		flush_tlb_other(mm);
692 }
693 
694 void
695 flush_tlb_mm(struct mm_struct *mm)
696 {
697 	preempt_disable();
698 
699 	if (mm == current->active_mm) {
700 		flush_tlb_current(mm);
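		/* If we are the only user of this mm, skip the cross-CPU
		   IPI and just invalidate the ASNs other CPUs may still
		   hold for it.  */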
701 		if (atomic_read(&mm->mm_users) <= 1) {
702 			int cpu, this_cpu = smp_processor_id();
703 			for (cpu = 0; cpu < NR_CPUS; cpu++) {
704 				if (!cpu_online(cpu) || cpu == this_cpu)
705 					continue;
706 				if (mm->context[cpu])
707 					mm->context[cpu] = 0;
708 			}
709 			preempt_enable();
710 			return;
711 		}
712 	}
713 
714 	if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) {
715 		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
716 	}
717 
718 	preempt_enable();
719 }
720 EXPORT_SYMBOL(flush_tlb_mm);
721 
722 struct flush_tlb_page_struct {
723 	struct vm_area_struct *vma;
724 	struct mm_struct *mm;
725 	unsigned long addr;
726 };
727 
728 static void
729 ipi_flush_tlb_page(void *x)
730 {
731 	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
732 	struct mm_struct * mm = data->mm;
733 
734 	if (mm == current->active_mm && !asn_locked())
735 		flush_tlb_current_page(mm, data->vma, data->addr);
736 	else
737 		flush_tlb_other(mm);
738 }
739 
740 void
741 flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
742 {
743 	struct flush_tlb_page_struct data;
744 	struct mm_struct *mm = vma->vm_mm;
745 
746 	preempt_disable();
747 
748 	if (mm == current->active_mm) {
749 		flush_tlb_current_page(mm, vma, addr);
750 		if (atomic_read(&mm->mm_users) <= 1) {
751 			int cpu, this_cpu = smp_processor_id();
752 			for (cpu = 0; cpu < NR_CPUS; cpu++) {
753 				if (!cpu_online(cpu) || cpu == this_cpu)
754 					continue;
755 				if (mm->context[cpu])
756 					mm->context[cpu] = 0;
757 			}
758 			preempt_enable();
759 			return;
760 		}
761 	}
762 
763 	data.vma = vma;
764 	data.mm = mm;
765 	data.addr = addr;
766 
767 	if (smp_call_function(ipi_flush_tlb_page, &data, 1)) {
768 		printk(KERN_CRIT "flush_tlb_page: timed out\n");
769 	}
770 
771 	preempt_enable();
772 }
773 EXPORT_SYMBOL(flush_tlb_page);
774 
775 void
776 flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
777 {
778 	/* On the Alpha we always flush the whole user tlb.  */
779 	flush_tlb_mm(vma->vm_mm);
780 }
781 EXPORT_SYMBOL(flush_tlb_range);
782 
783 static void
784 ipi_flush_icache_page(void *x)
785 {
786 	struct mm_struct *mm = (struct mm_struct *) x;
787 	if (mm == current->active_mm && !asn_locked())
788 		__load_new_mm_context(mm);
789 	else
790 		flush_tlb_other(mm);
791 }
792 
793 void
794 flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
795 			unsigned long addr, int len)
796 {
797 	struct mm_struct *mm = vma->vm_mm;
798 
799 	if ((vma->vm_flags & VM_EXEC) == 0)
800 		return;
801 
802 	preempt_disable();
803 
804 	if (mm == current->active_mm) {
805 		__load_new_mm_context(mm);
806 		if (atomic_read(&mm->mm_users) <= 1) {
807 			int cpu, this_cpu = smp_processor_id();
808 			for (cpu = 0; cpu < NR_CPUS; cpu++) {
809 				if (!cpu_online(cpu) || cpu == this_cpu)
810 					continue;
811 				if (mm->context[cpu])
812 					mm->context[cpu] = 0;
813 			}
814 			preempt_enable();
815 			return;
816 		}
817 	}
818 
819 	if (smp_call_function(ipi_flush_icache_page, mm, 1)) {
820 		printk(KERN_CRIT "flush_icache_page: timed out\n");
821 	}
822 
823 	preempt_enable();
824 }
825