/*
 *	linux/arch/alpha/kernel/smp.c
 *
 *      2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
 *            Renamed the modified smp_call_function to smp_call_function_on_cpu()
 *            Created a function that conforms to the old calling convention
 *            of smp_call_function().
 *
 *            This is helpful for DCPI.
 *
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/cpu.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"


#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)	printk args
#else
#define DBGS(args)
#endif
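
/* Note the double parentheses in DBGS((args)): the whole printk
   argument list travels as one macro argument, so the call vanishes
   entirely when DEBUG_SMP is 0.  */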

/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];
EXPORT_SYMBOL(cpu_data);

/* A collection of single-bit IPI messages.  */
static struct {
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;
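/* The ____cacheline_aligned above gives each CPU's IPI word its own
   cache line, so a sender's set_bit does not false-share with another
   CPU's data.  */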

enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};

/* Secondary bring-up handshake: the boot CPU sets this to -1 before
   starting a secondary and to 0 when the secondary may continue; the
   secondary sets it to 1 once it is fully alive.  */
static int smp_secondary_alive = 0;

int smp_num_probed;		/* Internal processor count */
int smp_num_cpus = 1;		/* Number that came online.  */
EXPORT_SYMBOL(smp_num_cpus);

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
	cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
	cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
	cpu_data[cpuid].need_new_asn = 0;
	cpu_data[cpuid].asn_lock = 0;
}

/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
	cpu_data[cpuid].prof_counter = 1;
	cpu_data[cpuid].prof_multiplier = 1;
}

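/* Secondaries spin here until the boot CPU drops smp_secondary_alive
   from -1 back to 0 (see smp_boot_one_cpu), giving up and hanging
   after ten seconds.  */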
static void __init
wait_boot_cpu_to_stop(int cpuid)
{
	unsigned long stop = jiffies + 10*HZ;

	while (time_before(jiffies, stop)) {
		if (!smp_secondary_alive)
			return;
		barrier();
	}

	printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
	for (;;)
		barrier();
}

/*
 * Where secondaries begin a life of C.
 */
void
smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpu_online(cpuid)) {
		printk("??, cpu 0x%x already present??\n", cpuid);
		BUG();
	}
	set_cpu_online(cpuid, true);

	/* Turn on machine checks.  */
	wrmces(7);

	/* Set trap vectors.  */
	trap_init();

	/* Set interrupt vector.  */
	wrent(entInt, 0);

	/* Get our local ticker going. */
	smp_setup_percpu_timer(cpuid);
	init_clockevent();

	/* Call platform-specific callin, if specified */
	if (alpha_mv.smp_callin)
		alpha_mv.smp_callin();

	/* All kernel threads share the same mm context.  */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* inform the notifiers about the new cpu */
	notify_cpu_starting(cpuid);

	/* Must have completely accurate bogos.  */
	local_irq_enable();

	/* Wait for the boot CPU to stop, with irqs enabled, before
	   running calibrate_delay. */
	wait_boot_cpu_to_stop(cpuid);
	mb();
	calibrate_delay();

	smp_store_cpu_info(cpuid);
	/* Allow the master to continue only after we have written
	   loops_per_jiffy.  */
	wmb();
	smp_secondary_alive = 1;

	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
	      cpuid, current, current->active_mm));

	preempt_disable();
	cpu_startup_entry(CPUHP_ONLINE);
}

/* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
static int
wait_for_txrdy (unsigned long cpumask)
{
	unsigned long timeout;

	if (!(hwrpb->txrdy & cpumask))
		return 0;

	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (!(hwrpb->txrdy & cpumask))
			return 0;
		udelay(10);
		barrier();
	}

	return -1;
}

/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
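/* The message protocol, as used below: the length is written into the
   first quadword of the target CPU's ipc_buffer, the text follows in
   the remaining quadwords, and setting the CPU's bit in hwrpb->rxrdy
   tells the console to consume it.  */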
static void
send_secondary_console_msg(char *str, int cpuid)
{
	struct percpu_struct *cpu;
	register char *cp1, *cp2;
	unsigned long cpumask;
	size_t len;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);

	cpumask = (1UL << cpuid);
	if (wait_for_txrdy(cpumask))
		goto timeout;

	cp2 = str;
	len = strlen(cp2);
	*(unsigned int *)&cpu->ipc_buffer[0] = len;
	cp1 = (char *) &cpu->ipc_buffer[1];
	memcpy(cp1, cp2, len);

	/* Atomically set the rxrdy bit for this CPU.  */
	wmb();
	set_bit(cpuid, &hwrpb->rxrdy);

	if (wait_for_txrdy(cpumask))
		goto timeout;
	return;

 timeout:
	printk("Processor %x not ready\n", cpuid);
}

/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
	int mycpu, i, cnt;
	unsigned long txrdy = hwrpb->txrdy;
	char *cp1, *cp2, buf[80];
	struct percpu_struct *cpu;

	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

	mycpu = hard_smp_processor_id();

	for (i = 0; i < NR_CPUS; i++) {
		if (!(txrdy & (1UL << i)))
			continue;

		DBGS(("recv_secondary_console_msg: "
		      "TXRDY contains CPU %d.\n", i));

		cpu = (struct percpu_struct *)
		  ((char*)hwrpb
		   + hwrpb->processor_offset
		   + i * hwrpb->processor_size);

		DBGS(("recv_secondary_console_msg: on %d from %d"
		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
		      mycpu, i, cpu->halt_reason, cpu->flags));

		cnt = cpu->ipc_buffer[0] >> 32;
		if (cnt <= 0 || cnt >= 80)
			strcpy(buf, "<<< BOGUS MSG >>>");
		else {
			cp1 = (char *) &cpu->ipc_buffer[1];
			cp2 = buf;
			memcpy(cp2, cp1, cnt);
			cp2[cnt] = '\0';

			while ((cp2 = strchr(cp2, '\r')) != 0) {
				*cp2 = ' ';
				if (cp2[1] == '\n')
					cp2[1] = ' ';
			}
		}

		DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
		      "message is '%s'\n", mycpu, buf));
	}

	hwrpb->txrdy = 0;
}

/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
	struct percpu_struct *cpu;
	struct pcb_struct *hwpcb, *ipcb;
	unsigned long timeout;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);
	hwpcb = (struct pcb_struct *) cpu->hwpcb;
	ipcb = &task_thread_info(idle)->pcb;

	/* Initialize the CPU's HWPCB to something just good enough for
	   us to get started.  Immediately after starting, we'll swpctx
	   to the target idle task's pcb.  Reuse the stack in the meantime.
	   Precalculate the target PCBB.  */
	hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
	hwpcb->usp = 0;
	hwpcb->ptbr = ipcb->ptbr;
	hwpcb->pcc = 0;
	hwpcb->asn = 0;
	hwpcb->unique = virt_to_phys(ipcb);
	hwpcb->flags = ipcb->flags;
	hwpcb->res1 = hwpcb->res2 = 0;

#if 0
	DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
	      hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
#endif
	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
	      cpuid, idle->state, ipcb->flags));

	/* Setup HWRPB fields that SRM uses to activate secondary CPU */
	hwrpb->CPU_restart = __smp_callin;
	hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

	/* Recalculate and update the HWRPB checksum */
	hwrpb_update_checksum(hwrpb);

	/*
	 * Send a "start" command to the specified processor.
	 */

	/* SRM III 3.4.1.3 */
	cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
	cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
	wmb();

	send_secondary_console_msg("START\r\n", cpuid);

	/* Wait 10 seconds for an ACK from the console.  */
	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu->flags & 1)
			goto started;
		udelay(10);
		barrier();
	}
	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
	return -1;

 started:
	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
	return 0;
}

/*
 * Bring one cpu online.
 */
static int
smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
	unsigned long timeout;

	/* Signal the secondary to wait a moment.  */
	smp_secondary_alive = -1;

	/* Whirrr, whirrr, whirrrrrrrrr... */
	if (secondary_cpu_start(cpuid, idle))
		return -1;

	/* Notify the secondary CPU it can run calibrate_delay.  */
	mb();
	smp_secondary_alive = 0;

	/* We've been acked by the console; wait one second for
	   the task to start up for real.  */
	timeout = jiffies + 1*HZ;
	while (time_before(jiffies, timeout)) {
		if (smp_secondary_alive == 1)
			goto alive;
		udelay(10);
		barrier();
	}

	/* We failed to boot the CPU.  */

	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
	return -1;

 alive:
	/* Another "Red Snapper". */
	return 0;
}

/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
	struct percpu_struct *cpubase, *cpu;
	unsigned long i;

	if (boot_cpuid != 0) {
		printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
		       boot_cpuid);
	}

	if (hwrpb->nr_processors > 1) {
		int boot_cpu_palrev;

		DBGS(("setup_smp: nr_processors %ld\n",
		      hwrpb->nr_processors));

		cpubase = (struct percpu_struct *)
			((char*)hwrpb + hwrpb->processor_offset);
		boot_cpu_palrev = cpubase->pal_revision;

		for (i = 0; i < hwrpb->nr_processors; i++) {
			cpu = (struct percpu_struct *)
				((char *)cpubase + i*hwrpb->processor_size);
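			/* All of the SRM per-CPU state flags selected
			   by the 0x1cc mask must be set for this slot
			   to be usable as a secondary processor.  */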
			if ((cpu->flags & 0x1cc) == 0x1cc) {
				smp_num_probed++;
				set_cpu_possible(i, true);
				set_cpu_present(i, true);
				cpu->pal_revision = boot_cpu_palrev;
			}

			DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
			      i, cpu->flags, cpu->type));
			DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
			      i, cpu->pal_revision));
		}
	} else {
		smp_num_probed = 1;
	}

	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
	       smp_num_probed, cpumask_bits(cpu_present_mask)[0]);
}

/*
 * Called by smp_init to prepare the secondaries.
 */
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
	/* Take care of some initial bookkeeping.  */
	memset(ipi_data, 0, sizeof(ipi_data));

	current_thread_info()->cpu = boot_cpuid;

	smp_store_cpu_info(boot_cpuid);
	smp_setup_percpu_timer(boot_cpuid);

	/* Nothing to do on a UP box, or when told not to.  */
	if (smp_num_probed == 1 || max_cpus == 0) {
		init_cpu_possible(cpumask_of(boot_cpuid));
		init_cpu_present(cpumask_of(boot_cpuid));
		printk(KERN_INFO "SMP mode deactivated.\n");
		return;
	}

	printk(KERN_INFO "SMP starting up secondaries.\n");

	smp_num_cpus = smp_num_probed;
}

void
smp_prepare_boot_cpu(void)
{
}

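/* Bring one secondary online.  Success is judged solely by whether
   the CPU managed to mark itself in cpu_online_mask.  */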
int
__cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	smp_boot_one_cpu(cpu, tidle);

	return cpu_online(cpu) ? 0 : -ENOSYS;
}

void __init
smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu))
			bogosum += cpu_data[cpu].loops_per_jiffy;

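	/* BogoMIPS is loops_per_jiffy * HZ / 500000; the two expressions
	   below print the summed value to two decimal places.  */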
	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       (bogosum + 2500) / (500000/HZ),
	       ((bogosum + 2500) / (5000/HZ)) % 100);
}

int
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}


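/* Post the requested operation in each target CPU's IPI word, then
   raise the interprocessor interrupt.  The first mb() orders the
   caller's data against the IPI bits; the second makes the bits
   visible before wripir() triggers the interrupt.  */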
static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
	int i;

	mb();
	for_each_cpu(i, to_whom)
		set_bit(operation, &ipi_data[i].bits);

	mb();
	for_each_cpu(i, to_whom)
		wripir(i);
}

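/* IPI entry point.  xchg() atomically claims all bits pending in this
   CPU's IPI word; "which = ops & -ops" then peels off the lowest set
   bit each pass so the operations are dispatched one at a time.  */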
void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

#if 0
	DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
	      this_cpu, *pending_ipis, regs->pc));
#endif

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
	  mb();	/* Order bit clearing and data access. */
	  do {
		unsigned long which;

		which = ops & -ops;
		ops &= ~which;
		which = __ffs(which);

		switch (which) {
		case IPI_RESCHEDULE:
			scheduler_ipi();
			break;

		case IPI_CALL_FUNC:
			generic_smp_call_function_interrupt();
			break;

		case IPI_CALL_FUNC_SINGLE:
			generic_smp_call_function_single_interrupt();
			break;

		case IPI_CPU_STOP:
			halt();
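			/* halt() drops the CPU into the console and
			   does not return, so no break is needed.  */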

		default:
			printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
			       this_cpu, which);
			break;
		}
	  } while (ops);

	  mb();	/* Order data access and bit testing. */
	}

	cpu_data[this_cpu].ipi_count++;

	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}

void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
	if (cpu == hard_smp_processor_id())
		printk(KERN_WARNING
		       "smp_send_reschedule: Sending IPI to self.\n");
#endif
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void
smp_send_stop(void)
{
	cpumask_t to_whom;
	cpumask_copy(&to_whom, cpu_possible_mask);
	cpumask_clear_cpu(smp_processor_id(), &to_whom);
#ifdef DEBUG_IPI_MSG
	if (hard_smp_processor_id() != boot_cpuid)
		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
	send_ipi_message(&to_whom, IPI_CPU_STOP);
}


void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

static void
ipi_imb(void *ignored)
{
	imb();
}

void
smp_imb(void)
{
	/* Must wait for other processors to flush their icaches before
	   continuing. */
	if (on_each_cpu(ipi_imb, NULL, 1))
		printk(KERN_CRIT "smp_imb: timed out\n");
}
EXPORT_SYMBOL(smp_imb);

static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}

void
flush_tlb_all(void)
{
	/* Although we don't have any data to pass, we do want to
	   synchronize with the other processors.  */
	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1)) {
		printk(KERN_CRIT "flush_tlb_all: timed out\n");
	}
}

#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)

static void
ipi_flush_tlb_mm(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current(mm);
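		/* As the sole user we need not IPI anyone: zeroing the
		   other CPUs' context[] forces them to take a new ASN
		   (and with it an implicit TLB flush) the next time this
		   mm runs there.  flush_tlb_page and
		   flush_icache_user_range below use the same trick.  */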
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) {
		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
	}

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
	struct mm_struct *mm = data->mm;

	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current_page(mm, data->vma, data->addr);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current_page(mm, vma, addr);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	if (smp_call_function(ipi_flush_tlb_page, &data, 1)) {
		printk(KERN_CRIT "flush_tlb_page: timed out\n");
	}

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	/* On the Alpha we always flush the whole user TLB.  */
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);

static void
ipi_flush_icache_page(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		__load_new_mm_context(mm);
	else
		flush_tlb_other(mm);
}

void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	struct mm_struct *mm = vma->vm_mm;

	if ((vma->vm_flags & VM_EXEC) == 0)
		return;

	preempt_disable();

	if (mm == current->active_mm) {
		__load_new_mm_context(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_icache_page, mm, 1)) {
		printk(KERN_CRIT "flush_icache_page: timed out\n");
	}

	preempt_enable();
}
786