/*
 *	linux/arch/alpha/kernel/smp.c
 *
 *      2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
 *            Renamed modified smp_call_function to smp_call_function_on_cpu()
 *            Created a function that conforms to the old calling convention
 *            of smp_call_function().
 *
 *            This is helpful for DCPI.
 *
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/cpu.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"


#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)	printk args
#else
#define DBGS(args)
#endif

/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];
EXPORT_SYMBOL(cpu_data);

/* A collection of single-bit IPI messages.  */
static struct {
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};

/* Set to 1 by a secondary once it has come online; the boot CPU
   uses -1 and 0 to stage the startup handshake.  */
static int smp_secondary_alive = 0;

int smp_num_probed;		/* Internal processor count */
int smp_num_cpus = 1;		/* Number that came online.  */
EXPORT_SYMBOL(smp_num_cpus);

/*
 * Called by both the boot CPU and secondaries to move global data into
 * per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
	cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
	cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
	cpu_data[cpuid].need_new_asn = 0;
	cpu_data[cpuid].asn_lock = 0;
}

/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
	cpu_data[cpuid].prof_counter = 1;
	cpu_data[cpuid].prof_multiplier = 1;
}

static void __init
wait_boot_cpu_to_stop(int cpuid)
{
	unsigned long stop = jiffies + 10*HZ;

	while (time_before(jiffies, stop)) {
		if (!smp_secondary_alive)
			return;
		barrier();
	}

	printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
	for (;;)
		barrier();
}

/*
 * Where secondaries begin a life of C.
 */
void
smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpu_online(cpuid)) {
		printk("??, cpu 0x%x already present??\n", cpuid);
		BUG();
	}
	set_cpu_online(cpuid, true);

	/* Turn on machine checks.  */
	wrmces(7);

	/* Set trap vectors.  */
	trap_init();

	/* Set interrupt vector.  */
	wrent(entInt, 0);

	/* Get our local ticker going. */
	smp_setup_percpu_timer(cpuid);
	init_clockevent();

	/* Call platform-specific callin, if specified */
	if (alpha_mv.smp_callin)
		alpha_mv.smp_callin();

	/* All kernel threads share the same mm context.  */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* inform the notifiers about the new cpu */
	notify_cpu_starting(cpuid);

	/* Must have completely accurate bogos.  */
	local_irq_enable();

	/* Wait for the boot CPU to stop, with irqs enabled, before
	   running calibrate_delay.  */
	wait_boot_cpu_to_stop(cpuid);
	mb();
	calibrate_delay();

	smp_store_cpu_info(cpuid);
	/* Allow master to continue only after we've written loops_per_jiffy.  */
	wmb();
	smp_secondary_alive = 1;

	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
	      cpuid, current, current->active_mm));

	preempt_disable();
	cpu_startup_entry(CPUHP_ONLINE);
}

/* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
static int
wait_for_txrdy (unsigned long cpumask)
{
	unsigned long timeout;

	if (!(hwrpb->txrdy & cpumask))
		return 0;

	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (!(hwrpb->txrdy & cpumask))
			return 0;
		udelay(10);
		barrier();
	}

	return -1;
}

/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void
send_secondary_console_msg(char *str, int cpuid)
{
	struct percpu_struct *cpu;
	register char *cp1, *cp2;
	unsigned long cpumask;
	size_t len;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);

	cpumask = (1UL << cpuid);
	if (wait_for_txrdy(cpumask))
		goto timeout;

	cp2 = str;
	len = strlen(cp2);
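	/* The first quadword of the IPC buffer carries the byte count;
	   the message text goes in the quadwords after it.  */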
	*(unsigned int *)&cpu->ipc_buffer[0] = len;
	cp1 = (char *) &cpu->ipc_buffer[1];
	memcpy(cp1, cp2, len);

	/* Atomically flag the message as ready for the console.  */
	wmb();
	set_bit(cpuid, &hwrpb->rxrdy);

	if (wait_for_txrdy(cpumask))
		goto timeout;
	return;

 timeout:
	printk("Processor %x not ready\n", cpuid);
}

/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
	int mycpu, i, cnt;
	unsigned long txrdy = hwrpb->txrdy;
	char *cp1, *cp2, buf[80];
	struct percpu_struct *cpu;

	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

	mycpu = hard_smp_processor_id();

	for (i = 0; i < NR_CPUS; i++) {
		if (!(txrdy & (1UL << i)))
			continue;

		DBGS(("recv_secondary_console_msg: "
		      "TXRDY contains CPU %d.\n", i));

		cpu = (struct percpu_struct *)
		  ((char*)hwrpb
		   + hwrpb->processor_offset
		   + i * hwrpb->processor_size);

		DBGS(("recv_secondary_console_msg: on %d from %d"
		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
		      mycpu, i, cpu->halt_reason, cpu->flags));

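		/* The console puts the byte count in the upper half of the
		   first quadword of the IPC buffer.  */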
		cnt = cpu->ipc_buffer[0] >> 32;
		if (cnt <= 0 || cnt >= 80)
			strcpy(buf, "<<< BOGUS MSG >>>");
		else {
			cp1 = (char *) &cpu->ipc_buffer[1];
			cp2 = buf;
			memcpy(cp2, cp1, cnt);
			cp2[cnt] = '\0';

			while ((cp2 = strchr(cp2, '\r')) != 0) {
				*cp2 = ' ';
				if (cp2[1] == '\n')
					cp2[1] = ' ';
			}
		}

		DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
		      "message is '%s'\n", mycpu, buf));
	}

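	/* Acknowledge the message(s) so the console can transmit again.  */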
	hwrpb->txrdy = 0;
}

/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
	struct percpu_struct *cpu;
	struct pcb_struct *hwpcb, *ipcb;
	unsigned long timeout;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);
	hwpcb = (struct pcb_struct *) cpu->hwpcb;
	ipcb = &task_thread_info(idle)->pcb;

	/* Initialize the CPU's HWPCB to something just good enough for
	   us to get started.  Immediately after starting, we'll swpctx
	   to the target idle task's pcb.  Reuse the stack in the
	   meantime.  Precalculate the target PCBB.  */
	hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
	hwpcb->usp = 0;
	hwpcb->ptbr = ipcb->ptbr;
	hwpcb->pcc = 0;
	hwpcb->asn = 0;
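	/* Presumably the precalculated PCBB mentioned above: 'unique'
	   holds the physical address of the target PCB for __smp_callin
	   to swpctx to.  */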
	hwpcb->unique = virt_to_phys(ipcb);
	hwpcb->flags = ipcb->flags;
	hwpcb->res1 = hwpcb->res2 = 0;

#if 0
	DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
	      hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
#endif
	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
	      cpuid, idle->state, ipcb->flags));

	/* Set up the HWRPB fields that SRM uses to activate the secondary CPU. */
	hwrpb->CPU_restart = __smp_callin;
	hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

	/* Recalculate and update the HWRPB checksum */
	hwrpb_update_checksum(hwrpb);

	/*
	 * Send a "start" command to the specified processor.
	 */

	/* SRM III 3.4.1.3 */
	cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
	cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
	wmb();

	send_secondary_console_msg("START\r\n", cpuid);

	/* Wait 10 seconds for an ACK from the console.  */
	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu->flags & 1)
			goto started;
		udelay(10);
		barrier();
	}
	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
	return -1;

 started:
	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
	return 0;
}

/*
 * Bring one cpu online.
 */
static int
smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
	unsigned long timeout;

	/* Signal the secondary to wait a moment.  */
	smp_secondary_alive = -1;

	/* Whirrr, whirrr, whirrrrrrrrr... */
	if (secondary_cpu_start(cpuid, idle))
		return -1;

	/* Notify the secondary CPU it can run calibrate_delay.  */
	mb();
	smp_secondary_alive = 0;

	/* We've been acked by the console; wait one second for
	   the task to start up for real.  */
	timeout = jiffies + 1*HZ;
	while (time_before(jiffies, timeout)) {
		if (smp_secondary_alive == 1)
			goto alive;
		udelay(10);
		barrier();
	}

	/* We failed to boot the CPU.  */

	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
	return -1;

 alive:
	/* Another "Red Snapper". */
	return 0;
}

/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
	struct percpu_struct *cpubase, *cpu;
	unsigned long i;

	if (boot_cpuid != 0) {
		printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
		       boot_cpuid);
	}

	if (hwrpb->nr_processors > 1) {
		int boot_cpu_palrev;

		DBGS(("setup_smp: nr_processors %ld\n",
		      hwrpb->nr_processors));

		cpubase = (struct percpu_struct *)
			((char*)hwrpb + hwrpb->processor_offset);
		boot_cpu_palrev = cpubase->pal_revision;

		for (i = 0; i < hwrpb->nr_processors; i++) {
			cpu = (struct percpu_struct *)
				((char *)cpubase + i*hwrpb->processor_size);
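			/* 0x1cc tests the SRM per-CPU flags for a usable
			   CPU: processor available and present, with
			   PALcode valid, loaded, and its memory valid.  */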
			if ((cpu->flags & 0x1cc) == 0x1cc) {
				smp_num_probed++;
				set_cpu_possible(i, true);
				set_cpu_present(i, true);
				cpu->pal_revision = boot_cpu_palrev;
			}

			DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
			      i, cpu->flags, cpu->type));
			DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
			      i, cpu->pal_revision));
		}
	} else {
		smp_num_probed = 1;
	}

	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
	       smp_num_probed, cpumask_bits(cpu_present_mask)[0]);
}

/*
 * Called by smp_init to prepare the secondaries.
 */
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
	/* Take care of some initial bookkeeping.  */
	memset(ipi_data, 0, sizeof(ipi_data));

	current_thread_info()->cpu = boot_cpuid;

	smp_store_cpu_info(boot_cpuid);
	smp_setup_percpu_timer(boot_cpuid);

	/* Nothing to do on a UP box, or when told not to.  */
	if (smp_num_probed == 1 || max_cpus == 0) {
		init_cpu_possible(cpumask_of(boot_cpuid));
		init_cpu_present(cpumask_of(boot_cpuid));
		printk(KERN_INFO "SMP mode deactivated.\n");
		return;
	}

	printk(KERN_INFO "SMP starting up secondaries.\n");

	smp_num_cpus = smp_num_probed;
}

void
smp_prepare_boot_cpu(void)
{
}

int
__cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	smp_boot_one_cpu(cpu, tidle);

	return cpu_online(cpu) ? 0 : -ENOSYS;
}

void __init
smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu))
			bogosum += cpu_data[cpu].loops_per_jiffy;

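	/* BogoMIPS = loops_per_jiffy * HZ / 500000, printed here to
	   two decimal places.  */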
	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       (bogosum + 2500) / (500000/HZ),
	       ((bogosum + 2500) / (5000/HZ)) % 100);
}

int
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
	int i;

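	/* Order any stores done by the sender before the IPI bits
	   become visible to the targets.  */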
	mb();
	for_each_cpu(i, to_whom)
		set_bit(operation, &ipi_data[i].bits);

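	/* Make sure the bits are visible before the interrupts fire.  */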
	mb();
	for_each_cpu(i, to_whom)
		wripir(i);
}

void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

#if 0
	DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
	      this_cpu, *pending_ipis, regs->pc));
#endif

	mb();	/* Order interrupt and bit testing. */
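	/* Atomically grab and clear every pending IPI bit; anything
	   that arrives while we work is picked up next iteration.  */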
	while ((ops = xchg(pending_ipis, 0)) != 0) {
	  mb();	/* Order bit clearing and data access. */
	  do {
		unsigned long which;

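		/* Isolate the lowest set bit, clear it from ops, and
		   turn the single-bit mask into a bit index.  */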
		which = ops & -ops;
		ops &= ~which;
		which = __ffs(which);

		switch (which) {
		case IPI_RESCHEDULE:
			scheduler_ipi();
			break;

		case IPI_CALL_FUNC:
			generic_smp_call_function_interrupt();
			break;

		case IPI_CPU_STOP:
			halt();

		default:
			printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
			       this_cpu, which);
			break;
		}
	  } while (ops);

	  mb();	/* Order data access and bit testing. */
	}

	cpu_data[this_cpu].ipi_count++;

	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}

void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
	if (cpu == hard_smp_processor_id())
		printk(KERN_WARNING
		       "smp_send_reschedule: Sending IPI to self.\n");
#endif
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void
smp_send_stop(void)
{
	cpumask_t to_whom;
	cpumask_copy(&to_whom, cpu_possible_mask);
	cpumask_clear_cpu(smp_processor_id(), &to_whom);
#ifdef DEBUG_IPI_MSG
	if (hard_smp_processor_id() != boot_cpu_id)
		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
	send_ipi_message(&to_whom, IPI_CPU_STOP);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

static void
ipi_imb(void *ignored)
{
	imb();
}

void
smp_imb(void)
{
	/* Must wait for the other processors to flush their icaches
	   before continuing.  */
	if (on_each_cpu(ipi_imb, NULL, 1))
		printk(KERN_CRIT "smp_imb: timed out\n");
}
EXPORT_SYMBOL(smp_imb);

static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}

void
flush_tlb_all(void)
{
	/* Although we don't have any data to pass, we do want to
	   synchronize with the other processors.  */
	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1)) {
		printk(KERN_CRIT "flush_tlb_all: timed out\n");
	}
}

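/* Nonzero while this CPU is in the middle of an ASN update and must
   not have its current context touched (see the mmu_context code);
   hence the checks in the ipi handlers below.  */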
#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)

static void
ipi_flush_tlb_mm(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
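	/* If the mm is live on this CPU, flush it directly; otherwise
	   just invalidate its context so a new ASN is allocated the
	   next time this CPU picks the mm up.  */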
	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current(mm);
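		/* We are the only user of this mm: after the local
		   flush, just zap the stale per-CPU contexts so other
		   CPUs get a fresh ASN if they ever adopt the mm --
		   no IPI needed.  */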
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) {
		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
	}

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
	struct mm_struct *mm = data->mm;

	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current_page(mm, data->vma, data->addr);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current_page(mm, vma, addr);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	if (smp_call_function(ipi_flush_tlb_page, &data, 1)) {
		printk(KERN_CRIT "flush_tlb_page: timed out\n");
	}

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	/* On the Alpha we always flush the whole user tlb.  */
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);

static void
ipi_flush_icache_page(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		__load_new_mm_context(mm);
	else
		flush_tlb_other(mm);
}

void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	struct mm_struct *mm = vma->vm_mm;

	if ((vma->vm_flags & VM_EXEC) == 0)
		return;

	preempt_disable();

	if (mm == current->active_mm) {
		__load_new_mm_context(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_icache_page, mm, 1)) {
		printk(KERN_CRIT "flush_icache_page: timed out\n");
	}

	preempt_enable();
}