xref: /linux/arch/arm/mm/context.c (revision 26b0d14106954ae46d2f4f7eec3481828a210f7d)
/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

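/*
 * mm->context.id holds two fields: a "generation" in the bits above
 * ASID_BITS and the hardware ASID in the low ASID_BITS bits (8 bits on
 * ARMv6/v7, giving 256 hardware ASIDs per generation). cpu_last_asid
 * is the most recently allocated id; two ids belong to the same
 * generation exactly when they agree in the bits above ASID_BITS.
 */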
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
unsigned int cpu_last_asid = ASID_FIRST_VERSION;

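/*
 * During a rollover there is a window in which the ASID in CONTEXTIDR
 * and the page table in TTBR0 disagree. Pointing TTBR0 at a table that
 * contains only global mappings first ensures that no speculative
 * table walk can allocate TLB entries under a stale ASID.
 */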
#ifdef CONFIG_ARM_LPAE
void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbl = __pa(swapper_pg_dir);
	unsigned long ttbh = 0;

	/*
	 * Set TTBR0 to swapper_pg_dir which contains only global entries. The
	 * ASID is set to 0.
	 */
	asm volatile(
	"	mcrr	p15, 0, %0, %1, c2		@ set TTBR0\n"
	:
	: "r" (ttbl), "r" (ttbh));
	isb();
}
#else
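/*
 * Without LPAE, TTBR1 already holds the address of swapper_pg_dir
 * (kernel-only, global mappings) when the TTBR split is in use, so
 * copying it into TTBR0 achieves the same effect without recomputing
 * the physical address.
 */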
void cpu_set_reserved_ttbr0(void)
{
	u32 ttb;
	/* Copy TTBR1 into TTBR0 */
	asm volatile(
	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
	: "=r" (ttb));
	isb();
}
#endif

/*
 * We fork()ed a process, and we need a new context for the child
 * to run in.
 */
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
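	/*
	 * An id of 0 can never match cpu_last_asid, which starts at
	 * ASID_FIRST_VERSION, so the child is guaranteed to be handed
	 * a fresh ASID on its first context switch.
	 */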
	mm->context.id = 0;
	raw_spin_lock_init(&mm->context.id_lock);
}

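/*
 * Invalidate this CPU's TLB once TTBR0 points at the reserved page
 * table. A VIVT ASID-tagged instruction cache tags its lines with the
 * ASIDs that are about to be recycled, so it must be flushed as well.
 */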
static void flush_context(void)
{
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	if (icache_is_vivt_asid_tagged()) {
		__flush_icache_all();
		dsb();
	}
}

#ifdef CONFIG_SMP

static void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
	unsigned long flags;

	/*
	 * Locking needed for multi-threaded applications where the
	 * same mm->context.id could be set from different CPUs during
	 * the broadcast. This function is also called via IPI so the
	 * mm->context.id_lock has to be IRQ-safe.
	 */
	raw_spin_lock_irqsave(&mm->context.id_lock, flags);
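	/*
	 * A non-zero result from the XOR below means the generation
	 * fields (the bits above ASID_BITS) differ, i.e. mm still
	 * carries an ASID from an older generation.
	 */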
	if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
		/*
		 * Old version of ASID found. Set the new one and
		 * reset mm_cpumask(mm).
		 */
		mm->context.id = asid;
		cpumask_clear(mm_cpumask(mm));
	}
	raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);

	/*
	 * Set the mm_cpumask(mm) bit for the current CPU.
	 */
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}

/*
 * Reset the ASID on the current CPU. This function call is broadcast
 * from the CPU handling the ASID rollover and holding cpu_asid_lock.
 */
static void reset_context(void *info)
{
	unsigned int asid;
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = current->active_mm;

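	/*
	 * The read barrier pairs with the smp_wmb() in __new_context():
	 * the rollover CPU's update of cpu_last_asid must be visible
	 * before we derive our ASID from it. Each CPU takes
	 * cpu_last_asid + cpu + 1, so every CPU ends up with a distinct
	 * ASID for its active mm.
	 */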
	smp_rmb();
	asid = cpu_last_asid + cpu + 1;

	flush_context();
	set_mm_context(mm, asid);

	/* set the new ASID */
	cpu_switch_mm(mm->pgd, mm);
}

#else

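/* On UP there is no cross-CPU broadcast, so no locking is needed. */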
static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
	mm->context.id = asid;
	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
}

#endif

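/*
 * Allocate a new ASID for mm. A worked example, assuming ASID_BITS == 8
 * (so ASID_FIRST_VERSION == 0x100): generation 1 hands out ids
 * 0x101..0x1ff; the increment to 0x200 leaves the low 8 bits zero,
 * which triggers a rollover. The TLBs are flushed, each CPU's active mm
 * receives one of 0x201..0x200 + NR_CPUS (this CPU directly, the
 * others via IPI), and cpu_last_asid finally skips the reserved block
 * with += NR_CPUS.
 */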
void __new_context(struct mm_struct *mm)
{
	unsigned int asid;

	raw_spin_lock(&cpu_asid_lock);
#ifdef CONFIG_SMP
	/*
	 * Check the ASID again, in case the change was broadcast from
	 * another CPU before we acquired the lock.
	 */
	if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		raw_spin_unlock(&cpu_asid_lock);
		return;
	}
#endif
	/*
	 * At this point, it is guaranteed that the current mm (with
	 * an old ASID) isn't active on any other CPU since the ASIDs
	 * are changed simultaneously via IPI.
	 */
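	/*
	 * The increment may wrap the 32-bit counter to zero; if so,
	 * restart at ASID_FIRST_VERSION rather than handing out id 0.
	 */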
	asid = ++cpu_last_asid;
	if (asid == 0)
		asid = cpu_last_asid = ASID_FIRST_VERSION;

	/*
	 * If we've used up all our ASIDs, we need
	 * to start a new version and flush the TLB.
	 */
	if (unlikely((asid & ~ASID_MASK) == 0)) {
		asid = cpu_last_asid + smp_processor_id() + 1;
		flush_context();
#ifdef CONFIG_SMP
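		/*
		 * Publish the new generation before the IPI (paired
		 * with the smp_rmb() in reset_context()) and wait for
		 * every other CPU to pick its new ASID before advancing
		 * cpu_last_asid past the block they consumed.
		 */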
		smp_wmb();
		smp_call_function(reset_context, NULL, 1);
#endif
		cpu_last_asid += NR_CPUS;
	}

	set_mm_context(mm, asid);
	raw_spin_unlock(&cpu_asid_lock);
}