xref: /linux/arch/arm64/mm/context.c (revision e58e871becec2d3b04ed91c0c16fe8deac9c9dfa)
/*
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

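/*
 * Allocator state (summary added for clarity): asid_map tracks which ASIDs
 * are in use in the current generation, active_asids holds the ASID each CPU
 * is currently running with (cleared to 0 across a rollover), reserved_asids
 * preserves the last ASID a CPU was running when a rollover occurred, and
 * tlb_flush_pending marks CPUs that must flush their local TLB before
 * running with a newly allocated ASID.
 */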
static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);

static atomic64_t asid_generation;
static unsigned long *asid_map;

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

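/*
 * mm->context.id packs a rollover generation in the upper bits and the
 * hardware ASID in the lower asid_bits bits, e.g. with 16-bit ASIDs:
 *
 *	63                           16 15             0
 *	+------------------------------+----------------+
 *	|          generation          |      ASID      |
 *	+------------------------------+----------------+
 *
 * ASID_MASK selects the generation bits, ASID_FIRST_VERSION is the amount
 * added to the generation on rollover and NUM_USER_ASIDS is the number of
 * ASIDs available per generation.
 */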
#define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION	(1UL << asid_bits)
#define NUM_USER_ASIDS		ASID_FIRST_VERSION

/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
{
	u32 asid;
	int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
						ID_AA64MMFR0_ASID_SHIFT);

	switch (fld) {
	default:
		pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
					smp_processor_id(), fld);
		/* Fallthrough */
	case 0:
		asid = 8;
		break;
	case 2:
		asid = 16;
	}

	return asid;
}

/* Check if the current CPU's ASIDBits is compatible with asid_bits */
void verify_cpu_asid_bits(void)
{
	u32 asid = get_cpu_asid_bits();

	if (asid < asid_bits) {
		/*
		 * We cannot decrease the ASID size at runtime, so panic if
		 * this CPU supports fewer ASID bits than the boot CPU.
		 */
		pr_crit("CPU%d: smaller ASID size (%u) than boot CPU (%u)\n",
				smp_processor_id(), asid, asid_bits);
		cpu_panic_kernel();
	}
}

static void set_reserved_asid_bits(void)
{
	if (IS_ENABLED(CONFIG_QCOM_FALKOR_ERRATUM_1003) &&
	    cpus_have_const_cap(ARM64_WORKAROUND_QCOM_FALKOR_E1003))
		__set_bit(FALKOR_RESERVED_ASID, asid_map);
}

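/*
 * Called with cpu_asid_lock held once the generation has been bumped:
 * reset the allocation bitmap, preserve the ASID each CPU was last running
 * with in reserved_asids, and mark every CPU as needing a local TLB flush
 * before it next runs with a user mm.
 */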
static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);

	set_reserved_asid_bits();

	/*
	 * Ensure the generation bump is observed before we xchg the
	 * active_asids.
	 */
	smp_wmb();

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid & ~ASID_MASK, asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/* Queue a TLB invalidate and flush the I-cache if necessary. */
	cpumask_setall(&tlb_flush_pending);
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}

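/*
 * Slow path, called with cpu_asid_lock held: return a new context.id
 * (generation | ASID) for @mm, reusing its old ASID when it is still free
 * in the current generation and allocating a fresh one (possibly after a
 * rollover) otherwise.
 */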
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		asid &= ~ASID_MASK;
		if (!__test_and_set_bit(asid, asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes.
	 * We always count from ASID #1, as we use ASID #0 when setting a
	 * reserved TTBR0 for the init_mm.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid != NUM_USER_ASIDS)
		goto set_asid;

	/* We're out of ASIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
						 &asid_generation);
	flush_context(cpu);

	/* We have more ASIDs than CPUs, so this will always succeed */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

set_asid:
	__set_bit(asid, asid_map);
	cur_idx = asid;
	return asid | generation;
}

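/*
 * Called on the context-switch (switch_mm) path. The fast path only needs
 * a lock-free check that the mm's ASID belongs to the current generation
 * plus an xchg of active_asids; otherwise the slow path takes
 * cpu_asid_lock, allocates a new ASID and performs any deferred local TLB
 * flush left over from a rollover.
 */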
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long flags;
	u64 asid;

	asid = atomic64_read(&mm->context.id);

	/*
	 * The memory ordering here is subtle. We rely on the control
	 * dependency between the generation read and the update of
	 * active_asids to ensure that we are synchronised with a
	 * parallel rollover (i.e. this pairs with the smp_wmb() in
	 * flush_context).
	 */
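	/*
	 * The xor/shift below is non-zero iff the generation encoded in the
	 * mm's ASID differs from asid_generation, i.e. the ASID is stale; an
	 * xchg that returns 0 means a concurrent rollover has already cleared
	 * our active_asids entry. Either case takes the slow path.
	 */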
	if (!((asid ^ atomic64_read(&asid_generation)) >> asid_bits)
	    && atomic64_xchg_relaxed(&per_cpu(active_asids, cpu), asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
		local_flush_tlb_all();

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
	/*
	 * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
	 * emulating PAN.
	 */
	if (!system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}

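/*
 * Boot-time initialisation: size the allocator from the boot CPU's
 * ASIDBits field and allocate the ASID bitmap before any user mm can be
 * scheduled.
 */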
static int asids_init(void)
{
	asid_bits = get_cpu_asid_bits();
	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more ASID than CPUs. ASID #0 is reserved for init_mm.
	 */
	WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
	asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map),
			   GFP_KERNEL);
	if (!asid_map)
		panic("Failed to allocate bitmap for %lu ASIDs\n",
		      NUM_USER_ASIDS);

	set_reserved_asid_bits();

	pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
	return 0;
}
early_initcall(asids_init);