xref: /linux/arch/arm64/mm/context.c (revision ca64d84e93762f4e587e040a44ad9f6089afc777)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);

static atomic64_t asid_generation;
static unsigned long *asid_map;

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

#define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION	(1UL << asid_bits)

#define NUM_USER_ASIDS		ASID_FIRST_VERSION
#define asid2idx(asid)		((asid) & ~ASID_MASK)
#define idx2asid(idx)		asid2idx(idx)
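/*
 * Worked example (editor's illustration, not in the original source):
 * with asid_bits == 16, ASID_FIRST_VERSION == 0x10000 and ASID_MASK
 * clears the low 16 bits, so mm->context.id packs both fields:
 *
 *   context.id == 0x30005  =>  generation 0x30000 | hardware ASID 0x0005
 *
 * asid2idx()/idx2asid() strip nothing but the generation bits, mapping
 * an ASID to its bitmap index and back.
 */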

/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
{
	u32 asid;
	int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
						ID_AA64MMFR0_ASID_SHIFT);

	switch (fld) {
	default:
		pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
					smp_processor_id(), fld);
		/* Fallthrough */
	case 0:
		asid = 8;
		break;
	case 2:
		asid = 16;
		break;
	}

	return asid;
}

/* Check that the current CPU's ASIDBits is compatible with asid_bits */
void verify_cpu_asid_bits(void)
{
	u32 asid = get_cpu_asid_bits();

	if (asid < asid_bits) {
		/*
		 * We cannot decrease the ASID size at runtime, so panic if
		 * this CPU supports fewer ASID bits than the boot CPU.
		 */
		pr_crit("CPU%d: smaller ASID size (%u) than boot CPU (%u)\n",
				smp_processor_id(), asid, asid_bits);
		cpu_panic_kernel();
	}
}

static void set_kpti_asid_bits(void)
{
	unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long);
	/*
	 * With KPTI, kernel and user ASIDs are allocated in pairs whose
	 * bottom bit distinguishes the two: if it is set, the ASID is
	 * used only while running userspace. Mark every odd-numbered
	 * ASID as in-use so the allocator only ever hands out the even
	 * member of each pair.
	 */
	memset(asid_map, 0xaa, len);
}
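/*
 * Editor's illustration (not in the original source): 0xaa is
 * 0b10101010, so the memset above sets every odd-numbered bit in the
 * bitmap. A later find_next_zero_bit() can therefore only return even
 * indices, e.g. ASIDs 2, 4, 6, ..., while each odd sibling (3, 5,
 * 7, ...) stays permanently reserved as the userspace half of the pair.
 */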

static void set_reserved_asid_bits(void)
{
	if (arm64_kernel_unmapped_at_el0())
		set_kpti_asid_bits();
	else
		bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
}

static void flush_context(void)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	set_reserved_asid_bits();

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid2idx(asid), asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/*
	 * Queue a TLB invalidation for each CPU to perform on next
	 * context-switch.
	 */
	cpumask_setall(&tlb_flush_pending);
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}

static u64 new_context(struct mm_struct *mm)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(asid2idx(asid), asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes. We
	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
	 * pairs.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid != NUM_USER_ASIDS)
		goto set_asid;

	/* We're out of ASIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
						 &asid_generation);
	flush_context();

	/* We have more ASIDs than CPUs, so this will always succeed */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

set_asid:
	__set_bit(asid, asid_map);
	cur_idx = asid;
	return idx2asid(asid) | generation;
}
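/*
 * Rollover walk-through (editor's illustration, not in the original
 * source): with asid_bits == 16, generations advance 0x10000, 0x20000,
 * 0x30000, ... Once find_next_zero_bit() finds no free index, the
 * generation is bumped, flush_context() rebuilds the bitmap from the
 * per-CPU reserved ASIDs, and the second search is guaranteed to
 * succeed because there are far more ASIDs than CPUs.
 */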

void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long flags;
	u64 asid, old_active_asid;

	if (system_supports_cnp())
		cpu_set_reserved_ttbr0();

	asid = atomic64_read(&mm->context.id);

	/*
	 * The memory ordering here is subtle.
	 * If our active_asids is non-zero and the ASID matches the current
	 * generation, then we update the active_asids entry with a relaxed
	 * cmpxchg. Racing with a concurrent rollover means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on the
	 *   lock. Taking the lock synchronises with the rollover and so
	 *   we are forced to see the updated generation.
	 *
	 * - We get a valid ASID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
	 */
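	/*
	 * Editor's note on the generation test below (illustration, not in
	 * the original source): XOR-ing the mm's id with asid_generation
	 * cancels the generation bits only when they match, so shifting
	 * right by asid_bits yields zero exactly for a current-generation
	 * ASID. E.g. with asid_bits == 16: (0x30005 ^ 0x30000) >> 16 == 0.
	 */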
	old_active_asid = atomic64_read(&per_cpu(active_asids, cpu));
	if (old_active_asid &&
	    !((asid ^ atomic64_read(&asid_generation)) >> asid_bits) &&
	    atomic64_cmpxchg_relaxed(&per_cpu(active_asids, cpu),
				     old_active_asid, asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) {
		asid = new_context(mm);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
		local_flush_tlb_all();

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:

	arm64_apply_bp_hardening();

	/*
	 * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
	 * emulating PAN.
	 */
	if (!system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}

/* Errata workaround post TTBRx_EL1 update. */
asmlinkage void post_ttbr_update_workaround(void)
{
	if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456))
		return;

	asm(ALTERNATIVE("nop; nop; nop",
			"ic iallu; dsb nsh; isb",
			ARM64_WORKAROUND_CAVIUM_27456));
}
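/*
 * Editor's note (not in the original source): ALTERNATIVE() emits the
 * three NOPs by default and lets the boot-time alternatives framework
 * patch in "ic iallu; dsb nsh; isb" only on CPUs that declare the
 * ARM64_WORKAROUND_CAVIUM_27456 capability, so unaffected systems pay
 * no runtime cost for the erratum workaround.
 */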

void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm)
{
	unsigned long ttbr1 = read_sysreg(ttbr1_el1);
	unsigned long asid = ASID(mm);
	unsigned long ttbr0 = phys_to_ttbr(pgd_phys);

	/* Skip CNP for the reserved ASID */
	if (system_supports_cnp() && asid)
		ttbr0 |= TTBR_CNP_BIT;

	/* SW PAN needs a copy of the ASID in TTBR0 for entry */
	if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN))
		ttbr0 |= FIELD_PREP(TTBR_ASID_MASK, asid);

	/* Set ASID in TTBR1 since TCR.A1 is set */
	ttbr1 &= ~TTBR_ASID_MASK;
	ttbr1 |= FIELD_PREP(TTBR_ASID_MASK, asid);
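	/*
	 * Editor's illustration (not in the original source): with
	 * TTBR_ASID_MASK covering bits 63:48, FIELD_PREP() shifts the
	 * ASID into place, e.g. asid == 0x0005 contributes
	 * 0x0005000000000000 to the TTBR1_EL1 value written below.
	 */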

	write_sysreg(ttbr1, ttbr1_el1);
	isb();
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
	post_ttbr_update_workaround();
}

static int asids_update_limit(void)
{
	unsigned long num_available_asids = NUM_USER_ASIDS;

	if (arm64_kernel_unmapped_at_el0())
		num_available_asids /= 2;
	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more ASID than CPUs. ASID #0 is reserved for init_mm.
	 */
	WARN_ON(num_available_asids - 1 <= num_possible_cpus());
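	/*
	 * Editor's illustration (not in the original source): with 16-bit
	 * ASIDs there are 65536 entries, halved to 32768 when KPTI pairs
	 * them, which still leaves far more ASIDs than any realistic CPU
	 * count, so this warning should never fire in practice.
	 */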
	pr_info("ASID allocator initialised with %lu entries\n",
		num_available_asids);
	return 0;
}
arch_initcall(asids_update_limit);
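/*
 * Editor's note (not in the original source): asids_init() below runs
 * as an early_initcall(), before secondary CPUs are brought up and the
 * system-wide capabilities are finalised, which is why it cannot use
 * arm64_kernel_unmapped_at_el0(). asids_update_limit() runs later, as
 * an arch_initcall(), once the KPTI decision is known.
 */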

static int asids_init(void)
{
	asid_bits = get_cpu_asid_bits();
	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
	asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map),
			   GFP_KERNEL);
	if (!asid_map)
		panic("Failed to allocate bitmap for %lu ASIDs\n",
		      NUM_USER_ASIDS);

	/*
	 * We cannot call set_reserved_asid_bits() here because CPU
	 * caps are not finalized yet, so it is safer to assume KPTI
	 * and reserve kernel ASIDs from the beginning.
	 */
	if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
		set_kpti_asid_bits();
	return 0;
}
early_initcall(asids_init);