xref: /linux/arch/arm64/include/asm/mmu_context.h (revision 0ad53fe3ae82443c74ff8cfd7bd13377cc1134a3)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>

#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/sysreg.h>
#include <asm/tlbflush.h>

extern bool rodata_full;

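/*
 * Publish the PID of the incoming task in CONTEXTIDR_EL1 so that it can be
 * picked up by hardware trace and external debug tools, when
 * CONFIG_PID_IN_CONTEXTIDR is enabled.
 */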
static inline void contextidr_thread_switch(struct task_struct *next)
{
	if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR))
		return;

	write_sysreg(task_pid_nr(next), contextidr_el1);
	isb();
}

/*
 * Set TTBR0 to reserved_pg_dir. No translations will be possible via TTBR0.
 */
static inline void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));

	write_sysreg(ttbr, ttbr0_el1);
	isb();
}

void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);

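/*
 * Install @pgd in TTBR0_EL1, switching to the reserved page tables first so
 * that the old and new translation regimes are never live at the same time.
 * swapper_pg_dir contains no user mappings and must never end up in TTBR0.
 */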
static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUG_ON(pgd == swapper_pg_dir);
	cpu_set_reserved_ttbr0();
	cpu_do_switch_mm(virt_to_phys(pgd), mm);
}

/*
 * TCR.T0SZ value to use when the ID map is active. Usually equals
 * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
 * physical memory, in which case it will be smaller.
 */
extern u64 idmap_t0sz;
extern u64 idmap_ptrs_per_pgd;

/*
 * Ensure TCR.T0SZ is set to the provided value.
 */
static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
{
	unsigned long tcr = read_sysreg(tcr_el1);

	if ((tcr & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET == t0sz)
		return;

	tcr &= ~TCR_T0SZ_MASK;
	tcr |= t0sz << TCR_T0SZ_OFFSET;
	write_sysreg(tcr, tcr_el1);
	isb();
}

#define cpu_set_default_tcr_t0sz()	__cpu_set_tcr_t0sz(TCR_T0SZ(vabits_actual))
#define cpu_set_idmap_tcr_t0sz()	__cpu_set_tcr_t0sz(idmap_t0sz)

/*
 * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
 *
 * The idmap lives in the same VA range as userspace, but uses global entries
 * and may use a different TCR_EL1.T0SZ. To avoid issues resulting from
 * speculative TLB fetches, we must temporarily install the reserved page
 * tables while we invalidate the TLBs and set up the correct TCR_EL1.T0SZ.
 *
 * If current is not a user task, the mm covers the TTBR1_EL1 page tables,
 * which should not be installed in TTBR0_EL1. In this case we can leave the
 * reserved page tables in place.
 */
static inline void cpu_uninstall_idmap(void)
{
	struct mm_struct *mm = current->active_mm;

	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_default_tcr_t0sz();

	if (mm != &init_mm && !system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}

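/*
 * Install the identity map in TTBR0_EL1, adjusting TCR_EL1.T0SZ so that the
 * whole idmap VA range is addressable.
 */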
static inline void cpu_install_idmap(void)
{
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_idmap_tcr_t0sz();

	cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
}

/*
 * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
 * avoiding the possibility of conflicting TLB entries being allocated.
 */
static inline void __nocfi cpu_replace_ttbr1(pgd_t *pgdp)
{
	typedef void (ttbr_replace_func)(phys_addr_t);
	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
	ttbr_replace_func *replace_phys;

	/* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
	phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));

	if (system_supports_cnp() && !WARN_ON(pgdp != lm_alias(swapper_pg_dir))) {
		/*
		 * cpu_replace_ttbr1() is used first while only the boot CPU is
		 * up (i.e. the cpufeature framework is not up yet), and later
		 * only when CNP is enabled via cpufeature's enable() callback.
		 * We also rely on the cpu_hwcap bit being set before the
		 * enable() callback is called.
		 */
		ttbr1 |= TTBR_CNP_BIT;
	}

	replace_phys = (void *)__pa_symbol(function_nocfi(idmap_cpu_replace_ttbr1));

	cpu_install_idmap();
	replace_phys(ttbr1);
	cpu_uninstall_idmap();
}

/*
 * It would be nice to return ASIDs back to the allocator, but unfortunately
 * that introduces a race with a generation rollover where we could erroneously
 * free an ASID allocated in a future generation. We could work around this by
 * freeing the ASID from the context of the dying mm (e.g. in arch_exit_mmap),
 * but we'd then need to make sure that we didn't dirty any TLBs afterwards.
 * Setting a reserved TTBR0 or EPD0 would work, but it all gets ugly when you
 * take CPU migration into account.
 */
void check_and_switch_context(struct mm_struct *mm);

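/*
 * A freshly created mm starts with ASID 0, meaning "no ASID allocated yet"; a
 * real ASID is handed out lazily by check_and_switch_context() on first use.
 */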
#define init_new_context(tsk, mm) init_new_context(tsk, mm)
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	atomic64_set(&mm->context.id, 0);
	refcount_set(&mm->context.pinned, 0);
	return 0;
}

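/*
 * With software PAN emulation (CONFIG_ARM64_SW_TTBR0_PAN), the task's TTBR0
 * value (pgd plus ASID) is stashed in thread_info so that the uaccess routines
 * can restore it when they temporarily re-enable user access.
 */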
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void update_saved_ttbr0(struct task_struct *tsk,
				      struct mm_struct *mm)
{
	u64 ttbr;

	if (!system_uses_ttbr0_pan())
		return;

	if (mm == &init_mm)
		ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
	else
		ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48;

	WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
}
#else
static inline void update_saved_ttbr0(struct task_struct *tsk,
				      struct mm_struct *mm)
{
}
#endif

#define enter_lazy_tlb enter_lazy_tlb
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/*
	 * We don't actually care about the ttbr0 mapping, so point it at the
	 * reserved (empty) page tables.
	 */
	update_saved_ttbr0(tsk, &init_mm);
}

static inline void __switch_mm(struct mm_struct *next)
{
	/*
	 * init_mm.pgd does not contain any user mappings and it is always
	 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
	 */
	if (next == &init_mm) {
		cpu_set_reserved_ttbr0();
		return;
	}

	check_and_switch_context(next);
}

static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
	if (prev != next)
		__switch_mm(next);

	/*
	 * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
	 * value may not have been initialised yet (activate_mm caller) or the
	 * ASID has changed since the last run (following the context switch
	 * of another thread of the same process).
	 */
	update_saved_ttbr0(tsk, next);
}

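/*
 * On a system where only some CPUs can execute 32-bit EL0 code, restrict
 * compat tasks to the subset of CPUs that can actually run them.
 */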
static inline const struct cpumask *
task_cpu_possible_mask(struct task_struct *p)
{
	if (!static_branch_unlikely(&arm64_mismatched_32bit_el0))
		return cpu_possible_mask;

	if (!is_compat_thread(task_thread_info(p)))
		return cpu_possible_mask;

	return system_32bit_el0_cpumask();
}
#define task_cpu_possible_mask	task_cpu_possible_mask

void verify_cpu_asid_bits(void);
void post_ttbr_update_workaround(void);

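/*
 * Pin/unpin the ASID of an mm so that it survives rollover and is never handed
 * out to another mm while an external user (e.g. an IOMMU sharing the page
 * tables) still relies on it.
 */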
unsigned long arm64_mm_context_get(struct mm_struct *mm);
void arm64_mm_context_put(struct mm_struct *mm);

#include <asm-generic/mmu_context.h>

#endif /* !__ASSEMBLY__ */

#endif /* !__ASM_MMU_CONTEXT_H */