// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDSO implementations.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/vdso_datastore.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>

enum vdso_abi {
	VDSO_ABI_AA64,
	VDSO_ABI_AA32,
};

struct vdso_abi_info {
	const char *name;
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Code Mapping */
	struct vm_special_mapping *cm;
};

static struct vdso_abi_info vdso_info[] __ro_after_init = {
	[VDSO_ABI_AA64] = {
		.name = "vdso",
		.vdso_code_start = vdso_start,
		.vdso_code_end = vdso_end,
	},
#ifdef CONFIG_COMPAT_VDSO
	[VDSO_ABI_AA32] = {
		.name = "vdso32",
		.vdso_code_start = vdso32_start,
		.vdso_code_end = vdso32_end,
	},
#endif /* CONFIG_COMPAT_VDSO */
};

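/*
 * The vDSO is a special mapping, so userspace (e.g. checkpoint/restore
 * tools) may legitimately move it with mremap(). Track the new base
 * address in the mm context, which the signal code uses to locate the
 * sigreturn trampoline.
 */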
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}
67
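/*
 * Sanity-check the vDSO image and record its pages: verify the ELF
 * magic, compute the number of text pages from the linker-provided
 * start/end markers and build the page array handed to the special
 * mapping.
 */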
static int __init __vdso_init(enum vdso_abi abi)
{
	int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_info[abi].vdso_code_start, "\177ELF", 4)) {
		pr_err("vDSO is not a valid ELF object!\n");
		return -EINVAL;
	}

	vdso_info[abi].vdso_pages = (
		vdso_info[abi].vdso_code_end -
		vdso_info[abi].vdso_code_start) >>
		PAGE_SHIFT;

	vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages,
				sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		return -ENOMEM;

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_info[abi].vdso_code_start);

	for (i = 0; i < vdso_info[abi].vdso_pages; i++)
		vdso_pagelist[i] = pfn_to_page(pfn + i);

	vdso_info[abi].cm->pages = vdso_pagelist;

	return 0;
}
100
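/*
 * Map the vDSO into an mm. The reservation is laid out as:
 *
 *	vdso_base:				vvar/data pages
 *	vdso_base + VDSO_NR_PAGES * PAGE_SIZE:	vDSO text
 *
 * The text is mapped with VM_ARM64_BTI when the system supports BTI
 * for kernel-built code.
 */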
static int __setup_additional_pages(enum vdso_abi abi,
				    struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	unsigned long gp_flags = 0;
	void *ret;

	BUILD_BUG_ON(VDSO_NR_PAGES != __VDSO_PAGES);

	vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data page */
	vdso_mapping_len = vdso_text_len + VDSO_NR_PAGES * PAGE_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = vdso_install_vvar_mapping(mm, vdso_base);
	if (IS_ERR(ret))
		goto up_fail;

	if (system_supports_bti_kernel())
		gp_flags = VM_ARM64_BTI;

	vdso_base += VDSO_NR_PAGES * PAGE_SIZE;
	mm->context.vdso = (void *)vdso_base;
	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
				       VM_READ|VM_EXEC|gp_flags|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
				       VM_SEALED_SYSMAP,
				       vdso_info[abi].cm);
	if (IS_ERR(ret))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}

#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
enum aarch32_map {
	AA32_MAP_VECTORS, /* kuser helpers */
	AA32_MAP_SIGPAGE,
	AA32_MAP_VDSO,
};

static struct page *aarch32_vectors_page __ro_after_init;
static struct page *aarch32_sig_page __ro_after_init;

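/*
 * As with the native vDSO, keep the cached sigpage address in sync
 * when userspace moves the mapping, so compat sigreturn keeps pointing
 * at the trampoline.
 */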
static int aarch32_sigpage_mremap(const struct vm_special_mapping *sm,
				  struct vm_area_struct *new_vma)
{
	current->mm->context.sigpage = (void *)new_vma->vm_start;

	return 0;
}

static struct vm_special_mapping aarch32_vdso_maps[] = {
	[AA32_MAP_VECTORS] = {
		.name = "[vectors]", /* ABI */
		.pages = &aarch32_vectors_page,
	},
	[AA32_MAP_SIGPAGE] = {
		.name = "[sigpage]", /* ABI */
		.pages = &aarch32_sig_page,
		.mremap = aarch32_sigpage_mremap,
	},
	[AA32_MAP_VDSO] = {
		.name = "[vdso]",
		.mremap = vdso_mremap,
	},
};

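/*
 * Copy the kuser helpers into the top of a zeroed page so that, once
 * the page is mapped at AARCH32_VECTORS_BASE (0xffff0000), the helper
 * entry points land at their fixed ABI addresses just below 0xffff1000.
 */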
static int aarch32_alloc_kuser_vdso_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
	unsigned long vdso_page;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	vdso_page = get_zeroed_page(GFP_KERNEL);
	if (!vdso_page)
		return -ENOMEM;

	memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
	       kuser_sz);
	aarch32_vectors_page = virt_to_page((void *)vdso_page);
	return 0;
}

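/*
 * Word used to poison the unused part of the sigpage: an encoding that
 * is expected to be undefined for AArch32 tasks, so a stray branch into
 * the page faults rather than executing leftover data.
 */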
#define COMPAT_SIGPAGE_POISON_WORD	0xe7fddef1
static int aarch32_alloc_sigpage(void)
{
	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
	__le32 poison = cpu_to_le32(COMPAT_SIGPAGE_POISON_WORD);
	void *sigpage;

	sigpage = (void *)__get_free_page(GFP_KERNEL);
	if (!sigpage)
		return -ENOMEM;

	memset32(sigpage, (__force u32)poison, PAGE_SIZE / sizeof(poison));
	memcpy(sigpage, __aarch32_sigret_code_start, sigret_sz);
	aarch32_sig_page = virt_to_page(sigpage);
	return 0;
}

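/* Set up the compat vDSO code mapping, if it was built in. */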
static int __init __aarch32_alloc_vdso_pages(void)
{
	if (!IS_ENABLED(CONFIG_COMPAT_VDSO))
		return 0;

	vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA32);
}

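/*
 * Allocate all of the AArch32 special pages (vDSO, sigpage, kuser
 * vectors) once at boot; the same pages are then mapped into every
 * compat task.
 */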
static int __init aarch32_alloc_vdso_pages(void)
{
	int ret;

	ret = __aarch32_alloc_vdso_pages();
	if (ret)
		return ret;

	ret = aarch32_alloc_sigpage();
	if (ret)
		return ret;

	return aarch32_alloc_kuser_vdso_page();
}
arch_initcall(aarch32_alloc_vdso_pages);

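/* Map the kuser helpers page at its fixed ABI address. */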
static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
	void *ret;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	/*
	 * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
	 * not safe to CoW the page containing the CPU exception vectors.
	 */
	ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYEXEC |
				       VM_SEALED_SYSMAP,
				       &aarch32_vdso_maps[AA32_MAP_VECTORS]);

	return PTR_ERR_OR_ZERO(ret);
}

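/*
 * Install the sigreturn trampoline page at an address chosen by
 * get_unmapped_area() and remember it for compat signal delivery.
 */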
static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
	unsigned long addr;
	void *ret;

	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = ERR_PTR(addr);
		goto out;
	}

	/*
	 * VM_MAYWRITE is required to allow gdb to Copy-on-Write and
	 * set breakpoints.
	 */
	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC |
				       VM_SEALED_SYSMAP,
				       &aarch32_vdso_maps[AA32_MAP_SIGPAGE]);
	if (IS_ERR(ret))
		goto out;

	mm->context.sigpage = (void *)addr;

out:
	return PTR_ERR_OR_ZERO(ret);
}

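/*
 * Called at exec time for compat tasks: install the kuser helpers, the
 * compat vDSO (when built in) and the sigpage under a single mmap
 * write lock.
 */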
int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = aarch32_kuser_helpers_setup(mm);
	if (ret)
		goto out;

	if (IS_ENABLED(CONFIG_COMPAT_VDSO)) {
		ret = __setup_additional_pages(VDSO_ABI_AA32, mm, bprm,
					       uses_interp);
		if (ret)
			goto out;
	}

	ret = aarch32_sigreturn_setup(mm);
out:
	mmap_write_unlock(mm);
	return ret;
}
#endif /* CONFIG_COMPAT */

static struct vm_special_mapping aarch64_vdso_map __ro_after_init = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};

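/* Hook up the native code mapping and initialise the vDSO page array. */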
static int __init vdso_init(void)
{
	vdso_info[VDSO_ABI_AA64].cm = &aarch64_vdso_map;

	return __vdso_init(VDSO_ABI_AA64);
}
arch_initcall(vdso_init);

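/*
 * Called at exec time for native tasks: map the AArch64 vDSO into the
 * new mm.
 */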
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(VDSO_ABI_AA64, mm, bprm, uses_interp);
	mmap_write_unlock(mm);

	return ret;
}