xref: /linux/arch/arm64/kernel/vdso.c (revision 3ee16ff3437ca5388d8b60a122fde94f896f50d3)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDSO implementations.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>

extern char vdso_start[], vdso_end[];
#ifdef CONFIG_COMPAT_VDSO
extern char vdso32_start[], vdso32_end[];
#endif /* CONFIG_COMPAT_VDSO */

/* Index into vdso_lookup[] below. */
enum arch_vdso_type {
	ARM64_VDSO,
#ifdef CONFIG_COMPAT_VDSO
	ARM64_VDSO32,
#endif /* CONFIG_COMPAT_VDSO */
};

struct __vdso_abi {
	const char *name;
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Data mapping: the [vvar] special mapping */
	struct vm_special_mapping *dm;
	/* Code mapping: the [vdso] special mapping */
	struct vm_special_mapping *cm;
};

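/*
 * One entry per vDSO image built into the kernel; the dm/cm pointers
 * are wired up to the matching vm_special_mappings at initcall time.
 */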
static struct __vdso_abi vdso_lookup[] __ro_after_init = {
	[ARM64_VDSO] = {
		.name = "vdso",
		.vdso_code_start = vdso_start,
		.vdso_code_end = vdso_end,
	},
#ifdef CONFIG_COMPAT_VDSO
	[ARM64_VDSO32] = {
		.name = "vdso32",
		.vdso_code_start = vdso32_start,
		.vdso_code_end = vdso32_end,
	},
#endif /* CONFIG_COMPAT_VDSO */
};

/*
 * The vDSO data page. The union pads the vdso_data array out to
 * exactly one page, which is mapped read-only into every user address
 * space as the [vvar] page.
 */
static union {
	struct vdso_data	data[CS_BASES];
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;

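/*
 * mremap() hook for the [vdso] special mappings: refuse any resize and
 * keep mm->context.vdso tracking the relocated base address.
 */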
static int __vdso_remap(enum arch_vdso_type arch_index,
			const struct vm_special_mapping *sm,
			struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	unsigned long vdso_size = vdso_lookup[arch_index].vdso_code_end -
				  vdso_lookup[arch_index].vdso_code_start;

	if (vdso_size != new_size)
		return -EINVAL;

	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}

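/*
 * One-time initialisation of a vDSO image: sanity-check the ELF magic,
 * compute the number of code pages and build the pagelist shared by
 * the data and code special mappings (data page first, code after).
 */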
static int __vdso_init(enum arch_vdso_type arch_index)
{
	int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_lookup[arch_index].vdso_code_start, "\177ELF", 4)) {
		pr_err("vDSO is not a valid ELF object!\n");
		return -EINVAL;
	}

	vdso_lookup[arch_index].vdso_pages = (
			vdso_lookup[arch_index].vdso_code_end -
			vdso_lookup[arch_index].vdso_code_start) >>
			PAGE_SHIFT;

	/* Allocate the vDSO pagelist, plus a page for the data. */
	vdso_pagelist = kcalloc(vdso_lookup[arch_index].vdso_pages + 1,
				sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		return -ENOMEM;

	/* Grab the vDSO data page. */
	vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_lookup[arch_index].vdso_code_start);

	for (i = 0; i < vdso_lookup[arch_index].vdso_pages; i++)
		vdso_pagelist[i + 1] = pfn_to_page(pfn + i);

	vdso_lookup[arch_index].dm->pages = &vdso_pagelist[0];
	vdso_lookup[arch_index].cm->pages = &vdso_pagelist[1];

	return 0;
}

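/*
 * Map a vDSO image into a task: the read-only data page ([vvar]) is
 * installed first, immediately followed by the vDSO code pages
 * ([vdso]). The code mapping keeps VM_MAYWRITE so that a debugger can
 * CoW the pages to plant breakpoints.
 */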
static int __setup_additional_pages(enum arch_vdso_type arch_index,
				    struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	void *ret;

	vdso_text_len = vdso_lookup[arch_index].vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data page */
	vdso_mapping_len = vdso_text_len + PAGE_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
				       VM_READ|VM_MAYREAD,
				       vdso_lookup[arch_index].dm);
	if (IS_ERR(ret))
		goto up_fail;

	vdso_base += PAGE_SIZE;
	mm->context.vdso = (void *)vdso_base;
	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_lookup[arch_index].cm);
	if (IS_ERR(ret))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}

#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
#ifdef CONFIG_COMPAT_VDSO
static int aarch32_vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	return __vdso_remap(ARM64_VDSO32, sm, new_vma);
}
#endif /* CONFIG_COMPAT_VDSO */

/*
 * aarch32_vdso_spec[] layout:
 * 0 - kuser helpers
 * 1 - sigreturn code
 * or (CONFIG_COMPAT_VDSO):
 * 0 - kuser helpers
 * 1 - vdso data
 * 2 - vdso code
 */
#define C_VECTORS	0
#ifdef CONFIG_COMPAT_VDSO
#define C_VVAR		1
#define C_VDSO		2
#define C_PAGES		(C_VDSO + 1)
#else
#define C_SIGPAGE	1
#define C_PAGES		(C_SIGPAGE + 1)
#endif /* CONFIG_COMPAT_VDSO */

static struct page *aarch32_vectors_page __ro_after_init;
#ifndef CONFIG_COMPAT_VDSO
static struct page *aarch32_sig_page __ro_after_init;
#endif

static struct vm_special_mapping aarch32_vdso_spec[C_PAGES] = {
	{
		.name	= "[vectors]", /* ABI */
		.pages	= &aarch32_vectors_page,
	},
#ifdef CONFIG_COMPAT_VDSO
	{
		.name = "[vvar]",
	},
	{
		.name = "[vdso]",
		.mremap = aarch32_vdso_mremap,
	},
#else
	{
		.name	= "[sigpage]", /* ABI */
		.pages	= &aarch32_sig_page,
	},
#endif /* CONFIG_COMPAT_VDSO */
};

static int aarch32_alloc_kuser_vdso_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
	unsigned long vdso_page;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	vdso_page = get_zeroed_page(GFP_ATOMIC);
	if (!vdso_page)
		return -ENOMEM;

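	/*
	 * The AArch32 kuser helpers ABI places the helpers immediately
	 * below 0xffff1000, so copy them to end exactly 0x1000 bytes
	 * into the page that is later mapped at AARCH32_VECTORS_BASE
	 * (independent of the kernel's PAGE_SIZE).
	 */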
	memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
	       kuser_sz);
	aarch32_vectors_page = virt_to_page(vdso_page);
	flush_dcache_page(aarch32_vectors_page);
	return 0;
}

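/*
 * With CONFIG_COMPAT_VDSO the sigreturn trampoline is provided by the
 * compat vDSO image itself; without it, a separate [sigpage] carries a
 * copy of the sigreturn code instead.
 */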
#ifdef CONFIG_COMPAT_VDSO
static int __aarch32_alloc_vdso_pages(void)
{
	int ret;

	vdso_lookup[ARM64_VDSO32].dm = &aarch32_vdso_spec[C_VVAR];
	vdso_lookup[ARM64_VDSO32].cm = &aarch32_vdso_spec[C_VDSO];

	ret = __vdso_init(ARM64_VDSO32);
	if (ret)
		return ret;

	return aarch32_alloc_kuser_vdso_page();
}
#else
static int __aarch32_alloc_vdso_pages(void)
{
	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
	unsigned long sigpage;
	int ret;

	sigpage = get_zeroed_page(GFP_ATOMIC);
	if (!sigpage)
		return -ENOMEM;

	memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
	aarch32_sig_page = virt_to_page(sigpage);
	flush_dcache_page(aarch32_sig_page);

	ret = aarch32_alloc_kuser_vdso_page();
	if (ret)
		free_page(sigpage);

	return ret;
}
#endif /* CONFIG_COMPAT_VDSO */

static int __init aarch32_alloc_vdso_pages(void)
{
	return __aarch32_alloc_vdso_pages();
}
arch_initcall(aarch32_alloc_vdso_pages);

static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
	void *ret;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	/*
	 * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
	 * not safe to CoW the page containing the CPU exception vectors.
	 */
	ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYEXEC,
				       &aarch32_vdso_spec[C_VECTORS]);

	return PTR_ERR_OR_ZERO(ret);
}

#ifndef CONFIG_COMPAT_VDSO
static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
	unsigned long addr;
	void *ret;

	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = ERR_PTR(addr);
		goto out;
	}

	/*
	 * VM_MAYWRITE is required so that gdb can copy-on-write the
	 * page and set breakpoints in it.
	 */
	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC,
				       &aarch32_vdso_spec[C_SIGPAGE]);
	if (IS_ERR(ret))
		goto out;

	mm->context.vdso = (void *)addr;

out:
	return PTR_ERR_OR_ZERO(ret);
}
#endif /* !CONFIG_COMPAT_VDSO */

int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	ret = aarch32_kuser_helpers_setup(mm);
	if (ret)
		goto out;

#ifdef CONFIG_COMPAT_VDSO
	ret = __setup_additional_pages(ARM64_VDSO32,
				       mm,
				       bprm,
				       uses_interp);
#else
	ret = aarch32_sigreturn_setup(mm);
#endif /* CONFIG_COMPAT_VDSO */

out:
	up_write(&mm->mmap_sem);
	return ret;
}
#endif /* CONFIG_COMPAT */

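/* mremap() hook for the 64-bit [vdso] mapping. */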
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	return __vdso_remap(ARM64_VDSO, sm, new_vma);
}

/*
 * vdso_spec[] layout:
 * 0 - vvar
 * 1 - vdso
 */
#define A_VVAR		0
#define A_VDSO		1
#define A_PAGES		(A_VDSO + 1)
static struct vm_special_mapping vdso_spec[A_PAGES] __ro_after_init = {
	{
		.name	= "[vvar]",
	},
	{
		.name	= "[vdso]",
		.mremap = vdso_mremap,
	},
};

static int __init vdso_init(void)
{
	vdso_lookup[ARM64_VDSO].dm = &vdso_spec[A_VVAR];
	vdso_lookup[ARM64_VDSO].cm = &vdso_spec[A_VDSO];

	return __vdso_init(ARM64_VDSO);
}
arch_initcall(vdso_init);

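/*
 * Called from the ELF loader at exec time to map the 64-bit vDSO into
 * the new process.
 */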
int arch_setup_additional_pages(struct linux_binprm *bprm,
				int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	ret = __setup_additional_pages(ARM64_VDSO,
				       mm,
				       bprm,
				       uses_interp);

	up_write(&mm->mmap_sem);

	return ret;
}