xref: /linux/arch/arm64/kernel/vdso.c (revision d3418f3839b667842eba8688ca8ebe84eca158db)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDSO implementations.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>

extern char vdso_start[], vdso_end[];
#ifdef CONFIG_COMPAT_VDSO
extern char vdso32_start[], vdso32_end[];
#endif /* CONFIG_COMPAT_VDSO */

enum vdso_abi {
	VDSO_ABI_AA64,
#ifdef CONFIG_COMPAT_VDSO
	VDSO_ABI_AA32,
#endif /* CONFIG_COMPAT_VDSO */
};

struct vdso_abi_info {
	const char *name;
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Data Mapping */
	struct vm_special_mapping *dm;
	/* Code Mapping */
	struct vm_special_mapping *cm;
};

static struct vdso_abi_info vdso_info[] __ro_after_init = {
	[VDSO_ABI_AA64] = {
		.name = "vdso",
		.vdso_code_start = vdso_start,
		.vdso_code_end = vdso_end,
	},
#ifdef CONFIG_COMPAT_VDSO
	[VDSO_ABI_AA32] = {
		.name = "vdso32",
		.vdso_code_start = vdso32_start,
		.vdso_code_end = vdso32_end,
	},
#endif /* CONFIG_COMPAT_VDSO */
};

/*
 * The vDSO data page.
 */
static union {
	struct vdso_data	data[CS_BASES];
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;
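
/*
 * Note: the union pads the vdso_data array out to exactly one page, and
 * __page_aligned_data places it on a page boundary, so __pa_symbol() on
 * its address below resolves to a single, well-defined page. CS_BASES
 * (from <vdso/datapage.h>) is the number of clocksource bases the
 * generic vDSO code maintains (high-resolution/coarse and raw).
 */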

static int __vdso_remap(enum vdso_abi abi,
			const struct vm_special_mapping *sm,
			struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	unsigned long vdso_size = vdso_info[abi].vdso_code_end -
				  vdso_info[abi].vdso_code_start;

	if (vdso_size != new_size)
		return -EINVAL;

	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}
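
/*
 * The mremap() hook above keeps mm->context.vdso tracking the mapping
 * if userspace moves it wholesale (e.g. CRIU restoring a checkpoint),
 * while rejecting any attempt to resize it. Keeping the pointer
 * accurate matters because the signal return path hands userspace the
 * address of the sigreturn trampoline inside the vDSO text.
 */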

static int __vdso_init(enum vdso_abi abi)
{
	int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_info[abi].vdso_code_start, "\177ELF", 4)) {
		pr_err("vDSO is not a valid ELF object!\n");
		return -EINVAL;
	}

	vdso_info[abi].vdso_pages = (
			vdso_info[abi].vdso_code_end -
			vdso_info[abi].vdso_code_start) >>
			PAGE_SHIFT;

	/* Allocate the vDSO pagelist, plus a page for the data. */
	vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages + 1,
				sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		return -ENOMEM;

	/* Grab the vDSO data page. */
	vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_info[abi].vdso_code_start);

	for (i = 0; i < vdso_info[abi].vdso_pages; i++)
		vdso_pagelist[i + 1] = pfn_to_page(pfn + i);

	vdso_info[abi].dm->pages = &vdso_pagelist[0];
	vdso_info[abi].cm->pages = &vdso_pagelist[1];

	return 0;
}
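
/*
 * Resulting pagelist layout (a sketch):
 *
 *   vdso_pagelist[0]                -> the shared vdso_data page
 *   vdso_pagelist[1..vdso_pages]    -> the vDSO code pages
 *
 * dm->pages points at element 0 and cm->pages at element 1, so the
 * data and code special mappings share a single allocation.
 */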

static int __setup_additional_pages(enum vdso_abi abi,
				    struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	void *ret;

	vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data page */
	vdso_mapping_len = vdso_text_len + PAGE_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
				       VM_READ|VM_MAYREAD,
				       vdso_info[abi].dm);
	if (IS_ERR(ret))
		goto up_fail;

	vdso_base += PAGE_SIZE;
	mm->context.vdso = (void *)vdso_base;
	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_info[abi].cm);
	if (IS_ERR(ret))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}
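
/*
 * Address-space layout produced above (base chosen by
 * get_unmapped_area(), shown only as an illustration):
 *
 *   vdso_base              [vvar]  PAGE_SIZE      r--  (data page)
 *   vdso_base + PAGE_SIZE  [vdso]  vdso_text_len  r-x  (code pages)
 *
 * mm->context.vdso records the start of the *code* mapping, which is
 * what the ELF loader advertises to the new process via the
 * AT_SYSINFO_EHDR auxv entry.
 */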

#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
#ifdef CONFIG_COMPAT_VDSO
static int aarch32_vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	return __vdso_remap(VDSO_ABI_AA32, sm, new_vma);
}
#endif /* CONFIG_COMPAT_VDSO */

/*
 * aarch32_vdso_pages:
 * 0 - kuser helpers
 * 1 - sigreturn code
 * or (CONFIG_COMPAT_VDSO):
 * 0 - kuser helpers
 * 1 - vdso data
 * 2 - vdso code
 */
#define C_VECTORS	0
#ifdef CONFIG_COMPAT_VDSO
#define C_VVAR		1
#define C_VDSO		2
#define C_PAGES		(C_VDSO + 1)
#else
#define C_SIGPAGE	1
#define C_PAGES		(C_SIGPAGE + 1)
#endif /* CONFIG_COMPAT_VDSO */

static struct page *aarch32_vectors_page __ro_after_init;
#ifndef CONFIG_COMPAT_VDSO
static struct page *aarch32_sig_page __ro_after_init;
#endif

static struct vm_special_mapping aarch32_vdso_spec[C_PAGES] = {
	{
		.name	= "[vectors]", /* ABI */
		.pages	= &aarch32_vectors_page,
	},
#ifdef CONFIG_COMPAT_VDSO
	{
		.name = "[vvar]",
	},
	{
		.name = "[vdso]",
		.mremap = aarch32_vdso_mremap,
	},
#else
	{
		.name	= "[sigpage]", /* ABI */
		.pages	= &aarch32_sig_page,
	},
#endif /* CONFIG_COMPAT_VDSO */
};

static int aarch32_alloc_kuser_vdso_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
	unsigned long vdso_page;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	vdso_page = get_zeroed_page(GFP_ATOMIC);
	if (!vdso_page)
		return -ENOMEM;

	memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
	       kuser_sz);
	aarch32_vectors_page = virt_to_page(vdso_page);
	flush_dcache_page(aarch32_vectors_page);
	return 0;
}
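
/*
 * The helpers are copied to the *top* of the page (0x1000 - kuser_sz)
 * because the kuser helper ABI fixes them at addresses just below
 * 0xffff1000 once the page is mapped at AARCH32_VECTORS_BASE
 * (0xffff0000). A 32-bit task calls them by absolute address, e.g.
 * (AArch32 assembly, address fixed by the ABI):
 *
 *	ldr	r0, =0xffff0fe0		@ __kuser_get_tls
 *	blx	r0			@ TLS pointer returned in r0
 *
 * See Documentation/arm/kernel_user_helpers.rst for the full ABI.
 */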

#ifdef CONFIG_COMPAT_VDSO
static int __aarch32_alloc_vdso_pages(void)
{
	int ret;

	vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_spec[C_VVAR];
	vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_spec[C_VDSO];

	ret = __vdso_init(VDSO_ABI_AA32);
	if (ret)
		return ret;

	return aarch32_alloc_kuser_vdso_page();
}
#else
static int __aarch32_alloc_vdso_pages(void)
{
	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
	unsigned long sigpage;
	int ret;

	sigpage = get_zeroed_page(GFP_ATOMIC);
	if (!sigpage)
		return -ENOMEM;

	memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
	aarch32_sig_page = virt_to_page(sigpage);
	flush_dcache_page(aarch32_sig_page);

	ret = aarch32_alloc_kuser_vdso_page();
	if (ret)
		free_page(sigpage);

	return ret;
}
#endif /* CONFIG_COMPAT_VDSO */
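
/*
 * Without CONFIG_COMPAT_VDSO there is no [vvar]/[vdso] pair for compat
 * tasks; the [sigpage] above carries only the AArch32 sigreturn
 * trampolines (from arch/arm64/kernel/sigreturn32.S), and the signal
 * setup code aims the return address at them.
 */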

static int __init aarch32_alloc_vdso_pages(void)
{
	return __aarch32_alloc_vdso_pages();
}
arch_initcall(aarch32_alloc_vdso_pages);

static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
	void *ret;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	/*
	 * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
	 * not safe to CoW the page containing the CPU exception vectors.
	 */
	ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYEXEC,
				       &aarch32_vdso_spec[C_VECTORS]);

	return PTR_ERR_OR_ZERO(ret);
}

#ifndef CONFIG_COMPAT_VDSO
static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
	unsigned long addr;
	void *ret;

	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = ERR_PTR(addr);
		goto out;
	}

	/*
	 * VM_MAYWRITE is required to allow gdb to Copy-on-Write and
	 * set breakpoints.
	 */
	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC,
				       &aarch32_vdso_spec[C_SIGPAGE]);
	if (IS_ERR(ret))
		goto out;

	mm->context.vdso = (void *)addr;

out:
	return PTR_ERR_OR_ZERO(ret);
}
#endif /* !CONFIG_COMPAT_VDSO */

int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	ret = aarch32_kuser_helpers_setup(mm);
	if (ret)
		goto out;

#ifdef CONFIG_COMPAT_VDSO
	ret = __setup_additional_pages(VDSO_ABI_AA32,
				       mm,
				       bprm,
				       uses_interp);
#else
	ret = aarch32_sigreturn_setup(mm);
#endif /* CONFIG_COMPAT_VDSO */

out:
	up_write(&mm->mmap_sem);
	return ret;
}
#endif /* CONFIG_COMPAT */

static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	return __vdso_remap(VDSO_ABI_AA64, sm, new_vma);
}

/*
 * aarch64_vdso_pages:
 * 0 - vvar
 * 1 - vdso
 */
#define A_VVAR		0
#define A_VDSO		1
#define A_PAGES		(A_VDSO + 1)
static struct vm_special_mapping vdso_spec[A_PAGES] __ro_after_init = {
	{
		.name	= "[vvar]",
	},
	{
		.name	= "[vdso]",
		.mremap = vdso_mremap,
	},
};

static int __init vdso_init(void)
{
	vdso_info[VDSO_ABI_AA64].dm = &vdso_spec[A_VVAR];
	vdso_info[VDSO_ABI_AA64].cm = &vdso_spec[A_VDSO];

	return __vdso_init(VDSO_ABI_AA64);
}
arch_initcall(vdso_init);

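/*
 * arch_setup_additional_pages() runs from the ELF loader at exec time;
 * the resulting [vdso] address reaches the new process through the
 * AT_SYSINFO_EHDR auxv entry (see ARCH_DLINFO in <asm/elf.h>). A
 * minimal userspace sketch for locating it:
 *
 *	#include <sys/auxv.h>
 *	void *vdso = (void *)getauxval(AT_SYSINFO_EHDR);
 *	// parse the ELF object at 'vdso' to resolve symbols such as
 *	// __kernel_clock_gettime
 */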
int arch_setup_additional_pages(struct linux_binprm *bprm,
				int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	ret = __setup_additional_pages(VDSO_ABI_AA64,
				       mm,
				       bprm,
				       uses_interp);

	up_write(&mm->mmap_sem);

	return ret;
}