xref: /linux/arch/arm64/kernel/vdso.c (revision bf740a905ffedda60d2dacbfa0c3aca81490fda1)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDSO implementations.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>

extern char vdso_start[], vdso_end[];
#ifdef CONFIG_COMPAT_VDSO
extern char vdso32_start[], vdso32_end[];
#endif /* CONFIG_COMPAT_VDSO */

/* vdso_lookup arch_index */
enum arch_vdso_type {
	ARM64_VDSO = 0,
#ifdef CONFIG_COMPAT_VDSO
	ARM64_VDSO32 = 1,
#endif /* CONFIG_COMPAT_VDSO */
};
#ifdef CONFIG_COMPAT_VDSO
#define VDSO_TYPES		(ARM64_VDSO32 + 1)
#else
#define VDSO_TYPES		(ARM64_VDSO + 1)
#endif /* CONFIG_COMPAT_VDSO */

struct __vdso_abi {
	const char *name;
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Data Mapping */
	struct vm_special_mapping *dm;
	/* Code Mapping */
	struct vm_special_mapping *cm;
};

static struct __vdso_abi vdso_lookup[VDSO_TYPES] __ro_after_init = {
	{
		.name = "vdso",
		.vdso_code_start = vdso_start,
		.vdso_code_end = vdso_end,
	},
#ifdef CONFIG_COMPAT_VDSO
	{
		.name = "vdso32",
		.vdso_code_start = vdso32_start,
		.vdso_code_end = vdso32_end,
	},
#endif /* CONFIG_COMPAT_VDSO */
};

/*
 * The vDSO data page.
 */
static union {
	struct vdso_data	data[CS_BASES];
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;

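/*
 * Common mremap() callback for the 64-bit and compat vDSO code mappings:
 * reject any attempt to resize the mapping and record the new base
 * address in the mm context.
 */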
static int __vdso_remap(enum arch_vdso_type arch_index,
			const struct vm_special_mapping *sm,
			struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	unsigned long vdso_size = vdso_lookup[arch_index].vdso_code_end -
				  vdso_lookup[arch_index].vdso_code_start;

	if (vdso_size != new_size)
		return -EINVAL;

	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}

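/*
 * Validate the vDSO image and build its pagelist: entry 0 is the shared
 * vDSO data page, entries 1..vdso_pages are the vDSO code pages. The
 * data (dm) and code (cm) special mappings are then pointed at the
 * relevant parts of that pagelist.
 */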
static int __vdso_init(enum arch_vdso_type arch_index)
{
	int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_lookup[arch_index].vdso_code_start, "\177ELF", 4)) {
		pr_err("vDSO is not a valid ELF object!\n");
		return -EINVAL;
	}

	vdso_lookup[arch_index].vdso_pages = (
			vdso_lookup[arch_index].vdso_code_end -
			vdso_lookup[arch_index].vdso_code_start) >>
			PAGE_SHIFT;

	/* Allocate the vDSO pagelist, plus a page for the data. */
	vdso_pagelist = kcalloc(vdso_lookup[arch_index].vdso_pages + 1,
				sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		return -ENOMEM;

	/* Grab the vDSO data page. */
	vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_lookup[arch_index].vdso_code_start);

	for (i = 0; i < vdso_lookup[arch_index].vdso_pages; i++)
		vdso_pagelist[i + 1] = pfn_to_page(pfn + i);

	vdso_lookup[arch_index].dm->pages = &vdso_pagelist[0];
	vdso_lookup[arch_index].cm->pages = &vdso_pagelist[1];

	return 0;
}

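/*
 * Map the vDSO into a process: one read-only data page followed
 * immediately by the vDSO code pages (read/exec, with BTI guarded-page
 * semantics when supported). Expects mm->mmap_sem to be held for
 * writing by the caller.
 */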
static int __setup_additional_pages(enum arch_vdso_type arch_index,
				    struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	unsigned long gp_flags = 0;
	void *ret;

	vdso_text_len = vdso_lookup[arch_index].vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data page */
	vdso_mapping_len = vdso_text_len + PAGE_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
				       VM_READ|VM_MAYREAD,
				       vdso_lookup[arch_index].dm);
	if (IS_ERR(ret))
		goto up_fail;

	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti())
		gp_flags = VM_ARM64_BTI;

	vdso_base += PAGE_SIZE;
	mm->context.vdso = (void *)vdso_base;
	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
				       VM_READ|VM_EXEC|gp_flags|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_lookup[arch_index].cm);
	if (IS_ERR(ret))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}

#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
#ifdef CONFIG_COMPAT_VDSO
static int aarch32_vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	return __vdso_remap(ARM64_VDSO32, sm, new_vma);
}
#endif /* CONFIG_COMPAT_VDSO */

/*
 * aarch32_vdso_pages:
 * 0 - kuser helpers
 * 1 - sigreturn code
 * or (CONFIG_COMPAT_VDSO):
 * 0 - kuser helpers
 * 1 - vdso data
 * 2 - vdso code
 */
#define C_VECTORS	0
#ifdef CONFIG_COMPAT_VDSO
#define C_VVAR		1
#define C_VDSO		2
#define C_PAGES		(C_VDSO + 1)
#else
#define C_SIGPAGE	1
#define C_PAGES		(C_SIGPAGE + 1)
#endif /* CONFIG_COMPAT_VDSO */
static struct page *aarch32_vdso_pages[C_PAGES] __ro_after_init;
static struct vm_special_mapping aarch32_vdso_spec[C_PAGES] = {
	{
		.name	= "[vectors]", /* ABI */
		.pages	= &aarch32_vdso_pages[C_VECTORS],
	},
#ifdef CONFIG_COMPAT_VDSO
	{
		.name = "[vvar]",
	},
	{
		.name = "[vdso]",
		.mremap = aarch32_vdso_mremap,
	},
#else
	{
		.name	= "[sigpage]", /* ABI */
		.pages	= &aarch32_vdso_pages[C_SIGPAGE],
	},
#endif /* CONFIG_COMPAT_VDSO */
};

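/*
 * Allocate the [vectors] page and copy the kuser helpers into the top
 * of it, so the helpers end exactly at the 4K page boundary. This is a
 * no-op returning 0 when CONFIG_KUSER_HELPERS is disabled.
 */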
static int aarch32_alloc_kuser_vdso_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
	unsigned long vdso_page;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	vdso_page = get_zeroed_page(GFP_ATOMIC);
	if (!vdso_page)
		return -ENOMEM;

	memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
	       kuser_sz);
	aarch32_vdso_pages[C_VECTORS] = virt_to_page(vdso_page);
	flush_dcache_page(aarch32_vdso_pages[C_VECTORS]);
	return 0;
}

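/*
 * Boot-time allocation of the compat pages: with CONFIG_COMPAT_VDSO the
 * 32-bit vDSO pagelist is built via __vdso_init(), otherwise a
 * standalone [sigpage] holding the sigreturn trampolines is populated.
 * The kuser helper page is allocated in both cases.
 */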
#ifdef CONFIG_COMPAT_VDSO
static int __aarch32_alloc_vdso_pages(void)
{
	int ret;

	vdso_lookup[ARM64_VDSO32].dm = &aarch32_vdso_spec[C_VVAR];
	vdso_lookup[ARM64_VDSO32].cm = &aarch32_vdso_spec[C_VDSO];

	ret = __vdso_init(ARM64_VDSO32);
	if (ret)
		return ret;

	return aarch32_alloc_kuser_vdso_page();
}
#else
static int __aarch32_alloc_vdso_pages(void)
{
	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
	unsigned long sigpage;
	int ret;

	sigpage = get_zeroed_page(GFP_ATOMIC);
	if (!sigpage)
		return -ENOMEM;

	memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
	aarch32_vdso_pages[C_SIGPAGE] = virt_to_page(sigpage);
	flush_dcache_page(aarch32_vdso_pages[C_SIGPAGE]);

	ret = aarch32_alloc_kuser_vdso_page();
	if (ret)
		free_page(sigpage);

	return ret;
}
#endif /* CONFIG_COMPAT_VDSO */

static int __init aarch32_alloc_vdso_pages(void)
{
	return __aarch32_alloc_vdso_pages();
}
arch_initcall(aarch32_alloc_vdso_pages);

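/*
 * Install the [vectors] page at the fixed AARCH32_VECTORS_BASE address,
 * or do nothing if kuser helpers are disabled.
 */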
static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
	void *ret;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	/*
	 * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
	 * not safe to CoW the page containing the CPU exception vectors.
	 */
	ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYEXEC,
				       &aarch32_vdso_spec[C_VECTORS]);

	return PTR_ERR_OR_ZERO(ret);
}

#ifndef CONFIG_COMPAT_VDSO
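/*
 * Map the [sigpage] containing the sigreturn trampolines and record its
 * address in the mm context.
 */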
static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
	unsigned long addr;
	void *ret;

	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = ERR_PTR(addr);
		goto out;
	}

	/*
	 * VM_MAYWRITE is required to allow gdb to Copy-on-Write and
	 * set breakpoints.
	 */
	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC,
				       &aarch32_vdso_spec[C_SIGPAGE]);
	if (IS_ERR(ret))
		goto out;

	mm->context.vdso = (void *)addr;

out:
	return PTR_ERR_OR_ZERO(ret);
}
#endif /* !CONFIG_COMPAT_VDSO */

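/*
 * Exec-time setup for 32-bit tasks: map the [vectors] page, then either
 * the compat vDSO (CONFIG_COMPAT_VDSO) or the [sigpage].
 */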
int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	ret = aarch32_kuser_helpers_setup(mm);
	if (ret)
		goto out;

#ifdef CONFIG_COMPAT_VDSO
	ret = __setup_additional_pages(ARM64_VDSO32,
				       mm,
				       bprm,
				       uses_interp);
#else
	ret = aarch32_sigreturn_setup(mm);
#endif /* CONFIG_COMPAT_VDSO */

out:
	up_write(&mm->mmap_sem);
	return ret;
}
#endif /* CONFIG_COMPAT */

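/* mremap() callback for the 64-bit vDSO code mapping. */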
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	return __vdso_remap(ARM64_VDSO, sm, new_vma);
}

/*
 * aarch64_vdso_pages:
 * 0 - vvar
 * 1 - vdso
 */
#define A_VVAR		0
#define A_VDSO		1
#define A_PAGES		(A_VDSO + 1)
static struct vm_special_mapping vdso_spec[A_PAGES] __ro_after_init = {
	{
		.name	= "[vvar]",
	},
	{
		.name	= "[vdso]",
		.mremap = vdso_mremap,
	},
};

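/*
 * Boot-time initialisation of the 64-bit vDSO: wire up the [vvar] and
 * [vdso] special mappings and build the pagelist.
 */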
static int __init vdso_init(void)
{
	vdso_lookup[ARM64_VDSO].dm = &vdso_spec[A_VVAR];
	vdso_lookup[ARM64_VDSO].cm = &vdso_spec[A_VDSO];

	return __vdso_init(ARM64_VDSO);
}
arch_initcall(vdso_init);

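/*
 * Exec-time setup for 64-bit tasks: map the vDSO data and code pages
 * into the new mm.
 */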
int arch_setup_additional_pages(struct linux_binprm *bprm,
				int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	ret = __setup_additional_pages(ARM64_VDSO,
				       mm,
				       bprm,
				       uses_interp);

	up_write(&mm->mmap_sem);

	return ret;
}