// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDSO implementations.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>

extern char vdso_start[], vdso_end[];
#ifdef CONFIG_COMPAT_VDSO
extern char vdso32_start[], vdso32_end[];
#endif /* CONFIG_COMPAT_VDSO */

enum vdso_abi {
	VDSO_ABI_AA64,
#ifdef CONFIG_COMPAT_VDSO
	VDSO_ABI_AA32,
#endif /* CONFIG_COMPAT_VDSO */
};

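/*
 * Page offsets within the vvar mapping; must agree with __VVAR_PAGES
 * (see the BUILD_BUG_ON() in __setup_additional_pages()).
 */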
enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};

struct vdso_abi_info {
	const char *name;
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Data Mapping */
	struct vm_special_mapping *dm;
	/* Code Mapping */
	struct vm_special_mapping *cm;
};

static struct vdso_abi_info vdso_info[] __ro_after_init = {
	[VDSO_ABI_AA64] = {
		.name = "vdso",
		.vdso_code_start = vdso_start,
		.vdso_code_end = vdso_end,
	},
#ifdef CONFIG_COMPAT_VDSO
	[VDSO_ABI_AA32] = {
		.name = "vdso32",
		.vdso_code_start = vdso32_start,
		.vdso_code_end = vdso32_end,
	},
#endif /* CONFIG_COMPAT_VDSO */
};

/*
 * The vDSO data page: an array of one struct vdso_data per clocksource
 * base, padded out to a full page and read-only for userspace.
 */
static union {
	struct vdso_data	data[CS_BASES];
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;

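/*
 * ->mremap() callback for the vDSO text: called when userspace (e.g. a
 * checkpoint/restore tool) moves the mapping. Track the new location in
 * context.vdso, but refuse any attempt to resize the mapping.
 */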
static int __vdso_remap(enum vdso_abi abi,
			const struct vm_special_mapping *sm,
			struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	unsigned long vdso_size = vdso_info[abi].vdso_code_end -
				  vdso_info[abi].vdso_code_start;

	if (vdso_size != new_size)
		return -EINVAL;

	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}

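/*
 * One-time setup for an ABI: check the ELF magic of the embedded vDSO
 * image, work out how many pages of text it spans, and build the page
 * array used by the code ("[vdso]") special mapping.
 */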
static int __vdso_init(enum vdso_abi abi)
{
	int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_info[abi].vdso_code_start, "\177ELF", 4)) {
		pr_err("vDSO is not a valid ELF object!\n");
		return -EINVAL;
	}

	vdso_info[abi].vdso_pages = (
			vdso_info[abi].vdso_code_end -
			vdso_info[abi].vdso_code_start) >>
			PAGE_SHIFT;

	vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages,
				sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		return -ENOMEM;

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_info[abi].vdso_code_start);

	for (i = 0; i < vdso_info[abi].vdso_pages; i++)
		vdso_pagelist[i] = pfn_to_page(pfn + i);

	vdso_info[abi].cm->pages = vdso_pagelist;

	return 0;
}

#ifdef CONFIG_TIME_NS
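/* Used by the generic time namespace code to find a vvar page's vdso_data. */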
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a task
 * changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;

	mmap_read_lock(mm);

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long size = vma->vm_end - vma->vm_start;

		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA64].dm))
			zap_page_range(vma, vma->vm_start, size);
#ifdef CONFIG_COMPAT_VDSO
		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA32].dm))
			zap_page_range(vma, vma->vm_start, size);
#endif
	}

	mmap_read_unlock(mm);
	return 0;
}
#endif

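/*
 * Fault handler for the data ("[vvar]") mapping: only the vdso_data page
 * (offset 0) is populated here; a fault on any other offset, including
 * VVAR_TIMENS_PAGE_OFFSET, raises SIGBUS.
 */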
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	if (vmf->pgoff == 0)
		return vmf_insert_pfn(vma, vmf->address,
				sym_to_pfn(vdso_data));
	return VM_FAULT_SIGBUS;
}

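/*
 * Map the vDSO region for a new process: VVAR_NR_PAGES of data mapping
 * first, immediately followed by the vDSO text. The data pages are
 * populated lazily through vvar_fault(); the text pages come from the
 * array built in __vdso_init(). When the CPU supports BTI, the text is
 * mapped as guarded pages via VM_ARM64_BTI.
 */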
static int __setup_additional_pages(enum vdso_abi abi,
				    struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	unsigned long gp_flags = 0;
	void *ret;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

	vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data pages */
	vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE,
				       VM_READ|VM_MAYREAD|VM_PFNMAP,
				       vdso_info[abi].dm);
	if (IS_ERR(ret))
		goto up_fail;

	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti())
		gp_flags = VM_ARM64_BTI;

	vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
	mm->context.vdso = (void *)vdso_base;
	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
				       VM_READ|VM_EXEC|gp_flags|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_info[abi].cm);
	if (IS_ERR(ret))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}

#ifdef CONFIG_COMPAT
/*
 * Create and map the pages needed by AArch32 tasks: the kuser vectors
 * page, the sigreturn page and, with CONFIG_COMPAT_VDSO, the vDSO itself.
 */
#ifdef CONFIG_COMPAT_VDSO
static int aarch32_vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	return __vdso_remap(VDSO_ABI_AA32, sm, new_vma);
}
#endif /* CONFIG_COMPAT_VDSO */

enum aarch32_map {
	AA32_MAP_VECTORS, /* kuser helpers */
#ifdef CONFIG_COMPAT_VDSO
	AA32_MAP_VVAR,
	AA32_MAP_VDSO,
#endif
	AA32_MAP_SIGPAGE
};

static struct page *aarch32_vectors_page __ro_after_init;
static struct page *aarch32_sig_page __ro_after_init;

static struct vm_special_mapping aarch32_vdso_maps[] = {
	[AA32_MAP_VECTORS] = {
		.name	= "[vectors]", /* ABI */
		.pages	= &aarch32_vectors_page,
	},
#ifdef CONFIG_COMPAT_VDSO
	[AA32_MAP_VVAR] = {
		.name = "[vvar]",
		.fault = vvar_fault,
	},
	[AA32_MAP_VDSO] = {
		.name = "[vdso]",
		.mremap = aarch32_vdso_mremap,
	},
#endif /* CONFIG_COMPAT_VDSO */
	[AA32_MAP_SIGPAGE] = {
		.name	= "[sigpage]", /* ABI */
		.pages	= &aarch32_sig_page,
	},
};

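/*
 * Allocate the global kuser helpers page and copy the helper code to its
 * end, so that once the page is mapped at AARCH32_VECTORS_BASE the
 * helpers appear at the fixed high addresses the AArch32 kuser ABI
 * expects.
 */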
static int aarch32_alloc_kuser_vdso_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
	unsigned long vdso_page;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	vdso_page = get_zeroed_page(GFP_ATOMIC);
	if (!vdso_page)
		return -ENOMEM;

	memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
	       kuser_sz);
	aarch32_vectors_page = virt_to_page(vdso_page);
	flush_dcache_page(aarch32_vectors_page);
	return 0;
}

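/*
 * Allocate the global page holding the AArch32 sigreturn trampolines;
 * each process maps it at a randomized address in
 * aarch32_sigreturn_setup().
 */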
static int aarch32_alloc_sigpage(void)
{
	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
	unsigned long sigpage;

	sigpage = get_zeroed_page(GFP_ATOMIC);
	if (!sigpage)
		return -ENOMEM;

	memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
	aarch32_sig_page = virt_to_page(sigpage);
	flush_dcache_page(aarch32_sig_page);
	return 0;
}

#ifdef CONFIG_COMPAT_VDSO
static int __aarch32_alloc_vdso_pages(void)
{
	vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
	vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA32);
}
#endif /* CONFIG_COMPAT_VDSO */

static int __init aarch32_alloc_vdso_pages(void)
{
	int ret;

#ifdef CONFIG_COMPAT_VDSO
	ret = __aarch32_alloc_vdso_pages();
	if (ret)
		return ret;
#endif

	ret = aarch32_alloc_sigpage();
	if (ret)
		return ret;

	return aarch32_alloc_kuser_vdso_page();
}
arch_initcall(aarch32_alloc_vdso_pages);

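/*
 * Map the kuser helpers page at its ABI-mandated fixed address,
 * AARCH32_VECTORS_BASE, in a new AArch32 process.
 */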
static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
	void *ret;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	/*
	 * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
	 * not safe to CoW the page containing the CPU exception vectors.
	 */
	ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_VECTORS]);

	return PTR_ERR_OR_ZERO(ret);
}

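/*
 * Map the shared sigreturn page at a randomized address and record it in
 * mm->context.sigpage for the AArch32 signal-frame setup code.
 */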
static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
	unsigned long addr;
	void *ret;

	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = ERR_PTR(addr);
		goto out;
	}

	/*
	 * VM_MAYWRITE is required to allow gdb to Copy-on-Write and
	 * set breakpoints.
	 */
	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_SIGPAGE]);
	if (IS_ERR(ret))
		goto out;

	mm->context.sigpage = (void *)addr;

out:
	return PTR_ERR_OR_ZERO(ret);
}

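/*
 * exec-time hook for AArch32 tasks: install the kuser helpers, the compat
 * vDSO (when built in) and the sigreturn page under the mmap write lock.
 */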
int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = aarch32_kuser_helpers_setup(mm);
	if (ret)
		goto out;

#ifdef CONFIG_COMPAT_VDSO
	ret = __setup_additional_pages(VDSO_ABI_AA32,
				       mm,
				       bprm,
				       uses_interp);
	if (ret)
		goto out;
#endif /* CONFIG_COMPAT_VDSO */

	ret = aarch32_sigreturn_setup(mm);
out:
	mmap_write_unlock(mm);
	return ret;
}
#endif /* CONFIG_COMPAT */

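/* ->mremap() callback for the native (AArch64) vDSO text mapping. */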
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	return __vdso_remap(VDSO_ABI_AA64, sm, new_vma);
}

enum aarch64_map {
	AA64_MAP_VVAR,
	AA64_MAP_VDSO,
};

static struct vm_special_mapping aarch64_vdso_maps[] __ro_after_init = {
	[AA64_MAP_VVAR] = {
		.name	= "[vvar]",
		.fault = vvar_fault,
	},
	[AA64_MAP_VDSO] = {
		.name	= "[vdso]",
		.mremap = vdso_mremap,
	},
};

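/*
 * Boot-time setup for the native vDSO: wire up the data and code special
 * mappings, then validate the image via __vdso_init().
 */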
static int __init vdso_init(void)
{
	vdso_info[VDSO_ABI_AA64].dm = &aarch64_vdso_maps[AA64_MAP_VVAR];
	vdso_info[VDSO_ABI_AA64].cm = &aarch64_vdso_maps[AA64_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA64);
}
arch_initcall(vdso_init);

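/*
 * Called from the ELF loader at exec time to map the native vDSO into the
 * new process, serialized against the mm by the mmap write lock.
 */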
int arch_setup_additional_pages(struct linux_binprm *bprm,
				int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(VDSO_ABI_AA64,
				       mm,
				       bprm,
				       uses_interp);

	mmap_write_unlock(mm);

	return ret;
}