arch/arm64/kernel/vdso.c (revision 87c9c16317882dd6dbbc07e349bc3223e14f3244)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDSO implementations.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/time_namespace.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>

extern char vdso_start[], vdso_end[];
extern char vdso32_start[], vdso32_end[];

enum vdso_abi {
	VDSO_ABI_AA64,
	VDSO_ABI_AA32,
};

enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};
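
/*
 * Note: the vvar region therefore spans two pages. Page 0 holds the
 * shared vdso_data; page 1 backs the real vdso_data when the task is
 * in a time namespace (see vvar_fault() below). The BUILD_BUG_ON() in
 * __setup_additional_pages() keeps this enum in sync with the
 * __VVAR_PAGES constant expected from <asm/vdso.h>.
 */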

struct vdso_abi_info {
	const char *name;
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Data Mapping */
	struct vm_special_mapping *dm;
	/* Code Mapping */
	struct vm_special_mapping *cm;
};

static struct vdso_abi_info vdso_info[] __ro_after_init = {
	[VDSO_ABI_AA64] = {
		.name = "vdso",
		.vdso_code_start = vdso_start,
		.vdso_code_end = vdso_end,
	},
#ifdef CONFIG_COMPAT_VDSO
	[VDSO_ABI_AA32] = {
		.name = "vdso32",
		.vdso_code_start = vdso32_start,
		.vdso_code_end = vdso32_end,
	},
#endif /* CONFIG_COMPAT_VDSO */
};

/*
 * The vDSO data page.
 */
static union {
	struct vdso_data	data[CS_BASES];
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;
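
/*
 * Note: CS_BASES (from <vdso/datapage.h>) counts the clocksource bases
 * (high-resolution/coarse and raw), and the union pads the vdso_data
 * array out to exactly one page so it can be mapped read-only into
 * every user address space by vvar_fault() below.
 */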

static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}
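
/*
 * Note: by the time this callback runs, the special-mapping core has
 * already moved the VMA and its PTEs; all that is left is to record
 * the new base so that signal delivery can still locate the sigreturn
 * trampoline in the vDSO text via mm->context.vdso.
 */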

static int __init __vdso_init(enum vdso_abi abi)
{
	int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_info[abi].vdso_code_start, "\177ELF", 4)) {
		pr_err("vDSO is not a valid ELF object!\n");
		return -EINVAL;
	}

	vdso_info[abi].vdso_pages = (
			vdso_info[abi].vdso_code_end -
			vdso_info[abi].vdso_code_start) >>
			PAGE_SHIFT;

	vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages,
				sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		return -ENOMEM;

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_info[abi].vdso_code_start);

	for (i = 0; i < vdso_info[abi].vdso_pages; i++)
		vdso_pagelist[i] = pfn_to_page(pfn + i);

	vdso_info[abi].cm->pages = vdso_pagelist;

	return 0;
}
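
/*
 * Note: sym_to_pfn() resolves a kernel-image symbol to its physical
 * frame number (roughly __pa_symbol(x) >> PAGE_SHIFT), so the loop
 * above merely collects the struct pages that already back the vDSO
 * image linked into the kernel; no new memory is allocated for the
 * code pages, only for the page list itself.
 */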

#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a task
 * changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;

	mmap_read_lock(mm);

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long size = vma->vm_end - vma->vm_start;

		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA64].dm))
			zap_page_range(vma, vma->vm_start, size);
#ifdef CONFIG_COMPAT_VDSO
		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA32].dm))
			zap_page_range(vma, vma->vm_start, size);
#endif
	}

	mmap_read_unlock(mm);
	return 0;
}
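
/*
 * Note: zap_page_range() above only clears the PTEs; the [vvar] VMA
 * itself stays in place. The next user access to the region takes a
 * fresh fault, and vvar_fault() below then maps in the page that
 * matches the task's new time namespace.
 */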

static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	if (likely(vma->vm_mm == current->mm))
		return current->nsproxy->time_ns->vvar_page;

	/*
	 * VM_PFNMAP | VM_IO protect the .fault() handler from being called
	 * through interfaces like /proc/$pid/mem or
	 * process_vm_{readv,writev}() as long as there's no .access()
	 * in special_mapping_vmops.
	 * For more details, see check_vma_flags() and __access_remote_vm().
	 */
	WARN(1, "vvar_page accessed remotely");

	return NULL;
}
#else
static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	return NULL;
}
#endif

static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long pfn;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		if (timens_page)
			pfn = page_to_pfn(timens_page);
		else
			pfn = sym_to_pfn(vdso_data);
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a
		 * namespace-specific VVAR page is mapped at the
		 * VVAR_DATA_PAGE_OFFSET and the real VVAR page is mapped
		 * at the VVAR_TIMENS_PAGE_OFFSET.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = sym_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}

	return vmf_insert_pfn(vma, vmf->address, pfn);
}
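
/*
 * Resulting [vvar] layout, as a sketch:
 *
 *			pgoff 0			pgoff 1
 *	no timens:	real vdso_data		SIGBUS
 *	in timens:	namespace data page	real vdso_data
 *
 * Inside a time namespace, the page at pgoff 0 carries a marker that
 * tells the vDSO fast path to apply the namespace offsets against the
 * real data at pgoff 1 (see timens_setup_vdso_data() for the details).
 */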

static int __setup_additional_pages(enum vdso_abi abi,
				    struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	unsigned long gp_flags = 0;
	void *ret;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

	vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
	/* Be sure to map the vvar data pages as well */
	vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE,
				       VM_READ|VM_MAYREAD|VM_PFNMAP,
				       vdso_info[abi].dm);
	if (IS_ERR(ret))
		goto up_fail;

	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti())
		gp_flags = VM_ARM64_BTI;

	vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
	mm->context.vdso = (void *)vdso_base;
	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
				       VM_READ|VM_EXEC|gp_flags|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_info[abi].cm);
	if (IS_ERR(ret))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}
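
/*
 * The layout installed above, as a sketch:
 *
 *	vdso_base:			[vvar], VVAR_NR_PAGES pages,
 *					read-only, PFN-mapped on fault
 *	vdso_base + VVAR_NR_PAGES *
 *	PAGE_SIZE:			[vdso], vdso_text_len bytes of
 *					code, read/exec (plus BTI guarded
 *					pages when supported)
 *
 * Note that mm->context.vdso records the start of the text, not of
 * the whole region.
 */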

#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
enum aarch32_map {
	AA32_MAP_VECTORS, /* kuser helpers */
	AA32_MAP_SIGPAGE,
	AA32_MAP_VVAR,
	AA32_MAP_VDSO,
};

static struct page *aarch32_vectors_page __ro_after_init;
static struct page *aarch32_sig_page __ro_after_init;

static int aarch32_sigpage_mremap(const struct vm_special_mapping *sm,
				  struct vm_area_struct *new_vma)
{
	current->mm->context.sigpage = (void *)new_vma->vm_start;

	return 0;
}

static struct vm_special_mapping aarch32_vdso_maps[] = {
	[AA32_MAP_VECTORS] = {
		.name	= "[vectors]", /* ABI */
		.pages	= &aarch32_vectors_page,
	},
	[AA32_MAP_SIGPAGE] = {
		.name	= "[sigpage]", /* ABI */
		.pages	= &aarch32_sig_page,
		.mremap	= aarch32_sigpage_mremap,
	},
	[AA32_MAP_VVAR] = {
		.name = "[vvar]",
		.fault = vvar_fault,
	},
	[AA32_MAP_VDSO] = {
		.name = "[vdso]",
		.mremap = vdso_mremap,
	},
};

static int aarch32_alloc_kuser_vdso_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
	unsigned long vdso_page;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	vdso_page = get_zeroed_page(GFP_KERNEL);
	if (!vdso_page)
		return -ENOMEM;

	memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
	       kuser_sz);
	aarch32_vectors_page = virt_to_page(vdso_page);
	return 0;
}
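
/*
 * Note: the helpers are copied to the *top* of the page because the
 * AArch32 kuser ABI fixes their entry points just below the end of the
 * vectors page, e.g. __kuser_get_tls at 0xffff0fe0 (see
 * Documentation/arm/kernel_user_helpers.rst). The hard-coded 0x1000
 * reflects that fixed 4K ABI page rather than the kernel's PAGE_SIZE.
 */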

#define COMPAT_SIGPAGE_POISON_WORD	0xe7fddef1
static int aarch32_alloc_sigpage(void)
{
	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
	__le32 poison = cpu_to_le32(COMPAT_SIGPAGE_POISON_WORD);
	void *sigpage;

	sigpage = (void *)__get_free_page(GFP_KERNEL);
	if (!sigpage)
		return -ENOMEM;

	memset32(sigpage, (__force u32)poison, PAGE_SIZE / sizeof(poison));
	memcpy(sigpage, __aarch32_sigret_code_start, sigret_sz);
	aarch32_sig_page = virt_to_page(sigpage);
	return 0;
}
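
/*
 * Note: the poison word fills the rest of the sigpage so that a stray
 * branch past the sigreturn code traps cleanly; 0xe7fddef1 lies in the
 * A32 permanently-undefined encoding space, so executing it raises
 * SIGILL rather than running leftover page contents.
 */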

static int __init __aarch32_alloc_vdso_pages(void)
{
	if (!IS_ENABLED(CONFIG_COMPAT_VDSO))
		return 0;

	vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
	vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA32);
}

static int __init aarch32_alloc_vdso_pages(void)
{
	int ret;

	ret = __aarch32_alloc_vdso_pages();
	if (ret)
		return ret;

	ret = aarch32_alloc_sigpage();
	if (ret)
		return ret;

	return aarch32_alloc_kuser_vdso_page();
}
arch_initcall(aarch32_alloc_vdso_pages);

static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
	void *ret;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	/*
	 * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
	 * not safe to CoW the page containing the CPU exception vectors.
	 */
	ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_VECTORS]);

	return PTR_ERR_OR_ZERO(ret);
}

static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
	unsigned long addr;
	void *ret;

	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = ERR_PTR(addr);
		goto out;
	}

	/*
	 * VM_MAYWRITE is required to allow gdb to copy-on-write the
	 * page and set breakpoints.
	 */
	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_SIGPAGE]);
	if (IS_ERR(ret))
		goto out;

	mm->context.sigpage = (void *)addr;

out:
	return PTR_ERR_OR_ZERO(ret);
}

int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = aarch32_kuser_helpers_setup(mm);
	if (ret)
		goto out;

	if (IS_ENABLED(CONFIG_COMPAT_VDSO)) {
		ret = __setup_additional_pages(VDSO_ABI_AA32, mm, bprm,
					       uses_interp);
		if (ret)
			goto out;
	}

	ret = aarch32_sigreturn_setup(mm);
out:
	mmap_write_unlock(mm);
	return ret;
}
#endif /* CONFIG_COMPAT */

enum aarch64_map {
	AA64_MAP_VVAR,
	AA64_MAP_VDSO,
};

static struct vm_special_mapping aarch64_vdso_maps[] __ro_after_init = {
	[AA64_MAP_VVAR] = {
		.name	= "[vvar]",
		.fault = vvar_fault,
	},
	[AA64_MAP_VDSO] = {
		.name	= "[vdso]",
		.mremap = vdso_mremap,
	},
};

static int __init vdso_init(void)
{
	vdso_info[VDSO_ABI_AA64].dm = &aarch64_vdso_maps[AA64_MAP_VVAR];
	vdso_info[VDSO_ABI_AA64].cm = &aarch64_vdso_maps[AA64_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA64);
}
arch_initcall(vdso_init);

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(VDSO_ABI_AA64, mm, bprm, uses_interp);
	mmap_write_unlock(mm);

	return ret;
}
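
/*
 * For context: the [vdso] base recorded in mm->context.vdso is handed
 * to user space through the AT_SYSINFO_EHDR auxv entry by the generic
 * ELF loader. An illustrative user-space sketch (not part of this
 * file) for locating the mapping:
 *
 *	#include <sys/auxv.h>
 *
 *	void *vdso = (void *)getauxval(AT_SYSINFO_EHDR);
 *	// vdso now points at the ELF header of the [vdso] mapping,
 *	// which the C library parses to find __kernel_clock_gettime etc.
 */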