xref: /linux/arch/arm64/kernel/vdso.c (revision 3a39d672e7f48b8d6b91a09afa4b55352773b4b5)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDSO implementations.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/time_namespace.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>

enum vdso_abi {
	VDSO_ABI_AA64,
	VDSO_ABI_AA32,
};

struct vdso_abi_info {
	const char *name;
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Data Mapping */
	struct vm_special_mapping *dm;
	/* Code Mapping */
	struct vm_special_mapping *cm;
};

static struct vdso_abi_info vdso_info[] __ro_after_init = {
	[VDSO_ABI_AA64] = {
		.name = "vdso",
		.vdso_code_start = vdso_start,
		.vdso_code_end = vdso_end,
	},
#ifdef CONFIG_COMPAT_VDSO
	[VDSO_ABI_AA32] = {
		.name = "vdso32",
		.vdso_code_start = vdso32_start,
		.vdso_code_end = vdso32_end,
	},
#endif /* CONFIG_COMPAT_VDSO */
};

/*
 * The vDSO data page.
 */
static union vdso_data_store vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;

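/*
 * Keep the mm's cached vDSO base up to date when userspace moves the
 * mapping with mremap(), so the kernel (e.g. the signal trampoline
 * lookup) still finds the vDSO at its new address.
 */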
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}

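/*
 * Check that the vDSO image built into the kernel is a valid ELF and
 * record its pages for the code mapping. Runs once per ABI at boot.
 */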
static int __init __vdso_init(enum vdso_abi abi)
{
	int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_info[abi].vdso_code_start, "\177ELF", 4)) {
		pr_err("vDSO is not a valid ELF object!\n");
		return -EINVAL;
	}

	vdso_info[abi].vdso_pages = (
			vdso_info[abi].vdso_code_end -
			vdso_info[abi].vdso_code_start) >>
			PAGE_SHIFT;

	vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages,
				sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		return -ENOMEM;

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_info[abi].vdso_code_start);

	for (i = 0; i < vdso_info[abi].vdso_pages; i++)
		vdso_pagelist[i] = pfn_to_page(pfn + i);

	vdso_info[abi].cm->pages = vdso_pagelist;

	return 0;
}

#ifdef CONFIG_TIME_NS
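/*
 * A vvar page holds a struct vdso_data; hand the generic time
 * namespace code a correctly typed pointer to it.
 */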
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a task
 * changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);

	for_each_vma(vmi, vma) {
		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA64].dm))
			zap_vma_pages(vma);
#ifdef CONFIG_COMPAT_VDSO
		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA32].dm))
			zap_vma_pages(vma);
#endif
	}

	mmap_read_unlock(mm);
	return 0;
}
#endif

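/*
 * Fault handler for the vvar mapping. Resolves each page offset to a
 * physical page, taking any time namespace into account: for a task in
 * a time namespace the data page and the timens page trade offsets, as
 * described in the CONFIG_TIME_NS case below.
 */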
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long pfn;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		if (timens_page)
			pfn = page_to_pfn(timens_page);
		else
			pfn = sym_to_pfn(vdso_data);
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
		 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = sym_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

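/*
 * Lay out the vvar pages directly followed by the vDSO code in a new
 * address space:
 *
 *   vdso_base                               vvar data (VM_PFNMAP, faulted in)
 *   vdso_base + VVAR_NR_PAGES * PAGE_SIZE   vDSO code (read/exec, mremap-able)
 *
 * mm->context.vdso records the start of the code mapping.
 */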
static int __setup_additional_pages(enum vdso_abi abi,
				    struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	unsigned long gp_flags = 0;
	void *ret;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

	vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data page */
	vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE,
				       VM_READ|VM_MAYREAD|VM_PFNMAP,
				       vdso_info[abi].dm);
	if (IS_ERR(ret))
		goto up_fail;

	if (system_supports_bti_kernel())
		gp_flags = VM_ARM64_BTI;

	vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
	mm->context.vdso = (void *)vdso_base;
	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
				       VM_READ|VM_EXEC|gp_flags|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_info[abi].cm);
	if (IS_ERR(ret))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}

#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
enum aarch32_map {
	AA32_MAP_VECTORS, /* kuser helpers */
	AA32_MAP_SIGPAGE,
	AA32_MAP_VVAR,
	AA32_MAP_VDSO,
};

static struct page *aarch32_vectors_page __ro_after_init;
static struct page *aarch32_sig_page __ro_after_init;

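/*
 * Track the compat sigpage across mremap() so signal return still
 * finds the trampoline.
 */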
static int aarch32_sigpage_mremap(const struct vm_special_mapping *sm,
				  struct vm_area_struct *new_vma)
{
	current->mm->context.sigpage = (void *)new_vma->vm_start;

	return 0;
}

static struct vm_special_mapping aarch32_vdso_maps[] = {
	[AA32_MAP_VECTORS] = {
		.name	= "[vectors]", /* ABI */
		.pages	= &aarch32_vectors_page,
	},
	[AA32_MAP_SIGPAGE] = {
		.name	= "[sigpage]", /* ABI */
		.pages	= &aarch32_sig_page,
		.mremap	= aarch32_sigpage_mremap,
	},
	[AA32_MAP_VVAR] = {
		.name = "[vvar]",
		.fault = vvar_fault,
	},
	[AA32_MAP_VDSO] = {
		.name = "[vdso]",
		.mremap = vdso_mremap,
	},
};

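/*
 * Allocate the AArch32 vectors page and copy the kuser helpers into
 * its top, preserving the fixed addresses that AArch32 userspace
 * expects from arch/arm/.
 */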
static int aarch32_alloc_kuser_vdso_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
	unsigned long vdso_page;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	vdso_page = get_zeroed_page(GFP_KERNEL);
	if (!vdso_page)
		return -ENOMEM;

	memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
	       kuser_sz);
	aarch32_vectors_page = virt_to_page((void *)vdso_page);
	return 0;
}

#define COMPAT_SIGPAGE_POISON_WORD	0xe7fddef1
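/*
 * Allocate the compat sigpage: fill it with the poison word, then put
 * the sigreturn trampolines at the start of the page.
 */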
static int aarch32_alloc_sigpage(void)
{
	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
	__le32 poison = cpu_to_le32(COMPAT_SIGPAGE_POISON_WORD);
	void *sigpage;

	sigpage = (void *)__get_free_page(GFP_KERNEL);
	if (!sigpage)
		return -ENOMEM;

	memset32(sigpage, (__force u32)poison, PAGE_SIZE / sizeof(poison));
	memcpy(sigpage, __aarch32_sigret_code_start, sigret_sz);
	aarch32_sig_page = virt_to_page(sigpage);
	return 0;
}

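/* Wire up the compat vvar/vDSO mappings and initialise the vDSO image. */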
static int __init __aarch32_alloc_vdso_pages(void)
{
	if (!IS_ENABLED(CONFIG_COMPAT_VDSO))
		return 0;

	vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
	vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA32);
}

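/* Boot-time allocation of the compat vDSO, sigpage and vectors page. */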
static int __init aarch32_alloc_vdso_pages(void)
{
	int ret;

	ret = __aarch32_alloc_vdso_pages();
	if (ret)
		return ret;

	ret = aarch32_alloc_sigpage();
	if (ret)
		return ret;

	return aarch32_alloc_kuser_vdso_page();
}
arch_initcall(aarch32_alloc_vdso_pages);

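/* Map the vectors page at its fixed address, AARCH32_VECTORS_BASE. */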
static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
	void *ret;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	/*
	 * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
	 * not safe to CoW the page containing the CPU exception vectors.
	 */
	ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_VECTORS]);

	return PTR_ERR_OR_ZERO(ret);
}

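/*
 * Map the sigpage wherever get_unmapped_area() finds room and record
 * its location for compat signal delivery.
 */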
static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
	unsigned long addr;
	void *ret;

	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = ERR_PTR(addr);
		goto out;
	}

	/*
	 * VM_MAYWRITE is required to allow gdb to Copy-on-Write and
	 * set breakpoints.
	 */
	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_SIGPAGE]);
	if (IS_ERR(ret))
		goto out;

	mm->context.sigpage = (void *)addr;

out:
	return PTR_ERR_OR_ZERO(ret);
}

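/*
 * Called at exec time for compat tasks: install the kuser helpers, the
 * compat vDSO (if enabled) and the sigpage under the mmap write lock.
 */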
int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = aarch32_kuser_helpers_setup(mm);
	if (ret)
		goto out;

	if (IS_ENABLED(CONFIG_COMPAT_VDSO)) {
		ret = __setup_additional_pages(VDSO_ABI_AA32, mm, bprm,
					       uses_interp);
		if (ret)
			goto out;
	}

	ret = aarch32_sigreturn_setup(mm);
out:
	mmap_write_unlock(mm);
	return ret;
}
#endif /* CONFIG_COMPAT */

enum aarch64_map {
	AA64_MAP_VVAR,
	AA64_MAP_VDSO,
};

static struct vm_special_mapping aarch64_vdso_maps[] __ro_after_init = {
	[AA64_MAP_VVAR] = {
		.name	= "[vvar]",
		.fault = vvar_fault,
	},
	[AA64_MAP_VDSO] = {
		.name	= "[vdso]",
		.mremap = vdso_mremap,
	},
};

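/* Wire up the native (AArch64) vvar/vDSO mappings at boot. */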
static int __init vdso_init(void)
{
	vdso_info[VDSO_ABI_AA64].dm = &aarch64_vdso_maps[AA64_MAP_VVAR];
	vdso_info[VDSO_ABI_AA64].cm = &aarch64_vdso_maps[AA64_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA64);
}
arch_initcall(vdso_init);

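/*
 * Called from the ELF loader at exec time to map the native vDSO into
 * the new address space.
 */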
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(VDSO_ABI_AA64, mm, bprm, uses_interp);
	mmap_write_unlock(mm);

	return ret;
}