// SPDX-License-Identifier: GPL-2.0-only
/*
 * vDSO implementation for Hexagon
 *
 * Copyright (c) 2011, The Linux Foundation. All rights reserved.
 */

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/binfmts.h>

#include <asm/elf.h>
#include <asm/vdso.h>

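/*
 * The single physical page backing the vDSO; allocated once at boot and
 * mapped into each user process by arch_setup_additional_pages().
 */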
static struct page *vdso_page;

/*
 * Create a vDSO page holding the signal trampoline.
 * We want this for a non-executable stack.
 */
static int __init vdso_init(void)
{
	struct hexagon_vdso *vdso;

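	/*
	 * A single page is enough; it only needs to hold the signal
	 * trampoline.  This runs once during boot, so an allocation
	 * failure here is treated as fatal.
	 */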
	vdso_page = alloc_page(GFP_KERNEL);
	if (!vdso_page)
		panic("Cannot allocate vdso");

	vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL);
	if (!vdso)
		panic("Cannot map vdso");
	clear_page(vdso);

	/*
	 * Install the signal trampoline; currently looks like this:
	 *	r6 = #__NR_rt_sigreturn;
	 *	trap0(#1);
	 */
	vdso->rt_signal_trampoline[0] = __rt_sigtramp_template[0];
	vdso->rt_signal_trampoline[1] = __rt_sigtramp_template[1];

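	/*
	 * The temporary kernel mapping was only needed to write the
	 * trampoline; the page itself stays allocated so it can be mapped
	 * into user processes later.
	 */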
	vunmap(vdso);

	return 0;
}
arch_initcall(vdso_init);

/*
 * Called from binfmt_elf.  Create a VMA for the vDSO page.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	int ret;
	unsigned long vdso_base;
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	static struct vm_special_mapping vdso_mapping = {
		.name = "[vdso]",
	};

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	/* Try to get it loaded right near ld.so/glibc. */
	vdso_base = STACK_TOP;

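	/*
	 * STACK_TOP is only a hint; get_unmapped_area() picks the actual
	 * address and returns an error value if no suitable hole exists.
	 */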
	vdso_base = get_unmapped_area(NULL, vdso_base, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = vdso_base;
		goto up_fail;
	}

	/* MAYWRITE to allow gdb to COW and set breakpoints. */
	vdso_mapping.pages = &vdso_page;
	vma = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
				      VM_READ|VM_EXEC|
				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				      &vdso_mapping);

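	/*
	 * _install_special_mapping() returns the new VMA or an ERR_PTR();
	 * the .name set above is what identifies this area in
	 * /proc/<pid>/maps.
	 */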
	ret = PTR_ERR(vma);
	if (IS_ERR(vma))
		goto up_fail;

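	/*
	 * Record where the vDSO landed so that signal delivery can locate
	 * the trampoline and arch_vma_name() below can label the mapping.
	 */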
	mm->context.vdso = (void *)vdso_base;
	ret = 0;
up_fail:
	mmap_write_unlock(mm);
	return ret;
}

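/*
 * Name the vDSO VMA for interfaces that consult arch_vma_name(); the area
 * is identified by the address recorded in mm->context.vdso.
 */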
const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
		return "[vdso]";
	return NULL;
}