xref: /linux/arch/loongarch/kernel/vdso.c (revision 5ea5880764cbb164afb17a62e76ca75dc371409d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Author: Huacai Chen <chenhuacai@loongson.cn>
4  * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
5  */
6 
7 #include <linux/binfmts.h>
8 #include <linux/elf.h>
9 #include <linux/err.h>
10 #include <linux/init.h>
11 #include <linux/ioport.h>
12 #include <linux/kernel.h>
13 #include <linux/mm.h>
14 #include <linux/random.h>
15 #include <linux/sched.h>
16 #include <linux/slab.h>
17 #include <linux/vdso_datastore.h>
18 
19 #include <asm/page.h>
20 #include <asm/vdso.h>
21 #include <asm/vdso/vdso.h>
22 #include <vdso/helpers.h>
23 #include <vdso/vsyscall.h>
24 #include <vdso/datapage.h>
25 #include <generated/vdso-offsets.h>
26 
27 extern char vdso_start[], vdso_end[];
28 
29 static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
30 {
31 	current->mm->context.vdso = (void *)(new_vma->vm_start);
32 
33 	return 0;
34 }
35 
/*
 * Per-arch description of the vDSO image: the linked-in code blob, the
 * special mapping installed into each process, and the offset of the
 * sigreturn trampoline (generated into vdso-offsets.h at build time).
 */
struct loongarch_vdso_info vdso_info = {
	.vdso = vdso_start,
	.code_mapping = {
		.name = "[vdso]",
		.mremap = vdso_mremap,	/* keep mm->context.vdso valid across mremap() */
	},
	.offset_sigreturn = vdso_offset_sigreturn,
};
44 
45 static int __init init_vdso(void)
46 {
47 	unsigned long i, cpu, pfn;
48 
49 	BUG_ON(!PAGE_ALIGNED(vdso_info.vdso));
50 
51 	for_each_possible_cpu(cpu)
52 		vdso_k_arch_data->pdata[cpu].node = cpu_to_node(cpu);
53 
54 	vdso_info.size = PAGE_ALIGN(vdso_end - vdso_start);
55 	vdso_info.code_mapping.pages =
56 		kzalloc_objs(struct page *, vdso_info.size / PAGE_SIZE);
57 
58 	if (!vdso_info.code_mapping.pages)
59 		return -ENOMEM;
60 
61 	pfn = __phys_to_pfn(__pa_symbol(vdso_info.vdso));
62 	for (i = 0; i < vdso_info.size / PAGE_SIZE; i++)
63 		vdso_info.code_mapping.pages[i] = pfn_to_page(pfn + i);
64 
65 	return 0;
66 }
67 subsys_initcall(init_vdso);
68 
69 static unsigned long vdso_base(void)
70 {
71 	unsigned long base = STACK_TOP;
72 
73 	if (current->flags & PF_RANDOMIZE) {
74 		base += get_random_u32_below(VDSO_RANDOMIZE_SIZE);
75 		base = PAGE_ALIGN(base);
76 	}
77 
78 	return base;
79 }
80 
/*
 * Map the vDSO into a new process image at exec time.
 *
 * Layout (low to high): [vvar/data pages (VVAR_SIZE)][vDSO code pages].
 * On success, mm->context.vdso caches the code base for signal setup.
 * Returns 0 on success or a negative errno.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	int ret;
	unsigned long size, data_addr, vdso_addr;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct loongarch_vdso_info *info = current->thread.vdso;

	/* Killable: exec can be interrupted by a fatal signal here. */
	if (mmap_write_lock_killable(mm))
		return -EINTR;

	/*
	 * Determine total area size. This includes the VDSO data itself
	 * and the data pages.
	 */
	size = VVAR_SIZE + info->size;

	/* Randomized hint from vdso_base(); the data pages come first. */
	data_addr = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
	if (IS_ERR_VALUE(data_addr)) {
		ret = data_addr;
		goto out;
	}

	vma = vdso_install_vvar_mapping(mm, data_addr);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* Code pages follow immediately after the vvar area. */
	vdso_addr = data_addr + VVAR_SIZE;
	vma = _install_special_mapping(mm, vdso_addr, info->size,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC |
				       VM_SEALED_SYSMAP,
				       &info->code_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	mm->context.vdso = (void *)vdso_addr;
	ret = 0;

out:
	/* NOTE(review): on mid-sequence failure the already-installed vvar
	 * mapping is left in place; presumably torn down with the failed
	 * exec's mm — confirm against the exec error path. */
	mmap_write_unlock(mm);
	return ret;
}
128