// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDSO implementations.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/vdso_datastore.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>

enum vdso_abi {
	VDSO_ABI_AA64,
	VDSO_ABI_AA32,
};

struct vdso_abi_info {
	const char *name;
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Code Mapping */
	struct vm_special_mapping *cm;
};

static struct vdso_abi_info vdso_info[] __ro_after_init = {
	[VDSO_ABI_AA64] = {
		.name = "vdso",
		.vdso_code_start = vdso_start,
		.vdso_code_end = vdso_end,
	},
#ifdef CONFIG_COMPAT_VDSO
	[VDSO_ABI_AA32] = {
		.name = "vdso32",
		.vdso_code_start = vdso32_start,
		.vdso_code_end = vdso32_end,
	},
#endif /* CONFIG_COMPAT_VDSO */
};

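/*
 * Called when userspace mremap()s the vDSO VMA: cache the new base address
 * in the mm context so that signal delivery and unwinding keep pointing at
 * the relocated code.
 */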
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}

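/*
 * Validate the vDSO image and build the page array for its special mapping:
 * check the ELF magic, derive the number of text pages from the
 * linker-provided start/end markers, then translate each page of the
 * kernel-resident image into a struct page pointer.
 */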
static int __init __vdso_init(enum vdso_abi abi)
{
	int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_info[abi].vdso_code_start, "\177ELF", 4)) {
		pr_err("vDSO is not a valid ELF object!\n");
		return -EINVAL;
	}

	vdso_info[abi].vdso_pages = (
			vdso_info[abi].vdso_code_end -
			vdso_info[abi].vdso_code_start) >>
			PAGE_SHIFT;

	vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages,
				sizeof(struct page *), GFP_KERNEL);
	if (vdso_pagelist == NULL)
		return -ENOMEM;

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_info[abi].vdso_code_start);

	for (i = 0; i < vdso_info[abi].vdso_pages; i++)
		vdso_pagelist[i] = pfn_to_page(pfn + i);

	vdso_info[abi].cm->pages = vdso_pagelist;

	return 0;
}

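/*
 * Map the vDSO into a process: reserve one contiguous area for the shared
 * vvar data pages plus the vDSO text, install the data mapping via the
 * generic vDSO datastore, then install the text as a special mapping (with
 * BTI guarded-page semantics when the kernel supports it).
 */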
static int __setup_additional_pages(enum vdso_abi abi,
				    struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	unsigned long gp_flags = 0;
	void *ret;

	BUILD_BUG_ON(VDSO_NR_PAGES != __VDSO_PAGES);

	vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data page */
	vdso_mapping_len = vdso_text_len + VDSO_NR_PAGES * PAGE_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = vdso_install_vvar_mapping(mm, vdso_base);
	if (IS_ERR(ret))
		goto up_fail;

	if (system_supports_bti_kernel())
		gp_flags = VM_ARM64_BTI;

	vdso_base += VDSO_NR_PAGES * PAGE_SIZE;
	mm->context.vdso = (void *)vdso_base;
	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
				       VM_READ|VM_EXEC|gp_flags|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
				       VM_SEALED_SYSMAP,
				       vdso_info[abi].cm);
	if (IS_ERR(ret))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}

#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
enum aarch32_map {
	AA32_MAP_VECTORS, /* kuser helpers */
	AA32_MAP_SIGPAGE,
	AA32_MAP_VDSO,
};

static struct page *aarch32_vectors_page __ro_after_init;
static struct page *aarch32_sig_page __ro_after_init;

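/* As for vdso_mremap(), but tracking the compat signal return page. */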
static int aarch32_sigpage_mremap(const struct vm_special_mapping *sm,
				  struct vm_area_struct *new_vma)
{
	current->mm->context.sigpage = (void *)new_vma->vm_start;

	return 0;
}

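/*
 * The "[vectors]" and "[sigpage]" names are user-visible: they appear in
 * /proc/<pid>/maps and are treated as ABI, so they must not change.
 */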
static struct vm_special_mapping aarch32_vdso_maps[] = {
	[AA32_MAP_VECTORS] = {
		.name	= "[vectors]", /* ABI */
		.pages	= &aarch32_vectors_page,
	},
	[AA32_MAP_SIGPAGE] = {
		.name	= "[sigpage]", /* ABI */
		.pages	= &aarch32_sig_page,
		.mremap	= aarch32_sigpage_mremap,
	},
	[AA32_MAP_VDSO] = {
		.name = "[vdso]",
		.mremap = vdso_mremap,
	},
};

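/*
 * Allocate the kuser helpers page. The helpers are copied so that they end
 * at offset 0x1000 in the page, which places them at the ABI-fixed entry
 * points just below 0xffff1000 once the page is mapped at
 * AARCH32_VECTORS_BASE.
 */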
static int aarch32_alloc_kuser_vdso_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
	unsigned long vdso_page;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	vdso_page = get_zeroed_page(GFP_KERNEL);
	if (!vdso_page)
		return -ENOMEM;

	memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
	       kuser_sz);
	aarch32_vectors_page = virt_to_page((void *)vdso_page);
	return 0;
}

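/*
 * 0xe7fddef1 is a permanently-undefined A32 instruction encoding: filling
 * the unused part of the sigpage with it turns any stray execution there
 * into an immediate fault rather than silent misbehaviour.
 */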
#define COMPAT_SIGPAGE_POISON_WORD	0xe7fddef1
static int aarch32_alloc_sigpage(void)
{
	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
	__le32 poison = cpu_to_le32(COMPAT_SIGPAGE_POISON_WORD);
	void *sigpage;

	sigpage = (void *)__get_free_page(GFP_KERNEL);
	if (!sigpage)
		return -ENOMEM;

	memset32(sigpage, (__force u32)poison, PAGE_SIZE / sizeof(poison));
	memcpy(sigpage, __aarch32_sigret_code_start, sigret_sz);
	aarch32_sig_page = virt_to_page(sigpage);
	return 0;
}

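/*
 * Boot-time allocation of the compat pages. The compat vDSO is only
 * initialised when CONFIG_COMPAT_VDSO is enabled; the sigpage is always set
 * up, and the kuser helpers page when CONFIG_KUSER_HELPERS is enabled.
 */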
static int __init __aarch32_alloc_vdso_pages(void)
{
	if (!IS_ENABLED(CONFIG_COMPAT_VDSO))
		return 0;

	vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA32);
}

static int __init aarch32_alloc_vdso_pages(void)
{
	int ret;

	ret = __aarch32_alloc_vdso_pages();
	if (ret)
		return ret;

	ret = aarch32_alloc_sigpage();
	if (ret)
		return ret;

	return aarch32_alloc_kuser_vdso_page();
}
arch_initcall(aarch32_alloc_vdso_pages);

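/*
 * Map the kuser helpers page at AARCH32_VECTORS_BASE (0xffff0000), the
 * fixed address the 32-bit ABI expects for the vectors page.
 */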
static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
	void *ret;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	/*
	 * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
	 * not safe to CoW the page containing the CPU exception vectors.
	 */
	ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYEXEC |
				       VM_SEALED_SYSMAP,
				       &aarch32_vdso_maps[AA32_MAP_VECTORS]);

	return PTR_ERR_OR_ZERO(ret);
}

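/*
 * Map the signal return trampoline page at an mmap-chosen address and
 * remember it so compat signal frames can point their return address at
 * the sigreturn code.
 */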
static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
	unsigned long addr;
	void *ret;

	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = ERR_PTR(addr);
		goto out;
	}

	/*
	 * VM_MAYWRITE is required to allow gdb to Copy-on-Write and
	 * set breakpoints.
	 */
	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC |
				       VM_SEALED_SYSMAP,
				       &aarch32_vdso_maps[AA32_MAP_SIGPAGE]);
	if (IS_ERR(ret))
		goto out;

	mm->context.sigpage = (void *)addr;

out:
	return PTR_ERR_OR_ZERO(ret);
}

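/*
 * exec-time hook for compat tasks: under the mmap write lock, install the
 * kuser helpers page, the compat vDSO (if built) and the sigpage into the
 * new mm.
 */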
int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = aarch32_kuser_helpers_setup(mm);
	if (ret)
		goto out;

	if (IS_ENABLED(CONFIG_COMPAT_VDSO)) {
		ret = __setup_additional_pages(VDSO_ABI_AA32, mm, bprm,
					       uses_interp);
		if (ret)
			goto out;
	}

	ret = aarch32_sigreturn_setup(mm);
out:
	mmap_write_unlock(mm);
	return ret;
}
#endif /* CONFIG_COMPAT */

static struct vm_special_mapping aarch64_vdso_map __ro_after_init = {
	.name	= "[vdso]",
	.mremap = vdso_mremap,
};

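/* Boot-time initialisation of the native (AArch64) vDSO pages. */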
static int __init vdso_init(void)
{
	vdso_info[VDSO_ABI_AA64].cm = &aarch64_vdso_map;

	return __vdso_init(VDSO_ABI_AA64);
}
arch_initcall(vdso_init);

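/*
 * Arch hook called from the ELF loader at exec time: map the native vDSO
 * into the new process under the mmap write lock.
 */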
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(VDSO_ABI_AA64, mm, bprm, uses_interp);
	mmap_write_unlock(mm);

	return ret;
}