// SPDX-License-Identifier: GPL-2.0-only
/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 */

/*
 * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
 */

#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/cacheflush.h>
#include <asm/spitfire.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>

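/*
 * Cleared by the "vdso=" boot parameter (see vdso_setup() at the bottom of
 * this file) or on allocation failure; when zero, no vDSO is mapped into
 * new processes.
 */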
unsigned int __read_mostly vdso_enabled = 1;

static struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]"
};

#ifdef CONFIG_SPARC64
static struct vm_special_mapping vdso_mapping64 = {
	.name = "[vdso]"
};
#endif

#ifdef CONFIG_COMPAT
static struct vm_special_mapping vdso_mapping32 = {
	.name = "[vdso]"
};
#endif

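/*
 * Kernel-side pointer to the shared vvar page.  The kernel's timekeeping
 * update path writes it, and user space reads it through the [vvar]
 * mapping installed by map_vdso() below.
 */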
struct vvar_data *vvar_data;

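/*
 * Locations of the pieces of the builtin vDSO images that stick_patch()
 * needs, in 32-bit and 64-bit ELF flavours.
 */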
struct vdso_elfinfo32 {
	Elf32_Ehdr	*hdr;
	Elf32_Sym	*dynsym;
	unsigned long	dynsymsize;
	const char	*dynstr;
	unsigned long	text;
};

struct vdso_elfinfo64 {
	Elf64_Ehdr	*hdr;
	Elf64_Sym	*dynsym;
	unsigned long	dynsymsize;
	const char	*dynstr;
	unsigned long	text;
};

struct vdso_elfinfo {
	union {
		struct vdso_elfinfo32 elf32;
		struct vdso_elfinfo64 elf64;
	} u;
};

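/*
 * Walk the section header table of the image and return a pointer to the
 * named section, optionally reporting its size.
 */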
static void *one_section64(struct vdso_elfinfo64 *e, const char *name,
			   unsigned long *size)
{
	const char *snames;
	Elf64_Shdr *shdrs;
	unsigned int i;

	shdrs = (void *)e->hdr + e->hdr->e_shoff;
	snames = (void *)e->hdr + shdrs[e->hdr->e_shstrndx].sh_offset;
	for (i = 1; i < e->hdr->e_shnum; i++) {
		if (!strcmp(snames + shdrs[i].sh_name, name)) {
			if (size)
				*size = shdrs[i].sh_size;
			return (void *)e->hdr + shdrs[i].sh_offset;
		}
	}
	return NULL;
}

static int find_sections64(const struct vdso_image *image, struct vdso_elfinfo *_e)
{
	struct vdso_elfinfo64 *e = &_e->u.elf64;

	e->hdr = image->data;
	e->dynsym = one_section64(e, ".dynsym", &e->dynsymsize);
	e->dynstr = one_section64(e, ".dynstr", NULL);

	if (!e->dynsym || !e->dynstr) {
		pr_err("VDSO64: Missing symbol sections.\n");
		return -ENODEV;
	}
	return 0;
}

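/* Linearly scan .dynsym for the named symbol. */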
static Elf64_Sym *find_sym64(const struct vdso_elfinfo64 *e, const char *name)
{
	unsigned int i;

	for (i = 0; i < (e->dynsymsize / sizeof(Elf64_Sym)); i++) {
		Elf64_Sym *s = &e->dynsym[i];

		if (s->st_name == 0)
			continue;
		if (!strcmp(e->dynstr + s->st_name, name))
			return s;
	}
	return NULL;
}

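/*
 * Make the dynamic symbol "orig" resolve to the code behind "new" by
 * copying the latter's symbol table entry over the former's.
 */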
static int patchsym64(struct vdso_elfinfo *_e, const char *orig,
		      const char *new)
{
	struct vdso_elfinfo64 *e = &_e->u.elf64;
	Elf64_Sym *osym = find_sym64(e, orig);
	Elf64_Sym *nsym = find_sym64(e, new);

	if (!nsym || !osym) {
		pr_err("VDSO64: Missing symbols.\n");
		return -ENODEV;
	}
	osym->st_value = nsym->st_value;
	osym->st_size = nsym->st_size;
	osym->st_info = nsym->st_info;
	osym->st_other = nsym->st_other;
	osym->st_shndx = nsym->st_shndx;

	return 0;
}

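/* The 32-bit helpers below mirror their 64-bit counterparts above. */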
static void *one_section32(struct vdso_elfinfo32 *e, const char *name,
			   unsigned long *size)
{
	const char *snames;
	Elf32_Shdr *shdrs;
	unsigned int i;

	shdrs = (void *)e->hdr + e->hdr->e_shoff;
	snames = (void *)e->hdr + shdrs[e->hdr->e_shstrndx].sh_offset;
	for (i = 1; i < e->hdr->e_shnum; i++) {
		if (!strcmp(snames + shdrs[i].sh_name, name)) {
			if (size)
				*size = shdrs[i].sh_size;
			return (void *)e->hdr + shdrs[i].sh_offset;
		}
	}
	return NULL;
}

static int find_sections32(const struct vdso_image *image, struct vdso_elfinfo *_e)
{
	struct vdso_elfinfo32 *e = &_e->u.elf32;

	e->hdr = image->data;
	e->dynsym = one_section32(e, ".dynsym", &e->dynsymsize);
	e->dynstr = one_section32(e, ".dynstr", NULL);

	if (!e->dynsym || !e->dynstr) {
		pr_err("VDSO32: Missing symbol sections.\n");
		return -ENODEV;
	}
	return 0;
}

static Elf32_Sym *find_sym32(const struct vdso_elfinfo32 *e, const char *name)
{
	unsigned int i;

	for (i = 0; i < (e->dynsymsize / sizeof(Elf32_Sym)); i++) {
		Elf32_Sym *s = &e->dynsym[i];

		if (s->st_name == 0)
			continue;
		if (!strcmp(e->dynstr + s->st_name, name))
			return s;
	}
	return NULL;
}

static int patchsym32(struct vdso_elfinfo *_e, const char *orig,
		      const char *new)
{
	struct vdso_elfinfo32 *e = &_e->u.elf32;
	Elf32_Sym *osym = find_sym32(e, orig);
	Elf32_Sym *nsym = find_sym32(e, new);

	if (!nsym || !osym) {
		pr_err("VDSO32: Missing symbols.\n");
		return -ENODEV;
	}
	osym->st_value = nsym->st_value;
	osym->st_size = nsym->st_size;
	osym->st_info = nsym->st_info;
	osym->st_other = nsym->st_other;
	osym->st_shndx = nsym->st_shndx;

	return 0;
}

static int find_sections(const struct vdso_image *image, struct vdso_elfinfo *e,
			 bool elf64)
{
	if (elf64)
		return find_sections64(image, e);
	else
		return find_sections32(image, e);
}

static int patch_one_symbol(struct vdso_elfinfo *e, const char *orig,
			    const char *new_target, bool elf64)
{
	if (elf64)
		return patchsym64(e, orig, new_target);
	else
		return patchsym32(e, orig, new_target);
}

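/*
 * The *_stick entry points read the %stick counter rather than %tick.  On
 * every CPU type except Spitfire (see the tlb_type check in
 * init_vdso_image()), repoint the exported gettimeofday/clock_gettime
 * symbols at those variants.
 */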
static int stick_patch(const struct vdso_image *image, struct vdso_elfinfo *e, bool elf64)
{
	int err;

	err = find_sections(image, e, elf64);
	if (err)
		return err;

	err = patch_one_symbol(e,
			       "__vdso_gettimeofday",
			       "__vdso_gettimeofday_stick", elf64);
	if (err)
		return err;

	return patch_one_symbol(e,
				"__vdso_clock_gettime",
				"__vdso_clock_gettime_stick", elf64);
}

/*
 * Allocate pages for the vdso and vvar, and copy in the vdso text from the
 * kernel image.
 */
static int __init init_vdso_image(const struct vdso_image *image,
				  struct vm_special_mapping *vdso_mapping,
				  bool elf64)
{
	int cnpages = (image->size) / PAGE_SIZE;
	struct page *dp, **dpp = NULL;
	struct page *cp, **cpp = NULL;
	struct vdso_elfinfo ei;
	int i, dnpages = 0;

	if (tlb_type != spitfire) {
		int err = stick_patch(image, &ei, elf64);

		if (err)
			return err;
	}

	/*
	 * First, the vdso text.  This is initialized data, an integral
	 * number of pages long.
	 */
	if (WARN_ON(image->size % PAGE_SIZE != 0))
		goto oom;

	cpp = kcalloc(cnpages, sizeof(struct page *), GFP_KERNEL);
	vdso_mapping->pages = cpp;

	if (!cpp)
		goto oom;

	for (i = 0; i < cnpages; i++) {
		cp = alloc_page(GFP_KERNEL);
		if (!cp)
			goto oom;
		cpp[i] = cp;
		copy_page(page_address(cp), image->data + i * PAGE_SIZE);
	}

	/*
	 * Now the vvar page.  This is uninitialized data.
	 */

	if (vvar_data == NULL) {
		dnpages = (sizeof(struct vvar_data) / PAGE_SIZE) + 1;
		if (WARN_ON(dnpages != 1))
			goto oom;
		dpp = kcalloc(dnpages, sizeof(struct page *), GFP_KERNEL);
		vvar_mapping.pages = dpp;

		if (!dpp)
			goto oom;

		dp = alloc_page(GFP_KERNEL);
		if (!dp)
			goto oom;

		dpp[0] = dp;
		vvar_data = page_address(dp);
		memset(vvar_data, 0, PAGE_SIZE);

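		/*
		 * ->seq is the generation counter that vDSO readers use to
		 * detect a concurrent update; start it from a stable (even)
		 * value.
		 */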
		vvar_data->seq = 0;
	}

	return 0;
 oom:
	if (cpp != NULL) {
		for (i = 0; i < cnpages; i++) {
			if (cpp[i] != NULL)
				__free_page(cpp[i]);
		}
		kfree(cpp);
		vdso_mapping->pages = NULL;
	}

	if (dpp != NULL) {
		for (i = 0; i < dnpages; i++) {
			if (dpp[i] != NULL)
				__free_page(dpp[i]);
		}
		kfree(dpp);
		vvar_mapping.pages = NULL;
	}

	pr_warn("Cannot allocate vdso\n");
	vdso_enabled = 0;
	return -ENOMEM;
}

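/* Prepare each configured vDSO image once, early at boot. */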
static int __init init_vdso(void)
{
	int err = 0;

#ifdef CONFIG_SPARC64
	err = init_vdso_image(&vdso_image_64_builtin, &vdso_mapping64, true);
	if (err)
		return err;
#endif

#ifdef CONFIG_COMPAT
	err = init_vdso_image(&vdso_image_32_builtin, &vdso_mapping32, false);
#endif
	return err;
}
subsys_initcall(init_vdso);

struct linux_binprm;

/* Shuffle the vdso up a bit, randomly. */
static unsigned long vdso_addr(unsigned long start, unsigned int len)
{
	unsigned int offset;

	/* This loses some more bits than a modulo, but is cheaper */
	offset = get_random_u32_below(PTRS_PER_PTE);
	return start + (offset << PAGE_SHIFT);
}

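/*
 * Map the vvar page and the vDSO text into the current process.  Note that
 * image->sym_vvar_start is negative: the vvar page sits just below the
 * vDSO text, so a single unmapped area of (image->size - sym_vvar_start)
 * bytes covers both mappings.
 */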
static int map_vdso(const struct vdso_image *image,
		    struct vm_special_mapping *vdso_mapping)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start, addr = 0;
	int ret = 0;

	mmap_write_lock(mm);

	/*
	 * First, get an unmapped region: then randomize it, and make sure that
	 * region is free.
	 */
	if (current->flags & PF_RANDOMIZE) {
		addr = get_unmapped_area(NULL, 0,
					 image->size - image->sym_vvar_start,
					 0, 0);
		if (IS_ERR_VALUE(addr)) {
			ret = addr;
			goto up_fail;
		}
		addr = vdso_addr(addr, image->size - image->sym_vvar_start);
	}
	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;
	current->mm->context.vdso = (void __user *)text_start;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
	}

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	mmap_write_unlock(mm);
	return ret;
}

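/*
 * Called from the ELF loader when a new process image is set up: pick the
 * builtin vDSO image matching the task's word size and map it.
 */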
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso_enabled)
		return 0;

#ifdef CONFIG_COMPAT
	if (!is_32bit_task())
		return map_vdso(&vdso_image_64_builtin, &vdso_mapping64);
	else
		return map_vdso(&vdso_image_32_builtin, &vdso_mapping32);
#else
	return map_vdso(&vdso_image_64_builtin, &vdso_mapping64);
#endif
}

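/*
 * "vdso=" boot parameter; e.g. booting with "vdso=0" disables the vDSO
 * for every subsequently exec'd process.
 */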
static __init int vdso_setup(char *s)
{
	int err;
	unsigned long val;

	err = kstrtoul(s, 10, &val);
	if (!err)
		vdso_enabled = val;
	return 1;
}
__setup("vdso=", vdso_setup);