xref: /linux/arch/sparc/vdso/vma.c (revision 53597deca0e38c30e6cd4ba2114fa42d2bcd85bb)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Set up the VMAs to tell the VM about the vDSO.
4  * Copyright 2007 Andi Kleen, SUSE Labs.
5  */
6 
7 /*
8  * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
9  */
10 
11 #include <linux/mm.h>
12 #include <linux/err.h>
13 #include <linux/sched.h>
14 #include <linux/slab.h>
15 #include <linux/init.h>
16 #include <linux/linkage.h>
17 #include <linux/random.h>
18 #include <linux/elf.h>
19 #include <linux/vdso_datastore.h>
20 #include <asm/cacheflush.h>
21 #include <asm/spitfire.h>
22 #include <asm/vdso.h>
23 #include <asm/page.h>
24 
25 #include <vdso/datapage.h>
26 #include <asm/vdso/vsyscall.h>
27 
/*
 * Non-zero when a vDSO should be mapped into each new process.  Cleared by
 * init_vdso_image() if the vDSO pages cannot be allocated at boot, and
 * settable from the kernel command line via "vdso=" (see vdso_setup()).
 */
unsigned int __read_mostly vdso_enabled = 1;

#ifdef	CONFIG_SPARC64
/* Special mapping descriptor for the 64-bit vDSO text; pages filled at boot. */
static struct vm_special_mapping vdso_mapping64 = {
	.name = "[vdso]"
};
#endif

#ifdef CONFIG_COMPAT
/* Special mapping descriptor for the 32-bit (compat) vDSO text. */
static struct vm_special_mapping vdso_mapping32 = {
	.name = "[vdso]"
};
#endif
41 
42 /*
43  * Allocate pages for the vdso and copy in the vdso text from the
44  * kernel image.
45  */
46 static int __init init_vdso_image(const struct vdso_image *image,
47 				  struct vm_special_mapping *vdso_mapping,
48 				  bool elf64)
49 {
50 	int cnpages = (image->size) / PAGE_SIZE;
51 	struct page *cp, **cpp = NULL;
52 	int i;
53 
54 	/*
55 	 * First, the vdso text.  This is initialied data, an integral number of
56 	 * pages long.
57 	 */
58 	if (WARN_ON(image->size % PAGE_SIZE != 0))
59 		goto oom;
60 
61 	cpp = kzalloc_objs(struct page *, cnpages);
62 	vdso_mapping->pages = cpp;
63 
64 	if (!cpp)
65 		goto oom;
66 
67 	for (i = 0; i < cnpages; i++) {
68 		cp = alloc_page(GFP_KERNEL);
69 		if (!cp)
70 			goto oom;
71 		cpp[i] = cp;
72 		copy_page(page_address(cp), image->data + i * PAGE_SIZE);
73 	}
74 
75 	return 0;
76  oom:
77 	if (cpp != NULL) {
78 		for (i = 0; i < cnpages; i++) {
79 			if (cpp[i] != NULL)
80 				__free_page(cpp[i]);
81 		}
82 		kfree(cpp);
83 		vdso_mapping->pages = NULL;
84 	}
85 
86 	pr_warn("Cannot allocate vdso\n");
87 	vdso_enabled = 0;
88 	return -ENOMEM;
89 }
90 
91 static int __init init_vdso(void)
92 {
93 	int err = 0;
94 #ifdef CONFIG_SPARC64
95 	err = init_vdso_image(&vdso_image_64_builtin, &vdso_mapping64, true);
96 	if (err)
97 		return err;
98 #endif
99 
100 #ifdef CONFIG_COMPAT
101 	err = init_vdso_image(&vdso_image_32_builtin, &vdso_mapping32, false);
102 #endif
103 	return err;
104 
105 }
106 subsys_initcall(init_vdso);
107 
108 struct linux_binprm;
109 
110 /* Shuffle the vdso up a bit, randomly. */
111 static unsigned long vdso_addr(unsigned long start, unsigned int len)
112 {
113 	unsigned int offset;
114 
115 	/* This loses some more bits than a modulo, but is cheaper */
116 	offset = get_random_u32_below(PTRS_PER_PTE);
117 	return start + (offset << PAGE_SHIFT);
118 }
119 
120 static_assert(VDSO_NR_PAGES == __VDSO_PAGES);
121 
/*
 * Map one vDSO image plus its vvar data pages into the current process.
 *
 * Layout: [vvar pages (VDSO_NR_PAGES)][vdso text (image->size)], with the
 * vvar area at the low end and the text immediately above it.  On success
 * mm->context.vdso points at the text start; on any failure it is reset
 * to NULL and a negative errno is returned.  Takes and releases the
 * mmap write lock.
 */
static int map_vdso(const struct vdso_image *image,
		struct vm_special_mapping *vdso_mapping)
{
	const size_t area_size = image->size + VDSO_NR_PAGES * PAGE_SIZE;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start, addr = 0;
	int ret = 0;

	mmap_write_lock(mm);

	/*
	 * First, get an unmapped region: then randomize it, and make sure that
	 * region is free.
	 */
	if (current->flags & PF_RANDOMIZE) {
		addr = get_unmapped_area(NULL, 0, area_size, 0, 0);
		if (IS_ERR_VALUE(addr)) {
			ret = addr;
			goto up_fail;
		}
		/* Shuffle upward; the second lookup below revalidates it. */
		addr = vdso_addr(addr, area_size);
	}
	addr = get_unmapped_area(NULL, addr, area_size, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	/* Text sits above the vvar pages; publish it before installing so
	 * the special-mapping fault path can find it. */
	text_start = addr + VDSO_NR_PAGES * PAGE_SIZE;
	current->mm->context.vdso = (void __user *)text_start;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	/* Install the shared vvar data pages below the text. */
	vma = vdso_install_vvar_mapping(mm, addr);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		/* Text already installed: unmap it so we fail cleanly. */
		do_munmap(mm, text_start, image->size, NULL);
	}

up_fail:
	/* Any failure path must leave context.vdso cleared. */
	if (ret)
		current->mm->context.vdso = NULL;

	mmap_write_unlock(mm);
	return ret;
}
183 
184 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
185 {
186 
187 	if (!vdso_enabled)
188 		return 0;
189 
190 #if defined CONFIG_COMPAT
191 	if (!(is_32bit_task()))
192 		return map_vdso(&vdso_image_64_builtin, &vdso_mapping64);
193 	else
194 		return map_vdso(&vdso_image_32_builtin, &vdso_mapping32);
195 #else
196 	return map_vdso(&vdso_image_64_builtin, &vdso_mapping64);
197 #endif
198 
199 }
200 
201 static __init int vdso_setup(char *s)
202 {
203 	int err;
204 	unsigned long val;
205 
206 	err = kstrtoul(s, 10, &val);
207 	if (!err)
208 		vdso_enabled = val;
209 	return 1;
210 }
211 __setup("vdso=", vdso_setup);
212