/* xref: /linux/arch/x86/entry/vdso/vma.c (revision 0d08df6c493898e679d9c517e77ea95c063d40ec) */
/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

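/*
 * Apply alternative-instruction patching to the vdso image at boot, the
 * same way apply_alternatives() patches normal kernel text; the vdso is
 * built separately and carries its own alternatives table.
 */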
void __init init_vdso_image(const struct vdso_image *image)
{
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of the address space.
 * To save memory, make sure it is still in the same page table (PMD) as
 * the stack top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
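/*
 * Worked example, with made-up numbers: a randomized stack start of
 * 0x7ffd1c4bd123 page-aligns to start = 0x7ffd1c4be000.  For len =
 * 0x5000, round_up(start + len, PMD_SIZE) = 0x7ffd1c600000, so end
 * becomes 0x7ffd1c5fb000 and the vdso lands on one of
 * ((end - start) >> PAGE_SHIFT) + 1 = 318 page-aligned slots.
 */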
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
#ifdef CONFIG_X86_32
	return 0;
#else
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address.  It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
#endif
}

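/*
 * The vdso text is not file-backed; faults are satisfied directly from
 * the kernel's copy of the image, one page at a time.
 */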
static int vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}

static const struct vm_special_mapping text_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
};

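/*
 * The vvar area carries kernel-owned data pages (the vsyscall gtod data
 * and, when in use, the pvclock page) that every process maps read-only.
 * The mapping is VM_PFNMAP, so faults insert the raw pfn directly.
 */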
static int vvar_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	long sym_offset;
	int ret = -EFAULT;

	if (!image)
		return VM_FAULT_SIGBUS;

	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;

	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping.  This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;

	if (sym_offset == image->sym_vvar_page) {
		ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
				    __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
	} else if (sym_offset == image->sym_pvclock_page) {
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_pvti_cpu0_va();
		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
			ret = vm_insert_pfn(
				vma,
				(unsigned long)vmf->virtual_address,
				__pa(pvti) >> PAGE_SHIFT);
		}
	}

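	/*
	 * vm_insert_pfn() returns -EBUSY when the pfn is already mapped,
	 * e.g. after losing a race with another thread faulting the same
	 * page, so that case is treated as success as well.
	 */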
	if (ret == 0 || ret == -EBUSY)
		return VM_FAULT_NOPAGE;

	return VM_FAULT_SIGBUS;
}

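/*
 * Install the vdso text and the vvar area as one contiguous block.
 * sym_vvar_start is negative, so addr is the base of the vvar pages
 * and the text starts -sym_vvar_start bytes above it:
 *
 *	addr                                -> vvar pages
 *	text_start = addr - sym_vvar_start  -> vdso text (image->size bytes)
 */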
static int map_vdso(const struct vdso_image *image, bool calculate_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr, text_start;
	int ret = 0;
	static const struct vm_special_mapping vvar_mapping = {
		.name = "[vvar]",
		.fault = vvar_fault,
	};

	if (calculate_addr) {
		addr = vdso_addr(current->mm->start_stack,
				 image->size - image->sym_vvar_start);
	} else {
		addr = 0;
	}

	down_write(&mm->mmap_sem);

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;
	current->mm->context.vdso = (void __user *)text_start;
	current->mm->context.vdso_image = image;

	/*
	 * VM_MAYWRITE is needed so that gdb can COW pages in the vdso
	 * text and set breakpoints.
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &text_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

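	/*
	 * Note: on failure only mm->context.vdso is reset below; an
	 * already-installed text mapping is not unmapped here.
	 */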
up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	return ret;
}

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, false);
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso(&vdso_image_64, true);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;

		return map_vdso(&vdso_image_x32, true);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
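/*
 * "vdso=0" on the kernel command line disables the 64-bit vdso;
 * any nonzero value (the default is 1) enables it.
 */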
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif

#ifdef CONFIG_X86_64
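/*
 * How user space reads this back, as a simplified sketch of the vdso's
 * __getcpu() helper (the 0xfff mask and the selector arithmetic follow
 * the real implementation): a single LSL fetches the per-cpu segment
 * limit set up below.
 *
 *	unsigned int p;
 *	asm volatile ("lsl %1,%0" : "=r" (p)
 *		      : "r" ((unsigned int)(GDT_ENTRY_PER_CPU * 8 + 3)));
 *	cpu  = p & 0xfff;	// low 12 bits
 *	node = p >> 12;		// remaining bits
 */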
static void vgetcpu_cpu_init(void *arg)
{
	int cpu = smp_processor_id();
	struct desc_struct d = { };
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	if (static_cpu_has(X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store cpu number in limit so that it can be loaded
	 * quickly in user space in vgetcpu. (12 bits for the CPU
	 * and 8 bits for the node)
	 */
	d.limit0 = cpu | ((node & 0xf) << 12);
	d.limit = node >> 4;
	d.type = 5;		/* RO data, expand down, accessed */
	d.dpl = 3;		/* Visible to user code */
	d.s = 1;		/* Not a system segment */
	d.p = 1;		/* Present */
	d.d = 1;		/* 32-bit */

	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}

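/*
 * CPUs coming online after boot need the same GDT setup; arrange for
 * vgetcpu_cpu_init() to run on each of them.
 */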
static int
vgetcpu_cpu_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
	long cpu = (long)arg;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
		smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);

	return NOTIFY_DONE;
}

static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	cpu_notifier_register_begin();

	on_each_cpu(vgetcpu_cpu_init, NULL, 1);
	/* notifier priority > KVM */
	__hotcpu_notifier(vgetcpu_cpu_notifier, 30);

	cpu_notifier_register_done();

	return 0;
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */