xref: /linux/arch/x86/entry/vdso/vma.c (revision e9f0878c4b2004ac19581274c1ae4c61ae3ca70e)
/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>
#include <asm/mshyperv.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

void __init init_vdso_image(const struct vdso_image *image)
{
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

struct linux_binprm;

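/*
 * Fault handler for the "[vdso]" mapping: hand back the page of the
 * kernel-side vdso image blob that backs the faulting offset.  The page
 * is refcounted with get_page() because the core fault code drops a
 * reference once it is done with it.
 */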
static int vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}

static void vdso_fix_landing(const struct vdso_image *image,
		struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	if (in_ia32_syscall() && image == &vdso_image_32) {
		struct pt_regs *regs = current_pt_regs();
		unsigned long vdso_land = image->sym_int80_landing_pad;
		unsigned long old_land_addr = vdso_land +
			(unsigned long)current->mm->context.vdso;

		/* Fix the userspace landing address - see do_fast_syscall_32() */
		if (regs->ip == old_land_addr)
			regs->ip = new_vma->vm_start + vdso_land;
	}
#endif
}

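/*
 * Called when userspace moves the vdso VMA with mremap().  The new size
 * must match the image, the cached vdso base in mm->context is updated,
 * and, if the task's saved IP points at the old int80 landing pad, it is
 * rewritten so execution resumes in the relocated vdso.
 */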
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	const struct vdso_image *image = current->mm->context.vdso_image;

	if (image->size != new_size)
		return -EINVAL;

	vdso_fix_landing(image, new_vma);
	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}

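/*
 * Fault handler for the "[vvar]" mapping.  The vvar area has no struct
 * pages of its own; depending on which symbol the faulting offset maps
 * to, the VVAR data page, the pvclock page or the Hyper-V TSC page is
 * inserted by PFN.  Clock pages for clock sources that were never used
 * are left unmapped, so touching them raises SIGBUS.
 */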
static int vvar_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	long sym_offset;
	int ret = -EFAULT;

	if (!image)
		return VM_FAULT_SIGBUS;

	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;

	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping.  This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;

	if (sym_offset == image->sym_vvar_page) {
		ret = vm_insert_pfn(vma, vmf->address,
				    __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
	} else if (sym_offset == image->sym_pvclock_page) {
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_get_pvti_cpu0_va();
		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
			ret = vm_insert_pfn_prot(
				vma,
				vmf->address,
				__pa(pvti) >> PAGE_SHIFT,
				pgprot_decrypted(vma->vm_page_prot));
		}
	} else if (sym_offset == image->sym_hvclock_page) {
		struct ms_hyperv_tsc_page *tsc_pg = hv_get_tsc_page();

		if (tsc_pg && vclock_was_used(VCLOCK_HVCLOCK))
			ret = vm_insert_pfn(vma, vmf->address,
					    vmalloc_to_pfn(tsc_pg));
	}

	if (ret == 0 || ret == -EBUSY)
		return VM_FAULT_NOPAGE;

	return VM_FAULT_SIGBUS;
}

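/*
 * The special mappings show up in /proc/<pid>/maps under these names.
 * Both are faulted in on demand through the handlers above; only the
 * vdso mapping supports mremap().
 */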
static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};
static const struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
};

/*
 * Add vdso and vvar mappings to current process.
 * @image          - blob to map
 * @addr           - request a specific address (zero to map at free addr)
 */
static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start;
	int ret = 0;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

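	/*
	 * Reserve one contiguous range for both areas: sym_vvar_start is
	 * negative, so image->size - image->sym_vvar_start covers the vvar
	 * pages followed by the vdso text.  @addr is the start of the vvar
	 * area and the vdso text begins at addr - sym_vvar_start.
	 */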
	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
	} else {
		current->mm->context.vdso = (void __user *)text_start;
		current->mm->context.vdso_image = image;
	}

up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}

#ifdef CONFIG_X86_64
/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of address space.
 * To save memory, make sure it is still in the same PMD as the stack
 * top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address.  It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

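	/*
	 * [start, end] is now the range of page-aligned addresses at which
	 * the vdso can start without running past the rounded-up PMD
	 * boundary (or TASK_SIZE_MAX); pick one uniformly at random.
	 */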
	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
}

static int map_vdso_randomized(const struct vdso_image *image)
{
	unsigned long addr = vdso_addr(current->mm->start_stack,
				       image->size - image->sym_vvar_start);

	return map_vdso(image, addr);
}
#endif

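/*
 * Map a vdso image at a caller-chosen address, but only if no vdso or
 * vvar mapping exists yet in this mm; otherwise fail with -EEXIST so the
 * blob cannot be mapped more than once.
 */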
int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	/*
	 * Check if we have already mapped the vdso blob - fail to prevent
	 * userspace from abusing install_special_mapping(), which may not
	 * do accounting and rlimits correctly.
	 * We could search the VMA near context.vdso, but this is a slow
	 * path, so let's explicitly check all VMAs to be completely sure.
	 */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma_is_special_mapping(vma, &vdso_mapping) ||
				vma_is_special_mapping(vma, &vvar_mapping)) {
			up_write(&mm->mmap_sem);
			return -EEXIST;
		}
	}
	up_write(&mm->mmap_sem);

	return map_vdso(image, addr);
}

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, 0);
}
#endif

#ifdef CONFIG_X86_64
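/*
 * Called from the ELF loader at exec time to map the vdso into a fresh
 * process image.  The 64-bit vdso is placed at a randomized address near
 * the stack unless it has been disabled with vdso=0.
 */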
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso_randomized(&vdso_image_64);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;
		return map_vdso_randomized(&vdso_image_x32);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
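/*
 * "vdso=" kernel command line parameter: "vdso=0" disables the 64-bit
 * vdso, any non-zero value leaves it enabled.  The 32-bit vdso is
 * controlled separately via vdso32_enabled.
 */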
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif

#ifdef CONFIG_X86_64
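/*
 * Per-CPU setup for the vdso getcpu() fast path: encode the CPU and node
 * numbers both in the TSC_AUX MSR (read with RDTSCP) and in the limit
 * field of the GDT_ENTRY_PER_CPU segment descriptor, from which
 * userspace can recover them with LSL without entering the kernel.
 */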
static void vgetcpu_cpu_init(void *arg)
{
	int cpu = smp_processor_id();
	struct desc_struct d = { };
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	if (static_cpu_has(X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store cpu number in limit so that it can be loaded
	 * quickly in user space in vgetcpu. (12 bits for the CPU
	 * and 8 bits for the node)
	 */
	d.limit0 = cpu | ((node & 0xf) << 12);
	d.limit1 = node >> 4;
	d.type = 5;		/* RO data, expand down, accessed */
	d.dpl = 3;		/* Visible to user code */
	d.s = 1;		/* Not a system segment */
	d.p = 1;		/* Present */
	d.d = 1;		/* 32-bit */

	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}

static int vgetcpu_online(unsigned int cpu)
{
	return smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);
}

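/*
 * Boot-time initialization: patch alternatives into the vdso images and
 * register a CPU hotplug callback so every CPU that comes online gets
 * its getcpu segment and TSC_AUX set up.
 */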
static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	/* notifier priority > KVM */
	return cpuhp_setup_state(CPUHP_AP_X86_VDSO_VMA_ONLINE,
				 "x86/vdso/vma:online", vgetcpu_online, NULL);
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */