// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <linux/time_namespace.h>

#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/tlb.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>
#include <asm/vdso/vsyscall.h>
#include <clocksource/hyperv_timer.h>

struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)vvar_page;
}

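/*
 * Kernel-side storage for the vDSO data page. vdso_data is what the
 * [vvar] fault handler below maps into every process at the
 * sym_vvar_page offset; it is updated from the generic vDSO
 * timekeeping path (see kernel/time/vsyscall.c).
 */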
static union vdso_data_store vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;

unsigned int vclocks_used __read_mostly;

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

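/*
 * Called at boot for each vDSO image: check that the blob is sanely
 * sized and patch in the alternative instructions selected for the
 * running CPU, just like for the kernel's own text.
 */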
int __init init_vdso_image(const struct vdso_image *image)
{
	BUILD_BUG_ON(VDSO_CLOCKMODE_MAX >= 32);
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));

	return 0;
}

static const struct vm_special_mapping vvar_mapping;
struct linux_binprm;

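/*
 * Fault handler for the [vdso] text mapping: back the faulting offset
 * with the matching page of the static image blob. These are ordinary
 * refcounted kernel pages, hence the get_page().
 */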
static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}

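/*
 * If a 32-bit task is sitting on the int80 landing pad while its vDSO
 * gets moved (e.g. by CRIU during restore), the saved IP still points
 * into the old mapping; rebase it onto the new one.
 */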
static void vdso_fix_landing(const struct vdso_image *image,
		struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	if (in_ia32_syscall() && image == &vdso_image_32) {
		struct pt_regs *regs = current_pt_regs();
		unsigned long vdso_land = image->sym_int80_landing_pad;
		unsigned long old_land_addr = vdso_land +
			(unsigned long)current->mm->context.vdso;

		/* Fix up the userspace landing address; see do_fast_syscall_32(). */
		if (regs->ip == old_land_addr)
			regs->ip = new_vma->vm_start + vdso_land;
	}
#endif
}

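/*
 * .mremap callback of the [vdso] mapping: keep the per-mm bookkeeping
 * in sync when userspace moves the VMA.
 */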
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	const struct vdso_image *image = current->mm->context.vdso_image;

	vdso_fix_landing(image, new_vma);
	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}

#ifdef CONFIG_TIME_NS
/*
 * The vvar page layout depends on whether a task belongs to the root or
 * a non-root time namespace. Whenever a task changes its namespace, the
 * VVAR page tables are cleared and then re-faulted with the
 * corresponding layout.
 * See also the comment near timens_setup_vdso_data() for details.
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		if (vma_is_special_mapping(vma, &vvar_mapping))
			zap_vma_pages(vma);
	}
	mmap_read_unlock(mm);

	return 0;
}
#endif

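/*
 * Fault handler for the [vvar] data mapping. sym_vvar_start is
 * negative (the vvar area precedes the vdso text), so symbol offsets
 * here are relative to the start of the text mapping.
 */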
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	unsigned long pfn;
	long sym_offset;

	if (!image)
		return VM_FAULT_SIGBUS;

	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;

	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping.  This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;

	if (sym_offset == image->sym_vvar_page) {
		struct page *timens_page = find_timens_vvar_page(vma);

		pfn = __pa_symbol(vdso_data) >> PAGE_SHIFT;

		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the sym_vvar_page offset and
		 * the real VVAR page is mapped with the sym_timens_page
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (timens_page) {
			unsigned long addr;
			vm_fault_t err;

			/*
			 * Optimization: pre-fault the real VVAR page too
			 * while inside a time namespace. The timens page
			 * holds only clock offsets relative to the VVAR
			 * data, so the VVAR page would be faulted in
			 * shortly by vDSO code anyway.
			 */
			addr = vmf->address + (image->sym_timens_page - sym_offset);
			err = vmf_insert_pfn(vma, addr, pfn);
			if (unlikely(err & VM_FAULT_ERROR))
				return err;

			pfn = page_to_pfn(timens_page);
		}

		return vmf_insert_pfn(vma, vmf->address, pfn);

	} else if (sym_offset == image->sym_timens_page) {
		struct page *timens_page = find_timens_vvar_page(vma);

		if (!timens_page)
			return VM_FAULT_SIGBUS;

		pfn = __pa_symbol(vdso_data) >> PAGE_SHIFT;
		return vmf_insert_pfn(vma, vmf->address, pfn);
	}

	return VM_FAULT_SIGBUS;
}

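/*
 * Fault handler for the [vvar_vclock] mapping: expose the pvclock or
 * Hyper-V TSC page, but only if the corresponding clocksource was
 * actually in use; everything else gets SIGBUS.
 */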
static vm_fault_t vvar_vclock_fault(const struct vm_special_mapping *sm,
				    struct vm_area_struct *vma, struct vm_fault *vmf)
{
	switch (vmf->pgoff) {
#ifdef CONFIG_PARAVIRT_CLOCK
	case VDSO_PAGE_PVCLOCK_OFFSET:
	{
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_get_pvti_cpu0_va();

		if (pvti && vclock_was_used(VDSO_CLOCKMODE_PVCLOCK))
			return vmf_insert_pfn_prot(vma, vmf->address,
					__pa(pvti) >> PAGE_SHIFT,
					pgprot_decrypted(vma->vm_page_prot));
		break;
	}
#endif /* CONFIG_PARAVIRT_CLOCK */
#ifdef CONFIG_HYPERV_TIMER
	case VDSO_PAGE_HVCLOCK_OFFSET:
	{
		unsigned long pfn = hv_get_tsc_pfn();

		if (pfn && vclock_was_used(VDSO_CLOCKMODE_HVCLOCK))
			return vmf_insert_pfn(vma, vmf->address, pfn);
		break;
	}
#endif /* CONFIG_HYPERV_TIMER */
	}

	return VM_FAULT_SIGBUS;
}

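/*
 * The .name fields are what these VMAs show up as in
 * /proc/<pid>/maps: [vdso], [vvar] and [vvar_vclock].
 */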
static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};
static const struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
};
static const struct vm_special_mapping vvar_vclock_mapping = {
	.name = "[vvar_vclock]",
	.fault = vvar_vclock_fault,
};

/*
 * Add vdso and vvar mappings to the current process.
 * @image          - blob to map
 * @addr           - request a specific address (zero to map at a free address)
 */
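/*
 * Rough layout of the resulting mappings (sym_vvar_start is negative,
 * so the data pages sit below the text):
 *
 *   addr                                -> [vvar pages][vclock pages]
 *   text_start = addr - sym_vvar_start  -> [vdso text, image->size bytes]
 */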
static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start;
	int ret = 0;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       (__VVAR_PAGES - VDSO_NR_VCLOCK_PAGES) * PAGE_SIZE,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr + (__VVAR_PAGES - VDSO_NR_VCLOCK_PAGES) * PAGE_SIZE,
				       VDSO_NR_VCLOCK_PAGES * PAGE_SIZE,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_vclock_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
		do_munmap(mm, addr, image->size, NULL);
		goto up_fail;
	}

	current->mm->context.vdso = (void __user *)text_start;
	current->mm->context.vdso_image = image;

up_fail:
	mmap_write_unlock(mm);
	return ret;
}

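/*
 * Entry point for the ARCH_MAP_VDSO_* arch_prctl() requests: map the
 * image at a caller-chosen address, but only if no vdso/vvar mapping
 * exists yet (checked below).
 */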
int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_write_lock(mm);
	/*
	 * Check whether we have already mapped the vdso blob; fail if so,
	 * to keep userspace from abusing install_special_mapping(), which
	 * may not get accounting and rlimits right.
	 * We could search for a VMA near context.vdso, but this is a slow
	 * path, so explicitly check all VMAs to be completely sure.
	 */
	for_each_vma(vmi, vma) {
		if (vma_is_special_mapping(vma, &vdso_mapping) ||
				vma_is_special_mapping(vma, &vvar_mapping) ||
				vma_is_special_mapping(vma, &vvar_vclock_mapping)) {
			mmap_write_unlock(mm);
			return -EEXIST;
		}
	}
	mmap_write_unlock(mm);

	return map_vdso(image, addr);
}

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
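/*
 * vdso32_enabled is wired up to the vdso32= boot parameter and the
 * abi.vsyscall32 sysctl (see vdso32-setup.c).
 */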
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, 0);
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso(&vdso_image_64, 0);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp, bool x32)
{
#ifdef CONFIG_X86_X32_ABI
	if (x32) {
		if (!vdso64_enabled)
			return 0;
		return map_vdso(&vdso_image_x32, 0);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

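/*
 * Used e.g. by syscall user dispatch to let sigreturn issued from the
 * vDSO bypass filtering. Only 32-bit tasks are relevant here: the
 * 64-bit vDSO carries no sigreturn trampoline.
 */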
bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
{
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
	const struct vdso_image *image = current->mm->context.vdso_image;
	unsigned long vdso = (unsigned long) current->mm->context.vdso;

	if (in_ia32_syscall() && image == &vdso_image_32) {
		if (regs->ip == vdso + image->sym_vdso32_sigreturn_landing_pad ||
		    regs->ip == vdso + image->sym_vdso32_rt_sigreturn_landing_pad)
			return true;
	}
#endif
	return false;
}

#ifdef CONFIG_X86_64
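/* "vdso=0" on the kernel command line disables the 64-bit vDSO mapping. */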
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 1;
}
__setup("vdso=", vdso_setup);
#endif /* CONFIG_X86_64 */
407