xref: /linux/arch/s390/kernel/vdso.c (revision 2a2153a2bac7d9388b661a18d49707a8d885b231)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * vdso setup for s390
4  *
5  *  Copyright IBM Corp. 2008
6  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
7  */
8 
9 #include <linux/binfmts.h>
10 #include <linux/elf.h>
11 #include <linux/errno.h>
12 #include <linux/init.h>
13 #include <linux/kernel.h>
14 #include <linux/module.h>
15 #include <linux/mm.h>
16 #include <linux/slab.h>
17 #include <linux/smp.h>
18 #include <linux/random.h>
19 #include <linux/vdso_datastore.h>
20 #include <vdso/datapage.h>
21 #include <asm/vdso/vsyscall.h>
22 #include <asm/alternative.h>
23 #include <asm/vdso.h>
24 
25 extern char vdso64_start[], vdso64_end[];
26 
27 static int vdso_mremap(const struct vm_special_mapping *sm,
28 		       struct vm_area_struct *vma)
29 {
30 	current->mm->context.vdso_base = vma->vm_start;
31 	return 0;
32 }
33 
/*
 * Special mapping descriptor for the 64-bit vdso text pages.
 * .pages is filled in at boot by vdso_init(); .mremap keeps
 * mm->context.vdso_base current when userspace moves the mapping.
 */
static struct vm_special_mapping vdso64_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};
38 
/*
 * Store the CPU number in the TOD programmable field so the vdso
 * getcpu() implementation can read it from userspace.
 * NOTE(review): runs on the boot CPU here; secondary CPUs presumably
 * call this on their own bring-up path — confirm against SMP init code.
 */
int vdso_getcpu_init(void)
{
	set_tod_programmable_field(smp_processor_id());
	return 0;
}
early_initcall(vdso_getcpu_init); /* Must be called before SMP init */
45 
/*
 * Map the vvar data pages followed by the vdso text into the current
 * process at (or near) @addr.  @vdso_mapping_len must cover both the
 * vvar area (VDSO_NR_PAGES pages) and the page-aligned vdso text.
 *
 * Returns 0 on success and records the text start address in
 * mm->context.vdso_base; returns a negative errno on failure.
 */
static int map_vdso(unsigned long addr, unsigned long vdso_mapping_len)
{
	unsigned long vvar_start, vdso_text_start, vdso_text_len;
	struct vm_special_mapping *vdso_mapping;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;

	BUILD_BUG_ON(VDSO_NR_PAGES != __VDSO_PAGES);
	if (mmap_write_lock_killable(mm))
		return -EINTR;

	vdso_text_len = vdso64_end - vdso64_start;
	vdso_mapping = &vdso64_mapping;
	/* Reserve one contiguous region for vvar pages + vdso text. */
	vvar_start = get_unmapped_area(NULL, addr, vdso_mapping_len, 0, 0);
	rc = vvar_start;
	if (IS_ERR_VALUE(vvar_start))
		goto out;
	vma = vdso_install_vvar_mapping(mm, vvar_start);
	rc = PTR_ERR(vma);
	if (IS_ERR(vma))
		goto out;
	/* The vdso text starts right after the vvar data pages. */
	vdso_text_start = vvar_start + VDSO_NR_PAGES * PAGE_SIZE;
	/* VM_MAYWRITE for COW so gdb can set breakpoints */
	vma = _install_special_mapping(mm, vdso_text_start, vdso_text_len,
				       VM_READ|VM_EXEC|VM_SEALED_SYSMAP|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_mapping);
	if (IS_ERR(vma)) {
		/*
		 * Undo the vvar mapping on failure.
		 * NOTE(review): only PAGE_SIZE is unmapped here although the
		 * vvar area spans VDSO_NR_PAGES pages — verify whether the
		 * vvar install maps a single VMA that do_munmap() splits, or
		 * whether the remaining pages are intentionally left to exit
		 * teardown.
		 */
		do_munmap(mm, vvar_start, PAGE_SIZE, NULL);
		rc = PTR_ERR(vma);
	} else {
		current->mm->context.vdso_base = vdso_text_start;
		rc = 0;
	}
out:
	mmap_write_unlock(mm);
	return rc;
}
85 
86 static unsigned long vdso_addr(unsigned long start, unsigned long len)
87 {
88 	unsigned long addr, end, offset;
89 
90 	/*
91 	 * Round up the start address. It can start out unaligned as a result
92 	 * of stack start randomization.
93 	 */
94 	start = PAGE_ALIGN(start);
95 
96 	/* Round the lowest possible end address up to a PMD boundary. */
97 	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
98 	if (end >= VDSO_BASE)
99 		end = VDSO_BASE;
100 	end -= len;
101 
102 	if (end > start) {
103 		offset = get_random_u32_below(((end - start) >> PAGE_SHIFT) + 1);
104 		addr = start + (offset << PAGE_SHIFT);
105 	} else {
106 		addr = start;
107 	}
108 	return addr;
109 }
110 
111 unsigned long vdso_text_size(void)
112 {
113 	return PAGE_ALIGN(vdso64_end - vdso64_start);
114 }
115 
116 unsigned long vdso_size(void)
117 {
118 	return vdso_text_size() + VDSO_NR_PAGES * PAGE_SIZE;
119 }
120 
121 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
122 {
123 	unsigned long addr = VDSO_BASE;
124 	unsigned long size = vdso_size();
125 
126 	if (current->flags & PF_RANDOMIZE)
127 		addr = vdso_addr(current->mm->start_stack + PAGE_SIZE, size);
128 	return map_vdso(addr, size);
129 }
130 
131 static struct page ** __init vdso_setup_pages(void *start, void *end)
132 {
133 	int pages = (end - start) >> PAGE_SHIFT;
134 	struct page **pagelist;
135 	int i;
136 
137 	pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
138 	if (!pagelist)
139 		panic("%s: Cannot allocate page list for VDSO", __func__);
140 	for (i = 0; i < pages; i++)
141 		pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
142 	return pagelist;
143 }
144 
145 static void vdso_apply_alternatives(void)
146 {
147 	const struct elf64_shdr *alt, *shdr;
148 	struct alt_instr *start, *end;
149 	const struct elf64_hdr *hdr;
150 
151 	hdr = (struct elf64_hdr *)vdso64_start;
152 	shdr = (void *)hdr + hdr->e_shoff;
153 	alt = find_section(hdr, shdr, ".altinstructions");
154 	if (!alt)
155 		return;
156 	start = (void *)hdr + alt->sh_offset;
157 	end = (void *)hdr + alt->sh_offset + alt->sh_size;
158 	apply_alternatives(start, end);
159 }
160 
/*
 * Boot-time vdso setup: patch alternatives into the vdso image and
 * build the page list for the 64-bit vdso text mapping.  Must run
 * before the first user process is exec'ed, hence arch_initcall.
 */
static int __init vdso_init(void)
{
	vdso_apply_alternatives();
	vdso64_mapping.pages = vdso_setup_pages(vdso64_start, vdso64_end);
	return 0;
}
arch_initcall(vdso_init);
168