// SPDX-License-Identifier: GPL-2.0
/*
 * vdso setup for s390
 *
 * Copyright IBM Corp. 2008
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/binfmts.h>
#include <linux/compat.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/random.h>
#include <linux/vdso_datastore.h>
#include <vdso/datapage.h>
#include <asm/vdso/vsyscall.h>
#include <asm/alternative.h>
#include <asm/vdso.h>

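/*
 * Start/end markers of the embedded vDSO images, provided by the
 * wrapper objects that pull in the built vdso64/vdso32 binaries.
 */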
extern char vdso64_start[], vdso64_end[];
extern char vdso32_start[], vdso32_end[];

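/*
 * Keep the cached vDSO base up to date when userspace moves the
 * mapping, e.g. during checkpoint/restore.
 */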
static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	current->mm->context.vdso_base = vma->vm_start;
	return 0;
}

static struct vm_special_mapping vdso64_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};

static struct vm_special_mapping vdso32_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};

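/*
 * Store the CPU number in the TOD clock programmable field so that
 * the vDSO getcpu() implementation can retrieve it from userspace.
 */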
int vdso_getcpu_init(void)
{
	set_tod_programmable_field(smp_processor_id());
	return 0;
}
early_initcall(vdso_getcpu_init); /* Must be called before SMP init */

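/*
 * Map the vvar/data pages and, directly behind them, the vDSO text
 * into the current process. On success the start of the text mapping
 * is cached in mm->context.vdso_base.
 */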
static int map_vdso(unsigned long addr, unsigned long vdso_mapping_len)
{
	unsigned long vvar_start, vdso_text_start, vdso_text_len;
	struct vm_special_mapping *vdso_mapping;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;

	BUILD_BUG_ON(VDSO_NR_PAGES != __VDSO_PAGES);
	if (mmap_write_lock_killable(mm))
		return -EINTR;

	if (is_compat_task()) {
		vdso_text_len = vdso32_end - vdso32_start;
		vdso_mapping = &vdso32_mapping;
	} else {
		vdso_text_len = vdso64_end - vdso64_start;
		vdso_mapping = &vdso64_mapping;
	}
	vvar_start = get_unmapped_area(NULL, addr, vdso_mapping_len, 0, 0);
	rc = vvar_start;
	if (IS_ERR_VALUE(vvar_start))
		goto out;
	vma = vdso_install_vvar_mapping(mm, vvar_start);
	rc = PTR_ERR(vma);
	if (IS_ERR(vma))
		goto out;
	vdso_text_start = vvar_start + VDSO_NR_PAGES * PAGE_SIZE;
	/* VM_MAYWRITE for COW so gdb can set breakpoints */
	vma = _install_special_mapping(mm, vdso_text_start, vdso_text_len,
				       VM_READ|VM_EXEC|VM_SEALED_SYSMAP|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_mapping);
	if (IS_ERR(vma)) {
		do_munmap(mm, vvar_start, PAGE_SIZE, NULL);
		rc = PTR_ERR(vma);
	} else {
		current->mm->context.vdso_base = vdso_text_start;
		rc = 0;
	}
out:
	mmap_write_unlock(mm);
	return rc;
}

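/*
 * Choose a randomized, page-aligned load address for the vDSO between
 * the given start address and VDSO_BASE. Without room to randomize,
 * fall back to the page-aligned start address.
 */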
static unsigned long vdso_addr(unsigned long start, unsigned long len)
{
	unsigned long addr, end, offset;

	/*
	 * Round up the start address. It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= VDSO_BASE)
		end = VDSO_BASE;
	end -= len;

	if (end > start) {
		offset = get_random_u32_below(((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}
	return addr;
}

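/* Page-aligned size of the vDSO text for the current task. */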
unsigned long vdso_text_size(void)
{
	unsigned long size;

	if (is_compat_task())
		size = vdso32_end - vdso32_start;
	else
		size = vdso64_end - vdso64_start;
	return PAGE_ALIGN(size);
}

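/* Total mapping size: the vvar/data pages plus the vDSO text. */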
unsigned long vdso_size(void)
{
	return vdso_text_size() + VDSO_NR_PAGES * PAGE_SIZE;
}

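/*
 * Called during ELF load to map the vDSO. With PF_RANDOMIZE set the
 * placement is randomized above the stack, otherwise the fixed
 * VDSO_BASE hint is used.
 */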
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	unsigned long addr = VDSO_BASE;
	unsigned long size = vdso_size();

	if (current->flags & PF_RANDOMIZE)
		addr = vdso_addr(current->mm->start_stack + PAGE_SIZE, size);
	return map_vdso(addr, size);
}

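/*
 * Build the NULL-terminated page list for a vDSO image, for use as
 * the pages member of its vm_special_mapping.
 */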
static struct page ** __init vdso_setup_pages(void *start, void *end)
{
	int pages = (end - start) >> PAGE_SHIFT;
	struct page **pagelist;
	int i;

	pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
	if (!pagelist)
		panic("%s: Cannot allocate page list for VDSO", __func__);
	for (i = 0; i < pages; i++)
		pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
	return pagelist;
}

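/*
 * The vDSO image carries its own alternative instructions; find its
 * .altinstructions ELF section and patch the image in place before it
 * is mapped into any process.
 */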
static void vdso_apply_alternatives(void)
{
	const struct elf64_shdr *alt, *shdr;
	struct alt_instr *start, *end;
	const struct elf64_hdr *hdr;

	hdr = (struct elf64_hdr *)vdso64_start;
	shdr = (void *)hdr + hdr->e_shoff;
	alt = find_section(hdr, shdr, ".altinstructions");
	if (!alt)
		return;
	start = (void *)hdr + alt->sh_offset;
	end = (void *)hdr + alt->sh_offset + alt->sh_size;
	apply_alternatives(start, end);
}

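/* Patch alternatives and build the page lists for the vDSO images. */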
static int __init vdso_init(void)
{
	vdso_apply_alternatives();
	vdso64_mapping.pages = vdso_setup_pages(vdso64_start, vdso64_end);
	if (IS_ENABLED(CONFIG_COMPAT))
		vdso32_mapping.pages = vdso_setup_pages(vdso32_start, vdso32_end);
	return 0;
}
arch_initcall(vdso_init);