xref: /linux/arch/powerpc/kernel/vdso.c (revision 417552999d0b6681ac30e117ae890828ca7e46b3)
// SPDX-License-Identifier: GPL-2.0-or-later

/*
 *    Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *			 <benh@kernel.crashing.org>
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/vdso_datastore.h>
#include <vdso/datapage.h>

#include <asm/syscall.h>
#include <asm/syscalls.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>
#include <asm/setup.h>

static_assert(__VDSO_PAGES == VDSO_NR_PAGES);

/* The alignment of the vDSO */
#define VDSO_ALIGNMENT	(1 << 16)

extern char vdso32_start, vdso32_end;
extern char vdso64_start, vdso64_end;

static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma,
		       unsigned long text_size)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;

	if (new_size != text_size)
		return -EINVAL;

	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}

static int vdso32_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
	return vdso_mremap(sm, new_vma, &vdso32_end - &vdso32_start);
}

static int vdso64_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
	return vdso_mremap(sm, new_vma, &vdso64_end - &vdso64_start);
}

static void vdso_close(const struct vm_special_mapping *sm, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;

	/*
	 * close() is called for munmap() but also for mremap(). In the mremap()
	 * case the vdso pointer has already been updated by the mremap() hook
	 * above, so it must not be set to NULL here.
	 */
	if (vma->vm_start != (unsigned long)mm->context.vdso)
		return;

	mm->context.vdso = NULL;
}

static struct vm_special_mapping vdso32_spec __ro_after_init = {
	.name = "[vdso]",
	.mremap = vdso32_mremap,
	.close = vdso_close,
};

static struct vm_special_mapping vdso64_spec __ro_after_init = {
	.name = "[vdso]",
	.mremap = vdso64_mremap,
	.close = vdso_close,
};

/*
 * This is called from binfmt_elf; we create the special vma for the
 * vDSO and insert it into the mm struct tree.
 */
static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	unsigned long vdso_size, vdso_base, mappings_size;
	struct vm_special_mapping *vdso_spec;
	unsigned long vvar_size = VDSO_NR_PAGES * PAGE_SIZE;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (is_32bit_task()) {
		vdso_spec = &vdso32_spec;
		vdso_size = &vdso32_end - &vdso32_start;
	} else {
		vdso_spec = &vdso64_spec;
		vdso_size = &vdso64_end - &vdso64_start;
	}

	mappings_size = vdso_size + vvar_size;
	mappings_size += (VDSO_ALIGNMENT - 1) & PAGE_MASK;
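	/*
	 * (VDSO_ALIGNMENT - 1) & PAGE_MASK is the worst-case slack, rounded
	 * down to whole pages: get_unmapped_area() below returns a
	 * page-aligned address, so only the above-page bits of the alignment
	 * can ever need padding before the ALIGN() further down.
	 */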

	/*
	 * Pick a base address for the vDSO in process space.
	 * Add enough to the size so that the result can be aligned.
	 */
	vdso_base = get_unmapped_area(NULL, 0, mappings_size, 0, 0);
	if (IS_ERR_VALUE(vdso_base))
		return vdso_base;

	/* Add required alignment. */
	vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT);
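
	/*
	 * Layout established by the two mappings below (vvar/data pages
	 * first, then the vDSO text):
	 *
	 *   vdso_base                     vdso_base + vvar_size
	 *   |--- vvar (vvar_size) ---|--- vDSO text (vdso_size) ---|
	 */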

	vma = vdso_install_vvar_mapping(mm, vdso_base);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/*
	 * Our vma flags don't have VM_WRITE, so by default the process isn't
	 * allowed to write to those pages.
	 * gdb can break that via the ptrace interface, and thus trigger COW
	 * on those pages, but it's then your responsibility never to do that
	 * on the "data" page of the vDSO, or you'll stop getting kernel
	 * updates and your nice userland gettimeofday will be totally dead.
	 * It's fine to use it for setting breakpoints in the vDSO code pages
	 * though.
	 */
	vma = _install_special_mapping(mm, vdso_base + vvar_size, vdso_size,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC, vdso_spec);
	if (IS_ERR(vma)) {
		do_munmap(mm, vdso_base, vvar_size, NULL);
		return PTR_ERR(vma);
	}

	/* Now that the mappings are in place, set the mm VDSO pointer. */
	mm->context.vdso = (void __user *)vdso_base + vvar_size;

	return 0;
}

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int rc;

	mm->context.vdso = NULL;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	rc = __arch_setup_additional_pages(bprm, uses_interp);

	mmap_write_unlock(mm);
	return rc;
}

#define VDSO_DO_FIXUPS(type, value, bits, sec) do {					\
	void *__start = (void *)VDSO##bits##_SYMBOL(&vdso##bits##_start, sec##_start);	\
	void *__end = (void *)VDSO##bits##_SYMBOL(&vdso##bits##_start, sec##_end);	\
											\
	do_##type##_fixups((value), __start, __end);					\
} while (0)
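
/*
 * For illustration (hand-expanded here, not generated code): the first
 * 64-bit invocation below, VDSO_DO_FIXUPS(feature, cur_cpu_spec->cpu_features,
 * 64, ftr_fixup), expands to roughly:
 *
 *	void *__start = (void *)VDSO64_SYMBOL(&vdso64_start, ftr_fixup_start);
 *	void *__end = (void *)VDSO64_SYMBOL(&vdso64_start, ftr_fixup_end);
 *	do_feature_fixups(cur_cpu_spec->cpu_features, __start, __end);
 */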

static void __init vdso_fixup_features(void)
{
#ifdef CONFIG_PPC64
	VDSO_DO_FIXUPS(feature, cur_cpu_spec->cpu_features, 64, ftr_fixup);
	VDSO_DO_FIXUPS(feature, cur_cpu_spec->mmu_features, 64, mmu_ftr_fixup);
	VDSO_DO_FIXUPS(feature, powerpc_firmware_features, 64, fw_ftr_fixup);
	VDSO_DO_FIXUPS(lwsync, cur_cpu_spec->cpu_features, 64, lwsync_fixup);
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_VDSO32
	VDSO_DO_FIXUPS(feature, cur_cpu_spec->cpu_features, 32, ftr_fixup);
	VDSO_DO_FIXUPS(feature, cur_cpu_spec->mmu_features, 32, mmu_ftr_fixup);
#ifdef CONFIG_PPC64
	VDSO_DO_FIXUPS(feature, powerpc_firmware_features, 32, fw_ftr_fixup);
#endif /* CONFIG_PPC64 */
	VDSO_DO_FIXUPS(lwsync, cur_cpu_spec->cpu_features, 32, lwsync_fixup);
#endif
}

/*
 * Called from vdso_init() to initialize the bitmap of available
 * syscalls in the vDSO data page.
 */
static void __init vdso_setup_syscall_map(void)
{
	unsigned int i;

	for (i = 0; i < NR_syscalls; i++) {
		if (sys_call_table[i] != (void *)&sys_ni_syscall)
			vdso_k_arch_data->syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f);
		if (IS_ENABLED(CONFIG_COMPAT) &&
		    compat_sys_call_table[i] != (void *)&sys_ni_syscall)
			vdso_k_arch_data->compat_syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f);
	}
}
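
/*
 * A minimal sketch of the consumer side, mirroring the big-endian bit
 * encoding above (32 syscalls per word, syscall 0 in the MSB). This is
 * illustrative only; syscall_is_mapped() is a hypothetical helper, not
 * part of this file:
 *
 *	static bool syscall_is_mapped(const u32 *map, unsigned int nr)
 *	{
 *		return map[nr >> 5] & (0x80000000UL >> (nr & 0x1f));
 *	}
 */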

#ifdef CONFIG_PPC64
int vdso_getcpu_init(void)
{
	unsigned long cpu, node, val;

	/*
	 * SPRG_VDSO contains the CPU in the bottom 16 bits and the NUMA node
	 * in the next 16 bits.  The VDSO uses this to implement getcpu().
	 */
	cpu = get_cpu();
	WARN_ON_ONCE(cpu > 0xffff);

	node = cpu_to_node(cpu);
	WARN_ON_ONCE(node > 0xffff);

	val = (cpu & 0xffff) | ((node & 0xffff) << 16);
	mtspr(SPRN_SPRG_VDSO_WRITE, val);
	get_paca()->sprg_vdso = val;

	put_cpu();

	return 0;
}
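
/*
 * A rough sketch of the reader side in the vDSO, in illustrative C (the
 * real implementation lives in the vDSO sources; SPRN_SPRG_VDSO_READ is
 * the user-readable counterpart of the register written above):
 *
 *	unsigned long val = mfspr(SPRN_SPRG_VDSO_READ);
 *	if (cpup)
 *		*cpup = val & 0xffff;
 *	if (nodep)
 *		*nodep = (val >> 16) & 0xffff;
 */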
/* We need to call this before SMP init */
early_initcall(vdso_getcpu_init);
#endif

static struct page ** __init vdso_setup_pages(void *start, void *end)
{
	int i;
	struct page **pagelist;
	int pages = (end - start) >> PAGE_SHIFT;

	pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
	if (!pagelist)
		panic("%s: Cannot allocate page list for VDSO", __func__);
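
	/*
	 * Note the pages + 1 above: kcalloc() zeroes the extra slot, so the
	 * list stays NULL-terminated, which the special-mapping fault
	 * handler relies on when walking it.
	 */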

	for (i = 0; i < pages; i++)
		pagelist[i] = virt_to_page(start + i * PAGE_SIZE);

	return pagelist;
}

static int __init vdso_init(void)
{
#ifdef CONFIG_PPC64
	vdso_k_arch_data->dcache_block_size = ppc64_caches.l1d.block_size;
	vdso_k_arch_data->icache_block_size = ppc64_caches.l1i.block_size;
	vdso_k_arch_data->dcache_log_block_size = ppc64_caches.l1d.log_block_size;
	vdso_k_arch_data->icache_log_block_size = ppc64_caches.l1i.log_block_size;
#endif /* CONFIG_PPC64 */

	vdso_setup_syscall_map();

	vdso_fixup_features();

	if (IS_ENABLED(CONFIG_VDSO32))
		vdso32_spec.pages = vdso_setup_pages(&vdso32_start, &vdso32_end);

	if (IS_ENABLED(CONFIG_PPC64))
		vdso64_spec.pages = vdso_setup_pages(&vdso64_start, &vdso64_end);

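	/* Order the page-list stores above before any subsequent readers. */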
	smp_wmb();

	return 0;
}
arch_initcall(vdso_init);