// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDSO implementations.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>

extern char vdso_start[], vdso_end[];
#ifdef CONFIG_COMPAT_VDSO
extern char vdso32_start[], vdso32_end[];
#endif /* CONFIG_COMPAT_VDSO */

/* vdso_lookup arch_index */
enum arch_vdso_type {
	ARM64_VDSO = 0,
#ifdef CONFIG_COMPAT_VDSO
	ARM64_VDSO32 = 1,
#endif /* CONFIG_COMPAT_VDSO */
};
#ifdef CONFIG_COMPAT_VDSO
#define VDSO_TYPES	(ARM64_VDSO32 + 1)
#else
#define VDSO_TYPES	(ARM64_VDSO + 1)
#endif /* CONFIG_COMPAT_VDSO */

struct __vdso_abi {
	const char *name;
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Data Mapping */
	struct vm_special_mapping *dm;
	/* Code Mapping */
	struct vm_special_mapping *cm;
};

static struct __vdso_abi vdso_lookup[VDSO_TYPES] __ro_after_init = {
	{
		.name = "vdso",
		.vdso_code_start = vdso_start,
		.vdso_code_end = vdso_end,
	},
#ifdef CONFIG_COMPAT_VDSO
	{
		.name = "vdso32",
		.vdso_code_start = vdso32_start,
		.vdso_code_end = vdso32_end,
	},
#endif /* CONFIG_COMPAT_VDSO */
};

/*
 * The vDSO data page.
 */
static union {
	struct vdso_data	data[CS_BASES];
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;

static int __vdso_remap(enum arch_vdso_type arch_index,
			const struct vm_special_mapping *sm,
			struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	unsigned long vdso_size = vdso_lookup[arch_index].vdso_code_end -
				  vdso_lookup[arch_index].vdso_code_start;

	if (vdso_size != new_size)
		return -EINVAL;

	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}

static int __vdso_init(enum arch_vdso_type arch_index)
{
	int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_lookup[arch_index].vdso_code_start, "\177ELF", 4)) {
		pr_err("vDSO is not a valid ELF object!\n");
		return -EINVAL;
	}

	vdso_lookup[arch_index].vdso_pages = (
		vdso_lookup[arch_index].vdso_code_end -
		vdso_lookup[arch_index].vdso_code_start) >>
		PAGE_SHIFT;

	/* Allocate the vDSO pagelist, plus a page for the data. */
	vdso_pagelist = kcalloc(vdso_lookup[arch_index].vdso_pages + 1,
				sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		return -ENOMEM;

	/* Grab the vDSO data page. */
	vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));

	/* Grab the vDSO code pages. */
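	/*
	 * The code pages are part of the kernel image and therefore
	 * physically contiguous: sym_to_pfn() on the start symbol gives
	 * the first frame and the rest follow sequentially.
	 */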
	pfn = sym_to_pfn(vdso_lookup[arch_index].vdso_code_start);

	for (i = 0; i < vdso_lookup[arch_index].vdso_pages; i++)
		vdso_pagelist[i + 1] = pfn_to_page(pfn + i);

	vdso_lookup[arch_index].dm->pages = &vdso_pagelist[0];
	vdso_lookup[arch_index].cm->pages = &vdso_pagelist[1];

	return 0;
}

static int __setup_additional_pages(enum arch_vdso_type arch_index,
				    struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	void *ret;

	vdso_text_len = vdso_lookup[arch_index].vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data page */
	vdso_mapping_len = vdso_text_len + PAGE_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
				       VM_READ|VM_MAYREAD,
				       vdso_lookup[arch_index].dm);
	if (IS_ERR(ret))
		goto up_fail;

	vdso_base += PAGE_SIZE;
	mm->context.vdso = (void *)vdso_base;
	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_lookup[arch_index].cm);
	if (IS_ERR(ret))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}

#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
#ifdef CONFIG_COMPAT_VDSO
static int aarch32_vdso_mremap(const struct vm_special_mapping *sm,
			       struct vm_area_struct *new_vma)
{
	return __vdso_remap(ARM64_VDSO32, sm, new_vma);
}
#endif /* CONFIG_COMPAT_VDSO */

/*
 * aarch32_vdso_pages:
 * 0 - kuser helpers
 * 1 - sigreturn code
 * or (CONFIG_COMPAT_VDSO):
 * 0 - kuser helpers
 * 1 - vdso data
 * 2 - vdso code
 */
#define C_VECTORS	0
#ifdef CONFIG_COMPAT_VDSO
#define C_VVAR		1
#define C_VDSO		2
#define C_PAGES		(C_VDSO + 1)
#else
#define C_SIGPAGE	1
#define C_PAGES		(C_SIGPAGE + 1)
#endif /* CONFIG_COMPAT_VDSO */

static struct page *aarch32_vectors_page __ro_after_init;
#ifndef CONFIG_COMPAT_VDSO
static struct page *aarch32_sig_page __ro_after_init;
#endif

static struct vm_special_mapping aarch32_vdso_spec[C_PAGES] = {
	{
		.name = "[vectors]", /* ABI */
		.pages = &aarch32_vectors_page,
	},
#ifdef CONFIG_COMPAT_VDSO
	{
		.name = "[vvar]",
	},
	{
		.name = "[vdso]",
		.mremap = aarch32_vdso_mremap,
	},
#else
	{
		.name = "[sigpage]", /* ABI */
		.pages = &aarch32_sig_page,
	},
#endif /* CONFIG_COMPAT_VDSO */
};

static int aarch32_alloc_kuser_vdso_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
	unsigned long vdso_page;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	vdso_page = get_zeroed_page(GFP_ATOMIC);
	if (!vdso_page)
		return -ENOMEM;

	memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
	       kuser_sz);
	aarch32_vectors_page = virt_to_page(vdso_page);
	flush_dcache_page(aarch32_vectors_page);
	return 0;
}

#ifdef CONFIG_COMPAT_VDSO
static int __aarch32_alloc_vdso_pages(void)
{
	int ret;

	vdso_lookup[ARM64_VDSO32].dm = &aarch32_vdso_spec[C_VVAR];
	vdso_lookup[ARM64_VDSO32].cm = &aarch32_vdso_spec[C_VDSO];
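	/*
	 * dm/cm now point at the AArch32 [vvar] and [vdso] special
	 * mappings, so the generic __vdso_init() below fills in their
	 * page arrays from the embedded vdso32 image.
	 */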

	ret = __vdso_init(ARM64_VDSO32);
	if (ret)
		return ret;

	return aarch32_alloc_kuser_vdso_page();
}
#else
static int __aarch32_alloc_vdso_pages(void)
{
	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
	unsigned long sigpage;
	int ret;

	sigpage = get_zeroed_page(GFP_ATOMIC);
	if (!sigpage)
		return -ENOMEM;

	memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
	aarch32_sig_page = virt_to_page(sigpage);
	flush_dcache_page(aarch32_sig_page);

	ret = aarch32_alloc_kuser_vdso_page();
	if (ret)
		free_page(sigpage);

	return ret;
}
#endif /* CONFIG_COMPAT_VDSO */

static int __init aarch32_alloc_vdso_pages(void)
{
	return __aarch32_alloc_vdso_pages();
}
arch_initcall(aarch32_alloc_vdso_pages);

static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
	void *ret;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	/*
	 * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
	 * not safe to CoW the page containing the CPU exception vectors.
	 */
	ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYEXEC,
				       &aarch32_vdso_spec[C_VECTORS]);

	return PTR_ERR_OR_ZERO(ret);
}

#ifndef CONFIG_COMPAT_VDSO
static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
	unsigned long addr;
	void *ret;

	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = ERR_PTR(addr);
		goto out;
	}

	/*
	 * VM_MAYWRITE is required to allow gdb to copy-on-write the
	 * page and set breakpoints.
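	 * Forced (FOLL_FORCE) ptrace writes are refused on a VMA that
	 * lacks VM_MAYWRITE.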
	 */
	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC,
				       &aarch32_vdso_spec[C_SIGPAGE]);
	if (IS_ERR(ret))
		goto out;

	mm->context.vdso = (void *)addr;

out:
	return PTR_ERR_OR_ZERO(ret);
}
#endif /* !CONFIG_COMPAT_VDSO */

int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	ret = aarch32_kuser_helpers_setup(mm);
	if (ret)
		goto out;

#ifdef CONFIG_COMPAT_VDSO
	ret = __setup_additional_pages(ARM64_VDSO32,
				       mm,
				       bprm,
				       uses_interp);
#else
	ret = aarch32_sigreturn_setup(mm);
#endif /* CONFIG_COMPAT_VDSO */

out:
	up_write(&mm->mmap_sem);
	return ret;
}
#endif /* CONFIG_COMPAT */

static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *new_vma)
{
	return __vdso_remap(ARM64_VDSO, sm, new_vma);
}

/*
 * aarch64_vdso_pages:
 * 0 - vvar
 * 1 - vdso
 */
#define A_VVAR	0
#define A_VDSO	1
#define A_PAGES	(A_VDSO + 1)
static struct vm_special_mapping vdso_spec[A_PAGES] __ro_after_init = {
	{
		.name = "[vvar]",
	},
	{
		.name = "[vdso]",
		.mremap = vdso_mremap,
	},
};

static int __init vdso_init(void)
{
	vdso_lookup[ARM64_VDSO].dm = &vdso_spec[A_VVAR];
	vdso_lookup[ARM64_VDSO].cm = &vdso_spec[A_VDSO];

	return __vdso_init(ARM64_VDSO);
}
arch_initcall(vdso_init);

int arch_setup_additional_pages(struct linux_binprm *bprm,
				int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	ret = __setup_additional_pages(ARM64_VDSO,
				       mm,
				       bprm,
				       uses_interp);

	up_write(&mm->mmap_sem);

	return ret;
}