// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDSO implementations.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/time_namespace.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>

enum vdso_abi {
	VDSO_ABI_AA64,
	VDSO_ABI_AA32,
};

enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};

struct vdso_abi_info {
	const char *name;
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Data Mapping */
	struct vm_special_mapping *dm;
	/* Code Mapping */
	struct vm_special_mapping *cm;
};

static struct vdso_abi_info vdso_info[] __ro_after_init = {
	[VDSO_ABI_AA64] = {
		.name = "vdso",
		.vdso_code_start = vdso_start,
		.vdso_code_end = vdso_end,
	},
#ifdef CONFIG_COMPAT_VDSO
	[VDSO_ABI_AA32] = {
		.name = "vdso32",
		.vdso_code_start = vdso32_start,
		.vdso_code_end = vdso32_end,
	},
#endif /* CONFIG_COMPAT_VDSO */
};

/*
 * The vDSO data page.
 */
static union {
	struct vdso_data	data[CS_BASES];
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;

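/*
 * mremap() hook for the vDSO text mapping: when userspace (e.g. CRIU
 * during checkpoint/restore) moves the vDSO, record the new base so
 * that signal delivery and unwinding keep pointing at live code.
 */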
static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *new_vma)
{
	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}

static int __init __vdso_init(enum vdso_abi abi)
{
	int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_info[abi].vdso_code_start, "\177ELF", 4)) {
		pr_err("vDSO is not a valid ELF object!\n");
		return -EINVAL;
	}

	vdso_info[abi].vdso_pages = (
		vdso_info[abi].vdso_code_end -
		vdso_info[abi].vdso_code_start) >>
		PAGE_SHIFT;

	vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages,
				sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		return -ENOMEM;

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_info[abi].vdso_code_start);

	for (i = 0; i < vdso_info[abi].vdso_pages; i++)
		vdso_pagelist[i] = pfn_to_page(pfn + i);

	vdso_info[abi].cm->pages = vdso_pagelist;

	return 0;
}

#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a task
 * changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;

	mmap_read_lock(mm);

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long size = vma->vm_end - vma->vm_start;

		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA64].dm))
			zap_page_range(vma, vma->vm_start, size);
#ifdef CONFIG_COMPAT_VDSO
		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA32].dm))
			zap_page_range(vma, vma->vm_start, size);
#endif
	}

	mmap_read_unlock(mm);
	return 0;
}

static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	if (likely(vma->vm_mm == current->mm))
		return current->nsproxy->time_ns->vvar_page;

	/*
	 * VM_PFNMAP | VM_IO protect the .fault() handler from being called
	 * through interfaces like /proc/$pid/mem or
	 * process_vm_{readv,writev}() as long as there's no .access()
	 * in special_mapping_vmops.
	 * For more details see check_vma_flags() and __access_remote_vm().
	 */
	WARN(1, "vvar_page accessed remotely");

	return NULL;
}
#else
static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	return NULL;
}
#endif

static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long pfn;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		if (timens_page)
			pfn = page_to_pfn(timens_page);
		else
			pfn = sym_to_pfn(vdso_data);
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
		 * the real VVAR page is mapped with the
		 * VVAR_TIMENS_PAGE_OFFSET offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = sym_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

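/*
 * Layout of the mapping installed by __setup_additional_pages()
 * (VVAR_NR_PAGES == 2), relative to the start of the vvar area:
 *
 *	page 0:  vdso_data, or the per-namespace copy served by
 *	         vvar_fault() when the task is in a non-initial
 *	         time namespace
 *	page 1:  the real vdso_data in the timens case; SIGBUS otherwise
 *	page 2+: the vDSO code pages
 */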
static int __setup_additional_pages(enum vdso_abi abi,
				    struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	unsigned long gp_flags = 0;
	void *ret;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

	vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data page */
	vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE,
				       VM_READ|VM_MAYREAD|VM_PFNMAP,
				       vdso_info[abi].dm);
	if (IS_ERR(ret))
		goto up_fail;

	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti())
		gp_flags = VM_ARM64_BTI;

	vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
	mm->context.vdso = (void *)vdso_base;
	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
				       VM_READ|VM_EXEC|gp_flags|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_info[abi].cm);
	if (IS_ERR(ret))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}

#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
enum aarch32_map {
	AA32_MAP_VECTORS, /* kuser helpers */
	AA32_MAP_SIGPAGE,
	AA32_MAP_VVAR,
	AA32_MAP_VDSO,
};

static struct page *aarch32_vectors_page __ro_after_init;
static struct page *aarch32_sig_page __ro_after_init;

static int aarch32_sigpage_mremap(const struct vm_special_mapping *sm,
				  struct vm_area_struct *new_vma)
{
	current->mm->context.sigpage = (void *)new_vma->vm_start;

	return 0;
}

static struct vm_special_mapping aarch32_vdso_maps[] = {
	[AA32_MAP_VECTORS] = {
		.name = "[vectors]", /* ABI */
		.pages = &aarch32_vectors_page,
	},
	[AA32_MAP_SIGPAGE] = {
		.name = "[sigpage]", /* ABI */
		.pages = &aarch32_sig_page,
		.mremap = aarch32_sigpage_mremap,
	},
	[AA32_MAP_VVAR] = {
		.name = "[vvar]",
		.fault = vvar_fault,
	},
	[AA32_MAP_VDSO] = {
		.name = "[vdso]",
		.mremap = vdso_mremap,
	},
};

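/*
 * The kuser helpers are copied to the end of the page below so that,
 * once the page is mapped at AARCH32_VECTORS_BASE, each helper sits at
 * its ABI-mandated address just below the top of the page, matching
 * the layout used by arch/arm/.
 */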
static int aarch32_alloc_kuser_vdso_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
	unsigned long vdso_page;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	vdso_page = get_zeroed_page(GFP_KERNEL);
	if (!vdso_page)
		return -ENOMEM;

	memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
	       kuser_sz);
	aarch32_vectors_page = virt_to_page(vdso_page);
	return 0;
}

#define COMPAT_SIGPAGE_POISON_WORD	0xe7fddef1
static int aarch32_alloc_sigpage(void)
{
	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
	__le32 poison = cpu_to_le32(COMPAT_SIGPAGE_POISON_WORD);
	void *sigpage;

	sigpage = (void *)__get_free_page(GFP_KERNEL);
	if (!sigpage)
		return -ENOMEM;

	memset32(sigpage, (__force u32)poison, PAGE_SIZE / sizeof(poison));
	memcpy(sigpage, __aarch32_sigret_code_start, sigret_sz);
	aarch32_sig_page = virt_to_page(sigpage);
	return 0;
}

static int __init __aarch32_alloc_vdso_pages(void)
{
	if (!IS_ENABLED(CONFIG_COMPAT_VDSO))
		return 0;

	vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
	vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA32);
}

static int __init aarch32_alloc_vdso_pages(void)
{
	int ret;

	ret = __aarch32_alloc_vdso_pages();
	if (ret)
		return ret;

	ret = aarch32_alloc_sigpage();
	if (ret)
		return ret;

	return aarch32_alloc_kuser_vdso_page();
}
arch_initcall(aarch32_alloc_vdso_pages);

static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
	void *ret;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	/*
	 * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
	 * not safe to CoW the page containing the CPU exception vectors.
	 */
	ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_VECTORS]);

	return PTR_ERR_OR_ZERO(ret);
}

static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
	unsigned long addr;
	void *ret;

	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = ERR_PTR(addr);
		goto out;
	}

	/*
	 * VM_MAYWRITE is required to allow gdb to copy-on-write the
	 * page and set breakpoints.
	 */
	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_SIGPAGE]);
	if (IS_ERR(ret))
		goto out;

	mm->context.sigpage = (void *)addr;

out:
	return PTR_ERR_OR_ZERO(ret);
}

int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = aarch32_kuser_helpers_setup(mm);
	if (ret)
		goto out;

	if (IS_ENABLED(CONFIG_COMPAT_VDSO)) {
		ret = __setup_additional_pages(VDSO_ABI_AA32, mm, bprm,
					       uses_interp);
		if (ret)
			goto out;
	}

	ret = aarch32_sigreturn_setup(mm);
out:
	mmap_write_unlock(mm);
	return ret;
}
#endif /* CONFIG_COMPAT */

enum aarch64_map {
	AA64_MAP_VVAR,
	AA64_MAP_VDSO,
};

static struct vm_special_mapping aarch64_vdso_maps[] __ro_after_init = {
	[AA64_MAP_VVAR] = {
		.name = "[vvar]",
		.fault = vvar_fault,
	},
	[AA64_MAP_VDSO] = {
		.name = "[vdso]",
		.mremap = vdso_mremap,
	},
};

static int __init vdso_init(void)
{
	vdso_info[VDSO_ABI_AA64].dm = &aarch64_vdso_maps[AA64_MAP_VVAR];
	vdso_info[VDSO_ABI_AA64].cm = &aarch64_vdso_maps[AA64_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA64);
}
arch_initcall(vdso_init);

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(VDSO_ABI_AA64, mm, bprm, uses_interp);
	mmap_write_unlock(mm);

	return ret;
}
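
/*
 * Userspace note (illustrative sketch, not part of this file's code):
 * the vDSO text mapped above is advertised through the auxiliary
 * vector, so a C library can locate it with
 *
 *	void *vdso = (void *)getauxval(AT_SYSINFO_EHDR);
 *
 * and resolve entry points such as __kernel_clock_gettime() by parsing
 * the ELF image in place.
 */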