// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDSO implementations.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/time_namespace.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>

enum vdso_abi {
        VDSO_ABI_AA64,
        VDSO_ABI_AA32,
};

struct vdso_abi_info {
        const char *name;
        const char *vdso_code_start;
        const char *vdso_code_end;
        unsigned long vdso_pages;
        /* Data Mapping */
        struct vm_special_mapping *dm;
        /* Code Mapping */
        struct vm_special_mapping *cm;
};

static struct vdso_abi_info vdso_info[] __ro_after_init = {
        [VDSO_ABI_AA64] = {
                .name = "vdso",
                .vdso_code_start = vdso_start,
                .vdso_code_end = vdso_end,
        },
#ifdef CONFIG_COMPAT_VDSO
        [VDSO_ABI_AA32] = {
                .name = "vdso32",
                .vdso_code_start = vdso32_start,
                .vdso_code_end = vdso32_end,
        },
#endif /* CONFIG_COMPAT_VDSO */
};

/*
 * The vDSO data page.
 */
static union vdso_data_store vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;

static int vdso_mremap(const struct vm_special_mapping *sm,
                struct vm_area_struct *new_vma)
{
        current->mm->context.vdso = (void *)new_vma->vm_start;

        return 0;
}

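/*
 * Sanity-check the vDSO image for the given ABI and build the list of
 * struct pages backing its text, so that it can later be mapped into user
 * address spaces via the code-mapping descriptor.
 */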
static int __init __vdso_init(enum vdso_abi abi)
{
        int i;
        struct page **vdso_pagelist;
        unsigned long pfn;

        if (memcmp(vdso_info[abi].vdso_code_start, "\177ELF", 4)) {
                pr_err("vDSO is not a valid ELF object!\n");
                return -EINVAL;
        }

        vdso_info[abi].vdso_pages = (
                vdso_info[abi].vdso_code_end -
                vdso_info[abi].vdso_code_start) >>
                PAGE_SHIFT;

        vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages,
                                sizeof(struct page *),
                                GFP_KERNEL);
        if (vdso_pagelist == NULL)
                return -ENOMEM;

        /* Grab the vDSO code pages. */
        pfn = sym_to_pfn(vdso_info[abi].vdso_code_start);

        for (i = 0; i < vdso_info[abi].vdso_pages; i++)
                vdso_pagelist[i] = pfn_to_page(pfn + i);

        vdso_info[abi].cm->pages = vdso_pagelist;

        return 0;
}

#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
        return (struct vdso_data *)(vvar_page);
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a task
 * changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
        struct mm_struct *mm = task->mm;
        struct vm_area_struct *vma;
        VMA_ITERATOR(vmi, mm, 0);

        mmap_read_lock(mm);

        for_each_vma(vmi, vma) {
                if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA64].dm))
                        zap_vma_pages(vma);
#ifdef CONFIG_COMPAT_VDSO
                if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA32].dm))
                        zap_vma_pages(vma);
#endif
        }

        mmap_read_unlock(mm);
        return 0;
}
#endif

static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
                             struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *timens_page = find_timens_vvar_page(vma);
        unsigned long pfn;

        switch (vmf->pgoff) {
        case VVAR_DATA_PAGE_OFFSET:
                if (timens_page)
                        pfn = page_to_pfn(timens_page);
                else
                        pfn = sym_to_pfn(vdso_data);
                break;
#ifdef CONFIG_TIME_NS
        case VVAR_TIMENS_PAGE_OFFSET:
                /*
                 * If a task belongs to a time namespace then a namespace
                 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
                 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
                 * offset.
                 * See also the comment near timens_setup_vdso_data().
                 */
                if (!timens_page)
                        return VM_FAULT_SIGBUS;
                pfn = sym_to_pfn(vdso_data);
                break;
#endif /* CONFIG_TIME_NS */
        default:
                return VM_FAULT_SIGBUS;
        }

        return vmf_insert_pfn(vma, vmf->address, pfn);
}

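/*
 * Install the vDSO mappings in @mm: the read-only, PFN-mapped vvar data
 * pages first, immediately followed by the executable vDSO text. The text
 * mapping is tagged with VM_ARM64_BTI when system_supports_bti_kernel().
 */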
static int __setup_additional_pages(enum vdso_abi abi,
                                    struct mm_struct *mm,
                                    struct linux_binprm *bprm,
                                    int uses_interp)
{
        unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
        unsigned long gp_flags = 0;
        void *ret;

        BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

        vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
        /* Be sure to map the data page */
        vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;

        vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
        if (IS_ERR_VALUE(vdso_base)) {
                ret = ERR_PTR(vdso_base);
                goto up_fail;
        }

        ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE,
                                       VM_READ|VM_MAYREAD|VM_PFNMAP,
                                       vdso_info[abi].dm);
        if (IS_ERR(ret))
                goto up_fail;

        if (system_supports_bti_kernel())
                gp_flags = VM_ARM64_BTI;

        vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
        mm->context.vdso = (void *)vdso_base;
        ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
                                       VM_READ|VM_EXEC|gp_flags|
                                       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                       vdso_info[abi].cm);
        if (IS_ERR(ret))
                goto up_fail;

        return 0;

up_fail:
        mm->context.vdso = NULL;
        return PTR_ERR(ret);
}

#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
enum aarch32_map {
        AA32_MAP_VECTORS, /* kuser helpers */
        AA32_MAP_SIGPAGE,
        AA32_MAP_VVAR,
        AA32_MAP_VDSO,
};

static struct page *aarch32_vectors_page __ro_after_init;
static struct page *aarch32_sig_page __ro_after_init;

static int aarch32_sigpage_mremap(const struct vm_special_mapping *sm,
                                  struct vm_area_struct *new_vma)
{
        current->mm->context.sigpage = (void *)new_vma->vm_start;

        return 0;
}

static struct vm_special_mapping aarch32_vdso_maps[] = {
        [AA32_MAP_VECTORS] = {
                .name = "[vectors]", /* ABI */
                .pages = &aarch32_vectors_page,
        },
        [AA32_MAP_SIGPAGE] = {
                .name = "[sigpage]", /* ABI */
                .pages = &aarch32_sig_page,
                .mremap = aarch32_sigpage_mremap,
        },
        [AA32_MAP_VVAR] = {
                .name = "[vvar]",
                .fault = vvar_fault,
        },
        [AA32_MAP_VDSO] = {
                .name = "[vdso]",
                .mremap = vdso_mremap,
        },
};

static int aarch32_alloc_kuser_vdso_page(void)
{
        extern char __kuser_helper_start[], __kuser_helper_end[];
        int kuser_sz = __kuser_helper_end - __kuser_helper_start;
        unsigned long vdso_page;

        if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
                return 0;

        vdso_page = get_zeroed_page(GFP_KERNEL);
        if (!vdso_page)
                return -ENOMEM;

        memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
               kuser_sz);
        aarch32_vectors_page = virt_to_page((void *)vdso_page);
        return 0;
}

#define COMPAT_SIGPAGE_POISON_WORD 0xe7fddef1
static int aarch32_alloc_sigpage(void)
{
        extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
        int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
        __le32 poison = cpu_to_le32(COMPAT_SIGPAGE_POISON_WORD);
        void *sigpage;

        sigpage = (void *)__get_free_page(GFP_KERNEL);
        if (!sigpage)
                return -ENOMEM;

        memset32(sigpage, (__force u32)poison, PAGE_SIZE / sizeof(poison));
        memcpy(sigpage, __aarch32_sigret_code_start, sigret_sz);
        aarch32_sig_page = virt_to_page(sigpage);
        return 0;
}

static int __init __aarch32_alloc_vdso_pages(void)
{
        if (!IS_ENABLED(CONFIG_COMPAT_VDSO))
                return 0;

        vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
        vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];

        return __vdso_init(VDSO_ABI_AA32);
}

static int __init aarch32_alloc_vdso_pages(void)
{
        int ret;

        ret = __aarch32_alloc_vdso_pages();
        if (ret)
                return ret;

        ret = aarch32_alloc_sigpage();
        if (ret)
                return ret;

        return aarch32_alloc_kuser_vdso_page();
}
arch_initcall(aarch32_alloc_vdso_pages);

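/*
 * Map the kuser helper code at the fixed AARCH32_VECTORS_BASE address,
 * which AArch32 userspace expects to find behind the "[vectors]" mapping.
 */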
static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
        void *ret;

        if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
                return 0;

        /*
         * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
         * not safe to CoW the page containing the CPU exception vectors.
         */
        ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
                                       VM_READ | VM_EXEC |
                                       VM_MAYREAD | VM_MAYEXEC,
                                       &aarch32_vdso_maps[AA32_MAP_VECTORS]);

        return PTR_ERR_OR_ZERO(ret);
}

static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
        unsigned long addr;
        void *ret;

        addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
        if (IS_ERR_VALUE(addr)) {
                ret = ERR_PTR(addr);
                goto out;
        }

        /*
         * VM_MAYWRITE is required to allow gdb to Copy-on-Write and
         * set breakpoints.
         */
        ret = _install_special_mapping(mm, addr, PAGE_SIZE,
                                       VM_READ | VM_EXEC | VM_MAYREAD |
                                       VM_MAYWRITE | VM_MAYEXEC,
                                       &aarch32_vdso_maps[AA32_MAP_SIGPAGE]);
        if (IS_ERR(ret))
                goto out;

        mm->context.sigpage = (void *)addr;

out:
        return PTR_ERR_OR_ZERO(ret);
}

int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        int ret;

        if (mmap_write_lock_killable(mm))
                return -EINTR;

        ret = aarch32_kuser_helpers_setup(mm);
        if (ret)
                goto out;

        if (IS_ENABLED(CONFIG_COMPAT_VDSO)) {
                ret = __setup_additional_pages(VDSO_ABI_AA32, mm, bprm,
                                               uses_interp);
                if (ret)
                        goto out;
        }

        ret = aarch32_sigreturn_setup(mm);
out:
        mmap_write_unlock(mm);
        return ret;
}
#endif /* CONFIG_COMPAT */

enum aarch64_map {
        AA64_MAP_VVAR,
        AA64_MAP_VDSO,
};

static struct vm_special_mapping aarch64_vdso_maps[] __ro_after_init = {
        [AA64_MAP_VVAR] = {
                .name = "[vvar]",
                .fault = vvar_fault,
        },
        [AA64_MAP_VDSO] = {
                .name = "[vdso]",
                .mremap = vdso_mremap,
        },
};

static int __init vdso_init(void)
{
        vdso_info[VDSO_ABI_AA64].dm = &aarch64_vdso_maps[AA64_MAP_VVAR];
        vdso_info[VDSO_ABI_AA64].cm = &aarch64_vdso_maps[AA64_MAP_VDSO];

        return __vdso_init(VDSO_ABI_AA64);
}
arch_initcall(vdso_init);

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        int ret;

        if (mmap_write_lock_killable(mm))
                return -EINTR;

        ret = __setup_additional_pages(VDSO_ABI_AA64, mm, bprm, uses_interp);
        mmap_write_unlock(mm);

        return ret;
}
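/*
 * Illustrative note (not kernel logic): the ELF loader advertises the
 * "[vdso]" mapping installed above to userspace through the AT_SYSINFO_EHDR
 * auxiliary vector entry, so a process can locate it with, for example:
 *
 *	#include <sys/auxv.h>
 *	void *vdso = (void *)getauxval(AT_SYSINFO_EHDR);
 *
 * The C library typically resolves symbols from that ELF image so that
 * calls such as clock_gettime() can read the "[vvar]" data pages without
 * entering the kernel.
 */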