// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Richard Weinberger <richrd@nod.at>
 */

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/elf.h>
#include <linux/init.h>

static unsigned int __read_mostly vdso_enabled = 1;
unsigned long um_vdso_addr;
static struct page *um_vdso;

extern unsigned long task_size;
extern char vdso_start[], vdso_end[];

/* Copy the linked-in vDSO image into a freshly allocated page at boot. */
static int __init init_vdso(void)
{
	BUG_ON(vdso_end - vdso_start > PAGE_SIZE);

	um_vdso_addr = task_size - PAGE_SIZE;

	um_vdso = alloc_page(GFP_KERNEL);
	if (!um_vdso)
		goto oom;

	copy_page(page_address(um_vdso), vdso_start);

	return 0;

oom:
	printk(KERN_ERR "Cannot allocate vdso\n");
	vdso_enabled = 0;

	return -ENOMEM;
}
subsys_initcall(init_vdso);

/* Map the vDSO page into the address space of each new process. */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	static struct vm_special_mapping vdso_mapping = {
		.name = "[vdso]",
		.pages = &um_vdso,
	};

	if (!vdso_enabled)
		return 0;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	vma = _install_special_mapping(mm, um_vdso_addr, PAGE_SIZE,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);

	mmap_write_unlock(mm);

	return IS_ERR(vma) ? PTR_ERR(vma) : 0;
}