// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/binfmts.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include <asm/page.h>
#include <vdso/datapage.h>

extern char vdso_start[], vdso_end[];

static unsigned int vdso_pages;
static struct page **vdso_pagelist;

static union vdso_data_store vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;

static int __init vdso_init(void)
{
	unsigned int i;

	vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
	vdso_pagelist =
		kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
	if (unlikely(vdso_pagelist == NULL)) {
		pr_err("vdso: pagelist allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < vdso_pages; i++) {
		struct page *pg;

		pg = virt_to_page(vdso_start + (i << PAGE_SHIFT));
		vdso_pagelist[i] = pg;
	}
	/* The extra slot at the end holds the shared vdso data page. */
	vdso_pagelist[i] = virt_to_page(vdso_data);

	return 0;
}
arch_initcall(vdso_init);

int arch_setup_additional_pages(struct linux_binprm *bprm,
	int uses_interp)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long vdso_base, vdso_len;
	int ret;
	static struct vm_special_mapping vdso_mapping = {
		.name = "[vdso]",
	};
	static struct vm_special_mapping vvar_mapping = {
		.name = "[vvar]",
	};

	/* vdso code pages plus one data page */
	vdso_len = (vdso_pages + 1) << PAGE_SHIFT;

	mmap_write_lock(mm);
	vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = vdso_base;
		goto end;
	}

	/*
	 * Put vDSO base into mm struct. We need to do this before calling
	 * install_special_mapping or the perf counter mmap tracking code
	 * will fail to recognise it as a vDSO (since arch_vma_name fails).
	 */
	mm->context.vdso = (void *)vdso_base;

	vdso_mapping.pages = vdso_pagelist;
	vma =
	   _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
		(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
		&vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		mm->context.vdso = NULL;
		goto end;
	}

	/* Map the vdso data page read-only, directly after the vdso code. */
	vdso_base += (vdso_pages << PAGE_SHIFT);
	vvar_mapping.pages = &vdso_pagelist[vdso_pages];
	vma = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
		(VM_READ | VM_MAYREAD), &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		mm->context.vdso = NULL;
		goto end;
	}
	ret = 0;
end:
	mmap_write_unlock(mm);
	return ret;
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
		return "[vdso]";
	if (vma->vm_mm && (vma->vm_start ==
			   (long)vma->vm_mm->context.vdso + PAGE_SIZE))
		return "[vdso_data]";
	return NULL;
}