// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#include <linux/binfmts.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vdso_datastore.h>

#include <asm/page.h>
#include <asm/vdso.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>
#include <vdso/datapage.h>
#include <generated/vdso-offsets.h>

/* Start/end of the vDSO image linked into the kernel. */
extern char vdso_start[], vdso_end[];

/*
 * Keep mm->context.vdso in sync when userspace relocates the vDSO code
 * mapping with mremap(); consumers of that pointer (e.g. whatever uses
 * offset_sigreturn below — the reader side is not in this file) rely
 * on it tracking the current mapping.
 */
static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
	current->mm->context.vdso = (void *)(new_vma->vm_start);

	return 0;
}

/* Describes the in-kernel vDSO image and its special code mapping. */
struct loongarch_vdso_info vdso_info = {
	.vdso = vdso_start,
	.code_mapping = {
		.name = "[vdso]",
		.mremap = vdso_mremap,
	},
	.offset_sigreturn = vdso_offset_sigreturn,
};

/*
 * One-time boot setup:
 *  - record each possible CPU's NUMA node in the shared vDSO arch data
 *    (presumably consumed by a userspace getcpu() fast path — the
 *    reader is elsewhere; confirm against the vDSO sources), and
 *  - build the struct page array backing the vDSO code mapping.
 *
 * Returns 0 on success, -ENOMEM if the page array cannot be allocated.
 */
static int __init init_vdso(void)
{
	unsigned long i, cpu, pfn;

	/* The image must be page aligned for pfn_to_page() below. */
	BUG_ON(!PAGE_ALIGNED(vdso_info.vdso));

	for_each_possible_cpu(cpu)
		vdso_k_arch_data->pdata[cpu].node = cpu_to_node(cpu);

	vdso_info.size = PAGE_ALIGN(vdso_end - vdso_start);
	vdso_info.code_mapping.pages =
		kcalloc(vdso_info.size / PAGE_SIZE, sizeof(struct page *), GFP_KERNEL);

	if (!vdso_info.code_mapping.pages)
		return -ENOMEM;

	/* The image lives in kernel memory; translate symbol -> pages. */
	pfn = __phys_to_pfn(__pa_symbol(vdso_info.vdso));
	for (i = 0; i < vdso_info.size / PAGE_SIZE; i++)
		vdso_info.code_mapping.pages[i] = pfn_to_page(pfn + i);

	return 0;
}
subsys_initcall(init_vdso);

/*
 * Pick the base hint for the vDSO area: STACK_TOP, plus up to
 * VDSO_RANDOMIZE_SIZE bytes rounded up to a page boundary when the
 * task has ASLR enabled (PF_RANDOMIZE).
 */
static unsigned long vdso_base(void)
{
	unsigned long base = STACK_TOP;

	if (current->flags & PF_RANDOMIZE) {
		base += get_random_u32_below(VDSO_RANDOMIZE_SIZE);
		base = PAGE_ALIGN(base);
	}

	return base;
}

/*
 * Called at exec time to map the vDSO into the new process:
 * the vvar data pages first, immediately followed by the code pages.
 *
 * Layout: [data_addr, data_addr + VVAR_SIZE)  vvar data pages
 *         [vdso_addr, vdso_addr + info->size) vDSO code
 *
 * Returns 0 on success or a negative errno (-EINTR if the mmap write
 * lock acquisition is interrupted by a fatal signal, or the failure
 * from get_unmapped_area()/the mapping installers).
 */
int
arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	int ret;
	unsigned long size, data_addr, vdso_addr;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct loongarch_vdso_info *info = current->thread.vdso;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	/*
	 * Determine total area size: the vvar data pages plus the vDSO
	 * code pages, reserved as one contiguous range.
	 */
	size = VVAR_SIZE + info->size;

	data_addr = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
	if (IS_ERR_VALUE(data_addr)) {
		ret = data_addr;
		goto out;
	}

	/* Data pages go through the generic vDSO data store. */
	vma = vdso_install_vvar_mapping(mm, data_addr);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/*
	 * Code pages: read + exec. NOTE(review): VM_MAYWRITE presumably
	 * permits ptrace/debugger breakpoint insertion via CoW — confirm
	 * against other architectures' vDSO setup.
	 */
	vdso_addr = data_addr + VVAR_SIZE;
	vma = _install_special_mapping(mm, vdso_addr, info->size,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC |
				       VM_SEALED_SYSMAP,
				       &info->code_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	mm->context.vdso = (void *)vdso_addr;
	ret = 0;

out:
	mmap_write_unlock(mm);
	return ret;
}