// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *                    <benh@kernel.crashing.org>
 * Copyright (C) 2012 ARM Limited
 * Copyright (C) 2015 Regents of the University of California
 */

#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/binfmts.h>
#include <linux/err.h>
#include <asm/page.h>
#include <asm/vdso.h>
#include <linux/time_namespace.h>
#include <vdso/datapage.h>
#include <vdso/vsyscall.h>

enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};

enum rv_vdso_map {
	RV_VDSO_MAP_VVAR,
	RV_VDSO_MAP_VDSO,
};

#define VVAR_SIZE  (VVAR_NR_PAGES << PAGE_SHIFT)

static union vdso_data_store vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;

struct __vdso_info {
	const char *name;
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Data Mapping */
	struct vm_special_mapping *dm;
	/* Code Mapping */
	struct vm_special_mapping *cm;
};

static struct __vdso_info vdso_info;
#ifdef CONFIG_COMPAT
static struct __vdso_info compat_vdso_info;
#endif

static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *new_vma)
{
	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}

static void __init __vdso_init(struct __vdso_info *vdso_info)
{
	unsigned int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_info->vdso_code_start, "\177ELF", 4))
		panic("vDSO is not a valid ELF object!\n");

	vdso_info->vdso_pages = (
		vdso_info->vdso_code_end -
		vdso_info->vdso_code_start) >>
		PAGE_SHIFT;

	vdso_pagelist = kcalloc(vdso_info->vdso_pages,
				sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		panic("vDSO kcalloc failed!\n");

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_info->vdso_code_start);

	for (i = 0; i < vdso_info->vdso_pages; i++)
		vdso_pagelist[i] = pfn_to_page(pfn + i);

	vdso_info->cm->pages = vdso_pagelist;
}
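
/*
 * Note on the mapping scheme: each process gets two adjacent special
 * mappings, "[vvar]" immediately followed by "[vdso]".  The "[vdso]"
 * text pages come from the page list built in __vdso_init() above and
 * are mapped by the generic special-mapping fault handler (see
 * special_mapping_fault() in mm/mmap.c); the "[vvar]" mapping has no
 * page list and is instead faulted in on demand by vvar_fault() below.
 */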

#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a task
 * changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);

	for_each_vma(vmi, vma) {
		if (vma_is_special_mapping(vma, vdso_info.dm))
			zap_vma_pages(vma);
#ifdef CONFIG_COMPAT
		if (vma_is_special_mapping(vma, compat_vdso_info.dm))
			zap_vma_pages(vma);
#endif
	}

	mmap_read_unlock(mm);
	return 0;
}
#endif

static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long pfn;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		if (timens_page)
			pfn = page_to_pfn(timens_page);
		else
			pfn = sym_to_pfn(vdso_data);
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
		 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = sym_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

static struct vm_special_mapping rv_vdso_maps[] __ro_after_init = {
	[RV_VDSO_MAP_VVAR] = {
		.name = "[vvar]",
		.fault = vvar_fault,
	},
	[RV_VDSO_MAP_VDSO] = {
		.name = "[vdso]",
		.mremap = vdso_mremap,
	},
};

static struct __vdso_info vdso_info __ro_after_init = {
	.name = "vdso",
	.vdso_code_start = vdso_start,
	.vdso_code_end = vdso_end,
	.dm = &rv_vdso_maps[RV_VDSO_MAP_VVAR],
	.cm = &rv_vdso_maps[RV_VDSO_MAP_VDSO],
};

#ifdef CONFIG_COMPAT
static struct vm_special_mapping rv_compat_vdso_maps[] __ro_after_init = {
	[RV_VDSO_MAP_VVAR] = {
		.name = "[vvar]",
		.fault = vvar_fault,
	},
	[RV_VDSO_MAP_VDSO] = {
		.name = "[vdso]",
		.mremap = vdso_mremap,
	},
};

static struct __vdso_info compat_vdso_info __ro_after_init = {
	.name = "compat_vdso",
	.vdso_code_start = compat_vdso_start,
	.vdso_code_end = compat_vdso_end,
	.dm = &rv_compat_vdso_maps[RV_VDSO_MAP_VVAR],
	.cm = &rv_compat_vdso_maps[RV_VDSO_MAP_VDSO],
};
#endif

static int __init vdso_init(void)
{
	__vdso_init(&vdso_info);
#ifdef CONFIG_COMPAT
	__vdso_init(&compat_vdso_info);
#endif

	return 0;
}
arch_initcall(vdso_init);

static int __setup_additional_pages(struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp,
				    struct __vdso_info *vdso_info)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	void *ret;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

	vdso_text_len = vdso_info->vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data page */
	vdso_mapping_len = vdso_text_len + VVAR_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = _install_special_mapping(mm, vdso_base, VVAR_SIZE,
		(VM_READ | VM_MAYREAD | VM_PFNMAP), vdso_info->dm);
	if (IS_ERR(ret))
		goto up_fail;

	vdso_base += VVAR_SIZE;
	mm->context.vdso = (void *)vdso_base;
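	/*
	 * context.vdso now points at the start of the vDSO text, just past
	 * the vvar pages; this is the address exposed to userspace as the
	 * AT_SYSINFO_EHDR auxiliary vector entry (see ARCH_DLINFO).
	 */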

	ret =
	   _install_special_mapping(mm, vdso_base, vdso_text_len,
		(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
		vdso_info->cm);

	if (IS_ERR(ret))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(mm, bprm, uses_interp,
				       &compat_vdso_info);
	mmap_write_unlock(mm);

	return ret;
}
#endif

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(mm, bprm, uses_interp, &vdso_info);
	mmap_write_unlock(mm);

	return ret;
}
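
/*
 * For reference (illustrative only, not part of this file's interface):
 * userspace locates the [vdso] mapping through the AT_SYSINFO_EHDR auxv
 * entry set up during exec, e.g.
 *
 *	#include <sys/auxv.h>
 *	void *vdso = (void *)getauxval(AT_SYSINFO_EHDR);
 *
 * and the C library then resolves symbols such as __vdso_clock_gettime()
 * from the vDSO's ELF dynamic symbol table.
 */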