// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *                    <benh@kernel.crashing.org>
 * Copyright (C) 2012 ARM Limited
 * Copyright (C) 2015 Regents of the University of California
 */

#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/binfmts.h>
#include <linux/err.h>
#include <asm/page.h>
#include <asm/vdso.h>
#include <linux/time_namespace.h>

#ifdef CONFIG_GENERIC_TIME_VSYSCALL
#include <vdso/datapage.h>
#else
struct vdso_data {
};
#endif

enum vvar_pages {
        VVAR_DATA_PAGE_OFFSET,
        VVAR_TIMENS_PAGE_OFFSET,
        VVAR_NR_PAGES,
};

enum rv_vdso_map {
        RV_VDSO_MAP_VVAR,
        RV_VDSO_MAP_VDSO,
};

#define VVAR_SIZE  (VVAR_NR_PAGES << PAGE_SHIFT)

/*
 * The vDSO data page.
 */
static union {
        struct vdso_data data;
        u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

struct __vdso_info {
        const char *name;
        const char *vdso_code_start;
        const char *vdso_code_end;
        unsigned long vdso_pages;
        /* Data Mapping */
        struct vm_special_mapping *dm;
        /* Code Mapping */
        struct vm_special_mapping *cm;
};

static struct __vdso_info vdso_info;
#ifdef CONFIG_COMPAT
static struct __vdso_info compat_vdso_info;
#endif

/* Keep the cached vDSO base in sync when userspace mremap()s the mapping. */
static int vdso_mremap(const struct vm_special_mapping *sm,
                       struct vm_area_struct *new_vma)
{
        current->mm->context.vdso = (void *)new_vma->vm_start;

        return 0;
}

/* Validate the vDSO image and collect the pages backing its code mapping. */
static void __init __vdso_init(struct __vdso_info *vdso_info)
{
        unsigned int i;
        struct page **vdso_pagelist;
        unsigned long pfn;

        if (memcmp(vdso_info->vdso_code_start, "\177ELF", 4))
                panic("vDSO is not a valid ELF object!\n");

        vdso_info->vdso_pages = (
                vdso_info->vdso_code_end -
                vdso_info->vdso_code_start) >>
                PAGE_SHIFT;

        vdso_pagelist = kcalloc(vdso_info->vdso_pages,
                                sizeof(struct page *),
                                GFP_KERNEL);
        if (vdso_pagelist == NULL)
                panic("vDSO kcalloc failed!\n");

        /* Grab the vDSO code pages. */
        pfn = sym_to_pfn(vdso_info->vdso_code_start);

        for (i = 0; i < vdso_info->vdso_pages; i++)
                vdso_pagelist[i] = pfn_to_page(pfn + i);

        vdso_info->cm->pages = vdso_pagelist;
}

#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
        return (struct vdso_data *)(vvar_page);
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a task
 * changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
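 *
 * The re-faults are served by vvar_fault() below: for a task inside a time
 * namespace the namespace page is installed at VVAR_DATA_PAGE_OFFSET and the
 * real vdso_data page at VVAR_TIMENS_PAGE_OFFSET.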
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
        struct mm_struct *mm = task->mm;
        struct vm_area_struct *vma;
        VMA_ITERATOR(vmi, mm, 0);

        mmap_read_lock(mm);

        for_each_vma(vmi, vma) {
                if (vma_is_special_mapping(vma, vdso_info.dm))
                        zap_vma_pages(vma);
#ifdef CONFIG_COMPAT
                if (vma_is_special_mapping(vma, compat_vdso_info.dm))
                        zap_vma_pages(vma);
#endif
        }

        mmap_read_unlock(mm);
        return 0;
}
#endif

/*
 * Fault handler for the [vvar] mapping: install the data page that belongs
 * at the faulting offset, honouring the time-namespace page swap described
 * above.
 */
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
                             struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *timens_page = find_timens_vvar_page(vma);
        unsigned long pfn;

        switch (vmf->pgoff) {
        case VVAR_DATA_PAGE_OFFSET:
                if (timens_page)
                        pfn = page_to_pfn(timens_page);
                else
                        pfn = sym_to_pfn(vdso_data);
                break;
#ifdef CONFIG_TIME_NS
        case VVAR_TIMENS_PAGE_OFFSET:
                /*
                 * If a task belongs to a time namespace then a namespace
                 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
                 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
                 * offset.
                 * See also the comment near timens_setup_vdso_data().
                 */
                if (!timens_page)
                        return VM_FAULT_SIGBUS;
                pfn = sym_to_pfn(vdso_data);
                break;
#endif /* CONFIG_TIME_NS */
        default:
                return VM_FAULT_SIGBUS;
        }

        return vmf_insert_pfn(vma, vmf->address, pfn);
}

static struct vm_special_mapping rv_vdso_maps[] __ro_after_init = {
        [RV_VDSO_MAP_VVAR] = {
                .name = "[vvar]",
                .fault = vvar_fault,
        },
        [RV_VDSO_MAP_VDSO] = {
                .name = "[vdso]",
                .mremap = vdso_mremap,
        },
};

static struct __vdso_info vdso_info __ro_after_init = {
        .name = "vdso",
        .vdso_code_start = vdso_start,
        .vdso_code_end = vdso_end,
        .dm = &rv_vdso_maps[RV_VDSO_MAP_VVAR],
        .cm = &rv_vdso_maps[RV_VDSO_MAP_VDSO],
};

#ifdef CONFIG_COMPAT
static struct vm_special_mapping rv_compat_vdso_maps[] __ro_after_init = {
        [RV_VDSO_MAP_VVAR] = {
                .name = "[vvar]",
                .fault = vvar_fault,
        },
        [RV_VDSO_MAP_VDSO] = {
                .name = "[vdso]",
                .mremap = vdso_mremap,
        },
};

static struct __vdso_info compat_vdso_info __ro_after_init = {
        .name = "compat_vdso",
        .vdso_code_start = compat_vdso_start,
        .vdso_code_end = compat_vdso_end,
        .dm = &rv_compat_vdso_maps[RV_VDSO_MAP_VVAR],
        .cm = &rv_compat_vdso_maps[RV_VDSO_MAP_VDSO],
};
#endif

static int __init vdso_init(void)
{
        __vdso_init(&vdso_info);
#ifdef CONFIG_COMPAT
        __vdso_init(&compat_vdso_info);
#endif

        return 0;
}
arch_initcall(vdso_init);

/*
 * Reserve one contiguous area and lay out the [vvar] pages followed by the
 * vDSO text inside it.
 */
static int __setup_additional_pages(struct mm_struct *mm,
                                    struct linux_binprm *bprm,
                                    int uses_interp,
                                    struct __vdso_info *vdso_info)
{
        unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
        void *ret;

        BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

        vdso_text_len = vdso_info->vdso_pages << PAGE_SHIFT;
        /* Be sure to map the data page */
        vdso_mapping_len = vdso_text_len + VVAR_SIZE;

        vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
        if (IS_ERR_VALUE(vdso_base)) {
                ret = ERR_PTR(vdso_base);
                goto up_fail;
        }

        ret = _install_special_mapping(mm, vdso_base, VVAR_SIZE,
                (VM_READ | VM_MAYREAD | VM_PFNMAP), vdso_info->dm);
        if (IS_ERR(ret))
                goto up_fail;

        vdso_base += VVAR_SIZE;
        mm->context.vdso = (void *)vdso_base;
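
        /*
         * Map the vDSO text read-only and executable. VM_MAYWRITE is kept
         * so that ptrace (e.g. gdb setting breakpoints) can still write to
         * the text through a COW mapping.
         */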
        ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
                (VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
                vdso_info->cm);

        if (IS_ERR(ret))
                goto up_fail;

        return 0;

up_fail:
        mm->context.vdso = NULL;
        return PTR_ERR(ret);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
                                       int uses_interp)
{
        struct mm_struct *mm = current->mm;
        int ret;

        if (mmap_write_lock_killable(mm))
                return -EINTR;

        ret = __setup_additional_pages(mm, bprm, uses_interp,
                                       &compat_vdso_info);
        mmap_write_unlock(mm);

        return ret;
}
#endif

/* Map the vDSO into the address space of a new process image. */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        int ret;

        if (mmap_write_lock_killable(mm))
                return -EINTR;

        ret = __setup_additional_pages(mm, bprm, uses_interp, &vdso_info);
        mmap_write_unlock(mm);

        return ret;
}