/*
 * vdso setup for s390
 *
 *  Copyright IBM Corp. 2008
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/bootmem.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/facility.h>

#ifdef CONFIG_COMPAT
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;
#endif

extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;

static int vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page **vdso_pagelist;
	unsigned long vdso_pages;

	vdso_pagelist = vdso64_pagelist;
	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		vdso_pagelist = vdso32_pagelist;
		vdso_pages = vdso32_pages;
	}
#endif

	if (vmf->pgoff >= vdso_pages)
		return VM_FAULT_SIGBUS;

	vmf->page = vdso_pagelist[vmf->pgoff];
	get_page(vmf->page);
	return 0;
}

static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	unsigned long vdso_pages;

	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
	if (is_compat_task())
		vdso_pages = vdso32_pages;
#endif

	if ((vdso_pages << PAGE_SHIFT) != vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (WARN_ON_ONCE(current->mm != vma->vm_mm))
		return -EFAULT;

	current->mm->context.vdso_base = vma->vm_start;
	return 0;
}

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};
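/*
 * Parse the "vdso=" kernel command line parameter: "vdso=on" or any
 * non-zero numeric value enables the vDSO (the default), "vdso=off"
 * or "vdso=0" disables it.
 */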
static int __init vdso_setup(char *s)
{
	unsigned long val;
	int rc;

	rc = 0;
	if (strncmp(s, "on", 3) == 0)
		vdso_enabled = 1;
	else if (strncmp(s, "off", 4) == 0)
		vdso_enabled = 0;
	else {
		rc = kstrtoul(s, 0, &val);
		vdso_enabled = rc ? 0 : !!val;
	}
	return !rc;
}
__setup("vdso=", vdso_setup);

/*
 * The vdso data page
 */
static union {
	struct vdso_data data;
	u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

/*
 * Setup vdso data page.
 */
static void __init vdso_init_data(struct vdso_data *vd)
{
	vd->ectg_available = test_facility(31);
}

/*
 * Allocate/free per cpu vdso data.
 */
#define SEGMENT_ORDER	2

/*
 * The initial vdso_data structure for the boot CPU. Eventually
 * it is replaced with a properly allocated structure in vdso_init.
 * This is necessary because a valid S390_lowcore.vdso_per_cpu_data
 * pointer is required to be able to return from an interrupt or
 * program check. See the exit paths in entry.S.
 */
struct vdso_data boot_vdso_data __initdata;

void __init vdso_alloc_boot_cpu(struct lowcore *lowcore)
{
	lowcore->vdso_per_cpu_data = (unsigned long) &boot_vdso_data;
}

int vdso_alloc_per_cpu(struct lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	struct vdso_per_cpu_data *vd;
	u32 *psal, *aste;
	int i;

	lowcore->vdso_per_cpu_data = __LC_PASTE;

	if (!vdso_enabled)
		return 0;

	segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
	page_table = get_zeroed_page(GFP_KERNEL | GFP_DMA);
	page_frame = get_zeroed_page(GFP_KERNEL);
	if (!segment_table || !page_table || !page_frame)
		goto out;
	arch_set_page_dat(virt_to_page(segment_table), SEGMENT_ORDER);
	arch_set_page_dat(virt_to_page(page_table), 0);

	/* Initialize per-cpu vdso data page */
	vd = (struct vdso_per_cpu_data *) page_frame;
	vd->cpu_nr = lowcore->cpu_nr;
	vd->node_id = cpu_to_node(vd->cpu_nr);

	/* Set up access register mode page table */
	memset64((u64 *)segment_table, _SEGMENT_ENTRY_EMPTY, _CRST_ENTRIES);
	memset64((u64 *)page_table, _PAGE_INVALID, PTRS_PER_PTE);

	/* Map the data page read-only at address 0 of the ar-mode space */
	*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
	*(unsigned long *) page_table = _PAGE_PROTECT + page_frame;

	/*
	 * The access list and the ASTE are placed in the otherwise
	 * unused upper half of the page table page.
	 */
	psal = (u32 *) (page_table + 256*sizeof(unsigned long));
	aste = psal + 32;

	/* Mark access-list entries 1-7 as invalid */
	for (i = 4; i < 32; i += 4)
		psal[i] = 0x80000000;

	lowcore->paste[4] = (u32)(addr_t) psal;
	psal[0] = 0x02000000;	/* ALE 0: valid, fetch-only */
	psal[2] = (u32)(addr_t) aste;
	*(unsigned long *) (aste + 2) = segment_table +
		_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
	aste[4] = (u32)(addr_t) psal;
	lowcore->vdso_per_cpu_data = page_frame;

	return 0;

out:
	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
	return -ENOMEM;
}

void vdso_free_per_cpu(struct lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	u32 *psal, *aste;

	if (!vdso_enabled)
		return;

	psal = (u32 *)(addr_t) lowcore->paste[4];
	aste = (u32 *)(addr_t) psal[2];
	segment_table = *(unsigned long *)(aste + 2) & PAGE_MASK;
	page_table = *(unsigned long *) segment_table;
	page_frame = *(unsigned long *) page_table;

	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
}

static void vdso_init_cr5(void)
{
	unsigned long cr5;

	if (!vdso_enabled)
		return;
	/* Point %cr5 at the paste array in the lowcore */
	cr5 = offsetof(struct lowcore, paste);
	__ctl_load(cr5, 5, 5);
}

/*
 * This is called from binfmt_elf; we create the special vma for the
 * vDSO and insert it into the mm struct tree.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long vdso_pages;
	unsigned long vdso_base;
	int rc;

	if (!vdso_enabled)
		return 0;
	/*
	 * Only map the vdso for dynamically linked elf binaries.
	 */
	if (!uses_interp)
		return 0;

	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
	if (is_compat_task())
		vdso_pages = vdso32_pages;
#endif
	/*
	 * vDSO has a problem and was disabled, just don't "enable" it for
	 * the process
	 */
	if (vdso_pages == 0)
		return 0;

	/*
	 * pick a base address for the vDSO in process space; any free
	 * area returned by get_unmapped_area will do.
	 */
	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;
	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		rc = vdso_base;
		goto out_up;
	}

	/*
	 * our vma flags don't have VM_WRITE so by default, the process
	 * isn't allowed to write those pages.
	 * gdb can break that with the ptrace interface, and thus trigger
	 * COW on those pages, but it's then your responsibility to never
	 * do that on the "data" page of the vDSO or you'll stop getting
	 * kernel updates and your nice userland gettimeofday will be
	 * totally dead. It's fine to use that for setting breakpoints in
	 * the vDSO code pages though.
	 */
	vma = _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);
	if (IS_ERR(vma)) {
		rc = PTR_ERR(vma);
		goto out_up;
	}

	current->mm->context.vdso_base = vdso_base;
	rc = 0;

out_up:
	up_write(&mm->mmap_sem);
	return rc;
}
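/*
 * Note: the vdso_base recorded in mm->context above is what ends up
 * in the AT_SYSINFO_EHDR auxiliary vector entry (see ARCH_DLINFO in
 * asm/elf.h); that entry is how the dynamic linker finds the vDSO.
 */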
static int __init vdso_init(void)
{
	int i;

	if (!vdso_enabled)
		return 0;
	vdso_init_data(vdso_data);
#ifdef CONFIG_COMPAT
	/*
	 * Calculate the size of the 32 bit vDSO, plus one page for
	 * the shared vdso data page.
	 */
	vdso32_pages = ((&vdso32_end - &vdso32_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 1),
				  GFP_KERNEL);
	BUG_ON(vdso32_pagelist == NULL);
	for (i = 0; i < vdso32_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
		vdso32_pagelist[i] = pg;
	}
	vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data);
	vdso32_pagelist[vdso32_pages] = NULL;
#endif

	/*
	 * Calculate the size of the 64 bit vDSO, plus one page for
	 * the shared vdso data page.
	 */
	vdso64_pages = ((&vdso64_end - &vdso64_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 1),
				  GFP_KERNEL);
	BUG_ON(vdso64_pagelist == NULL);
	for (i = 0; i < vdso64_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
		vdso64_pagelist[i] = pg;
	}
	vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
	vdso64_pagelist[vdso64_pages] = NULL;
	if (vdso_alloc_per_cpu(&S390_lowcore))
		BUG();
	vdso_init_cr5();

	get_page(virt_to_page(vdso_data));

	return 0;
}
early_initcall(vdso_init);
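/*
 * Note: as an early_initcall, vdso_init runs well before the first
 * user space process is exec'ed, so the page lists above are in
 * place by the time arch_setup_additional_pages is first called.
 */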