// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/utsname.h>
#include <linux/coredump.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/sizes.h>
#include <linux/types.h>
#include <linux/cred.h>
#include <linux/dax.h>
#include <linux/uaccess.h>
#include <linux/rseq.h>
#include <asm/param.h>
#include <asm/page.h>

#ifndef ELF_COMPAT
#define ELF_COMPAT 0
#endif

#ifndef user_long_t
#define user_long_t long
#endif
#ifndef user_siginfo_t
#define user_siginfo_t siginfo_t
#endif

/* That's for binfmt_elf_fdpic to deal with */
#ifndef elf_check_fdpic
#define elf_check_fdpic(ex) false
#endif

static int load_elf_binary(struct linux_binprm *bprm);

#ifdef CONFIG_USELIB
static int load_elf_library(struct file *);
#else
#define load_elf_library NULL
#endif

/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#ifdef CONFIG_ELF_CORE
static int elf_core_dump(struct coredump_params *cprm);
#else
#define elf_core_dump NULL
#endif

#if ELF_EXEC_PAGESIZE > PAGE_SIZE
#define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
#else
#define ELF_MIN_ALIGN	PAGE_SIZE
#endif

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

#define ELF_PAGESTART(_v) ((_v) & ~(int)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))

static struct linux_binfmt elf_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_binary,
	.load_shlib	= load_elf_library,
#ifdef CONFIG_COREDUMP
	.core_dump	= elf_core_dump,
	.min_coredump	= ELF_EXEC_PAGESIZE,
#endif
};

#define BAD_ADDR(x) (unlikely((unsigned long)(x) >= TASK_SIZE))

/*
 * We need to explicitly zero any trailing portion of the page that follows
 * p_filesz when it ends before the page ends (e.g. bss), otherwise this
 * memory will contain the junk from the file that should not be present.
 */
static int padzero(unsigned long address)
{
	unsigned long nbyte;

	nbyte = ELF_PAGEOFFSET(address);
	if (nbyte) {
		nbyte = ELF_MIN_ALIGN - nbyte;
		if (clear_user((void __user *)address, nbyte))
			return -EFAULT;
	}
	return 0;
}

/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ \
	elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
	old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) (sp -= len)
#endif

#ifndef ELF_BASE_PLATFORM
/*
 * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture.
 * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value
 * will be copied to the user stack in the same manner as AT_PLATFORM.
 */
#define ELF_BASE_PLATFORM NULL
#endif
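/*
 * Illustrative sketch of the initial stack image that create_elf_tables()
 * below constructs (added for exposition; not part of the original
 * source).  For the common stack-grows-down case, lowest address first:
 *
 *   argc
 *   argv[0] .. argv[argc-1]   (userspace pointers into the string area)
 *   NULL
 *   envp[0] .. envp[envc-1]   (userspace pointers into the string area)
 *   NULL
 *   auxv[0] .. auxv[n]        (id/value pairs, terminated by AT_NULL)
 *   padding, AT_RANDOM bytes, platform strings, argv/envp strings
 *
 * STACK_ROUND() above guarantees the resulting stack pointer is
 * 16-byte aligned.
 */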
static int
create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
		unsigned long interp_load_addr,
		unsigned long e_entry, unsigned long phdr_addr)
{
	struct mm_struct *mm = current->mm;
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	elf_addr_t __user *u_base_platform;
	elf_addr_t __user *u_rand_bytes;
	const char *k_platform = ELF_PLATFORM;
	const char *k_base_platform = ELF_BASE_PLATFORM;
	unsigned char k_rand_bytes[16];
	int items;
	elf_addr_t *elf_info;
	elf_addr_t flags = 0;
	int ei_index;
	const struct cred *cred = current_cred();
	struct vm_area_struct *vma;

	/*
	 * In some cases (e.g. Hyper-Threading), we want to avoid L1
	 * evictions by the processes running on the same package. One
	 * thing we can do is to shuffle the initial stack for them.
	 */

	p = arch_align_stack(p);

	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (copy_to_user(u_platform, k_platform, len))
			return -EFAULT;
	}

	/*
	 * If this architecture has a "base" platform capability
	 * string, copy it to userspace.
	 */
	u_base_platform = NULL;
	if (k_base_platform) {
		size_t len = strlen(k_base_platform) + 1;

		u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (copy_to_user(u_base_platform, k_base_platform, len))
			return -EFAULT;
	}

	/*
	 * Generate 16 random bytes for userspace PRNG seeding.
	 */
	get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
	u_rand_bytes = (elf_addr_t __user *)
		       STACK_ALLOC(p, sizeof(k_rand_bytes));
	if (copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
		return -EFAULT;

	/* Create the ELF interpreter info */
	elf_info = (elf_addr_t *)mm->saved_auxv;
	/* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
#define NEW_AUX_ENT(id, val) \
	do { \
		*elf_info++ = id; \
		*elf_info++ = val; \
	} while (0)

#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
	 * ARCH_DLINFO changes
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, phdr_addr);
	NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	if (bprm->interp_flags & BINPRM_FLAGS_PRESERVE_ARGV0)
		flags |= AT_FLAGS_PRESERVE_ARGV0;
	NEW_AUX_ENT(AT_FLAGS, flags);
	NEW_AUX_ENT(AT_ENTRY, e_entry);
	NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid));
	NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid));
	NEW_AUX_ENT(AT_GID, from_kgid_munged(cred->user_ns, cred->gid));
	NEW_AUX_ENT(AT_EGID, from_kgid_munged(cred->user_ns, cred->egid));
	NEW_AUX_ENT(AT_SECURE, bprm->secureexec);
	NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
#ifdef ELF_HWCAP2
	NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2);
#endif
	NEW_AUX_ENT(AT_EXECFN, bprm->exec);
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM,
			    (elf_addr_t)(unsigned long)u_platform);
	}
	if (k_base_platform) {
		NEW_AUX_ENT(AT_BASE_PLATFORM,
			    (elf_addr_t)(unsigned long)u_base_platform);
	}
	if (bprm->have_execfd) {
		NEW_AUX_ENT(AT_EXECFD, bprm->execfd);
	}
#ifdef CONFIG_RSEQ
	NEW_AUX_ENT(AT_RSEQ_FEATURE_SIZE, offsetof(struct rseq, end));
	NEW_AUX_ENT(AT_RSEQ_ALIGN, __alignof__(struct rseq));
#endif
#undef NEW_AUX_ENT
	/* AT_NULL is zero; clear the rest too */
	memset(elf_info, 0, (char *)mm->saved_auxv +
			sizeof(mm->saved_auxv) - (char *)elf_info);

	/* And advance past the AT_NULL entry. */
	elf_info += 2;

	ei_index = elf_info - (elf_addr_t *)mm->saved_auxv;
	sp = STACK_ADD(p, ei_index);

	items = (argc + 1) + (envc + 1) + 1;
	bprm->p = STACK_ROUND(sp, items);

	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif

	/*
	 * Grow the stack manually; some architectures have a limit on how
	 * far ahead a user-space access may be in order to grow the stack.
	 */
	if (mmap_write_lock_killable(mm))
		return -EINTR;
	vma = find_extend_vma_locked(mm, bprm->p);
	mmap_write_unlock(mm);
	if (!vma)
		return -EFAULT;

	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	if (put_user(argc, sp++))
		return -EFAULT;

	/* Populate list of argv pointers back to argv strings. */
	p = mm->arg_end = mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		if (put_user((elf_addr_t)p, sp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (put_user(0, sp++))
		return -EFAULT;
	mm->arg_end = p;

	/* Populate list of envp pointers back to envp strings. */
	mm->env_end = mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		if (put_user((elf_addr_t)p, sp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (put_user(0, sp++))
		return -EFAULT;
	mm->env_end = p;

	/* Put the elf_info on the stack in the right place.  */
	if (copy_to_user(sp, mm->saved_auxv, ei_index * sizeof(elf_addr_t)))
		return -EFAULT;
	return 0;
}

/*
 * Map "eppnt->p_filesz" bytes from "filep" offset "eppnt->p_offset"
 * into memory at "addr". (Note that p_filesz is rounded up to the
 * next page, so any extra bytes from the file must be wiped.)
 */
static unsigned long elf_map(struct file *filep, unsigned long addr,
		const struct elf_phdr *eppnt, int prot, int type,
		unsigned long total_size)
{
	unsigned long map_addr;
	unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
	unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
	addr = ELF_PAGESTART(addr);
	size = ELF_PAGEALIGN(size);

	/* mmap() will return -EINVAL if given a zero size, but a
	 * segment with zero filesize is perfectly valid */
	if (!size)
		return addr;

	/*
	 * total_size is the size of the ELF (interpreter) image.
	 * The _first_ mmap needs to know the full size, otherwise
	 * randomization might put this image into an overlapping
	 * position with the ELF binary image. (since size < total_size)
	 * So we first map the 'big' image - and unmap the remainder at
	 * the end. (which unmap is needed for ELF images with holes.)
	 */
	if (total_size) {
		total_size = ELF_PAGEALIGN(total_size);
		map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
		if (!BAD_ADDR(map_addr))
			vm_munmap(map_addr+size, total_size-size);
	} else
		map_addr = vm_mmap(filep, addr, size, prot, type, off);

	if ((type & MAP_FIXED_NOREPLACE) &&
	    PTR_ERR((void *)map_addr) == -EEXIST)
		pr_info("%d (%s): Uhuuh, elf segment at %px requested but the memory is mapped already\n",
			task_pid_nr(current), current->comm, (void *)addr);

	return(map_addr);
}
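/*
 * Illustrative example of the arithmetic above (added for exposition;
 * not part of the original source).  With 4K pages, a segment with
 * p_vaddr = 0x401234, p_offset = 0x1234 and p_filesz = 0x100 gives
 * ELF_PAGEOFFSET(p_vaddr) = 0x234, so elf_map() maps
 * size = ELF_PAGEALIGN(0x100 + 0x234) = 0x1000 bytes from file offset
 * off = 0x1234 - 0x234 = 0x1000 at address ELF_PAGESTART(addr).
 * Shifting the start back by the in-page offset keeps the file offset
 * and the virtual address congruent modulo the page size, which
 * mmap() requires.
 */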
/*
 * Map "eppnt->p_filesz" bytes from "filep" offset "eppnt->p_offset"
 * into memory at "addr". Memory from "p_filesz" through "p_memsz"
 * rounded up to the next page is zeroed.
 */
static unsigned long elf_load(struct file *filep, unsigned long addr,
		const struct elf_phdr *eppnt, int prot, int type,
		unsigned long total_size)
{
	unsigned long zero_start, zero_end;
	unsigned long map_addr;

	if (eppnt->p_filesz) {
		map_addr = elf_map(filep, addr, eppnt, prot, type, total_size);
		if (BAD_ADDR(map_addr))
			return map_addr;
		if (eppnt->p_memsz > eppnt->p_filesz) {
			zero_start = map_addr + ELF_PAGEOFFSET(eppnt->p_vaddr) +
				eppnt->p_filesz;
			zero_end = map_addr + ELF_PAGEOFFSET(eppnt->p_vaddr) +
				eppnt->p_memsz;

			/*
			 * Zero the end of the last mapped page but ignore
			 * any errors if the segment isn't writable.
			 */
			if (padzero(zero_start) && (prot & PROT_WRITE))
				return -EFAULT;
		}
	} else {
		map_addr = zero_start = ELF_PAGESTART(addr);
		zero_end = zero_start + ELF_PAGEOFFSET(eppnt->p_vaddr) +
			eppnt->p_memsz;
	}
	if (eppnt->p_memsz > eppnt->p_filesz) {
		/*
		 * Map the last of the segment.
		 * If the header is requesting these pages to be
		 * executable, honour that (ppc32 needs this).
		 */
		int error;

		zero_start = ELF_PAGEALIGN(zero_start);
		zero_end = ELF_PAGEALIGN(zero_end);

		error = vm_brk_flags(zero_start, zero_end - zero_start,
				prot & PROT_EXEC ? VM_EXEC : 0);
		if (error)
			map_addr = error;
	}
	return map_addr;
}


static unsigned long total_mapping_size(const struct elf_phdr *phdr, int nr)
{
	elf_addr_t min_addr = -1;
	elf_addr_t max_addr = 0;
	bool pt_load = false;
	int i;

	for (i = 0; i < nr; i++) {
		if (phdr[i].p_type == PT_LOAD) {
			min_addr = min(min_addr, ELF_PAGESTART(phdr[i].p_vaddr));
			max_addr = max(max_addr, phdr[i].p_vaddr + phdr[i].p_memsz);
			pt_load = true;
		}
	}
	return pt_load ? (max_addr - min_addr) : 0;
}

static int elf_read(struct file *file, void *buf, size_t len, loff_t pos)
{
	ssize_t rv;

	rv = kernel_read(file, buf, len, &pos);
	if (unlikely(rv != len)) {
		return (rv < 0) ? rv : -EIO;
	}
	return 0;
}

static unsigned long maximum_alignment(struct elf_phdr *cmds, int nr)
{
	unsigned long alignment = 0;
	int i;

	for (i = 0; i < nr; i++) {
		if (cmds[i].p_type == PT_LOAD) {
			unsigned long p_align = cmds[i].p_align;

			/* skip non-power of two alignments as invalid */
			if (!is_power_of_2(p_align))
				continue;
			alignment = max(alignment, p_align);
		}
	}

	/* ensure we align to at least one page */
	return ELF_PAGEALIGN(alignment);
}
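/*
 * Illustrative example (added for exposition; not part of the original
 * source): for two PT_LOAD entries with p_vaddr/p_memsz of
 * 0x400000/0x1000 and 0x402000/0x500, total_mapping_size() above
 * returns 0x402500 - 0x400000 = 0x2500, i.e. the span from the lowest
 * page-aligned start to the highest end.  Reserving that whole span in
 * one mmap() keeps the randomized segments of an ET_DYN image from
 * landing on top of each other.
 */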
/**
 * load_elf_phdrs() - load ELF program headers
 * @elf_ex:   ELF header of the binary whose program headers should be loaded
 * @elf_file: the opened ELF binary file
 *
 * Loads ELF program headers from the binary file elf_file, which has the ELF
 * header pointed to by elf_ex, into a newly allocated array. The caller is
 * responsible for freeing the allocated data. Returns NULL upon failure.
 */
static struct elf_phdr *load_elf_phdrs(const struct elfhdr *elf_ex,
				       struct file *elf_file)
{
	struct elf_phdr *elf_phdata = NULL;
	int retval = -1;
	unsigned int size;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;

	/* Sanity check the number of program headers... */
	/* ...and their total size. */
	size = sizeof(struct elf_phdr) * elf_ex->e_phnum;
	if (size == 0 || size > 65536 || size > ELF_MIN_ALIGN)
		goto out;

	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	/* Read in the program headers */
	retval = elf_read(elf_file, elf_phdata, size, elf_ex->e_phoff);

out:
	if (retval) {
		kfree(elf_phdata);
		elf_phdata = NULL;
	}
	return elf_phdata;
}

#ifndef CONFIG_ARCH_BINFMT_ELF_STATE

/**
 * struct arch_elf_state - arch-specific ELF loading state
 *
 * This structure is used to preserve architecture specific data during
 * the loading of an ELF file, throughout the checking of architecture
 * specific ELF headers & through to the point where the ELF load is
 * known to be proceeding (ie. SET_PERSONALITY).
 *
 * This implementation is a dummy for architectures which require no
 * specific state.
 */
struct arch_elf_state {
};

#define INIT_ARCH_ELF_STATE {}

/**
 * arch_elf_pt_proc() - check a PT_LOPROC..PT_HIPROC ELF program header
 * @ehdr:	The main ELF header
 * @phdr:	The program header to check
 * @elf:	The open ELF file
 * @is_interp:	True if the phdr is from the interpreter of the ELF being
 *		loaded, else false.
 * @state:	Architecture-specific state preserved throughout the process
 *		of loading the ELF.
 *
 * Inspects the program header phdr to validate its correctness and/or
 * suitability for the system. Called once per ELF program header in the
 * range PT_LOPROC to PT_HIPROC, for both the ELF being loaded and its
 * interpreter.
 *
 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
 *         with that return code.
 */
static inline int arch_elf_pt_proc(struct elfhdr *ehdr,
				   struct elf_phdr *phdr,
				   struct file *elf, bool is_interp,
				   struct arch_elf_state *state)
{
	/* Dummy implementation, always proceed */
	return 0;
}

/**
 * arch_check_elf() - check an ELF executable
 * @ehdr:	The main ELF header
 * @has_interp:	True if the ELF has an interpreter, else false.
 * @interp_ehdr: The interpreter's ELF header
 * @state:	Architecture-specific state preserved throughout the process
 *		of loading the ELF.
 *
 * Provides a final opportunity for architecture code to reject the loading
 * of the ELF & cause an exec syscall to return an error. This is called after
 * all program headers to be checked by arch_elf_pt_proc have been.
 *
 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
 *         with that return code.
 */
static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
				 struct elfhdr *interp_ehdr,
				 struct arch_elf_state *state)
{
	/* Dummy implementation, always proceed */
	return 0;
}

#endif /* !CONFIG_ARCH_BINFMT_ELF_STATE */

static inline int make_prot(u32 p_flags, struct arch_elf_state *arch_state,
			    bool has_interp, bool is_interp)
{
	int prot = 0;

	if (p_flags & PF_R)
		prot |= PROT_READ;
	if (p_flags & PF_W)
		prot |= PROT_WRITE;
	if (p_flags & PF_X)
		prot |= PROT_EXEC;

	return arch_elf_adjust_prot(prot, arch_state, has_interp, is_interp);
}
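/*
 * Illustrative note (added for exposition; not part of the original
 * source): a typical text segment carries PF_R | PF_X and so maps to
 * PROT_READ | PROT_EXEC, while a data/bss segment with PF_R | PF_W maps
 * to PROT_READ | PROT_WRITE; arch_elf_adjust_prot() may then adjust the
 * result further, e.g. based on state gathered from PT_GNU_PROPERTY.
 */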
/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */

static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
		struct file *interpreter,
		unsigned long no_base, struct elf_phdr *interp_elf_phdata,
		struct arch_elf_state *arch_state)
{
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long error = ~0UL;
	unsigned long total_size;
	int i;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex) ||
	    elf_check_fdpic(interp_elf_ex))
		goto out;
	if (!interpreter->f_op->mmap)
		goto out;

	total_size = total_mapping_size(interp_elf_phdata,
					interp_elf_ex->e_phnum);
	if (!total_size) {
		error = -EINVAL;
		goto out;
	}

	eppnt = interp_elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE;
			int elf_prot = make_prot(eppnt->p_flags, arch_state,
						 true, true);
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			vaddr = eppnt->p_vaddr;
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED;
			else if (no_base && interp_elf_ex->e_type == ET_DYN)
				load_addr = -vaddr;

			map_addr = elf_load(interpreter, load_addr + vaddr,
					eppnt, elf_prot, elf_type, total_size);
			total_size = 0;
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out;

			if (!load_addr_set &&
			    interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsz so it's only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (BAD_ADDR(k) ||
			    eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE ||
			    TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out;
			}
		}
	}

	error = load_addr;
out:
	return error;
}
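/*
 * Illustrative example (added for exposition; not part of the original
 * source): if the interpreter's first PT_LOAD has p_vaddr 0 and the
 * kernel happens to map it at 0x7f0000000000, load_addr above becomes
 * that base, every later segment is placed at load_addr + p_vaddr with
 * MAP_FIXED, and the caller receives load_addr as the relocation bias
 * that is also reported to userspace via AT_BASE.
 */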
/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

static int parse_elf_property(const char *data, size_t *off, size_t datasz,
			      struct arch_elf_state *arch,
			      bool have_prev_type, u32 *prev_type)
{
	size_t o, step;
	const struct gnu_property *pr;
	int ret;

	if (*off == datasz)
		return -ENOENT;

	if (WARN_ON_ONCE(*off > datasz || *off % ELF_GNU_PROPERTY_ALIGN))
		return -EIO;
	o = *off;
	datasz -= *off;

	if (datasz < sizeof(*pr))
		return -ENOEXEC;
	pr = (const struct gnu_property *)(data + o);
	o += sizeof(*pr);
	datasz -= sizeof(*pr);

	if (pr->pr_datasz > datasz)
		return -ENOEXEC;

	WARN_ON_ONCE(o % ELF_GNU_PROPERTY_ALIGN);
	step = round_up(pr->pr_datasz, ELF_GNU_PROPERTY_ALIGN);
	if (step > datasz)
		return -ENOEXEC;

	/* Properties are supposed to be unique and sorted on pr_type: */
	if (have_prev_type && pr->pr_type <= *prev_type)
		return -ENOEXEC;
	*prev_type = pr->pr_type;

	ret = arch_parse_elf_property(pr->pr_type, data + o,
				      pr->pr_datasz, ELF_COMPAT, arch);
	if (ret)
		return ret;

	*off = o + step;
	return 0;
}

#define NOTE_DATA_SZ	SZ_1K
#define GNU_PROPERTY_TYPE_0_NAME "GNU"
#define NOTE_NAME_SZ	(sizeof(GNU_PROPERTY_TYPE_0_NAME))

static int parse_elf_properties(struct file *f, const struct elf_phdr *phdr,
				struct arch_elf_state *arch)
{
	union {
		struct elf_note nhdr;
		char data[NOTE_DATA_SZ];
	} note;
	loff_t pos;
	ssize_t n;
	size_t off, datasz;
	int ret;
	bool have_prev_type;
	u32 prev_type;

	if (!IS_ENABLED(CONFIG_ARCH_USE_GNU_PROPERTY) || !phdr)
		return 0;

	/* load_elf_binary() shouldn't call us unless this is true... */
	if (WARN_ON_ONCE(phdr->p_type != PT_GNU_PROPERTY))
		return -ENOEXEC;

	/* If the properties are crazy large, that's too bad (for now): */
	if (phdr->p_filesz > sizeof(note))
		return -ENOEXEC;

	pos = phdr->p_offset;
	n = kernel_read(f, &note, phdr->p_filesz, &pos);

	BUILD_BUG_ON(sizeof(note) < sizeof(note.nhdr) + NOTE_NAME_SZ);
	if (n < 0 || n < sizeof(note.nhdr) + NOTE_NAME_SZ)
		return -EIO;

	if (note.nhdr.n_type != NT_GNU_PROPERTY_TYPE_0 ||
	    note.nhdr.n_namesz != NOTE_NAME_SZ ||
	    strncmp(note.data + sizeof(note.nhdr),
		    GNU_PROPERTY_TYPE_0_NAME, n - sizeof(note.nhdr)))
		return -ENOEXEC;

	off = round_up(sizeof(note.nhdr) + NOTE_NAME_SZ,
		       ELF_GNU_PROPERTY_ALIGN);
	if (off > n)
		return -ENOEXEC;

	if (note.nhdr.n_descsz > n - off)
		return -ENOEXEC;
	datasz = off + note.nhdr.n_descsz;

	have_prev_type = false;
	do {
		ret = parse_elf_property(note.data, &off, datasz, arch,
					 have_prev_type, &prev_type);
		have_prev_type = true;
	} while (!ret);

	return ret == -ENOENT ? 0 : ret;
}
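/*
 * Illustrative sketch (added for exposition; not part of the original
 * source) of the NT_GNU_PROPERTY_TYPE_0 note that the two functions
 * above walk:
 *
 *   Elf_Nhdr { n_namesz = 4, n_descsz, n_type }
 *   "GNU\0"                          (padded to ELF_GNU_PROPERTY_ALIGN)
 *   { pr_type, pr_datasz, data... }  (one gnu_property, padded)
 *   { pr_type, pr_datasz, data... }  (next property, pr_type ascending)
 *   ...
 *
 * parse_elf_property() consumes one such entry per call and returns
 * -ENOENT once the descriptor is exhausted.
 */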
static int load_elf_binary(struct linux_binprm *bprm)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_bias = 0, phdr_addr = 0;
	int first_pt_load = 1;
	unsigned long error;
	struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
	struct elf_phdr *elf_property_phdata = NULL;
	unsigned long elf_brk;
	int retval, i;
	unsigned long elf_entry;
	unsigned long e_entry;
	unsigned long interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc __maybe_unused = 0;
	int executable_stack = EXSTACK_DEFAULT;
	struct elfhdr *elf_ex = (struct elfhdr *)bprm->buf;
	struct elfhdr *interp_elf_ex = NULL;
	struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
	struct mm_struct *mm;
	struct pt_regs *regs;

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (elf_ex->e_type != ET_EXEC && elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(elf_ex))
		goto out;
	if (elf_check_fdpic(elf_ex))
		goto out;
	if (!bprm->file->f_op->mmap)
		goto out;

	elf_phdata = load_elf_phdrs(elf_ex, bprm->file);
	if (!elf_phdata)
		goto out;

	elf_ppnt = elf_phdata;
	for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++) {
		char *elf_interpreter;

		if (elf_ppnt->p_type == PT_GNU_PROPERTY) {
			elf_property_phdata = elf_ppnt;
			continue;
		}

		if (elf_ppnt->p_type != PT_INTERP)
			continue;

		/*
		 * This is the program interpreter used for shared libraries -
		 * for now assume that this is an a.out format binary.
		 */
		retval = -ENOEXEC;
		if (elf_ppnt->p_filesz > PATH_MAX || elf_ppnt->p_filesz < 2)
			goto out_free_ph;

		retval = -ENOMEM;
		elf_interpreter = kmalloc(elf_ppnt->p_filesz, GFP_KERNEL);
		if (!elf_interpreter)
			goto out_free_ph;

		retval = elf_read(bprm->file, elf_interpreter, elf_ppnt->p_filesz,
				  elf_ppnt->p_offset);
		if (retval < 0)
			goto out_free_interp;
		/* make sure path is NULL terminated */
		retval = -ENOEXEC;
		if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
			goto out_free_interp;

		interpreter = open_exec(elf_interpreter);
		kfree(elf_interpreter);
		retval = PTR_ERR(interpreter);
		if (IS_ERR(interpreter))
			goto out_free_ph;

		/*
		 * If the binary is not readable then enforce mm->dumpable = 0
		 * regardless of the interpreter's permissions.
		 */
		would_dump(bprm, interpreter);

		interp_elf_ex = kmalloc(sizeof(*interp_elf_ex), GFP_KERNEL);
		if (!interp_elf_ex) {
			retval = -ENOMEM;
			goto out_free_file;
		}

		/* Get the exec headers */
		retval = elf_read(interpreter, interp_elf_ex,
				  sizeof(*interp_elf_ex), 0);
		if (retval < 0)
			goto out_free_dentry;

		break;

out_free_interp:
		kfree(elf_interpreter);
		goto out_free_ph;
	}

	elf_ppnt = elf_phdata;
	for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++)
		switch (elf_ppnt->p_type) {
		case PT_GNU_STACK:
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;
		case PT_LOPROC ... PT_HIPROC:
			retval = arch_elf_pt_proc(elf_ex, elf_ppnt,
						  bprm->file, false,
						  &arch_state);
			if (retval)
				goto out_free_dentry;
			break;
		}

	/* Some simple consistency checks for the interpreter */
	if (interpreter) {
		retval = -ELIBBAD;
		/* Not an ELF interpreter */
		if (memcmp(interp_elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
			goto out_free_dentry;
		/* Verify the interpreter has a valid arch */
		if (!elf_check_arch(interp_elf_ex) ||
		    elf_check_fdpic(interp_elf_ex))
			goto out_free_dentry;

		/* Load the interpreter program headers */
		interp_elf_phdata = load_elf_phdrs(interp_elf_ex,
						   interpreter);
		if (!interp_elf_phdata)
			goto out_free_dentry;

		/* Pass PT_LOPROC..PT_HIPROC headers to arch code */
		elf_property_phdata = NULL;
		elf_ppnt = interp_elf_phdata;
		for (i = 0; i < interp_elf_ex->e_phnum; i++, elf_ppnt++)
			switch (elf_ppnt->p_type) {
			case PT_GNU_PROPERTY:
				elf_property_phdata = elf_ppnt;
				break;

			case PT_LOPROC ... PT_HIPROC:
				retval = arch_elf_pt_proc(interp_elf_ex,
							  elf_ppnt, interpreter,
							  true, &arch_state);
				if (retval)
					goto out_free_dentry;
				break;
			}
	}

	retval = parse_elf_properties(interpreter ?: bprm->file,
				      elf_property_phdata, &arch_state);
	if (retval)
		goto out_free_dentry;

	/*
	 * Allow arch code to reject the ELF at this point, whilst it's
	 * still possible to return an error to the code that invoked
	 * the exec syscall.
	 */
	retval = arch_check_elf(elf_ex,
				!!interpreter, interp_elf_ex,
				&arch_state);
	if (retval)
		goto out_free_dentry;

	/* Flush all traces of the currently running executable */
	retval = begin_new_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY2(*elf_ex, &arch_state);
	if (elf_read_implies_exec(*elf_ex, executable_stack))
		current->personality |= READ_IMPLIES_EXEC;

	const int snapshot_randomize_va_space = READ_ONCE(randomize_va_space);
	if (!(current->personality & ADDR_NO_RANDOMIZE) && snapshot_randomize_va_space)
		current->flags |= PF_RANDOMIZE;

	setup_new_exec(bprm);

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0)
		goto out_free_dentry;

	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	/* Now we do a little grungy work by mmapping the ELF image into
	   the correct location in memory. */
	for (i = 0, elf_ppnt = elf_phdata;
	     i < elf_ex->e_phnum; i++, elf_ppnt++) {
		int elf_prot, elf_flags;
		unsigned long k, vaddr;
		unsigned long total_size = 0;
		unsigned long alignment;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		elf_prot = make_prot(elf_ppnt->p_flags, &arch_state,
				     !!interpreter, false);

		elf_flags = MAP_PRIVATE;

		vaddr = elf_ppnt->p_vaddr;
		/*
		 * The first time through the loop, first_pt_load is true:
		 * layout will be calculated. Once set, use MAP_FIXED since
		 * we know we've already safely mapped the entire region with
		 * MAP_FIXED_NOREPLACE in the once-per-binary logic following.
		 */
		if (!first_pt_load) {
			elf_flags |= MAP_FIXED;
		} else if (elf_ex->e_type == ET_EXEC) {
			/*
			 * This logic is run once for the first LOAD Program
			 * Header for ET_EXEC binaries. No special handling
			 * is needed.
			 */
			elf_flags |= MAP_FIXED_NOREPLACE;
		} else if (elf_ex->e_type == ET_DYN) {
			/*
			 * This logic is run once for the first LOAD Program
			 * Header for ET_DYN binaries to calculate the
			 * randomization (load_bias) for all the LOAD
			 * Program Headers.
			 */

			/*
			 * Calculate the entire size of the ELF mapping
			 * (total_size), used for the initial mapping,
			 * due to load_addr_set which is set to true later
			 * once the initial mapping is performed.
			 *
			 * Note that this is only sensible when the LOAD
			 * segments are contiguous (or overlapping). If
			 * used for LOADs that are far apart, this would
			 * cause the holes between LOADs to be mapped,
			 * running the risk of having the mapping fail,
			 * as it would be larger than the ELF file itself.
			 *
			 * As a result, only ET_DYN does this, since
			 * some ET_EXEC (e.g. ia64) may have large virtual
			 * memory holes between LOADs.
			 */
			total_size = total_mapping_size(elf_phdata,
							elf_ex->e_phnum);
			if (!total_size) {
				retval = -EINVAL;
				goto out_free_dentry;
			}

			/* Calculate any requested alignment. */
			alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);

			/*
			 * There are effectively two types of ET_DYN
			 * binaries: programs (i.e. PIE: ET_DYN with PT_INTERP)
			 * and loaders (ET_DYN without PT_INTERP, since they
			 * _are_ the ELF interpreter). The loaders must
			 * be loaded away from programs since the program
			 * may otherwise collide with the loader (especially
			 * for ET_EXEC which does not have a randomized
			 * position). For example, to handle invocations of
			 * "./ld.so someprog" to test out a new version of
			 * the loader, the subsequent program that the
			 * loader loads must avoid the loader itself, so
			 * they cannot share the same load range. Sufficient
			 * room for the brk must be allocated with the
			 * loader as well, since brk must be available with
			 * the loader.
			 *
			 * Therefore, programs are loaded offset from
			 * ELF_ET_DYN_BASE and loaders are loaded into the
			 * independently randomized mmap region (0 load_bias
			 * without MAP_FIXED nor MAP_FIXED_NOREPLACE).
			 */
			if (interpreter) {
				/* On ET_DYN with PT_INTERP, we do the ASLR. */
				load_bias = ELF_ET_DYN_BASE;
				if (current->flags & PF_RANDOMIZE)
					load_bias += arch_mmap_rnd();
				/* Adjust alignment as requested. */
				if (alignment)
					load_bias &= ~(alignment - 1);
				elf_flags |= MAP_FIXED_NOREPLACE;
			} else {
				/*
				 * For ET_DYN without PT_INTERP, we rely on
				 * the architecture's (potentially ASLR) mmap
				 * base address (via a load_bias of 0).
				 *
				 * When a large alignment is requested, we
				 * must do the allocation at address "0" right
				 * now to discover where things will load so
				 * that we can adjust the resulting alignment.
				 * In this case (load_bias != 0), we can use
				 * MAP_FIXED_NOREPLACE to make sure the mapping
				 * doesn't collide with anything.
				 */
				if (alignment > ELF_MIN_ALIGN) {
					load_bias = elf_load(bprm->file, 0, elf_ppnt,
							     elf_prot, elf_flags, total_size);
					if (BAD_ADDR(load_bias)) {
						retval = IS_ERR_VALUE(load_bias) ?
							 PTR_ERR((void*)load_bias) : -EINVAL;
						goto out_free_dentry;
					}
					vm_munmap(load_bias, total_size);
					/* Adjust alignment as requested. */
					if (alignment)
						load_bias &= ~(alignment - 1);
					elf_flags |= MAP_FIXED_NOREPLACE;
				} else
					load_bias = 0;
			}

			/*
			 * Since load_bias is used for all subsequent loading
			 * calculations, we must lower it by the first vaddr
			 * so that the remaining calculations based on the
			 * ELF vaddrs will be correctly offset. The result
			 * is then page aligned.
			 */
			load_bias = ELF_PAGESTART(load_bias - vaddr);
		}
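		/*
		 * Illustrative example (added for exposition; not part of
		 * the original source): for a PIE whose first PT_LOAD has
		 * p_vaddr 0, load_bias ends up as ELF_ET_DYN_BASE plus a
		 * random offset, and each segment below is mapped at
		 * load_bias + p_vaddr.  With a nonzero first p_vaddr,
		 * subtracting vaddr first keeps load_bias + p_vaddr equal
		 * to the chosen base.
		 */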
		error = elf_load(bprm->file, load_bias + vaddr, elf_ppnt,
				elf_prot, elf_flags, total_size);
		if (BAD_ADDR(error)) {
			retval = IS_ERR_VALUE(error) ?
				PTR_ERR((void*)error) : -EINVAL;
			goto out_free_dentry;
		}

		if (first_pt_load) {
			first_pt_load = 0;
			if (elf_ex->e_type == ET_DYN) {
				load_bias += error -
					     ELF_PAGESTART(load_bias + vaddr);
				reloc_func_desc = load_bias;
			}
		}

		/*
		 * Figure out which segment in the file contains the Program
		 * Header table, and map to the associated memory address.
		 */
		if (elf_ppnt->p_offset <= elf_ex->e_phoff &&
		    elf_ex->e_phoff < elf_ppnt->p_offset + elf_ppnt->p_filesz) {
			phdr_addr = elf_ex->e_phoff - elf_ppnt->p_offset +
				    elf_ppnt->p_vaddr;
		}

		k = elf_ppnt->p_vaddr;
		if ((elf_ppnt->p_flags & PF_X) && k < start_code)
			start_code = k;
		if (start_data < k)
			start_data = k;

		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work. Avoid overflows. */
			retval = -EINVAL;
			goto out_free_dentry;
		}

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk)
			elf_brk = k;
	}

	e_entry = elf_ex->e_entry + load_bias;
	phdr_addr += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;

	current->mm->start_brk = current->mm->brk = ELF_PAGEALIGN(elf_brk);

	if (interpreter) {
		elf_entry = load_elf_interp(interp_elf_ex,
					    interpreter,
					    load_bias, interp_elf_phdata,
					    &arch_state);
		if (!IS_ERR_VALUE(elf_entry)) {
			/*
			 * load_elf_interp() returns relocation
			 * adjustment
			 */
			interp_load_addr = elf_entry;
			elf_entry += interp_elf_ex->e_entry;
		}
		if (BAD_ADDR(elf_entry)) {
			retval = IS_ERR_VALUE(elf_entry) ?
				(int)elf_entry : -EINVAL;
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		fput(interpreter);

		kfree(interp_elf_ex);
		kfree(interp_elf_phdata);
	} else {
		elf_entry = e_entry;
		if (BAD_ADDR(elf_entry)) {
			retval = -EINVAL;
			goto out_free_dentry;
		}
	}

	kfree(elf_phdata);

	set_binfmt(&elf_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
	retval = ARCH_SETUP_ADDITIONAL_PAGES(bprm, elf_ex, !!interpreter);
	if (retval < 0)
		goto out;
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

	retval = create_elf_tables(bprm, elf_ex, interp_load_addr,
				   e_entry, phdr_addr);
	if (retval < 0)
		goto out;

	mm = current->mm;
	mm->end_code = end_code;
	mm->start_code = start_code;
	mm->start_data = start_data;
	mm->end_data = end_data;
	mm->start_stack = bprm->p;

	if ((current->flags & PF_RANDOMIZE) && (snapshot_randomize_va_space > 1)) {
		/*
		 * For architectures with ELF randomization, when executing
		 * a loader directly (i.e. no interpreter listed in ELF
		 * headers), move the brk area out of the mmap region
		 * (since it grows up, and may collide early with the stack
		 * growing down), and into the unused ELF_ET_DYN_BASE region.
		 */
		if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
		    elf_ex->e_type == ET_DYN && !interpreter) {
			mm->brk = mm->start_brk = ELF_ET_DYN_BASE;
		} else {
			/* Otherwise leave a gap between .bss and brk. */
			mm->brk = mm->start_brk = mm->brk + PAGE_SIZE;
		}

		mm->brk = mm->start_brk = arch_randomize_brk(mm);
#ifdef compat_brk_randomized
		current->brk_randomized = 1;
#endif
	}

	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior. Sigh. */
		error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
	}

	regs = current_pt_regs();
#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example.  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	finalize_exec(bprm);
	START_THREAD(elf_ex, regs, elf_entry, bprm->p);
	retval = 0;
out:
	return retval;

	/* error cleanup */
out_free_dentry:
	kfree(interp_elf_ex);
	kfree(interp_elf_phdata);
out_free_file:
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_ph:
	kfree(elf_phdata);
	goto out;
}
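/*
 * Illustrative note (added for exposition; not part of the original
 * source): the cleanup labels above unwind in reverse order of
 * acquisition.  interp_elf_ex and interp_elf_phdata are freed on the
 * success path as soon as the interpreter is mapped, so out_free_dentry
 * is only taken while they are still live, and kfree(NULL) being a
 * no-op is what lets paths that never allocated share the same labels.
 */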
#ifdef CONFIG_USELIB
/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */
static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	int retval, error, i, j;
	struct elfhdr elf_ex;

	error = -ENOEXEC;
	retval = elf_read(file, &elf_ex, sizeof(elf_ex), 0);
	if (retval < 0)
		goto out;

	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	    !elf_check_arch(&elf_ex) || !file->f_op->mmap)
		goto out;
	if (elf_check_fdpic(&elf_ex))
		goto out;

	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	eppnt = elf_phdata;
	error = -ENOEXEC;
	retval = elf_read(file, eppnt, j, elf_ex.e_phoff);
	if (retval < 0)
		goto out_free_ph;

	for (j = 0, i = 0; i < elf_ex.e_phnum; i++)
		if ((eppnt + i)->p_type == PT_LOAD)
			j++;
	if (j != 1)
		goto out_free_ph;

	while (eppnt->p_type != PT_LOAD)
		eppnt++;

	/* Now use mmap to map the library into memory. */
	error = elf_load(file, ELF_PAGESTART(eppnt->p_vaddr),
			eppnt,
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED_NOREPLACE | MAP_PRIVATE,
			0);

	if (error != ELF_PAGESTART(eppnt->p_vaddr))
		goto out_free_ph;

	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}
#endif /* #ifdef CONFIG_USELIB */

#ifdef CONFIG_ELF_CORE
/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */

/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};

static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup(strlen(en->name) + 1, 4);
	sz += roundup(en->datasz, 4);

	return sz;
}

static int writenote(struct memelfnote *men, struct coredump_params *cprm)
{
	struct elf_note en;
	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	return dump_emit(cprm, &en, sizeof(en)) &&
	    dump_emit(cprm, men->name, en.n_namesz) && dump_align(cprm, 4) &&
	    dump_emit(cprm, men->data, men->datasz) && dump_align(cprm, 4);
}

static void fill_elf_header(struct elfhdr *elf, int segs,
			    u16 machine, u32 flags)
{
	memset(elf, 0, sizeof(*elf));

	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;

	elf->e_type = ET_CORE;
	elf->e_machine = machine;
	elf->e_version = EV_CURRENT;
	elf->e_phoff = sizeof(struct elfhdr);
	elf->e_flags = flags;
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = segs;
}

static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
{
	phdr->p_type = PT_NOTE;
	phdr->p_offset = offset;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = sz;
	phdr->p_memsz = 0;
	phdr->p_flags = 0;
	phdr->p_align = 4;
}
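/*
 * Illustrative sketch (added for exposition; not part of the original
 * source) of the ET_CORE file that elf_core_dump() below emits:
 *
 *   ELF header                (e_type = ET_CORE)
 *   program headers           (one PT_NOTE, then one PT_LOAD per
 *                              dumped VMA)
 *   note data                 (prstatus, psinfo, auxv, files, ...)
 *   padding up to ELF_EXEC_PAGESIZE
 *   memory contents of each VMA, in program-header order
 */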
static void fill_note(struct memelfnote *note, const char *name, int type,
		unsigned int sz, void *data)
{
	note->name = name;
	note->type = type;
	note->datasz = sz;
	note->data = data;
}

/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus_common *prstatus,
		struct task_struct *p, long signr)
{
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	rcu_read_lock();
	prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	prstatus->pr_pid = task_pid_vnr(p);
	prstatus->pr_pgrp = task_pgrp_vnr(p);
	prstatus->pr_sid = task_session_vnr(p);
	if (thread_group_leader(p)) {
		struct task_cputime cputime;

		/*
		 * This is the record for the group leader.  It shows the
		 * group-wide total, not its individual thread total.
		 */
		thread_group_cputime(p, &cputime);
		prstatus->pr_utime = ns_to_kernel_old_timeval(cputime.utime);
		prstatus->pr_stime = ns_to_kernel_old_timeval(cputime.stime);
	} else {
		u64 utime, stime;

		task_cputime(p, &utime, &stime);
		prstatus->pr_utime = ns_to_kernel_old_timeval(utime);
		prstatus->pr_stime = ns_to_kernel_old_timeval(stime);
	}

	prstatus->pr_cutime = ns_to_kernel_old_timeval(p->signal->cutime);
	prstatus->pr_cstime = ns_to_kernel_old_timeval(p->signal->cstime);
}

static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
		       struct mm_struct *mm)
{
	const struct cred *cred;
	unsigned int i, len;
	unsigned int state;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ - 1;
	if (copy_from_user(&psinfo->pr_psargs,
			   (const char __user *)mm->arg_start, len))
		return -EFAULT;
	for (i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	rcu_read_lock();
	psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	psinfo->pr_pid = task_pid_vnr(p);
	psinfo->pr_pgrp = task_pgrp_vnr(p);
	psinfo->pr_sid = task_session_vnr(p);

	state = READ_ONCE(p->__state);
	i = state ? ffz(~state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	rcu_read_lock();
	cred = __task_cred(p);
	SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
	SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
	rcu_read_unlock();
	get_task_comm(psinfo->pr_fname, p);

	return 0;
}

static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
{
	elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
	int i = 0;
	do
		i += 2;
	while (auxv[i - 2] != AT_NULL);
	fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
}

static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
		const kernel_siginfo_t *siginfo)
{
	copy_siginfo_to_external(csigdata, siginfo);
	fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
}

/*
 * Format of NT_FILE note:
 *
 * long count     -- how many files are mapped
 * long page_size -- units for file_ofs
 * array of [COUNT] elements of
 *   long start
 *   long end
 *   long file_ofs
 * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
 */
static int fill_files_note(struct memelfnote *note, struct coredump_params *cprm)
{
	unsigned count, size, names_ofs, remaining, n;
	user_long_t *data;
	user_long_t *start_end_ofs;
	char *name_base, *name_curpos;
	int i;

	/* *Estimated* file count and total data size needed */
	count = cprm->vma_count;
	if (count > UINT_MAX / 64)
		return -EINVAL;
	size = count * 64;

	names_ofs = (2 + 3 * count) * sizeof(data[0]);
alloc:
	/* paranoia check */
	if (size >= core_file_note_size_limit) {
		pr_warn_once("coredump Note size too large: %u (does kernel.core_file_note_size_limit sysctl need adjustment?)\n",
			     size);
		return -EINVAL;
	}
	size = round_up(size, PAGE_SIZE);
	/*
	 * "size" can be 0 here legitimately.
	 * Let it ENOMEM and omit NT_FILE section which will be empty anyway.
	 */
	data = kvmalloc(size, GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(data))
		return -ENOMEM;

	start_end_ofs = data + 2;
	name_base = name_curpos = ((char *)data) + names_ofs;
	remaining = size - names_ofs;
	count = 0;
	for (i = 0; i < cprm->vma_count; i++) {
		struct core_vma_metadata *m = &cprm->vma_meta[i];
		struct file *file;
		const char *filename;

		file = m->file;
		if (!file)
			continue;
		filename = file_path(file, name_curpos, remaining);
		if (IS_ERR(filename)) {
			if (PTR_ERR(filename) == -ENAMETOOLONG) {
				kvfree(data);
				size = size * 5 / 4;
				goto alloc;
			}
			continue;
		}

		/* file_path() fills at the end, move name down */
		/* n = strlen(filename) + 1: */
		n = (name_curpos + remaining) - filename;
		remaining = filename - name_curpos;
		memmove(name_curpos, filename, n);
		name_curpos += n;

		*start_end_ofs++ = m->start;
		*start_end_ofs++ = m->end;
		*start_end_ofs++ = m->pgoff;
		count++;
	}

	/* Now we know the exact count of files, so we can store it */
	data[0] = count;
	data[1] = PAGE_SIZE;
	/*
	 * Count is usually less than mm->map_count,
	 * so we need to move the filenames down.
	 */
	n = cprm->vma_count - count;
	if (n != 0) {
		unsigned shift_bytes = n * 3 * sizeof(data[0]);
		memmove(name_base - shift_bytes, name_base,
			name_curpos - name_base);
		name_curpos -= shift_bytes;
	}

	size = name_curpos - (char *)data;
	fill_note(note, "CORE", NT_FILE, size, data);
	return 0;
}

#include <linux/regset.h>

struct elf_thread_core_info {
	struct elf_thread_core_info *next;
	struct task_struct *task;
	struct elf_prstatus prstatus;
	struct memelfnote notes[];
};

struct elf_note_info {
	struct elf_thread_core_info *thread;
	struct memelfnote psinfo;
	struct memelfnote signote;
	struct memelfnote auxv;
	struct memelfnote files;
	user_siginfo_t csigdata;
	size_t size;
	int thread_notes;
};

#ifdef CORE_DUMP_USE_REGSET
/*
 * When a regset has a writeback hook, we call it on each thread before
 * dumping user memory. On register window machines, this makes sure the
 * user memory backing the register data is up to date before we read it.
 */
static void do_thread_regset_writeback(struct task_struct *task,
				       const struct user_regset *regset)
{
	if (regset->writeback)
		regset->writeback(task, regset, 1);
}

#ifndef PRSTATUS_SIZE
#define PRSTATUS_SIZE sizeof(struct elf_prstatus)
#endif

#ifndef SET_PR_FPVALID
#define SET_PR_FPVALID(S) ((S)->pr_fpvalid = 1)
#endif

static int fill_thread_core_info(struct elf_thread_core_info *t,
				 const struct user_regset_view *view,
				 long signr, struct elf_note_info *info)
{
	unsigned int note_iter, view_iter;

	/*
	 * NT_PRSTATUS is the one special case, because the regset data
	 * goes into the pr_reg field inside the note contents, rather
	 * than being the whole note contents. We fill the regset in here.
	 * We assume that regset 0 is NT_PRSTATUS.
	 */
	fill_prstatus(&t->prstatus.common, t->task, signr);
	regset_get(t->task, &view->regsets[0],
		   sizeof(t->prstatus.pr_reg), &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
		  PRSTATUS_SIZE, &t->prstatus);
	info->size += notesize(&t->notes[0]);

	do_thread_regset_writeback(t->task, &view->regsets[0]);

	/*
	 * Each other regset might generate a note too. For each regset
	 * that has no core_note_type or is inactive, skip it.
	 */
	note_iter = 1;
	for (view_iter = 1; view_iter < view->n; ++view_iter) {
		const struct user_regset *regset = &view->regsets[view_iter];
		int note_type = regset->core_note_type;
		bool is_fpreg = note_type == NT_PRFPREG;
		void *data;
		int ret;

		do_thread_regset_writeback(t->task, regset);
		if (!note_type) /* not for coredumps */
			continue;
		if (regset->active && regset->active(t->task, regset) <= 0)
			continue;

		ret = regset_get_alloc(t->task, regset, ~0U, &data);
		if (ret < 0)
			continue;

		if (WARN_ON_ONCE(note_iter >= info->thread_notes))
			break;

		if (is_fpreg)
			SET_PR_FPVALID(&t->prstatus);

		fill_note(&t->notes[note_iter], is_fpreg ? "CORE" : "LINUX",
			  note_type, ret, data);

		info->size += notesize(&t->notes[note_iter]);
		note_iter++;
	}

	return 1;
}
#else
static int fill_thread_core_info(struct elf_thread_core_info *t,
				 const struct user_regset_view *view,
				 long signr, struct elf_note_info *info)
{
	struct task_struct *p = t->task;
	elf_fpregset_t *fpu;

	fill_prstatus(&t->prstatus.common, p, signr);
	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
		  &(t->prstatus));
	info->size += notesize(&t->notes[0]);

	fpu = kzalloc(sizeof(elf_fpregset_t), GFP_KERNEL);
	if (!fpu || !elf_core_copy_task_fpregs(p, fpu)) {
		kfree(fpu);
		return 1;
	}

	t->prstatus.pr_fpvalid = 1;
	fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
	info->size += notesize(&t->notes[1]);

	return 1;
}
#endif

static int fill_note_info(struct elfhdr *elf, int phdrs,
			  struct elf_note_info *info,
			  struct coredump_params *cprm)
{
	struct task_struct *dump_task = current;
	const struct user_regset_view *view;
	struct elf_thread_core_info *t;
	struct elf_prpsinfo *psinfo;
	struct core_thread *ct;

	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
	if (!psinfo)
		return 0;
	fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

#ifdef CORE_DUMP_USE_REGSET
	view = task_user_regset_view(dump_task);

	/*
	 * Figure out how many notes we're going to need for each thread.
	 */
	info->thread_notes = 0;
	for (int i = 0; i < view->n; ++i)
		if (view->regsets[i].core_note_type != 0)
			++info->thread_notes;

	/*
	 * Sanity check.  We rely on regset 0 being in NT_PRSTATUS,
	 * since it is our one special case.
	 */
	if (unlikely(info->thread_notes == 0) ||
	    unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * Initialize the ELF file header.
	 */
	fill_elf_header(elf, phdrs,
			view->e_machine, view->e_flags);
#else
	view = NULL;
	info->thread_notes = 2;
	fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS);
#endif

	/*
	 * Allocate a structure for each thread.
	 */
	info->thread = kzalloc(offsetof(struct elf_thread_core_info,
					notes[info->thread_notes]),
			       GFP_KERNEL);
	if (unlikely(!info->thread))
		return 0;

	info->thread->task = dump_task;
	for (ct = dump_task->signal->core_state->dumper.next; ct; ct = ct->next) {
		t = kzalloc(offsetof(struct elf_thread_core_info,
				     notes[info->thread_notes]),
			    GFP_KERNEL);
		if (unlikely(!t))
			return 0;

		t->task = ct->task;
		t->next = info->thread->next;
		info->thread->next = t;
	}

	/*
	 * Now fill in each thread's information.
	 */
	for (t = info->thread; t != NULL; t = t->next)
		if (!fill_thread_core_info(t, view, cprm->siginfo->si_signo, info))
			return 0;

	/*
	 * Fill in the two process-wide notes.
	/*
	 * Fill in the process-wide notes.
	 */
	fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
	info->size += notesize(&info->psinfo);

	fill_siginfo_note(&info->signote, &info->csigdata, cprm->siginfo);
	info->size += notesize(&info->signote);

	fill_auxv_note(&info->auxv, current->mm);
	info->size += notesize(&info->auxv);

	if (fill_files_note(&info->files, cprm) == 0)
		info->size += notesize(&info->files);

	return 1;
}

/*
 * Write all the notes for each thread. When writing the first thread, the
 * process-wide notes are interleaved after the first thread-specific note.
 */
static int write_note_info(struct elf_note_info *info,
			   struct coredump_params *cprm)
{
	bool first = true;
	struct elf_thread_core_info *t = info->thread;

	do {
		int i;

		if (!writenote(&t->notes[0], cprm))
			return 0;

		if (first && !writenote(&info->psinfo, cprm))
			return 0;
		if (first && !writenote(&info->signote, cprm))
			return 0;
		if (first && !writenote(&info->auxv, cprm))
			return 0;
		if (first && info->files.data &&
		    !writenote(&info->files, cprm))
			return 0;

		for (i = 1; i < info->thread_notes; ++i)
			if (t->notes[i].data &&
			    !writenote(&t->notes[i], cprm))
				return 0;

		first = false;
		t = t->next;
	} while (t);

	return 1;
}

static void free_note_info(struct elf_note_info *info)
{
	struct elf_thread_core_info *threads = info->thread;
	while (threads) {
		unsigned int i;
		struct elf_thread_core_info *t = threads;
		threads = t->next;
		WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
		for (i = 1; i < info->thread_notes; ++i)
			kvfree(t->notes[i].data);
		kfree(t);
	}
	kfree(info->psinfo.data);
	kvfree(info->files.data);
}

static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
			     elf_addr_t e_shoff, int segs)
{
	elf->e_shoff = e_shoff;
	elf->e_shentsize = sizeof(*shdr4extnum);
	elf->e_shnum = 1;
	elf->e_shstrndx = SHN_UNDEF;

	memset(shdr4extnum, 0, sizeof(*shdr4extnum));

	shdr4extnum->sh_type = SHT_NULL;
	shdr4extnum->sh_size = elf->e_shnum;
	shdr4extnum->sh_link = elf->e_shstrndx;
	shdr4extnum->sh_info = segs;
}

/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out. If we run out of core limit
 * we just truncate.
 */
static int elf_core_dump(struct coredump_params *cprm)
{
	int has_dumped = 0;
	int segs, i;
	struct elfhdr elf;
	loff_t offset = 0, dataoff;
	struct elf_note_info info = { };
	struct elf_phdr *phdr4note = NULL;
	struct elf_shdr *shdr4extnum = NULL;
	Elf_Half e_phnum;
	elf_addr_t e_shoff;

	/*
	 * The number of segs is recorded in the ELF header as a 16-bit
	 * value. Please check the DEFAULT_MAX_MAP_COUNT definition when
	 * modifying this.
	 */
	segs = cprm->vma_count + elf_core_extra_phdrs(cprm);

	/* for notes section */
	segs++;

	/*
	 * If segs > PN_XNUM(0xffff), then e_phnum overflows. To avoid
	 * this, the kernel supports extended numbering. Have a look at
	 * include/linux/elf.h for further information.
	 */
	e_phnum = segs > PN_XNUM ? PN_XNUM : segs;
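	/*
	 * When e_phnum is clamped to PN_XNUM, the real segment count is
	 * published in the sh_info field of the lone section header that
	 * fill_extnum_info() sets up below; a reader that sees
	 * e_phnum == PN_XNUM (0xffff) must fetch the count from there.
	 */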

	/*
	 * Collect all the non-memory information about the process for the
	 * notes. This also sets up the file header.
	 */
	if (!fill_note_info(&elf, e_phnum, &info, cprm))
		goto end_coredump;

	has_dumped = 1;

	offset += sizeof(elf);				/* ELF header */
	offset += segs * sizeof(struct elf_phdr);	/* Program headers */

	/* Write notes phdr entry */
	{
		size_t sz = info.size;

		/* For cell spufs */
		sz += elf_coredump_extra_notes_size();

		phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
		if (!phdr4note)
			goto end_coredump;

		fill_elf_note_phdr(phdr4note, sz, offset);
		offset += sz;
	}

	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);

	offset += cprm->vma_data_size;
	offset += elf_core_extra_data_size(cprm);
	e_shoff = offset;

	if (e_phnum == PN_XNUM) {
		shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
		if (!shdr4extnum)
			goto end_coredump;
		fill_extnum_info(&elf, shdr4extnum, e_shoff, segs);
	}

	offset = dataoff;

	if (!dump_emit(cprm, &elf, sizeof(elf)))
		goto end_coredump;

	if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
		goto end_coredump;

	/* Write program headers for segments dump */
	for (i = 0; i < cprm->vma_count; i++) {
		struct core_vma_metadata *meta = cprm->vma_meta + i;
		struct elf_phdr phdr;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = meta->start;
		phdr.p_paddr = 0;
		phdr.p_filesz = meta->dump_size;
		phdr.p_memsz = meta->end - meta->start;
		offset += phdr.p_filesz;
		phdr.p_flags = 0;
		if (meta->flags & VM_READ)
			phdr.p_flags |= PF_R;
		if (meta->flags & VM_WRITE)
			phdr.p_flags |= PF_W;
		if (meta->flags & VM_EXEC)
			phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		if (!dump_emit(cprm, &phdr, sizeof(phdr)))
			goto end_coredump;
	}

	if (!elf_core_write_extra_phdrs(cprm, offset))
		goto end_coredump;

	/* write out the notes section */
	if (!write_note_info(&info, cprm))
		goto end_coredump;

	/* For cell spufs */
	if (elf_coredump_extra_notes_write(cprm))
		goto end_coredump;

	/* Align to page */
	dump_skip_to(cprm, dataoff);

	for (i = 0; i < cprm->vma_count; i++) {
		struct core_vma_metadata *meta = cprm->vma_meta + i;

		if (!dump_user_range(cprm, meta->start, meta->dump_size))
			goto end_coredump;
	}

	if (!elf_core_write_extra_data(cprm))
		goto end_coredump;

	if (e_phnum == PN_XNUM) {
		if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum)))
			goto end_coredump;
	}

end_coredump:
	free_note_info(&info);
	kfree(shdr4extnum);
	kfree(phdr4note);
	return has_dumped;
}

#endif /* CONFIG_ELF_CORE */

static int __init init_elf_binfmt(void)
{
	register_binfmt(&elf_format);
	return 0;
}

static void __exit exit_elf_binfmt(void)
{
	/* Remove the ELF loader. */
	unregister_binfmt(&elf_format);
}

core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);

#ifdef CONFIG_BINFMT_ELF_KUNIT_TEST
#include "tests/binfmt_elf_kunit.c"
#endif