// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines. Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/utsname.h>
#include <linux/coredump.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/sizes.h>
#include <linux/types.h>
#include <linux/cred.h>
#include <linux/dax.h>
#include <linux/uaccess.h>
#include <linux/rseq.h>
#include <asm/param.h>
#include <asm/page.h>

#ifndef ELF_COMPAT
#define ELF_COMPAT 0
#endif

#ifndef user_long_t
#define user_long_t long
#endif
#ifndef user_siginfo_t
#define user_siginfo_t siginfo_t
#endif

/* That's for binfmt_elf_fdpic to deal with */
#ifndef elf_check_fdpic
#define elf_check_fdpic(ex) false
#endif

static int load_elf_binary(struct linux_binprm *bprm);

#ifdef CONFIG_USELIB
static int load_elf_library(struct file *);
#else
#define load_elf_library NULL
#endif

/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#ifdef CONFIG_ELF_CORE
static int elf_core_dump(struct coredump_params *cprm);
#else
#define elf_core_dump NULL
#endif

#if ELF_EXEC_PAGESIZE > PAGE_SIZE
#define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
#else
#define ELF_MIN_ALIGN	PAGE_SIZE
#endif

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

#define ELF_PAGESTART(_v) ((_v) & ~(int)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
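
/*
 * A quick worked example of the three macros above, assuming a 4096-byte
 * ELF_MIN_ALIGN (the usual PAGE_SIZE case) and a hypothetical p_vaddr of
 * 0x401234 picked purely for illustration:
 *
 *	ELF_PAGESTART(0x401234)  == 0x401000	(round down to page start)
 *	ELF_PAGEOFFSET(0x401234) == 0x234	(offset within the page)
 *	ELF_PAGEALIGN(0x401234)  == 0x402000	(round up to the next page)
 *
 * elf_map() below relies on the identity
 *	ELF_PAGESTART(v) + ELF_PAGEOFFSET(v) == v
 * to map page-aligned file contents while preserving the segment's
 * in-page offset.
 */
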
static struct linux_binfmt elf_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_binary,
	.load_shlib	= load_elf_library,
#ifdef CONFIG_COREDUMP
	.core_dump	= elf_core_dump,
	.min_coredump	= ELF_EXEC_PAGESIZE,
#endif
};

#define BAD_ADDR(x) (unlikely((unsigned long)(x) >= TASK_SIZE))

/*
 * We need to explicitly zero any trailing portion of the page that follows
 * p_filesz when it ends before the page ends (e.g. bss), otherwise this
 * memory will contain junk from the file that should not be present.
 */
static int padzero(unsigned long address)
{
	unsigned long nbyte;

	nbyte = ELF_PAGEOFFSET(address);
	if (nbyte) {
		nbyte = ELF_MIN_ALIGN - nbyte;
		if (clear_user((void __user *)address, nbyte))
			return -EFAULT;
	}
	return 0;
}

/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ \
	elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
	old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) (sp -= len)
#endif
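
/*
 * Illustrative only: on a grows-down stack (the #else branch above),
 * carving the 16 AT_RANDOM bytes out of a hypothetical sp of
 * 0x7fffffffe000 works out as
 *
 *	STACK_ALLOC(sp, 16) ->	sp becomes 0x7fffffffdff0, and that new,
 *				lower address is where the bytes are copied;
 *	STACK_ROUND(sp, n)  ->	rounds down to a 16-byte boundary, keeping
 *				the final stack pointer aligned as most
 *				psABIs expect at process entry.
 *
 * On CONFIG_STACK_GROWSUP architectures (PA-RISC) the arithmetic is
 * mirrored: the old sp is returned and the pointer moves upward.
 */
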
#ifndef ELF_BASE_PLATFORM
/*
 * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture.
 * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value
 * will be copied to the user stack in the same manner as AT_PLATFORM.
 */
#define ELF_BASE_PLATFORM NULL
#endif

static int
create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
		unsigned long interp_load_addr,
		unsigned long e_entry, unsigned long phdr_addr)
{
	struct mm_struct *mm = current->mm;
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	elf_addr_t __user *u_base_platform;
	elf_addr_t __user *u_rand_bytes;
	const char *k_platform = ELF_PLATFORM;
	const char *k_base_platform = ELF_BASE_PLATFORM;
	unsigned char k_rand_bytes[16];
	int items;
	elf_addr_t *elf_info;
	elf_addr_t flags = 0;
	int ei_index;
	const struct cred *cred = current_cred();
	struct vm_area_struct *vma;

	/*
	 * In some cases (e.g. Hyper-Threading), we want to avoid L1
	 * evictions by the processes running on the same package. One
	 * thing we can do is to shuffle the initial stack for them.
	 */

	p = arch_align_stack(p);

	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace. In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (copy_to_user(u_platform, k_platform, len))
			return -EFAULT;
	}

	/*
	 * If this architecture has a "base" platform capability
	 * string, copy it to userspace.
	 */
	u_base_platform = NULL;
	if (k_base_platform) {
		size_t len = strlen(k_base_platform) + 1;

		u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (copy_to_user(u_base_platform, k_base_platform, len))
			return -EFAULT;
	}

	/*
	 * Generate 16 random bytes for userspace PRNG seeding.
	 */
	get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
	u_rand_bytes = (elf_addr_t __user *)
		       STACK_ALLOC(p, sizeof(k_rand_bytes));
	if (copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
		return -EFAULT;

	/* Create the ELF interpreter info */
	elf_info = (elf_addr_t *)mm->saved_auxv;
	/* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
#define NEW_AUX_ENT(id, val) \
	do { \
		*elf_info++ = id; \
		*elf_info++ = val; \
	} while (0)

#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
	 * ARCH_DLINFO changes
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, phdr_addr);
	NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	if (bprm->interp_flags & BINPRM_FLAGS_PRESERVE_ARGV0)
		flags |= AT_FLAGS_PRESERVE_ARGV0;
	NEW_AUX_ENT(AT_FLAGS, flags);
	NEW_AUX_ENT(AT_ENTRY, e_entry);
	NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid));
	NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid));
	NEW_AUX_ENT(AT_GID, from_kgid_munged(cred->user_ns, cred->gid));
	NEW_AUX_ENT(AT_EGID, from_kgid_munged(cred->user_ns, cred->egid));
	NEW_AUX_ENT(AT_SECURE, bprm->secureexec);
	NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
#ifdef ELF_HWCAP2
	NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2);
#endif
	NEW_AUX_ENT(AT_EXECFN, bprm->exec);
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM,
			    (elf_addr_t)(unsigned long)u_platform);
	}
	if (k_base_platform) {
		NEW_AUX_ENT(AT_BASE_PLATFORM,
			    (elf_addr_t)(unsigned long)u_base_platform);
	}
	if (bprm->have_execfd) {
		NEW_AUX_ENT(AT_EXECFD, bprm->execfd);
	}
#ifdef CONFIG_RSEQ
	NEW_AUX_ENT(AT_RSEQ_FEATURE_SIZE, offsetof(struct rseq, end));
	NEW_AUX_ENT(AT_RSEQ_ALIGN, __alignof__(struct rseq));
#endif
#undef NEW_AUX_ENT
	/* AT_NULL is zero; clear the rest too */
	memset(elf_info, 0, (char *)mm->saved_auxv +
	       sizeof(mm->saved_auxv) - (char *)elf_info);

	/* And advance past the AT_NULL entry. */
	elf_info += 2;

	ei_index = elf_info - (elf_addr_t *)mm->saved_auxv;
	sp = STACK_ADD(p, ei_index);

	items = (argc + 1) + (envc + 1) + 1;
	bprm->p = STACK_ROUND(sp, items);

	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif


	/*
	 * Grow the stack manually; some architectures have a limit on how
	 * far ahead a user-space access may be in order to grow the stack.
	 */
	if (mmap_write_lock_killable(mm))
		return -EINTR;
	vma = find_extend_vma_locked(mm, bprm->p);
	mmap_write_unlock(mm);
	if (!vma)
		return -EFAULT;

	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	if (put_user(argc, sp++))
		return -EFAULT;

	/* Populate list of argv pointers back to argv strings. */
	p = mm->arg_end = mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		if (put_user((elf_addr_t)p, sp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (put_user(0, sp++))
		return -EFAULT;
	mm->arg_end = p;

	/* Populate list of envp pointers back to envp strings. */
	mm->env_end = mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		if (put_user((elf_addr_t)p, sp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (put_user(0, sp++))
		return -EFAULT;
	mm->env_end = p;

	/* Put the elf_info on the stack in the right place. */
	if (copy_to_user(sp, mm->saved_auxv, ei_index * sizeof(elf_addr_t)))
		return -EFAULT;
	return 0;
}
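
/*
 * From userspace, the tables laid out above are visible immediately at
 * process entry: the stack holds argc, the argv[] pointers, a NULL, the
 * envp[] pointers, a NULL, and then the auxiliary vector. A minimal
 * userspace sketch of walking past envp to the auxv, purely illustrative
 * and relying on glibc's non-standard three-argument main():
 *
 *	#include <link.h>
 *	#include <stdio.h>
 *
 *	int main(int argc, char **argv, char **envp)
 *	{
 *		char **p = envp;
 *		ElfW(auxv_t) *aux;
 *
 *		while (*p)	// step past the envp[] pointers
 *			p++;
 *		for (aux = (ElfW(auxv_t) *)(p + 1);
 *		     aux->a_type != AT_NULL; aux++)
 *			if (aux->a_type == AT_RANDOM)
 *				printf("AT_RANDOM at %p\n",
 *				       (void *)aux->a_un.a_val);
 *		return 0;
 *	}
 *
 * The same entries are exported verbatim (from mm->saved_auxv) through
 * /proc/<pid>/auxv.
 */
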
/*
 * Map "eppnt->p_filesz" bytes from "filep" offset "eppnt->p_offset"
 * into memory at "addr". (Note that p_filesz is rounded up to the
 * next page, so any extra bytes from the file must be wiped.)
 */
static unsigned long elf_map(struct file *filep, unsigned long addr,
		const struct elf_phdr *eppnt, int prot, int type,
		unsigned long total_size)
{
	unsigned long map_addr;
	unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
	unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
	addr = ELF_PAGESTART(addr);
	size = ELF_PAGEALIGN(size);

	/* mmap() will return -EINVAL if given a zero size, but a
	 * segment with zero filesize is perfectly valid */
	if (!size)
		return addr;

	/*
	 * total_size is the size of the ELF (interpreter) image.
	 * The _first_ mmap needs to know the full size, otherwise
	 * randomization might put this image into an overlapping
	 * position with the ELF binary image. (since size < total_size)
	 * So we first map the 'big' image - and unmap the remainder at
	 * the end. (which unmap is needed for ELF images with holes.)
	 */
	if (total_size) {
		total_size = ELF_PAGEALIGN(total_size);
		map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
		if (!BAD_ADDR(map_addr))
			vm_munmap(map_addr+size, total_size-size);
	} else
		map_addr = vm_mmap(filep, addr, size, prot, type, off);

	if ((type & MAP_FIXED_NOREPLACE) &&
	    PTR_ERR((void *)map_addr) == -EEXIST)
		pr_info("%d (%s): Uhuuh, elf segment at %px requested but the memory is mapped already\n",
			task_pid_nr(current), current->comm, (void *)addr);

	return map_addr;
}

/*
 * Map "eppnt->p_filesz" bytes from "filep" offset "eppnt->p_offset"
 * into memory at "addr". Memory from "p_filesz" through "p_memsz"
 * rounded up to the next page is zeroed.
 */
static unsigned long elf_load(struct file *filep, unsigned long addr,
		const struct elf_phdr *eppnt, int prot, int type,
		unsigned long total_size)
{
	unsigned long zero_start, zero_end;
	unsigned long map_addr;

	if (eppnt->p_filesz) {
		map_addr = elf_map(filep, addr, eppnt, prot, type, total_size);
		if (BAD_ADDR(map_addr))
			return map_addr;
		if (eppnt->p_memsz > eppnt->p_filesz) {
			zero_start = map_addr + ELF_PAGEOFFSET(eppnt->p_vaddr) +
				eppnt->p_filesz;
			zero_end = map_addr + ELF_PAGEOFFSET(eppnt->p_vaddr) +
				eppnt->p_memsz;

			/*
			 * Zero the end of the last mapped page but ignore
			 * any errors if the segment isn't writable.
			 */
			if (padzero(zero_start) && (prot & PROT_WRITE))
				return -EFAULT;
		}
	} else {
		map_addr = zero_start = ELF_PAGESTART(addr);
		zero_end = zero_start + ELF_PAGEOFFSET(eppnt->p_vaddr) +
			eppnt->p_memsz;
	}
	if (eppnt->p_memsz > eppnt->p_filesz) {
		/*
		 * Map the last of the segment.
		 * If the header is requesting these pages to be
		 * executable, honour that (ppc32 needs this).
		 */
		int error;

		zero_start = ELF_PAGEALIGN(zero_start);
		zero_end = ELF_PAGEALIGN(zero_end);

		error = vm_brk_flags(zero_start, zero_end - zero_start,
				prot & PROT_EXEC ? VM_EXEC : 0);
		if (error)
			map_addr = error;
	}
	return map_addr;
}
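
/*
 * A worked example of the split above, with invented numbers: a writable
 * PT_LOAD with p_vaddr = 0x601000, p_filesz = 0x800 and p_memsz = 0x3000
 * (i.e. 0x2800 bytes of bss) is handled in three steps:
 *
 *	1. elf_map() mmaps the single file-backed page at 0x601000;
 *	2. padzero() clears bytes 0x601800..0x601fff, the file tail
 *	   that follows p_filesz within that page;
 *	3. vm_brk_flags() maps anonymous zero pages for the remainder,
 *	   0x602000..0x603fff.
 *
 * Steps 1 and 2 are skipped entirely when p_filesz == 0, and step 3
 * when p_memsz does not exceed p_filesz.
 */
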
static unsigned long total_mapping_size(const struct elf_phdr *phdr, int nr)
{
	elf_addr_t min_addr = -1;
	elf_addr_t max_addr = 0;
	bool pt_load = false;
	int i;

	for (i = 0; i < nr; i++) {
		if (phdr[i].p_type == PT_LOAD) {
			min_addr = min(min_addr, ELF_PAGESTART(phdr[i].p_vaddr));
			max_addr = max(max_addr, phdr[i].p_vaddr + phdr[i].p_memsz);
			pt_load = true;
		}
	}
	return pt_load ? (max_addr - min_addr) : 0;
}

static int elf_read(struct file *file, void *buf, size_t len, loff_t pos)
{
	ssize_t rv;

	rv = kernel_read(file, buf, len, &pos);
	if (unlikely(rv != len)) {
		return (rv < 0) ? rv : -EIO;
	}
	return 0;
}

static unsigned long maximum_alignment(struct elf_phdr *cmds, int nr)
{
	unsigned long alignment = 0;
	int i;

	for (i = 0; i < nr; i++) {
		if (cmds[i].p_type == PT_LOAD) {
			unsigned long p_align = cmds[i].p_align;

			/* skip non-power of two alignments as invalid */
			if (!is_power_of_2(p_align))
				continue;
			alignment = max(alignment, p_align);
		}
	}

	/* ensure we align to at least one page */
	return ELF_PAGEALIGN(alignment);
}

/**
 * load_elf_phdrs() - load ELF program headers
 * @elf_ex:   ELF header of the binary whose program headers should be loaded
 * @elf_file: the opened ELF binary file
 *
 * Loads ELF program headers from the binary file elf_file, which has the ELF
 * header pointed to by elf_ex, into a newly allocated array. The caller is
 * responsible for freeing the allocated data. Returns NULL upon failure.
 */
static struct elf_phdr *load_elf_phdrs(const struct elfhdr *elf_ex,
				       struct file *elf_file)
{
	struct elf_phdr *elf_phdata = NULL;
	int retval = -1;
	unsigned int size;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;

	/* Sanity check the number of program headers and their total size. */
	size = sizeof(struct elf_phdr) * elf_ex->e_phnum;
	if (size == 0 || size > 65536 || size > ELF_MIN_ALIGN)
		goto out;

	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	/* Read in the program headers */
	retval = elf_read(elf_file, elf_phdata, size, elf_ex->e_phoff);

out:
	if (retval) {
		kfree(elf_phdata);
		elf_phdata = NULL;
	}
	return elf_phdata;
}
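
/*
 * The checks above mirror what any careful ELF consumer has to do. As a
 * point of comparison, a userspace sketch (illustrative only; error
 * handling trimmed) that reads the same program header table would be:
 *
 *	#include <elf.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	static Elf64_Phdr *read_phdrs(int fd, const Elf64_Ehdr *eh)
 *	{
 *		size_t size = (size_t)eh->e_phentsize * eh->e_phnum;
 *		Elf64_Phdr *ph;
 *
 *		// same punt as the kernel: unexpected e_phentsize
 *		if (eh->e_phentsize != sizeof(*ph) || !size || size > 65536)
 *			return NULL;
 *		ph = malloc(size);
 *		if (ph && pread(fd, ph, size, eh->e_phoff) != (ssize_t)size) {
 *			free(ph);
 *			return NULL;
 *		}
 *		return ph;
 *	}
 *
 * pread() here plays the role of elf_read()/kernel_read() above.
 */
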
#ifndef CONFIG_ARCH_BINFMT_ELF_STATE

/**
 * struct arch_elf_state - arch-specific ELF loading state
 *
 * This structure is used to preserve architecture specific data during
 * the loading of an ELF file, throughout the checking of architecture
 * specific ELF headers & through to the point where the ELF load is
 * known to be proceeding (ie. SET_PERSONALITY).
 *
 * This implementation is a dummy for architectures which require no
 * specific state.
 */
struct arch_elf_state {
};

#define INIT_ARCH_ELF_STATE {}

/**
 * arch_elf_pt_proc() - check a PT_LOPROC..PT_HIPROC ELF program header
 * @ehdr:	The main ELF header
 * @phdr:	The program header to check
 * @elf:	The open ELF file
 * @is_interp:	True if the phdr is from the interpreter of the ELF being
 *		loaded, else false.
 * @state:	Architecture-specific state preserved throughout the process
 *		of loading the ELF.
 *
 * Inspects the program header phdr to validate its correctness and/or
 * suitability for the system. Called once per ELF program header in the
 * range PT_LOPROC to PT_HIPROC, for both the ELF being loaded and its
 * interpreter.
 *
 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
 *         with that return code.
 */
static inline int arch_elf_pt_proc(struct elfhdr *ehdr,
				   struct elf_phdr *phdr,
				   struct file *elf, bool is_interp,
				   struct arch_elf_state *state)
{
	/* Dummy implementation, always proceed */
	return 0;
}

/**
 * arch_check_elf() - check an ELF executable
 * @ehdr:	The main ELF header
 * @has_interp:	True if the ELF has an interpreter, else false.
 * @interp_ehdr: The interpreter's ELF header
 * @state:	Architecture-specific state preserved throughout the process
 *		of loading the ELF.
 *
 * Provides a final opportunity for architecture code to reject the loading
 * of the ELF & cause an exec syscall to return an error. This is called after
 * all program headers to be checked by arch_elf_pt_proc have been.
 *
 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
 *         with that return code.
 */
static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
				 struct elfhdr *interp_ehdr,
				 struct arch_elf_state *state)
{
	/* Dummy implementation, always proceed */
	return 0;
}

#endif /* !CONFIG_ARCH_BINFMT_ELF_STATE */

static inline int make_prot(u32 p_flags, struct arch_elf_state *arch_state,
			    bool has_interp, bool is_interp)
{
	int prot = 0;

	if (p_flags & PF_R)
		prot |= PROT_READ;
	if (p_flags & PF_W)
		prot |= PROT_WRITE;
	if (p_flags & PF_X)
		prot |= PROT_EXEC;

	return arch_elf_adjust_prot(prot, arch_state, has_interp, is_interp);
}

/*
 * This is much more generalized than the library routine read function,
 * so we keep this separate. Technically the library read function
 * is only provided so that we can read a.out libraries that have
 * an ELF header.
 */

static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
		struct file *interpreter,
		unsigned long no_base, struct elf_phdr *interp_elf_phdata,
		struct arch_elf_state *arch_state)
{
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long error = ~0UL;
	unsigned long total_size;
	int i;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex) ||
	    elf_check_fdpic(interp_elf_ex))
		goto out;
	if (!interpreter->f_op->mmap)
		goto out;

	total_size = total_mapping_size(interp_elf_phdata,
					interp_elf_ex->e_phnum);
	if (!total_size) {
		error = -EINVAL;
		goto out;
	}

	eppnt = interp_elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE;
			int elf_prot = make_prot(eppnt->p_flags, arch_state,
						 true, true);
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			vaddr = eppnt->p_vaddr;
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED;
			else if (no_base && interp_elf_ex->e_type == ET_DYN)
				load_addr = -vaddr;

			map_addr = elf_load(interpreter, load_addr + vaddr,
					eppnt, elf_prot, elf_type, total_size);
			total_size = 0;
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out;

			if (!load_addr_set &&
			    interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsz so it's only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (BAD_ADDR(k) ||
			    eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE ||
			    TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out;
			}
		}
	}

	error = load_addr;
out:
	return error;
}
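
/*
 * To make the ET_DYN relocation above concrete with invented numbers: if
 * the interpreter's first PT_LOAD has p_vaddr == 0 and the kernel's mmap
 * picks, say, 0x7f0000000000 for it, then
 *
 *	load_addr = 0x7f0000000000 - ELF_PAGESTART(0) = 0x7f0000000000
 *
 * and every later segment is just p_vaddr plus that bias. The function's
 * return value is exactly this bias, which is why load_elf_binary() both
 * stores it as interp_load_addr (the AT_BASE entry) and adds e_entry to
 * it to get the interpreter's entry point.
 */
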
/*
 * These are the functions used to load ELF style executables and shared
 * libraries. There is no binary dependent code anywhere else.
 */

static int parse_elf_property(const char *data, size_t *off, size_t datasz,
			      struct arch_elf_state *arch,
			      bool have_prev_type, u32 *prev_type)
{
	size_t o, step;
	const struct gnu_property *pr;
	int ret;

	if (*off == datasz)
		return -ENOENT;

	if (WARN_ON_ONCE(*off > datasz || *off % ELF_GNU_PROPERTY_ALIGN))
		return -EIO;
	o = *off;
	datasz -= *off;

	if (datasz < sizeof(*pr))
		return -ENOEXEC;
	pr = (const struct gnu_property *)(data + o);
	o += sizeof(*pr);
	datasz -= sizeof(*pr);

	if (pr->pr_datasz > datasz)
		return -ENOEXEC;

	WARN_ON_ONCE(o % ELF_GNU_PROPERTY_ALIGN);
	step = round_up(pr->pr_datasz, ELF_GNU_PROPERTY_ALIGN);
	if (step > datasz)
		return -ENOEXEC;

	/* Properties are supposed to be unique and sorted on pr_type: */
	if (have_prev_type && pr->pr_type <= *prev_type)
		return -ENOEXEC;
	*prev_type = pr->pr_type;

	ret = arch_parse_elf_property(pr->pr_type, data + o,
				      pr->pr_datasz, ELF_COMPAT, arch);
	if (ret)
		return ret;

	*off = o + step;
	return 0;
}

#define NOTE_DATA_SZ SZ_1K
#define GNU_PROPERTY_TYPE_0_NAME "GNU"
#define NOTE_NAME_SZ (sizeof(GNU_PROPERTY_TYPE_0_NAME))

static int parse_elf_properties(struct file *f, const struct elf_phdr *phdr,
				struct arch_elf_state *arch)
{
	union {
		struct elf_note nhdr;
		char data[NOTE_DATA_SZ];
	} note;
	loff_t pos;
	ssize_t n;
	size_t off, datasz;
	int ret;
	bool have_prev_type;
	u32 prev_type;

	if (!IS_ENABLED(CONFIG_ARCH_USE_GNU_PROPERTY) || !phdr)
		return 0;

	/* load_elf_binary() shouldn't call us unless this is true... */
	if (WARN_ON_ONCE(phdr->p_type != PT_GNU_PROPERTY))
		return -ENOEXEC;

	/* If the properties are crazy large, that's too bad (for now): */
	if (phdr->p_filesz > sizeof(note))
		return -ENOEXEC;

	pos = phdr->p_offset;
	n = kernel_read(f, &note, phdr->p_filesz, &pos);

	BUILD_BUG_ON(sizeof(note) < sizeof(note.nhdr) + NOTE_NAME_SZ);
	if (n < 0 || n < sizeof(note.nhdr) + NOTE_NAME_SZ)
		return -EIO;

	if (note.nhdr.n_type != NT_GNU_PROPERTY_TYPE_0 ||
	    note.nhdr.n_namesz != NOTE_NAME_SZ ||
	    strncmp(note.data + sizeof(note.nhdr),
		    GNU_PROPERTY_TYPE_0_NAME, n - sizeof(note.nhdr)))
		return -ENOEXEC;

	off = round_up(sizeof(note.nhdr) + NOTE_NAME_SZ,
		       ELF_GNU_PROPERTY_ALIGN);
	if (off > n)
		return -ENOEXEC;

	if (note.nhdr.n_descsz > n - off)
		return -ENOEXEC;
	datasz = off + note.nhdr.n_descsz;

	have_prev_type = false;
	do {
		ret = parse_elf_property(note.data, &off, datasz, arch,
					 have_prev_type, &prev_type);
		have_prev_type = true;
	} while (!ret);

	return ret == -ENOENT ? 0 : ret;
}
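
/*
 * For reference, a minimal sketch of what this parser consumes: an x86-64
 * PT_GNU_PROPERTY note advertising IBT and SHSTK (values invented here for
 * illustration; ELF_GNU_PROPERTY_ALIGN == 8 on 64-bit) lays out as
 *
 *	n_namesz  = 4                          \
 *	n_descsz  = 16                          } struct elf_note
 *	n_type    = NT_GNU_PROPERTY_TYPE_0 (5) /
 *	"GNU\0"                                 name, padded to 8 bytes
 *	pr_type   = GNU_PROPERTY_X86_FEATURE_1_AND (0xc0000002)
 *	pr_datasz = 4
 *	data      = 0x3 (IBT | SHSTK), padded to 8 bytes
 *
 * parse_elf_property() walks pr_type/pr_datasz pairs like this one until
 * it runs off the end of the descriptor (the -ENOENT case above).
 */
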
static int load_elf_binary(struct linux_binprm *bprm)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_bias = 0, phdr_addr = 0;
	int first_pt_load = 1;
	unsigned long error;
	struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
	struct elf_phdr *elf_property_phdata = NULL;
	unsigned long elf_brk;
	int retval, i;
	unsigned long elf_entry;
	unsigned long e_entry;
	unsigned long interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc __maybe_unused = 0;
	int executable_stack = EXSTACK_DEFAULT;
	struct elfhdr *elf_ex = (struct elfhdr *)bprm->buf;
	struct elfhdr *interp_elf_ex = NULL;
	struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
	struct mm_struct *mm;
	struct pt_regs *regs;

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (elf_ex->e_type != ET_EXEC && elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(elf_ex))
		goto out;
	if (elf_check_fdpic(elf_ex))
		goto out;
	if (!bprm->file->f_op->mmap)
		goto out;

	elf_phdata = load_elf_phdrs(elf_ex, bprm->file);
	if (!elf_phdata)
		goto out;

	elf_ppnt = elf_phdata;
	for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++) {
		char *elf_interpreter;

		if (elf_ppnt->p_type == PT_GNU_PROPERTY) {
			elf_property_phdata = elf_ppnt;
			continue;
		}

		if (elf_ppnt->p_type != PT_INTERP)
			continue;

		/*
		 * This is the program interpreter used for shared libraries -
		 * for now assume that this is an a.out format binary.
		 */
		retval = -ENOEXEC;
		if (elf_ppnt->p_filesz > PATH_MAX || elf_ppnt->p_filesz < 2)
			goto out_free_ph;

		retval = -ENOMEM;
		elf_interpreter = kmalloc(elf_ppnt->p_filesz, GFP_KERNEL);
		if (!elf_interpreter)
			goto out_free_ph;

		retval = elf_read(bprm->file, elf_interpreter, elf_ppnt->p_filesz,
				  elf_ppnt->p_offset);
		if (retval < 0)
			goto out_free_interp;
		/* make sure path is NULL terminated */
		retval = -ENOEXEC;
		if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
			goto out_free_interp;

		interpreter = open_exec(elf_interpreter);
		kfree(elf_interpreter);
		retval = PTR_ERR(interpreter);
		if (IS_ERR(interpreter))
			goto out_free_ph;

		/*
		 * If the binary is not readable then enforce mm->dumpable = 0
		 * regardless of the interpreter's permissions.
		 */
		would_dump(bprm, interpreter);

		interp_elf_ex = kmalloc(sizeof(*interp_elf_ex), GFP_KERNEL);
		if (!interp_elf_ex) {
			retval = -ENOMEM;
			goto out_free_file;
		}

		/* Get the exec headers */
		retval = elf_read(interpreter, interp_elf_ex,
				  sizeof(*interp_elf_ex), 0);
		if (retval < 0)
			goto out_free_dentry;

		break;

out_free_interp:
		kfree(elf_interpreter);
		goto out_free_ph;
	}

	elf_ppnt = elf_phdata;
	for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++)
		switch (elf_ppnt->p_type) {
		case PT_GNU_STACK:
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;

		case PT_LOPROC ... PT_HIPROC:
			retval = arch_elf_pt_proc(elf_ex, elf_ppnt,
						  bprm->file, false,
						  &arch_state);
			if (retval)
				goto out_free_dentry;
			break;
		}

	/* Some simple consistency checks for the interpreter */
	if (interpreter) {
		retval = -ELIBBAD;
		/* Not an ELF interpreter */
		if (memcmp(interp_elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
			goto out_free_dentry;
		/* Verify the interpreter has a valid arch */
		if (!elf_check_arch(interp_elf_ex) ||
		    elf_check_fdpic(interp_elf_ex))
			goto out_free_dentry;

		/* Load the interpreter program headers */
		interp_elf_phdata = load_elf_phdrs(interp_elf_ex,
						   interpreter);
		if (!interp_elf_phdata)
			goto out_free_dentry;

		/* Pass PT_LOPROC..PT_HIPROC headers to arch code */
		elf_property_phdata = NULL;
		elf_ppnt = interp_elf_phdata;
		for (i = 0; i < interp_elf_ex->e_phnum; i++, elf_ppnt++)
			switch (elf_ppnt->p_type) {
			case PT_GNU_PROPERTY:
				elf_property_phdata = elf_ppnt;
				break;

			case PT_LOPROC ... PT_HIPROC:
				retval = arch_elf_pt_proc(interp_elf_ex,
							  elf_ppnt, interpreter,
							  true, &arch_state);
				if (retval)
					goto out_free_dentry;
				break;
			}
	}

	retval = parse_elf_properties(interpreter ?: bprm->file,
				      elf_property_phdata, &arch_state);
	if (retval)
		goto out_free_dentry;

	/*
	 * Allow arch code to reject the ELF at this point, whilst it's
	 * still possible to return an error to the code that invoked
	 * the exec syscall.
	 */
	retval = arch_check_elf(elf_ex,
				!!interpreter, interp_elf_ex,
				&arch_state);
	if (retval)
		goto out_free_dentry;

	/* Flush all traces of the currently running executable */
	retval = begin_new_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/*
	 * Do this immediately, since STACK_TOP as used in setup_arg_pages
	 * may depend on the personality.
	 */
	SET_PERSONALITY2(*elf_ex, &arch_state);
	if (elf_read_implies_exec(*elf_ex, executable_stack))
		current->personality |= READ_IMPLIES_EXEC;

	const int snapshot_randomize_va_space = READ_ONCE(randomize_va_space);
	if (!(current->personality & ADDR_NO_RANDOMIZE) && snapshot_randomize_va_space)
		current->flags |= PF_RANDOMIZE;

	setup_new_exec(bprm);

	/*
	 * Do this so that we can load the interpreter, if need be. We will
	 * change some of these later.
	 */
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0)
		goto out_free_dentry;

	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	/*
	 * Now we do a little grungy work by mmapping the ELF image into
	 * the correct location in memory.
	 */
	for (i = 0, elf_ppnt = elf_phdata;
	     i < elf_ex->e_phnum; i++, elf_ppnt++) {
		int elf_prot, elf_flags;
		unsigned long k, vaddr;
		unsigned long total_size = 0;
		unsigned long alignment;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		elf_prot = make_prot(elf_ppnt->p_flags, &arch_state,
				     !!interpreter, false);

		elf_flags = MAP_PRIVATE;

		vaddr = elf_ppnt->p_vaddr;
		/*
		 * The first time through the loop, first_pt_load is true:
		 * layout will be calculated. Once set, use MAP_FIXED since
		 * we know we've already safely mapped the entire region with
		 * MAP_FIXED_NOREPLACE in the once-per-binary logic following.
		 */
		if (!first_pt_load) {
			elf_flags |= MAP_FIXED;
		} else if (elf_ex->e_type == ET_EXEC) {
			/*
			 * This logic is run once for the first LOAD Program
			 * Header for ET_EXEC binaries. No special handling
			 * is needed.
			 */
			elf_flags |= MAP_FIXED_NOREPLACE;
		} else if (elf_ex->e_type == ET_DYN) {
			/*
			 * This logic is run once for the first LOAD Program
			 * Header for ET_DYN binaries to calculate the
			 * randomization (load_bias) for all the LOAD
			 * Program Headers.
			 */

			/*
			 * Calculate the entire size of the ELF mapping
			 * (total_size), used for the initial mapping,
			 * due to load_addr_set which is set to true later
			 * once the initial mapping is performed.
			 *
			 * Note that this is only sensible when the LOAD
			 * segments are contiguous (or overlapping). If
			 * used for LOADs that are far apart, this would
			 * cause the holes between LOADs to be mapped,
			 * running the risk of having the mapping fail,
			 * as it would be larger than the ELF file itself.
			 *
			 * As a result, only ET_DYN does this, since
			 * some ET_EXEC (e.g. ia64) may have large virtual
			 * memory holes between LOADs.
			 */
			total_size = total_mapping_size(elf_phdata,
							elf_ex->e_phnum);
			if (!total_size) {
				retval = -EINVAL;
				goto out_free_dentry;
			}

			/* Calculate any requested alignment. */
			alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);

			/*
			 * There are effectively two types of ET_DYN
			 * binaries: programs (i.e. PIE: ET_DYN with PT_INTERP)
			 * and loaders (ET_DYN without PT_INTERP, since they
			 * _are_ the ELF interpreter). The loaders must
			 * be loaded away from programs since the program
			 * may otherwise collide with the loader (especially
			 * for ET_EXEC which does not have a randomized
			 * position). For example, to handle invocations of
			 * "./ld.so someprog" to test out a new version of
			 * the loader, the subsequent program that the
			 * loader loads must avoid the loader itself, so
			 * they cannot share the same load range. Sufficient
			 * room for the brk must be allocated with the
			 * loader as well, since brk must be available with
			 * the loader.
			 *
			 * Therefore, programs are loaded offset from
			 * ELF_ET_DYN_BASE and loaders are loaded into the
			 * independently randomized mmap region (0 load_bias
			 * without MAP_FIXED nor MAP_FIXED_NOREPLACE).
			 */
			if (interpreter) {
				/* On ET_DYN with PT_INTERP, we do the ASLR. */
				load_bias = ELF_ET_DYN_BASE;
				if (current->flags & PF_RANDOMIZE)
					load_bias += arch_mmap_rnd();
				/* Adjust alignment as requested. */
				if (alignment)
					load_bias &= ~(alignment - 1);
				elf_flags |= MAP_FIXED_NOREPLACE;
			} else {
				/*
				 * For ET_DYN without PT_INTERP, we rely on
				 * the architecture's (potentially ASLR) mmap
				 * base address (via a load_bias of 0).
				 *
				 * When a large alignment is requested, we
				 * must do the allocation at address "0" right
				 * now to discover where things will load so
				 * that we can adjust the resulting alignment.
				 * In this case (load_bias != 0), we can use
				 * MAP_FIXED_NOREPLACE to make sure the mapping
				 * doesn't collide with anything.
				 */
				if (alignment > ELF_MIN_ALIGN) {
					load_bias = elf_load(bprm->file, 0, elf_ppnt,
							     elf_prot, elf_flags, total_size);
					if (BAD_ADDR(load_bias)) {
						retval = IS_ERR_VALUE(load_bias) ?
							 PTR_ERR((void *)load_bias) : -EINVAL;
						goto out_free_dentry;
					}
					vm_munmap(load_bias, total_size);
					/* Adjust alignment as requested. */
					if (alignment)
						load_bias &= ~(alignment - 1);
					elf_flags |= MAP_FIXED_NOREPLACE;
				} else
					load_bias = 0;
			}

			/*
			 * Since load_bias is used for all subsequent loading
			 * calculations, we must lower it by the first vaddr
			 * so that the remaining calculations based on the
			 * ELF vaddrs will be correctly offset. The result
			 * is then page aligned.
			 */
			load_bias = ELF_PAGESTART(load_bias - vaddr);
		}

		error = elf_load(bprm->file, load_bias + vaddr, elf_ppnt,
				 elf_prot, elf_flags, total_size);
		if (BAD_ADDR(error)) {
			retval = IS_ERR_VALUE(error) ?
				 PTR_ERR((void *)error) : -EINVAL;
			goto out_free_dentry;
		}

		if (first_pt_load) {
			first_pt_load = 0;
			if (elf_ex->e_type == ET_DYN) {
				load_bias += error -
					     ELF_PAGESTART(load_bias + vaddr);
				reloc_func_desc = load_bias;
			}
		}

		/*
		 * Figure out which segment in the file contains the Program
		 * Header table, and map to the associated memory address.
		 */
		if (elf_ppnt->p_offset <= elf_ex->e_phoff &&
		    elf_ex->e_phoff < elf_ppnt->p_offset + elf_ppnt->p_filesz) {
			phdr_addr = elf_ex->e_phoff - elf_ppnt->p_offset +
				    elf_ppnt->p_vaddr;
		}

		k = elf_ppnt->p_vaddr;
		if ((elf_ppnt->p_flags & PF_X) && k < start_code)
			start_code = k;
		if (start_data < k)
			start_data = k;

		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work. Avoid overflows. */
			retval = -EINVAL;
			goto out_free_dentry;
		}

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk)
			elf_brk = k;
	}
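
	/*
	 * A worked example of the PIE case above, with invented numbers:
	 * with ELF_ET_DYN_BASE at, say, 0x555555554000 and a requested
	 * 2 MiB p_align (0x200000), a PIE whose first PT_LOAD has
	 * p_vaddr == 0 ends up with
	 *
	 *	load_bias = (0x555555554000 + arch_mmap_rnd()) & ~0x1fffff
	 *
	 * and every vaddr in the file, including e_entry and phdr_addr,
	 * is shifted by that single bias below.
	 */
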
	e_entry = elf_ex->e_entry + load_bias;
	phdr_addr += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;

	current->mm->start_brk = current->mm->brk = ELF_PAGEALIGN(elf_brk);

	if (interpreter) {
		elf_entry = load_elf_interp(interp_elf_ex,
					    interpreter,
					    load_bias, interp_elf_phdata,
					    &arch_state);
		if (!IS_ERR_VALUE(elf_entry)) {
			/*
			 * load_elf_interp() returns relocation
			 * adjustment
			 */
			interp_load_addr = elf_entry;
			elf_entry += interp_elf_ex->e_entry;
		}
		if (BAD_ADDR(elf_entry)) {
			retval = IS_ERR_VALUE(elf_entry) ?
					(int)elf_entry : -EINVAL;
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		fput(interpreter);

		kfree(interp_elf_ex);
		kfree(interp_elf_phdata);
	} else {
		elf_entry = e_entry;
		if (BAD_ADDR(elf_entry)) {
			retval = -EINVAL;
			goto out_free_dentry;
		}
	}

	kfree(elf_phdata);

	set_binfmt(&elf_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
	retval = ARCH_SETUP_ADDITIONAL_PAGES(bprm, elf_ex, !!interpreter);
	if (retval < 0)
		goto out;
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

	retval = create_elf_tables(bprm, elf_ex, interp_load_addr,
				   e_entry, phdr_addr);
	if (retval < 0)
		goto out;

	mm = current->mm;
	mm->end_code = end_code;
	mm->start_code = start_code;
	mm->start_data = start_data;
	mm->end_data = end_data;
	mm->start_stack = bprm->p;

	if ((current->flags & PF_RANDOMIZE) && (snapshot_randomize_va_space > 1)) {
		/*
		 * For architectures with ELF randomization, when executing
		 * a loader directly (i.e. no interpreter listed in ELF
		 * headers), move the brk area out of the mmap region
		 * (since it grows up, and may collide early with the stack
		 * growing down), and into the unused ELF_ET_DYN_BASE region.
		 */
		if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
		    elf_ex->e_type == ET_DYN && !interpreter) {
			mm->brk = mm->start_brk = ELF_ET_DYN_BASE;
		} else {
			/* Otherwise leave a gap between .bss and brk. */
			mm->brk = mm->start_brk = mm->brk + PAGE_SIZE;
		}

		mm->brk = mm->start_brk = arch_randomize_brk(mm);
#ifdef compat_brk_randomized
		current->brk_randomized = 1;
#endif
	}

	if (current->personality & MMAP_PAGE_ZERO) {
		/*
		 * Why this, you ask??? Well SVr4 maps page 0 as read-only,
		 * and some applications "depend" upon this behavior.
		 * Since we do not have the power to recompile these, we
		 * emulate the SVr4 behavior. Sigh.
		 */
		error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);

		retval = do_mseal(0, PAGE_SIZE, 0);
		if (retval)
			pr_warn_ratelimited("pid=%d, couldn't seal address 0, ret=%d.\n",
					    task_pid_nr(current), retval);
	}

	regs = current_pt_regs();
#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example). In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself. This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	finalize_exec(bprm);
	START_THREAD(elf_ex, regs, elf_entry, bprm->p);
	retval = 0;
out:
	return retval;

	/* error cleanup */
out_free_dentry:
	kfree(interp_elf_ex);
	kfree(interp_elf_phdata);
out_free_file:
	if (interpreter)
		fput(interpreter);
out_free_ph:
	kfree(elf_phdata);
	goto out;
}

#ifdef CONFIG_USELIB
/*
 * This is really simpleminded and specialized - we are loading an
 * a.out library that is given an ELF header.
 */
static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	int retval, error, i, j;
	struct elfhdr elf_ex;

	error = -ENOEXEC;
	retval = elf_read(file, &elf_ex, sizeof(elf_ex), 0);
	if (retval < 0)
		goto out;

	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	    !elf_check_arch(&elf_ex) || !file->f_op->mmap)
		goto out;
	if (elf_check_fdpic(&elf_ex))
		goto out;

	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	eppnt = elf_phdata;
	error = -ENOEXEC;
	retval = elf_read(file, eppnt, j, elf_ex.e_phoff);
	if (retval < 0)
		goto out_free_ph;

	for (j = 0, i = 0; i < elf_ex.e_phnum; i++)
		if ((eppnt + i)->p_type == PT_LOAD)
			j++;
	if (j != 1)
		goto out_free_ph;

	while (eppnt->p_type != PT_LOAD)
		eppnt++;

	/* Now use mmap to map the library into memory. */
	error = elf_load(file, ELF_PAGESTART(eppnt->p_vaddr),
			 eppnt,
			 PROT_READ | PROT_WRITE | PROT_EXEC,
			 MAP_FIXED_NOREPLACE | MAP_PRIVATE,
			 0);

	if (error != ELF_PAGESTART(eppnt->p_vaddr))
		goto out_free_ph;

	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}
#endif /* #ifdef CONFIG_USELIB */

#ifdef CONFIG_ELF_CORE
/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */

/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};

static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup(strlen(en->name) + 1, 4);
	sz += roundup(en->datasz, 4);

	return sz;
}

static int writenote(struct memelfnote *men, struct coredump_params *cprm)
{
	struct elf_note en;
	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	return dump_emit(cprm, &en, sizeof(en)) &&
	       dump_emit(cprm, men->name, en.n_namesz) && dump_align(cprm, 4) &&
	       dump_emit(cprm, men->data, men->datasz) && dump_align(cprm, 4);
}
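
/*
 * Example of the arithmetic above (sizes invented for illustration): a
 * note named "CORE" with a 336-byte descriptor costs
 *
 *	12 (struct elf_note: n_namesz, n_descsz, n_type)
 *	+ roundup(5, 4)   = 8 bytes for "CORE\0" plus padding
 *	+ roundup(336, 4) = 336 bytes of descriptor
 *	                  = 356 bytes in the file,
 *
 * which is exactly what the dump_emit()/dump_align() sequence in
 * writenote() produces.
 */
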
static void fill_elf_header(struct elfhdr *elf, int segs,
			    u16 machine, u32 flags)
{
	memset(elf, 0, sizeof(*elf));

	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;

	elf->e_type = ET_CORE;
	elf->e_machine = machine;
	elf->e_version = EV_CURRENT;
	elf->e_phoff = sizeof(struct elfhdr);
	elf->e_flags = flags;
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = segs;
}

static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
{
	phdr->p_type = PT_NOTE;
	phdr->p_offset = offset;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = sz;
	phdr->p_memsz = 0;
	phdr->p_flags = 0;
	phdr->p_align = 4;
}

static void fill_note(struct memelfnote *note, const char *name, int type,
		      unsigned int sz, void *data)
{
	note->name = name;
	note->type = type;
	note->datasz = sz;
	note->data = data;
}

/*
 * Fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus_common *prstatus,
		struct task_struct *p, long signr)
{
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	rcu_read_lock();
	prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	prstatus->pr_pid = task_pid_vnr(p);
	prstatus->pr_pgrp = task_pgrp_vnr(p);
	prstatus->pr_sid = task_session_vnr(p);
	if (thread_group_leader(p)) {
		struct task_cputime cputime;

		/*
		 * This is the record for the group leader. It shows the
		 * group-wide total, not its individual thread total.
		 */
		thread_group_cputime(p, &cputime);
		prstatus->pr_utime = ns_to_kernel_old_timeval(cputime.utime);
		prstatus->pr_stime = ns_to_kernel_old_timeval(cputime.stime);
	} else {
		u64 utime, stime;

		task_cputime(p, &utime, &stime);
		prstatus->pr_utime = ns_to_kernel_old_timeval(utime);
		prstatus->pr_stime = ns_to_kernel_old_timeval(stime);
	}

	prstatus->pr_cutime = ns_to_kernel_old_timeval(p->signal->cutime);
	prstatus->pr_cstime = ns_to_kernel_old_timeval(p->signal->cstime);
}

static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
		       struct mm_struct *mm)
{
	const struct cred *cred;
	unsigned int i, len;
	unsigned int state;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ - 1;
	if (copy_from_user(&psinfo->pr_psargs,
			   (const char __user *)mm->arg_start, len))
		return -EFAULT;
	for (i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	rcu_read_lock();
	psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	psinfo->pr_pid = task_pid_vnr(p);
	psinfo->pr_pgrp = task_pgrp_vnr(p);
	psinfo->pr_sid = task_session_vnr(p);

	state = READ_ONCE(p->__state);
	i = state ? ffz(~state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	rcu_read_lock();
	cred = __task_cred(p);
	SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
	SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
	rcu_read_unlock();
	get_task_comm(psinfo->pr_fname, p);

	return 0;
}

static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
{
	elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
	int i = 0;
	do
		i += 2;
	while (auxv[i - 2] != AT_NULL);
	fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
}

static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
		const kernel_siginfo_t *siginfo)
{
	copy_siginfo_to_external(csigdata, siginfo);
	fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
}

/*
 * Format of NT_FILE note:
 *
 * long count     -- how many files are mapped
 * long page_size -- units for file_ofs
 * array of [COUNT] elements of
 *   long start
 *   long end
 *   long file_ofs
 * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
 */
static int fill_files_note(struct memelfnote *note, struct coredump_params *cprm)
{
	unsigned count, size, names_ofs, remaining, n;
	user_long_t *data;
	user_long_t *start_end_ofs;
	char *name_base, *name_curpos;
	int i;

	/* *Estimated* file count and total data size needed */
	count = cprm->vma_count;
	if (count > UINT_MAX / 64)
		return -EINVAL;
	size = count * 64;

	names_ofs = (2 + 3 * count) * sizeof(data[0]);
alloc:
	/* paranoia check */
	if (size >= core_file_note_size_limit) {
		pr_warn_once("coredump Note size too large: %u (does kernel.core_file_note_size_limit sysctl need adjustment?)\n",
			     size);
		return -EINVAL;
	}
	size = round_up(size, PAGE_SIZE);
	/*
	 * "size" can be 0 here legitimately.
	 * Let it ENOMEM and omit NT_FILE section which will be empty anyway.
	 */
	data = kvmalloc(size, GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(data))
		return -ENOMEM;

	start_end_ofs = data + 2;
	name_base = name_curpos = ((char *)data) + names_ofs;
	remaining = size - names_ofs;
	count = 0;
	for (i = 0; i < cprm->vma_count; i++) {
		struct core_vma_metadata *m = &cprm->vma_meta[i];
		struct file *file;
		const char *filename;

		file = m->file;
		if (!file)
			continue;
		filename = file_path(file, name_curpos, remaining);
		if (IS_ERR(filename)) {
			if (PTR_ERR(filename) == -ENAMETOOLONG) {
				kvfree(data);
				size = size * 5 / 4;
				goto alloc;
			}
			continue;
		}

		/* file_path() fills at the end, move name down */
		/* n = strlen(filename) + 1: */
		n = (name_curpos + remaining) - filename;
		remaining = filename - name_curpos;
		memmove(name_curpos, filename, n);
		name_curpos += n;

		*start_end_ofs++ = m->start;
		*start_end_ofs++ = m->end;
		*start_end_ofs++ = m->pgoff;
		count++;
	}

	/* Now we know the exact count of files, so we can store it */
	data[0] = count;
	data[1] = PAGE_SIZE;
	/*
	 * Count usually is less than mm->map_count,
	 * we need to move filenames down.
	 */
	n = cprm->vma_count - count;
	if (n != 0) {
		unsigned shift_bytes = n * 3 * sizeof(data[0]);
		memmove(name_base - shift_bytes, name_base,
			name_curpos - name_base);
		name_curpos -= shift_bytes;
	}

	size = name_curpos - (char *)data;
	fill_note(note, "CORE", NT_FILE, size, data);
	return 0;
}
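
/*
 * A consumer-side sketch of the NT_FILE layout documented above (purely
 * illustrative; "desc" is assumed to point at a note descriptor already
 * located in a core file, e.g. via libelf or pread()):
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	struct nt_file_hdr { long count, page_size; };
 *	struct nt_file_ent { long start, end, file_ofs; };
 *
 *	void print_nt_file(const char *desc)
 *	{
 *		const struct nt_file_hdr *h = (const void *)desc;
 *		const struct nt_file_ent *e = (const void *)(h + 1);
 *		const char *name = (const char *)(e + h->count);
 *		long i;
 *
 *		for (i = 0; i < h->count; i++, name += strlen(name) + 1)
 *			printf("%lx-%lx @ %lx: %s\n", e[i].start, e[i].end,
 *			       e[i].file_ofs * h->page_size, name);
 *	}
 *
 * Note that file_ofs is stored in page_size units (m->pgoff above), which
 * is why readers must multiply it back out.
 */
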
#include <linux/regset.h>

struct elf_thread_core_info {
	struct elf_thread_core_info *next;
	struct task_struct *task;
	struct elf_prstatus prstatus;
	struct memelfnote notes[];
};

struct elf_note_info {
	struct elf_thread_core_info *thread;
	struct memelfnote psinfo;
	struct memelfnote signote;
	struct memelfnote auxv;
	struct memelfnote files;
	user_siginfo_t csigdata;
	size_t size;
	int thread_notes;
};

#ifdef CORE_DUMP_USE_REGSET
/*
 * When a regset has a writeback hook, we call it on each thread before
 * dumping user memory. On register window machines, this makes sure the
 * user memory backing the register data is up to date before we read it.
 */
static void do_thread_regset_writeback(struct task_struct *task,
				       const struct user_regset *regset)
{
	if (regset->writeback)
		regset->writeback(task, regset, 1);
}

#ifndef PRSTATUS_SIZE
#define PRSTATUS_SIZE sizeof(struct elf_prstatus)
#endif

#ifndef SET_PR_FPVALID
#define SET_PR_FPVALID(S) ((S)->pr_fpvalid = 1)
#endif

static int fill_thread_core_info(struct elf_thread_core_info *t,
				 const struct user_regset_view *view,
				 long signr, struct elf_note_info *info)
{
	unsigned int note_iter, view_iter;

	/*
	 * NT_PRSTATUS is the one special case, because the regset data
	 * goes into the pr_reg field inside the note contents, rather
	 * than being the whole note contents. We fill the regset in here.
	 * We assume that regset 0 is NT_PRSTATUS.
	 */
	fill_prstatus(&t->prstatus.common, t->task, signr);
	regset_get(t->task, &view->regsets[0],
		   sizeof(t->prstatus.pr_reg), &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
		  PRSTATUS_SIZE, &t->prstatus);
	info->size += notesize(&t->notes[0]);

	do_thread_regset_writeback(t->task, &view->regsets[0]);

	/*
	 * Each other regset might generate a note too. For each regset
	 * that has no core_note_type or is inactive, skip it.
	 */
	note_iter = 1;
	for (view_iter = 1; view_iter < view->n; ++view_iter) {
		const struct user_regset *regset = &view->regsets[view_iter];
		int note_type = regset->core_note_type;
		bool is_fpreg = note_type == NT_PRFPREG;
		void *data;
		int ret;

		do_thread_regset_writeback(t->task, regset);
		if (!note_type) /* not for coredumps */
			continue;
		if (regset->active && regset->active(t->task, regset) <= 0)
			continue;

		ret = regset_get_alloc(t->task, regset, ~0U, &data);
		if (ret < 0)
			continue;

		if (WARN_ON_ONCE(note_iter >= info->thread_notes))
			break;

		if (is_fpreg)
			SET_PR_FPVALID(&t->prstatus);

		fill_note(&t->notes[note_iter], is_fpreg ? "CORE" : "LINUX",
			  note_type, ret, data);

		info->size += notesize(&t->notes[note_iter]);
		note_iter++;
	}

	return 1;
}
"CORE" : "LINUX", 1797 note_type, ret, data); 1798 1799 info->size += notesize(&t->notes[note_iter]); 1800 note_iter++; 1801 } 1802 1803 return 1; 1804 } 1805 #else 1806 static int fill_thread_core_info(struct elf_thread_core_info *t, 1807 const struct user_regset_view *view, 1808 long signr, struct elf_note_info *info) 1809 { 1810 struct task_struct *p = t->task; 1811 elf_fpregset_t *fpu; 1812 1813 fill_prstatus(&t->prstatus.common, p, signr); 1814 elf_core_copy_task_regs(p, &t->prstatus.pr_reg); 1815 1816 fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), 1817 &(t->prstatus)); 1818 info->size += notesize(&t->notes[0]); 1819 1820 fpu = kzalloc(sizeof(elf_fpregset_t), GFP_KERNEL); 1821 if (!fpu || !elf_core_copy_task_fpregs(p, fpu)) { 1822 kfree(fpu); 1823 return 1; 1824 } 1825 1826 t->prstatus.pr_fpvalid = 1; 1827 fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(*fpu), fpu); 1828 info->size += notesize(&t->notes[1]); 1829 1830 return 1; 1831 } 1832 #endif 1833 1834 static int fill_note_info(struct elfhdr *elf, int phdrs, 1835 struct elf_note_info *info, 1836 struct coredump_params *cprm) 1837 { 1838 struct task_struct *dump_task = current; 1839 const struct user_regset_view *view; 1840 struct elf_thread_core_info *t; 1841 struct elf_prpsinfo *psinfo; 1842 struct core_thread *ct; 1843 1844 psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL); 1845 if (!psinfo) 1846 return 0; 1847 fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo); 1848 1849 #ifdef CORE_DUMP_USE_REGSET 1850 view = task_user_regset_view(dump_task); 1851 1852 /* 1853 * Figure out how many notes we're going to need for each thread. 1854 */ 1855 info->thread_notes = 0; 1856 for (int i = 0; i < view->n; ++i) 1857 if (view->regsets[i].core_note_type != 0) 1858 ++info->thread_notes; 1859 1860 /* 1861 * Sanity check. We rely on regset 0 being in NT_PRSTATUS, 1862 * since it is our one special case. 1863 */ 1864 if (unlikely(info->thread_notes == 0) || 1865 unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) { 1866 WARN_ON(1); 1867 return 0; 1868 } 1869 1870 /* 1871 * Initialize the ELF file header. 1872 */ 1873 fill_elf_header(elf, phdrs, 1874 view->e_machine, view->e_flags); 1875 #else 1876 view = NULL; 1877 info->thread_notes = 2; 1878 fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS); 1879 #endif 1880 1881 /* 1882 * Allocate a structure for each thread. 1883 */ 1884 info->thread = kzalloc(offsetof(struct elf_thread_core_info, 1885 notes[info->thread_notes]), 1886 GFP_KERNEL); 1887 if (unlikely(!info->thread)) 1888 return 0; 1889 1890 info->thread->task = dump_task; 1891 for (ct = dump_task->signal->core_state->dumper.next; ct; ct = ct->next) { 1892 t = kzalloc(offsetof(struct elf_thread_core_info, 1893 notes[info->thread_notes]), 1894 GFP_KERNEL); 1895 if (unlikely(!t)) 1896 return 0; 1897 1898 t->task = ct->task; 1899 t->next = info->thread->next; 1900 info->thread->next = t; 1901 } 1902 1903 /* 1904 * Now fill in each thread's information. 1905 */ 1906 for (t = info->thread; t != NULL; t = t->next) 1907 if (!fill_thread_core_info(t, view, cprm->siginfo->si_signo, info)) 1908 return 0; 1909 1910 /* 1911 * Fill in the two process-wide notes. 
/*
 * Write all the notes for each thread. When writing the first thread, the
 * process-wide notes are interleaved after the first thread-specific note.
 */
static int write_note_info(struct elf_note_info *info,
			   struct coredump_params *cprm)
{
	bool first = true;
	struct elf_thread_core_info *t = info->thread;

	do {
		int i;

		if (!writenote(&t->notes[0], cprm))
			return 0;

		if (first && !writenote(&info->psinfo, cprm))
			return 0;
		if (first && !writenote(&info->signote, cprm))
			return 0;
		if (first && !writenote(&info->auxv, cprm))
			return 0;
		if (first && info->files.data &&
		    !writenote(&info->files, cprm))
			return 0;

		for (i = 1; i < info->thread_notes; ++i)
			if (t->notes[i].data &&
			    !writenote(&t->notes[i], cprm))
				return 0;

		first = false;
		t = t->next;
	} while (t);

	return 1;
}

static void free_note_info(struct elf_note_info *info)
{
	struct elf_thread_core_info *threads = info->thread;
	while (threads) {
		unsigned int i;
		struct elf_thread_core_info *t = threads;
		threads = t->next;
		WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
		for (i = 1; i < info->thread_notes; ++i)
			kvfree(t->notes[i].data);
		kfree(t);
	}
	kfree(info->psinfo.data);
	kvfree(info->files.data);
}

static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
			     elf_addr_t e_shoff, int segs)
{
	elf->e_shoff = e_shoff;
	elf->e_shentsize = sizeof(*shdr4extnum);
	elf->e_shnum = 1;
	elf->e_shstrndx = SHN_UNDEF;

	memset(shdr4extnum, 0, sizeof(*shdr4extnum));

	shdr4extnum->sh_type = SHT_NULL;
	shdr4extnum->sh_size = elf->e_shnum;
	shdr4extnum->sh_link = elf->e_shstrndx;
	shdr4extnum->sh_info = segs;
}
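/*
 * Illustrative sketch (editorial, not part of the kernel build): the
 * consumer side of fill_extnum_info(). When e_phnum reads PN_XNUM, the
 * real program header count is recovered from sh_info of the single
 * SHT_NULL section header that elf_core_dump() below writes at e_shoff.
 * A hypothetical reader, assuming a 64-bit core file mapped at 'ehdr':
 *
 *	#include <elf.h>
 *
 *	static size_t real_phnum(const Elf64_Ehdr *ehdr)
 *	{
 *		const Elf64_Shdr *shdr0;
 *
 *		if (ehdr->e_phnum != PN_XNUM)
 *			return ehdr->e_phnum;
 *		shdr0 = (const Elf64_Shdr *)((const char *)ehdr +
 *					     ehdr->e_shoff);
 *		return shdr0->sh_info;
 *	}
 */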
/*
 * Actual dumper.
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out. If we run out of core limit
 * we just truncate.
 */
static int elf_core_dump(struct coredump_params *cprm)
{
	int has_dumped = 0;
	int segs, i;
	struct elfhdr elf;
	loff_t offset = 0, dataoff;
	struct elf_note_info info = { };
	struct elf_phdr *phdr4note = NULL;
	struct elf_shdr *shdr4extnum = NULL;
	Elf_Half e_phnum;
	elf_addr_t e_shoff;

	/*
	 * The number of segs is recorded in the ELF header as a 16-bit
	 * value. Please check the DEFAULT_MAX_MAP_COUNT definition when
	 * you modify here.
	 */
	segs = cprm->vma_count + elf_core_extra_phdrs(cprm);

	/* for notes section */
	segs++;

	/*
	 * If segs > PN_XNUM(0xffff), then e_phnum would overflow. To avoid
	 * this, the kernel supports extended numbering: e_phnum is set to
	 * PN_XNUM and the real count goes into the sh_info field of the
	 * first section header. Have a look at include/linux/elf.h for
	 * further information.
	 */
	e_phnum = segs > PN_XNUM ? PN_XNUM : segs;

	/*
	 * Collect all the non-memory information about the process for the
	 * notes. This also sets up the file header.
	 */
	if (!fill_note_info(&elf, e_phnum, &info, cprm))
		goto end_coredump;

	has_dumped = 1;

	offset += sizeof(elf);				/* ELF header */
	offset += segs * sizeof(struct elf_phdr);	/* Program headers */

	/* Write notes phdr entry */
	{
		size_t sz = info.size;

		/* For cell spufs and x86 xstate */
		sz += elf_coredump_extra_notes_size();

		phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
		if (!phdr4note)
			goto end_coredump;

		fill_elf_note_phdr(phdr4note, sz, offset);
		offset += sz;
	}

	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);

	offset += cprm->vma_data_size;
	offset += elf_core_extra_data_size(cprm);
	e_shoff = offset;

	if (e_phnum == PN_XNUM) {
		shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
		if (!shdr4extnum)
			goto end_coredump;
		fill_extnum_info(&elf, shdr4extnum, e_shoff, segs);
	}

	offset = dataoff;

	if (!dump_emit(cprm, &elf, sizeof(elf)))
		goto end_coredump;

	if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
		goto end_coredump;

	/* Write program headers for segments dump */
	for (i = 0; i < cprm->vma_count; i++) {
		struct core_vma_metadata *meta = cprm->vma_meta + i;
		struct elf_phdr phdr;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = meta->start;
		phdr.p_paddr = 0;
		phdr.p_filesz = meta->dump_size;
		phdr.p_memsz = meta->end - meta->start;
		offset += phdr.p_filesz;
		phdr.p_flags = 0;
		if (meta->flags & VM_READ)
			phdr.p_flags |= PF_R;
		if (meta->flags & VM_WRITE)
			phdr.p_flags |= PF_W;
		if (meta->flags & VM_EXEC)
			phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		if (!dump_emit(cprm, &phdr, sizeof(phdr)))
			goto end_coredump;
	}

	if (!elf_core_write_extra_phdrs(cprm, offset))
		goto end_coredump;

	/* write out the notes section */
	if (!write_note_info(&info, cprm))
		goto end_coredump;

	/* For cell spufs and x86 xstate */
	if (elf_coredump_extra_notes_write(cprm))
		goto end_coredump;

	/* Align to page */
	dump_skip_to(cprm, dataoff);

	for (i = 0; i < cprm->vma_count; i++) {
		struct core_vma_metadata *meta = cprm->vma_meta + i;

		if (!dump_user_range(cprm, meta->start, meta->dump_size))
			goto end_coredump;
	}

	if (!elf_core_write_extra_data(cprm))
		goto end_coredump;

	if (e_phnum == PN_XNUM) {
		if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum)))
			goto end_coredump;
	}

end_coredump:
	free_note_info(&info);
	kfree(shdr4extnum);
	kfree(phdr4note);
	return has_dumped;
}

#endif /* CONFIG_ELF_CORE */

static int __init init_elf_binfmt(void)
{
	register_binfmt(&elf_format);
	return 0;
}

static void __exit exit_elf_binfmt(void)
{
	/* Remove the ELF loader. */
	unregister_binfmt(&elf_format);
}

core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);

#ifdef CONFIG_BINFMT_ELF_KUNIT_TEST
#include "tests/binfmt_elf_kunit.c"
#endif