// SPDX-License-Identifier: GPL-2.0-only
#include <linux/elf-randomize.h>
 * If we don't support core dumping, then supply a NULL so we
#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
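/*
 * A worked example of the three macros above (a sketch, assuming
 * ELF_MIN_ALIGN is 0x1000, i.e. one 4 KiB page):
 *
 *   ELF_PAGESTART(0x12345)  == 0x12000   round down to the page start
 *   ELF_PAGEOFFSET(0x12345) == 0x345     offset within the page
 *   ELF_PAGEALIGN(0x12345)  == 0x13000   round up to the next page
 */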
/* in elf_coredump_set_mm_eflags() */
mm->saved_e_flags = flags;
/* in elf_coredump_get_mm_eflags() */
flags = mm->saved_e_flags;
/* in padzero() */
nbyte = ELF_MIN_ALIGN - nbyte;
return -EFAULT;
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) (sp -= len)
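/*
 * A sketch of the grows-down variants above (grows-up versions exist
 * under CONFIG_STACK_GROWSUP): STACK_ALLOC() reserves len bytes by
 * moving sp down and evaluates to the new, lower address;
 * STACK_ADD() steps an elf_addr_t pointer down by a number of
 * entries; STACK_ROUND() performs the same subtraction and then
 * masks the result to the 16-byte alignment expected of the final
 * stack pointer (the "&~ 15UL").
 */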
/* in create_elf_tables() */
struct mm_struct *mm = current->mm;
unsigned long p = bprm->p;
int argc = bprm->argc;
int envc = bprm->envc;
 * In some cases (e.g. Hyper-Threading), we want to avoid L1
p = arch_align_stack(p);
u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
return -EFAULT;
u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
return -EFAULT;
STACK_ALLOC(p, sizeof(k_rand_bytes));
return -EFAULT;
elf_info = (elf_addr_t *)mm->saved_auxv;
NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
if (bprm->interp_flags & BINPRM_FLAGS_PRESERVE_ARGV0)
NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid));
NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid));
NEW_AUX_ENT(AT_GID, from_kgid_munged(cred->user_ns, cred->gid));
NEW_AUX_ENT(AT_EGID, from_kgid_munged(cred->user_ns, cred->egid));
NEW_AUX_ENT(AT_SECURE, bprm->secureexec);
NEW_AUX_ENT(AT_EXECFN, bprm->exec);
if (bprm->have_execfd) {
NEW_AUX_ENT(AT_EXECFD, bprm->execfd);
memset(elf_info, 0, (char *)mm->saved_auxv +
sizeof(mm->saved_auxv) - (char *)elf_info);
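/*
 * mm->saved_auxv holds the auxiliary vector as flat (id, value)
 * pairs. AT_NULL is zero, so the memset above both clears the unused
 * tail of the array and provides the terminating AT_NULL entry, e.g.:
 *
 *   [AT_PHNUM, n] [AT_UID, uid] ... [AT_NULL, 0]
 */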
ei_index = elf_info - (elf_addr_t *)mm->saved_auxv;
sp = STACK_ADD(p, ei_index);
bprm->p = STACK_ROUND(sp, items);
sp = (elf_addr_t __user *)bprm->p - items - ei_index;
bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
sp = (elf_addr_t __user *)bprm->p;
 * far ahead a user-space access may be in order to grow the stack.
return -EINTR;
vma = find_extend_vma_locked(mm, bprm->p);
return -EFAULT;
return -EFAULT;
p = mm->arg_end = mm->arg_start;
while (argc-- > 0) {
if (put_user((elf_addr_t)p, sp++))
return -EFAULT;
len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
return -EINVAL;
p += len;
return -EFAULT;
mm->arg_end = p;
mm->env_end = mm->env_start = p;
while (envc-- > 0) {
if (put_user((elf_addr_t)p, sp++))
return -EFAULT;
len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
return -EINVAL;
p += len;
return -EFAULT;
mm->env_end = p;
if (copy_to_user(sp, mm->saved_auxv, ei_index * sizeof(elf_addr_t)))
return -EFAULT;
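/*
 * The loops above fix up the argv/envp pointers and copy the saved
 * auxv out; the resulting stack, from the final bprm->p upward,
 * follows the System V ABI layout:
 *
 *   argc | argv[0..argc-1] | NULL | envp[0..envc-1] | NULL |
 *   auxv (id, value) pairs | AT_NULL | strings/random bytes above
 */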
/* in elf_map() */
 * Map "eppnt->p_filesz" bytes from "filep" offset "eppnt->p_offset"
unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
/* mmap() will return -EINVAL if given a zero size, but a
 * So we first map the 'big' image - and unmap the remainder at
vm_munmap(map_addr+size, total_size-size);
PTR_ERR((void *)map_addr) == -EEXIST)
task_pid_nr(current), current->comm, (void *)addr);
/* in elf_load() */
 * Map "eppnt->p_filesz" bytes from "filep" offset "eppnt->p_offset"
if (eppnt->p_filesz) {
if (eppnt->p_memsz > eppnt->p_filesz) {
zero_start = map_addr + ELF_PAGEOFFSET(eppnt->p_vaddr) +
eppnt->p_filesz;
zero_end = map_addr + ELF_PAGEOFFSET(eppnt->p_vaddr) +
eppnt->p_memsz;
return -EFAULT;
zero_end = zero_start + ELF_PAGEOFFSET(eppnt->p_vaddr) +
eppnt->p_memsz;
if (eppnt->p_memsz > eppnt->p_filesz) {
error = vm_brk_flags(zero_start, zero_end - zero_start,
/* in total_mapping_size() */
elf_addr_t min_addr = -1;
return pt_load ? (max_addr - min_addr) : 0;
/* in elf_read() */
return (rv < 0) ? rv : -EIO;
/* in maximum_alignment() */
/* skip non-power of two alignments as invalid */
 * load_elf_phdrs() - load ELF program headers
/* in load_elf_phdrs() */
int retval = -1;
if (elf_ex->e_phentsize != sizeof(struct elf_phdr))
size = sizeof(struct elf_phdr) * elf_ex->e_phnum;
retval = elf_read(elf_file, elf_phdata, size, elf_ex->e_phoff);
 * struct arch_elf_state - arch-specific ELF loading state
 * arch_elf_pt_proc() - check a PT_LOPROC..PT_HIPROC ELF program header
 * @state: Architecture-specific state preserved throughout the process
 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
 * arch_check_elf() - check an ELF executable
 * @state: Architecture-specific state preserved throughout the process
 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
/* in load_elf_interp() */
if (interp_elf_ex->e_type != ET_EXEC &&
interp_elf_ex->e_type != ET_DYN)
interp_elf_ex->e_phnum);
error = -EINVAL;
for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
if (eppnt->p_type == PT_LOAD) {
int elf_prot = make_prot(eppnt->p_flags, arch_state,
vaddr = eppnt->p_vaddr;
if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
else if (no_base && interp_elf_ex->e_type == ET_DYN)
load_addr = -vaddr;
interp_elf_ex->e_type == ET_DYN) {
load_addr = map_addr - ELF_PAGESTART(vaddr);
k = load_addr + eppnt->p_vaddr;
eppnt->p_filesz > eppnt->p_memsz ||
eppnt->p_memsz > TASK_SIZE ||
TASK_SIZE - eppnt->p_memsz < k) {
error = -ENOMEM;
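/*
 * Note the overflow-safe form of the bounds check above:
 * "TASK_SIZE - eppnt->p_memsz < k" rejects a segment whose end would
 * pass TASK_SIZE without ever computing k + p_memsz, which could
 * wrap around for a hostile program header.
 */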
/* in parse_elf_property() */
return -ENOENT;
return -EIO;
datasz -= *off;
return -ENOEXEC;
datasz -= sizeof(*pr);
if (pr->pr_datasz > datasz)
return -ENOEXEC;
step = round_up(pr->pr_datasz, ELF_GNU_PROPERTY_ALIGN);
return -ENOEXEC;
if (have_prev_type && pr->pr_type <= *prev_type)
return -ENOEXEC;
*prev_type = pr->pr_type;
ret = arch_parse_elf_property(pr->pr_type, data + o,
pr->pr_datasz, ELF_COMPAT, arch);
/* in parse_elf_properties() */
if (WARN_ON_ONCE(phdr->p_type != PT_GNU_PROPERTY))
return -ENOEXEC;
if (phdr->p_filesz > sizeof(note))
return -ENOEXEC;
pos = phdr->p_offset;
n = kernel_read(f, &note, phdr->p_filesz, &pos);
return -EIO;
NN_GNU_PROPERTY_TYPE_0, n - sizeof(note.nhdr)))
return -ENOEXEC;
return -ENOEXEC;
if (note.nhdr.n_descsz > n - off)
return -ENOEXEC;
return ret == -ENOENT ? 0 : ret;
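/*
 * A sketch of the note being parsed above, assuming the layout
 * toolchains emit for PT_GNU_PROPERTY:
 *
 *   Elf_Nhdr { n_namesz = 4, n_descsz, n_type = NT_GNU_PROPERTY_TYPE_0 }
 *   "GNU\0"
 *   { pr_type, pr_datasz, data } entries, each padded to
 *   ELF_GNU_PROPERTY_ALIGN and sorted by ascending pr_type, which is
 *   what the prev_type comparison above enforces.
 */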
/* in load_elf_binary() */
struct elfhdr *elf_ex = (struct elfhdr *)bprm->buf;
retval = -ENOEXEC;
if (memcmp(elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
if (elf_ex->e_type != ET_EXEC && elf_ex->e_type != ET_DYN)
if (!can_mmap_file(bprm->file))
elf_phdata = load_elf_phdrs(elf_ex, bprm->file);
for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++) {
if (elf_ppnt->p_type == PT_GNU_PROPERTY) {
if (elf_ppnt->p_type != PT_INTERP)
 * This is the program interpreter used for shared libraries -
retval = -ENOEXEC;
if (elf_ppnt->p_filesz > PATH_MAX || elf_ppnt->p_filesz < 2)
retval = -ENOMEM;
elf_interpreter = kmalloc(elf_ppnt->p_filesz, GFP_KERNEL);
retval = elf_read(bprm->file, elf_interpreter, elf_ppnt->p_filesz,
elf_ppnt->p_offset);
retval = -ENOEXEC;
if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
 * If the binary is not readable then enforce mm->dumpable = 0
retval = -ENOMEM;
for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++)
switch (elf_ppnt->p_type) {
if (elf_ppnt->p_flags & PF_X)
bprm->file, false,
retval = -ELIBBAD;
if (memcmp(interp_elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
for (i = 0; i < interp_elf_ex->e_phnum; i++, elf_ppnt++)
switch (elf_ppnt->p_type) {
retval = parse_elf_properties(interpreter ?: bprm->file,
current->personality |= READ_IMPLIES_EXEC;
if (!(current->personality & ADDR_NO_RANDOMIZE) && snapshot_randomize_va_space)
current->flags |= PF_RANDOMIZE;
i < elf_ex->e_phnum; i++, elf_ppnt++) {
if (elf_ppnt->p_type != PT_LOAD)
elf_prot = make_prot(elf_ppnt->p_flags, &arch_state,
vaddr = elf_ppnt->p_vaddr;
 * MAP_FIXED_NOREPLACE in the once-per-binary logic following.
} else if (elf_ex->e_type == ET_EXEC) {
} else if (elf_ex->e_type == ET_DYN) {
elf_ex->e_phnum);
retval = -EINVAL;
alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
 * also affected by program vs loader and ASLR.
if (current->flags & PF_RANDOMIZE)
load_bias &= ~(alignment - 1);
load_bias = elf_load(bprm->file, 0, elf_ppnt,
PTR_ERR((void*)load_bias) : -EINVAL;
load_bias &= ~(alignment - 1);
load_bias = ELF_PAGESTART(load_bias - vaddr);
error = elf_load(bprm->file, load_bias + vaddr, elf_ppnt,
PTR_ERR((void*)error) : -EINVAL;
if (elf_ex->e_type == ET_DYN) {
load_bias += error -
if (elf_ppnt->p_offset <= elf_ex->e_phoff &&
elf_ex->e_phoff < elf_ppnt->p_offset + elf_ppnt->p_filesz) {
phdr_addr = elf_ex->e_phoff - elf_ppnt->p_offset +
elf_ppnt->p_vaddr;
k = elf_ppnt->p_vaddr;
if ((elf_ppnt->p_flags & PF_X) && k < start_code)
if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
elf_ppnt->p_memsz > TASK_SIZE ||
TASK_SIZE - elf_ppnt->p_memsz < k) {
retval = -EINVAL;
k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
if ((elf_ppnt->p_flags & PF_X) && end_code < k)
k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
e_entry = elf_ex->e_entry + load_bias;
elf_entry += interp_elf_ex->e_entry;
(int)elf_entry : -EINVAL;
retval = -EINVAL;
mm = current->mm;
mm->end_code = end_code;
mm->start_code = start_code;
mm->start_data = start_data;
mm->end_data = end_data;
mm->start_stack = bprm->p;
elf_coredump_set_mm_eflags(mm, elf_ex->e_flags);
elf_ex->e_type == ET_DYN && !interpreter) {
mm->start_brk = mm->brk = ELF_PAGEALIGN(elf_brk);
if ((current->flags & PF_RANDOMIZE) && snapshot_randomize_va_space > 1) {
mm->brk = mm->start_brk = mm->brk + PAGE_SIZE;
mm->brk = mm->start_brk = arch_randomize_brk(mm);
current->brk_randomized = 1;
if (current->personality & MMAP_PAGE_ZERO) {
/* Why this, you ask??? Well SVr4 maps page 0 as read-only,
START_THREAD(elf_ex, regs, elf_entry, bprm->p);
/* in notesize() */
sz += roundup(strlen(en->name) + 1, 4);
sz += roundup(en->datasz, 4);
/* in writenote() */
en.n_namesz = strlen(men->name) + 1;
en.n_descsz = men->datasz;
en.n_type = men->type;
dump_emit(cprm, men->name, en.n_namesz) && dump_align(cprm, 4) &&
dump_emit(cprm, men->data, men->datasz) && dump_align(cprm, 4);
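/*
 * notesize() and writenote() implement the standard ELF note record:
 * an Elf_Nhdr, then the name, then the descriptor, each padded to
 * 4-byte alignment. For example, a "CORE" note carries
 * n_namesz = 5 ("CORE" plus NUL), padded out to 8 bytes on disk.
 */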
/* in fill_elf_header() */
memcpy(elf->e_ident, ELFMAG, SELFMAG);
elf->e_ident[EI_CLASS] = ELF_CLASS;
elf->e_ident[EI_DATA] = ELF_DATA;
elf->e_ident[EI_VERSION] = EV_CURRENT;
elf->e_ident[EI_OSABI] = ELF_OSABI;
elf->e_type = ET_CORE;
elf->e_machine = machine;
elf->e_version = EV_CURRENT;
elf->e_phoff = sizeof(struct elfhdr);
elf->e_flags = flags;
elf->e_ehsize = sizeof(struct elfhdr);
elf->e_phentsize = sizeof(struct elf_phdr);
elf->e_phnum = segs;
/* in fill_elf_note_phdr() */
phdr->p_type = PT_NOTE;
phdr->p_offset = offset;
phdr->p_vaddr = 0;
phdr->p_paddr = 0;
phdr->p_filesz = sz;
phdr->p_memsz = 0;
phdr->p_flags = 0;
phdr->p_align = 4;
/* in __fill_note() */
note->name = name;
note->type = type;
note->datasz = sz;
note->data = data;
/* in fill_prstatus() */
struct task_struct *p, long signr)
prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
prstatus->pr_sigpend = p->pending.signal.sig[0];
prstatus->pr_sighold = p->blocked.sig[0];
prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
prstatus->pr_pid = task_pid_vnr(p);
prstatus->pr_pgrp = task_pgrp_vnr(p);
prstatus->pr_sid = task_session_vnr(p);
if (thread_group_leader(p)) {
 * group-wide total, not its individual thread total.
thread_group_cputime(p, &cputime);
prstatus->pr_utime = ns_to_kernel_old_timeval(cputime.utime);
prstatus->pr_stime = ns_to_kernel_old_timeval(cputime.stime);
task_cputime(p, &utime, &stime);
prstatus->pr_utime = ns_to_kernel_old_timeval(utime);
prstatus->pr_stime = ns_to_kernel_old_timeval(stime);
prstatus->pr_cutime = ns_to_kernel_old_timeval(p->signal->cutime);
prstatus->pr_cstime = ns_to_kernel_old_timeval(p->signal->cstime);
/* in fill_psinfo() */
static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
len = mm->arg_end - mm->arg_start;
len = ELF_PRARGSZ-1;
if (copy_from_user(&psinfo->pr_psargs,
(const char __user *)mm->arg_start, len))
return -EFAULT;
if (psinfo->pr_psargs[i] == 0)
psinfo->pr_psargs[i] = ' ';
psinfo->pr_psargs[len] = 0;
psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
psinfo->pr_pid = task_pid_vnr(p);
psinfo->pr_pgrp = task_pgrp_vnr(p);
psinfo->pr_sid = task_session_vnr(p);
state = READ_ONCE(p->__state);
psinfo->pr_state = i;
psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
psinfo->pr_zomb = psinfo->pr_sname == 'Z';
psinfo->pr_nice = task_nice(p);
psinfo->pr_flag = p->flags;
cred = __task_cred(p);
SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
get_task_comm(psinfo->pr_fname, p);
/* in fill_auxv_note() */
elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
while (auxv[i - 2] != AT_NULL);
 * long count -- how many files are mapped
 * long page_size -- units for file_ofs
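/*
 * The NT_FILE layout continues past the two fields above: an array
 * of count (start, end, file_ofs) triples, followed by count
 * NUL-terminated filenames in ASCII, which is what the start_end_ofs
 * and name_curpos bookkeeping below constructs.
 */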
/* in fill_files_note() */
count = cprm->vma_count;
return -EINVAL;
return -EINVAL;
return -ENOMEM;
remaining = size - names_ofs;
for (i = 0; i < cprm->vma_count; i++) {
struct core_vma_metadata *m = &cprm->vma_meta[i];
file = m->file;
if (PTR_ERR(filename) == -ENAMETOOLONG) {
n = (name_curpos + remaining) - filename;
remaining = filename - name_curpos;
*start_end_ofs++ = m->start;
*start_end_ofs++ = m->end;
*start_end_ofs++ = m->pgoff;
 * Count usually is less than mm->map_count,
n = cprm->vma_count - count;
memmove(name_base - shift_bytes, name_base,
name_curpos - name_base);
name_curpos -= shift_bytes;
size = name_curpos - (char *)data;
/* in do_thread_regset_writeback() */
if (regset->writeback)
regset->writeback(task, regset, 1);
#define SET_PR_FPVALID(S) ((S)->pr_fpvalid = 1)
/* in fill_thread_core_info() */
fill_prstatus(&t->prstatus.common, t->task, signr);
regset_get(t->task, &view->regsets[0],
sizeof(t->prstatus.pr_reg), &t->prstatus.pr_reg);
fill_note(&t->notes[0], PRSTATUS, PRSTATUS_SIZE, &t->prstatus);
info->size += notesize(&t->notes[0]);
do_thread_regset_writeback(t->task, &view->regsets[0]);
for (view_iter = 1; view_iter < view->n; ++view_iter) {
const struct user_regset *regset = &view->regsets[view_iter];
int note_type = regset->core_note_type;
const char *note_name = regset->core_note_name;
do_thread_regset_writeback(t->task, regset);
if (regset->active && regset->active(t->task, regset) <= 0)
ret = regset_get_alloc(t->task, regset, ~0U, &data);
if (WARN_ON_ONCE(note_iter >= info->thread_notes))
SET_PR_FPVALID(&t->prstatus);
/* Warn on non-legacy-compatible names, for now. */
__fill_note(&t->notes[note_iter], note_name, note_type,
info->size += notesize(&t->notes[note_iter]);
struct task_struct *p = t->task;
fill_prstatus(&t->prstatus.common, p, signr);
elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
fill_note(&t->notes[0], PRSTATUS, sizeof(t->prstatus), &t->prstatus);
info->size += notesize(&t->notes[0]);
if (!fpu || !elf_core_copy_task_fpregs(p, fpu)) {
t->prstatus.pr_fpvalid = 1;
fill_note(&t->notes[1], PRFPREG, sizeof(*fpu), fpu);
info->size += notesize(&t->notes[1]);
/* in fill_note_info() */
fill_note(&info->psinfo, PRPSINFO, sizeof(*psinfo), psinfo);
info->thread_notes = 0;
for (int i = 0; i < view->n; ++i)
if (view->regsets[i].core_note_type != 0)
++info->thread_notes;
if (unlikely(info->thread_notes == 0) ||
unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) {
machine = view->e_machine;
flags = view->e_flags;
info->thread_notes = 2;
flags = elf_coredump_get_mm_eflags(dump_task->mm, flags);
info->thread = kzalloc(struct_size(info->thread, notes, info->thread_notes),
if (unlikely(!info->thread))
info->thread->task = dump_task;
for (ct = dump_task->signal->core_state->dumper.next; ct; ct = ct->next) {
t = kzalloc(struct_size(t, notes, info->thread_notes),
t->task = ct->task;
t->next = info->thread->next;
info->thread->next = t;
for (t = info->thread; t != NULL; t = t->next)
if (!fill_thread_core_info(t, view, cprm->siginfo->si_signo, info))
 * Fill in the two process-wide notes.
fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
info->size += notesize(&info->psinfo);
fill_siginfo_note(&info->signote, &info->csigdata, cprm->siginfo);
info->size += notesize(&info->signote);
fill_auxv_note(&info->auxv, current->mm);
info->size += notesize(&info->auxv);
if (fill_files_note(&info->files, cprm) == 0)
info->size += notesize(&info->files);
 * process-wide notes are interleaved after the first thread-specific note.
/* in write_note_info() */
struct elf_thread_core_info *t = info->thread;
if (!writenote(&t->notes[0], cprm))
if (first && !writenote(&info->psinfo, cprm))
if (first && !writenote(&info->signote, cprm))
if (first && !writenote(&info->auxv, cprm))
if (first && info->files.data &&
!writenote(&info->files, cprm))
for (i = 1; i < info->thread_notes; ++i)
if (t->notes[i].data &&
!writenote(&t->notes[i], cprm))
t = t->next;
/* in free_note_info() */
struct elf_thread_core_info *threads = info->thread;
threads = t->next;
WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
for (i = 1; i < info->thread_notes; ++i)
kvfree(t->notes[i].data);
kfree(info->psinfo.data);
kvfree(info->files.data);
/* in fill_extnum_info() */
elf->e_shoff = e_shoff;
elf->e_shentsize = sizeof(*shdr4extnum);
elf->e_shnum = 1;
elf->e_shstrndx = SHN_UNDEF;
shdr4extnum->sh_type = SHT_NULL;
shdr4extnum->sh_size = elf->e_shnum;
shdr4extnum->sh_link = elf->e_shstrndx;
shdr4extnum->sh_info = segs;
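/*
 * This is the ELF extended-numbering convention: when a dump has
 * PN_XNUM (0xffff) or more segments, e_phnum cannot hold the real
 * count, so a single SHT_NULL section header is appended whose
 * sh_info carries the true number of program headers, hence the
 * seemingly inverted assignments above.
 */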
 * This is a two-pass process; first we find the offsets of the bits,
/* in elf_core_dump() */
segs = cprm->vma_count + elf_core_extra_phdrs(cprm);
 * Collect all the non-memory information about the process for the
offset += cprm->vma_data_size;
for (i = 0; i < cprm->vma_count; i++) {
struct core_vma_metadata *meta = cprm->vma_meta + i;
phdr.p_vaddr = meta->start;
phdr.p_filesz = meta->dump_size;
phdr.p_memsz = meta->end - meta->start;
if (meta->flags & VM_READ)
if (meta->flags & VM_WRITE)
if (meta->flags & VM_EXEC)
for (i = 0; i < cprm->vma_count; i++) {
struct core_vma_metadata *meta = cprm->vma_meta + i;
if (!dump_user_range(cprm, meta->start, meta->dump_size))
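/*
 * Net result of the two passes: the core file begins with the ELF
 * header and program headers (one PT_NOTE plus a PT_LOAD per VMA),
 * then the note data, then alignment padding, then each VMA's pages
 * written by dump_user_range() at the offsets recorded in its phdr.
 */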