#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/sched/coredump.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/path.h>
#include <linux/timekeeping.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/exec.h>

#include <trace/events/task.h>
#include "internal.h"

#include <trace/events/sched.h>

int core_uses_pid;
unsigned int core_pipe_limit;
char core_pattern[CORENAME_MAX_SIZE] = "core";
static int core_name_size = CORENAME_MAX_SIZE;

struct core_name {
	char *corename;
	int used, size;
};

/* The maximal length of core_pattern is also specified in sysctl.c */

static int expand_corename(struct core_name *cn, int size)
{
	char *corename = krealloc(cn->corename, size, GFP_KERNEL);

	if (!corename)
		return -ENOMEM;

	if (size > core_name_size) /* racy but harmless */
		core_name_size = size;

	cn->size = ksize(corename);
	cn->corename = corename;
	return 0;
}

static __printf(2, 0) int cn_vprintf(struct core_name *cn, const char *fmt,
				     va_list arg)
{
	int free, need;
	va_list arg_copy;

again:
	free = cn->size - cn->used;

	va_copy(arg_copy, arg);
	need = vsnprintf(cn->corename + cn->used, free, fmt, arg_copy);
	va_end(arg_copy);

	if (need < free) {
		cn->used += need;
		return 0;
	}

	if (!expand_corename(cn, cn->size + need - free + 1))
		goto again;

	return -ENOMEM;
}

static __printf(2, 3) int cn_printf(struct core_name *cn, const char *fmt, ...)
{
	va_list arg;
	int ret;

	va_start(arg, fmt);
	ret = cn_vprintf(cn, fmt, arg);
	va_end(arg);

	return ret;
}

static __printf(2, 3)
int cn_esc_printf(struct core_name *cn, const char *fmt, ...)
{
	int cur = cn->used;
	va_list arg;
	int ret;

	va_start(arg, fmt);
	ret = cn_vprintf(cn, fmt, arg);
	va_end(arg);

	if (ret == 0) {
		/*
		 * Ensure that this coredump name component can't cause the
		 * resulting corefile path to consist of a ".." or ".".
		 */
		if ((cn->used - cur == 1 && cn->corename[cur] == '.') ||
			(cn->used - cur == 2 && cn->corename[cur] == '.'
					&& cn->corename[cur+1] == '.'))
			cn->corename[cur] = '!';

		/*
		 * Empty names are fishy and could be used to create a "//" in a
		 * corefile name, causing the coredump to happen one directory
		 * level too high. Enforce that all components of the core
		 * pattern are at least one character long.
		 */
		if (cn->used == cur)
			ret = cn_printf(cn, "!");
	}

	for (; cur < cn->used; ++cur) {
		if (cn->corename[cur] == '/')
			cn->corename[cur] = '!';
	}
	return ret;
}
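
/*
 * Illustrative note (added commentary, not from the original source): with
 * the escaping in cn_esc_printf() above, a component that expands to "."
 * becomes "!", ".." becomes "!.", an empty expansion becomes "!", and any
 * embedded slash is replaced, so "a/b" becomes "a!b".
 */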

static int cn_print_exe_file(struct core_name *cn)
{
	struct file *exe_file;
	char *pathbuf, *path;
	int ret;

	exe_file = get_mm_exe_file(current->mm);
	if (!exe_file)
		return cn_esc_printf(cn, "%s (path unknown)", current->comm);

	pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY);
	if (!pathbuf) {
		ret = -ENOMEM;
		goto put_exe_file;
	}

	path = file_path(exe_file, pathbuf, PATH_MAX);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		goto free_buf;
	}

	ret = cn_esc_printf(cn, "%s", path);

free_buf:
	kfree(pathbuf);
put_exe_file:
	fput(exe_file);
	return ret;
}

/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
static int format_corename(struct core_name *cn, struct coredump_params *cprm)
{
	const struct cred *cred = current_cred();
	const char *pat_ptr = core_pattern;
	int ispipe = (*pat_ptr == '|');
	int pid_in_pattern = 0;
	int err = 0;

	cn->used = 0;
	cn->corename = NULL;
	if (expand_corename(cn, core_name_size))
		return -ENOMEM;
	cn->corename[0] = '\0';

	if (ispipe)
		++pat_ptr;

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			err = cn_printf(cn, "%c", *pat_ptr++);
		} else {
			switch (*++pat_ptr) {
			/* single % at the end, drop that */
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				err = cn_printf(cn, "%c", '%');
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				err = cn_printf(cn, "%d",
					      task_tgid_vnr(current));
				break;
			/* global pid */
			case 'P':
				err = cn_printf(cn, "%d",
					      task_tgid_nr(current));
				break;
			case 'i':
				err = cn_printf(cn, "%d",
					      task_pid_vnr(current));
				break;
			case 'I':
				err = cn_printf(cn, "%d",
					      task_pid_nr(current));
				break;
			/* uid */
			case 'u':
				err = cn_printf(cn, "%u",
						from_kuid(&init_user_ns,
							  cred->uid));
				break;
			/* gid */
			case 'g':
				err = cn_printf(cn, "%u",
						from_kgid(&init_user_ns,
							  cred->gid));
				break;
			case 'd':
				err = cn_printf(cn, "%d",
					__get_dumpable(cprm->mm_flags));
				break;
			/* signal that caused the coredump */
			case 's':
				err = cn_printf(cn, "%d",
						cprm->siginfo->si_signo);
				break;
			/* UNIX time of coredump */
			case 't': {
				time64_t time;

				time = ktime_get_real_seconds();
				err = cn_printf(cn, "%lld", time);
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				err = cn_esc_printf(cn, "%s",
					      utsname()->nodename);
				up_read(&uts_sem);
				break;
			/* executable */
			case 'e':
				err = cn_esc_printf(cn, "%s", current->comm);
				break;
			case 'E':
				err = cn_print_exe_file(cn);
				break;
			/* core limit size */
			case 'c':
				err = cn_printf(cn, "%lu",
					      rlimit(RLIMIT_CORE));
				break;
			default:
				break;
			}
			++pat_ptr;
		}

		if (err)
			return err;
	}

out:
	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands. */
	if (!ispipe && !pid_in_pattern && core_uses_pid) {
		err = cn_printf(cn, ".%d", task_tgid_vnr(current));
		if (err)
			return err;
	}
	return ispipe;
}
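
/*
 * Worked example (added commentary): with core_pattern = "core.%e.%p" and
 * a crashing task named "myapp" whose tgid in its own pid namespace is
 * 1234, format_corename() produces "core.myapp.1234" and returns 0 (not a
 * pipe). With a hypothetical pipe pattern "|/usr/local/bin/dumper %p", the
 * leading '|' is skipped, cn->corename ends up as
 * "/usr/local/bin/dumper 1234" ready for argv_split(), and the return
 * value (ispipe) is 1.
 */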

static int zap_process(struct task_struct *start, int exit_code, int flags)
{
	struct task_struct *t;
	int nr = 0;

	/* ignore all signals except SIGKILL, see prepare_signal() */
	start->signal->flags = SIGNAL_GROUP_COREDUMP | flags;
	start->signal->group_exit_code = exit_code;
	start->signal->group_stop_count = 0;

	for_each_thread(start, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		if (t != current && t->mm) {
			sigaddset(&t->pending.signal, SIGKILL);
			signal_wake_up(t, 1);
			nr++;
		}
	}

	return nr;
}

static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
			struct core_state *core_state, int exit_code)
{
	struct task_struct *g, *p;
	unsigned long flags;
	int nr = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!signal_group_exit(tsk->signal)) {
		mm->core_state = core_state;
		tsk->signal->group_exit_task = tsk;
		nr = zap_process(tsk, exit_code, 0);
		clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	if (unlikely(nr < 0))
		return nr;

	tsk->flags |= PF_DUMPCORE;
	if (atomic_read(&mm->mm_users) == nr + 1)
		goto done;
	/*
	 * We should find and kill all tasks which use this mm, and we should
	 * count them correctly into ->nr_threads. We don't take tasklist
	 * lock, but this is safe wrt:
	 *
	 * fork:
	 *	None of sub-threads can fork after zap_process(leader). All
	 *	processes which were created before this point should be
	 *	visible to zap_threads() because copy_process() adds the new
	 *	process to the tail of init_task.tasks list, and lock/unlock
	 *	of ->siglock provides a memory barrier.
	 *
	 * do_exit:
	 *	The caller holds mm->mmap_sem. This means that the task which
	 *	uses this mm can't pass exit_mm(), so it can't exit or clear
	 *	its ->mm.
	 *
	 * de_thread:
	 *	It does list_replace_rcu(&leader->tasks, &current->tasks),
	 *	we must see either old or new leader, this does not matter.
	 *	However, it can change p->sighand, so lock_task_sighand(p)
	 *	must be used. Since p->mm != NULL and we hold ->mmap_sem
	 *	it can't fail.
	 *
	 *	Note also that "g" can be the old leader with ->mm == NULL
	 *	and already unhashed and thus removed from ->thread_group.
	 *	This is OK, __unhash_process()->list_del_rcu() does not
	 *	clear the ->next pointer, we will find the new leader via
	 *	next_thread().
	 */
	rcu_read_lock();
	for_each_process(g) {
		if (g == tsk->group_leader)
			continue;
		if (g->flags & PF_KTHREAD)
			continue;

		for_each_thread(g, p) {
			if (unlikely(!p->mm))
				continue;
			if (unlikely(p->mm == mm)) {
				lock_task_sighand(p, &flags);
				nr += zap_process(p, exit_code,
							SIGNAL_GROUP_EXIT);
				unlock_task_sighand(p, &flags);
			}
			break;
		}
	}
	rcu_read_unlock();
done:
	atomic_set(&core_state->nr_threads, nr);
	return nr;
}
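
/*
 * Worked example (added commentary): for a process with four threads all
 * sharing one mm and no other users of that mm, zap_process() counts the
 * three threads other than the dumper, so nr == 3 and the
 * mm_users == nr + 1 check lets zap_threads() skip the global process
 * scan above entirely.
 */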

static int coredump_wait(int exit_code, struct core_state *core_state)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int core_waiters = -EBUSY;

	init_completion(&core_state->startup);
	core_state->dumper.task = tsk;
	core_state->dumper.next = NULL;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	if (!mm->core_state)
		core_waiters = zap_threads(tsk, mm, core_state, exit_code);
	up_write(&mm->mmap_sem);

	if (core_waiters > 0) {
		struct core_thread *ptr;

		freezer_do_not_count();
		wait_for_completion(&core_state->startup);
		freezer_count();
		/*
		 * Wait for all the threads to become inactive, so that
		 * all the thread context (extended register state, like
		 * fpu etc) gets copied to the memory.
		 */
		ptr = core_state->dumper.next;
		while (ptr != NULL) {
			wait_task_inactive(ptr->task, 0);
			ptr = ptr->next;
		}
	}

	return core_waiters;
}

static void coredump_finish(struct mm_struct *mm, bool core_dumped)
{
	struct core_thread *curr, *next;
	struct task_struct *task;

	spin_lock_irq(&current->sighand->siglock);
	if (core_dumped && !__fatal_signal_pending(current))
		current->signal->group_exit_code |= 0x80;
	current->signal->group_exit_task = NULL;
	current->signal->flags = SIGNAL_GROUP_EXIT;
	spin_unlock_irq(&current->sighand->siglock);

	next = mm->core_state->dumper.next;
	while ((curr = next) != NULL) {
		next = curr->next;
		task = curr->task;
		/*
		 * see exit_mm(), curr->task must not see
		 * ->task == NULL before we read ->next.
		 */
		smp_mb();
		curr->task = NULL;
		wake_up_process(task);
	}

	mm->core_state = NULL;
}

static bool dump_interrupted(void)
{
	/*
	 * SIGKILL or freezing() interrupt the coredumping. Perhaps we
	 * can do try_to_freeze() and check __fatal_signal_pending(),
	 * but then we need to teach dump_write() to restart and clear
	 * TIF_SIGPENDING.
	 */
	return signal_pending(current);
}

static void wait_for_dump_helpers(struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	pipe_lock(pipe);
	pipe->readers++;
	pipe->writers--;
	wake_up_interruptible_sync(&pipe->wait);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	pipe_unlock(pipe);

	/*
	 * We actually want wait_event_freezable() but then we need
	 * to clear TIF_SIGPENDING and improve dump_interrupted().
	 */
	wait_event_interruptible(pipe->wait, pipe->readers == 1);

	pipe_lock(pipe);
	pipe->readers--;
	pipe->writers++;
	pipe_unlock(pipe);
}
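
/*
 * Note on the handshake above (added commentary): the dumper holds the
 * write end of the pipe, so it counts itself as a reader and drops its
 * writer count so the helper reading fd 0 sees EOF after the core image;
 * the wait then completes when the helper closes its read end, leaving
 * pipe->readers == 1 (the dumper alone).
 */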

/*
 * umh_pipe_setup
 * helper function to customize the process used
 * to collect the core in userspace. Specifically
 * it sets up a pipe and installs it as fd 0 (stdin)
 * for the process. Returns 0 on success, or
 * PTR_ERR on failure.
 * Note that it also sets the core limit to 1. This
 * is a special value that we use to trap recursive
 * core dumps
 */
static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
{
	struct file *files[2];
	struct coredump_params *cp = (struct coredump_params *)info->data;
	int err = create_pipe_files(files, 0);
	if (err)
		return err;

	cp->file = files[1];

	err = replace_fd(0, files[0], 0);
	fput(files[0]);
	/* and disallow core files too */
	current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};

	return err;
}
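
/*
 * Illustrative userspace counterpart (a sketch, not kernel code; the path
 * is hypothetical): a minimal helper registered as
 * core_pattern = "|/usr/local/bin/dumper %p" just reads the core image
 * from fd 0, e.g.:
 *
 *	int fd = open("/var/tmp/core.bin", O_CREAT | O_WRONLY | O_TRUNC, 0600);
 *	char buf[4096];
 *	ssize_t n;
 *	while ((n = read(0, buf, sizeof(buf))) > 0)
 *		write(fd, buf, n);
 *
 * The RLIMIT_CORE = {1, 1} set above means that if the helper itself
 * crashes, do_coredump() sees cprm.limit == 1 and aborts instead of
 * recursing.
 */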

void do_coredump(const siginfo_t *siginfo)
{
	struct core_state core_state;
	struct core_name cn;
	struct mm_struct *mm = current->mm;
	struct linux_binfmt * binfmt;
	const struct cred *old_cred;
	struct cred *cred;
	int retval = 0;
	int ispipe;
	struct files_struct *displaced;
	/* require nonrelative corefile path and be extra careful */
	bool need_suid_safe = false;
	bool core_dumped = false;
	static atomic_t core_dump_count = ATOMIC_INIT(0);
	struct coredump_params cprm = {
		.siginfo = siginfo,
		.regs = signal_pt_regs(),
		.limit = rlimit(RLIMIT_CORE),
		/*
		 * We must use the same mm->flags while dumping core to avoid
		 * inconsistency of bit flags, since this flag is not protected
		 * by any locks.
		 */
		.mm_flags = mm->flags,
	};

	audit_core_dumps(siginfo->si_signo);

	binfmt = mm->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
	if (!__get_dumpable(cprm.mm_flags))
		goto fail;

	cred = prepare_creds();
	if (!cred)
		goto fail;
	/*
	 * We cannot trust fsuid as being the "true" uid of the process
	 * nor do we know its entire history. We only know it was tainted
	 * so we dump it as root in mode 2, and only into a controlled
	 * environment (pipe handler or fully qualified path).
	 */
	if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) {
		/* Setuid core dump mode */
		cred->fsuid = GLOBAL_ROOT_UID;	/* Dump root private */
		need_suid_safe = true;
	}

	retval = coredump_wait(siginfo->si_signo, &core_state);
	if (retval < 0)
		goto fail_creds;

	old_cred = override_creds(cred);

	ispipe = format_corename(&cn, &cprm);

	if (ispipe) {
		int dump_count;
		char **helper_argv;
		struct subprocess_info *sub_info;

		if (ispipe < 0) {
			printk(KERN_WARNING "format_corename failed\n");
			printk(KERN_WARNING "Aborting core\n");
			goto fail_unlock;
		}

		if (cprm.limit == 1) {
			/* See umh_pipe_setup() which sets RLIMIT_CORE = 1.
			 *
			 * Normally core limits are irrelevant to pipes, since
			 * we're not writing to the file system, but we use
			 * cprm.limit of 1 here as a special value, this is a
			 * consistent way to catch recursive crashes.
			 * We can still crash if the core_pattern binary sets
			 * RLIM_CORE = !1, but it runs as root, and can do
			 * lots of stupid things.
			 *
			 * Note that we use task_tgid_vnr here to grab the pid
			 * of the process group leader. That way we get the
			 * right pid if a thread in a multi-threaded
			 * core_pattern process dies.
			 */
			printk(KERN_WARNING
				"Process %d(%s) has RLIMIT_CORE set to 1\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Aborting core\n");
			goto fail_unlock;
		}
		cprm.limit = RLIM_INFINITY;

		dump_count = atomic_inc_return(&core_dump_count);
		if (core_pipe_limit && (core_pipe_limit < dump_count)) {
			printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
			       task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_dropcount;
		}

		helper_argv = argv_split(GFP_KERNEL, cn.corename, NULL);
		if (!helper_argv) {
			printk(KERN_WARNING "%s failed to allocate memory\n",
			       __func__);
			goto fail_dropcount;
		}

		retval = -ENOMEM;
		sub_info = call_usermodehelper_setup(helper_argv[0],
						helper_argv, NULL, GFP_KERNEL,
						umh_pipe_setup, NULL, &cprm);
		if (sub_info)
			retval = call_usermodehelper_exec(sub_info,
							  UMH_WAIT_EXEC);

		argv_free(helper_argv);
		if (retval) {
			printk(KERN_INFO "Core dump to |%s pipe failed\n",
			       cn.corename);
			goto close_fail;
		}
	} else {
		struct inode *inode;
		int open_flags = O_CREAT | O_RDWR | O_NOFOLLOW |
				 O_LARGEFILE | O_EXCL;

		if (cprm.limit < binfmt->min_coredump)
			goto fail_unlock;

		if (need_suid_safe && cn.corename[0] != '/') {
			printk(KERN_WARNING "Pid %d(%s) can only dump core "\
				"to fully qualified path!\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_unlock;
		}

		/*
		 * Unlink the file if it exists unless this is a SUID
		 * binary - in that case, we're running around with root
		 * privs and don't want to unlink another user's coredump.
		 */
		if (!need_suid_safe) {
			mm_segment_t old_fs;

			old_fs = get_fs();
			set_fs(KERNEL_DS);
			/*
			 * If it doesn't exist, that's fine. If there's some
			 * other problem, we'll catch it at the filp_open().
			 */
			(void) sys_unlink((const char __user *)cn.corename);
			set_fs(old_fs);
		}

		/*
		 * There is a race between unlinking and creating the
		 * file, but if that causes an EEXIST here, that's
		 * fine - another process raced with us while creating
		 * the corefile, and the other process won. To userspace,
		 * what matters is that at least one of the two processes
		 * writes its coredump successfully, not which one.
		 */
		if (need_suid_safe) {
			/*
			 * Using user namespaces, normal user tasks can change
			 * their current->fs->root to point to arbitrary
			 * directories. Since the intention of the "only dump
			 * with a fully qualified path" rule is to control where
			 * coredumps may be placed using root privileges,
			 * current->fs->root must not be used. Instead, use the
			 * root directory of init_task.
			 */
			struct path root;

			task_lock(&init_task);
			get_fs_root(init_task.fs, &root);
			task_unlock(&init_task);
			cprm.file = file_open_root(root.dentry, root.mnt,
				cn.corename, open_flags, 0600);
			path_put(&root);
		} else {
			cprm.file = filp_open(cn.corename, open_flags, 0600);
		}
		if (IS_ERR(cprm.file))
			goto fail_unlock;

		inode = file_inode(cprm.file);
		if (inode->i_nlink > 1)
			goto close_fail;
		if (d_unhashed(cprm.file->f_path.dentry))
			goto close_fail;
		/*
		 * AK: actually i see no reason to not allow this for named
		 * pipes etc, but keep the previous behaviour for now.
		 */
		if (!S_ISREG(inode->i_mode))
			goto close_fail;
		/*
		 * Don't dump core if the filesystem changed owner or mode
		 * of the file during file creation. This is an issue when
		 * a process dumps core while its cwd is e.g. on a vfat
		 * filesystem.
		 */
		if (!uid_eq(inode->i_uid, current_fsuid()))
			goto close_fail;
		if ((inode->i_mode & 0677) != 0600)
			goto close_fail;
		if (!(cprm.file->f_mode & FMODE_CAN_WRITE))
			goto close_fail;
		if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
			goto close_fail;
	}

	/* get us an unshared descriptor table; almost always a no-op */
	retval = unshare_files(&displaced);
	if (retval)
		goto close_fail;
	if (displaced)
		put_files_struct(displaced);
	if (!dump_interrupted()) {
		file_start_write(cprm.file);
		core_dumped = binfmt->core_dump(&cprm);
		file_end_write(cprm.file);
	}
	if (ispipe && core_pipe_limit)
		wait_for_dump_helpers(cprm.file);
close_fail:
	if (cprm.file)
		filp_close(cprm.file, NULL);
fail_dropcount:
	if (ispipe)
		atomic_dec(&core_dump_count);
fail_unlock:
	kfree(cn.corename);
	coredump_finish(mm, core_dumped);
	revert_creds(old_cred);
fail_creds:
	put_cred(cred);
fail:
	return;
}
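
/*
 * Usage note (added commentary; the helper path is hypothetical): the
 * pattern consumed by do_coredump() is configured through the
 * kernel.core_pattern sysctl, e.g.
 *
 *	echo 'core.%e.%p' > /proc/sys/kernel/core_pattern
 *	echo '|/usr/local/bin/dumper %p' > /proc/sys/kernel/core_pattern
 *
 * kernel.core_uses_pid and kernel.core_pipe_limit map onto the
 * core_uses_pid and core_pipe_limit variables at the top of this file.
 */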

/*
 * Core dumping helper functions. These are the only things you should
 * do on a core-file: use only these functions to write out all the
 * necessary info.
 */
int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
{
	struct file *file = cprm->file;
	loff_t pos = file->f_pos;
	ssize_t n;
	if (cprm->written + nr > cprm->limit)
		return 0;
	while (nr) {
		if (dump_interrupted())
			return 0;
		n = __kernel_write(file, addr, nr, &pos);
		if (n <= 0)
			return 0;
		file->f_pos = pos;
		cprm->written += n;
		cprm->pos += n;
		nr -= n;
	}
	return 1;
}
EXPORT_SYMBOL(dump_emit);

int dump_skip(struct coredump_params *cprm, size_t nr)
{
	static char zeroes[PAGE_SIZE];
	struct file *file = cprm->file;
	if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
		if (dump_interrupted() ||
		    file->f_op->llseek(file, nr, SEEK_CUR) < 0)
			return 0;
		cprm->pos += nr;
		return 1;
	} else {
		while (nr > PAGE_SIZE) {
			if (!dump_emit(cprm, zeroes, PAGE_SIZE))
				return 0;
			nr -= PAGE_SIZE;
		}
		return dump_emit(cprm, zeroes, nr);
	}
}
EXPORT_SYMBOL(dump_skip);

int dump_align(struct coredump_params *cprm, int align)
{
	unsigned mod = cprm->pos & (align - 1);
	if (align & (align - 1))
		return 0;
	return mod ? dump_skip(cprm, align - mod) : 1;
}
EXPORT_SYMBOL(dump_align);

/*
 * Ensures that file size is big enough to contain the current file
 * position. This prevents gdb from complaining about a truncated file
 * if the last "write" to the file was dump_skip.
 */
void dump_truncate(struct coredump_params *cprm)
{
	struct file *file = cprm->file;
	loff_t offset;

	if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
		offset = file->f_op->llseek(file, 0, SEEK_CUR);
		if (i_size_read(file->f_mapping->host) < offset)
			do_truncate(file->f_path.dentry, offset, 0, file);
	}
}
EXPORT_SYMBOL(dump_truncate);
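
/*
 * Illustrative caller (a sketch, not from this file): a binfmt core_dump()
 * implementation typically drives these helpers roughly as
 *
 *	if (!dump_emit(cprm, &hdr, sizeof(hdr)))
 *		goto out;
 *	if (!dump_align(cprm, 4))
 *		goto out;
 *	if (!dump_skip(cprm, gap))
 *		goto out;
 *	dump_truncate(cprm);
 *
 * where "hdr" and "gap" are hypothetical. Each helper returns 1 on success
 * and 0 on failure or interruption; dump_truncate() then extends the file
 * size to cover a trailing dump_skip().
 */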