// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/sort.h>
#include <linux/sched/coredump.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/path.h>
#include <linux/timekeeping.h>
#include <linux/sysctl.h>
#include <linux/elf.h>
#include <linux/pidfs.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <net/af_unix.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <uapi/linux/pidfd.h>
#include <uapi/linux/un.h>
#include <uapi/linux/coredump.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/exec.h>

#include <trace/events/task.h>
#include "internal.h"

#include <trace/events/sched.h>

static bool dump_vma_snapshot(struct coredump_params *cprm);
static void free_vma_snapshot(struct coredump_params *cprm);

#define CORE_FILE_NOTE_SIZE_DEFAULT (4*1024*1024)
/* Define a reasonable max cap */
#define CORE_FILE_NOTE_SIZE_MAX (16*1024*1024)

/*
 * File descriptor number for the pidfd for the thread-group leader of
 * the coredumping task installed into the usermode helper's file
 * descriptor table.
 */
#define COREDUMP_PIDFD_NUMBER 3
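/*
 * Illustrative userspace sketch (not kernel code), assuming a
 * core_pattern of "|/path/helper %F": the usermode helper inherits the
 * pidfd of the dumping thread-group leader as fd 3 and can, e.g., poll
 * it to learn when the crashed task has fully exited:
 *
 *	#include <poll.h>
 *
 *	int main(void)
 *	{
 *		// fd 3 == COREDUMP_PIDFD_NUMBER, installed by the kernel
 *		struct pollfd pfd = { .fd = 3, .events = POLLIN };
 *
 *		// ... read the core image from stdin (fd 0) ...
 *
 *		poll(&pfd, 1, -1);	// pidfd becomes readable on exit
 *		return 0;
 *	}
 */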
static int core_uses_pid;
static unsigned int core_pipe_limit;
static unsigned int core_sort_vma;
static char core_pattern[CORENAME_MAX_SIZE] = "core";
static int core_name_size = CORENAME_MAX_SIZE;
unsigned int core_file_note_size_limit = CORE_FILE_NOTE_SIZE_DEFAULT;
static atomic_t core_pipe_count = ATOMIC_INIT(0);

enum coredump_type_t {
	COREDUMP_FILE = 1,
	COREDUMP_PIPE = 2,
	COREDUMP_SOCK = 3,
	COREDUMP_SOCK_REQ = 4,
};

struct core_name {
	char *corename __counted_by_ptr(size);
	int used, size;
	unsigned int core_pipe_limit;
	bool core_dumped;
	enum coredump_type_t core_type;
	u64 mask;
};

static int expand_corename(struct core_name *cn, int size)
{
	char *corename;

	size = kmalloc_size_roundup(size);
	corename = krealloc(cn->corename, size, GFP_KERNEL);
	if (!corename)
		return -ENOMEM;

	cn->corename = corename;
	cn->size = size;

	if (size > core_name_size) /* racy but harmless */
		core_name_size = size;

	return 0;
}

static __printf(2, 0) int cn_vprintf(struct core_name *cn, const char *fmt,
				     va_list arg)
{
	int free, need;
	va_list arg_copy;

again:
	free = cn->size - cn->used;

	va_copy(arg_copy, arg);
	need = vsnprintf(cn->corename + cn->used, free, fmt, arg_copy);
	va_end(arg_copy);

	if (need < free) {
		cn->used += need;
		return 0;
	}

	if (!expand_corename(cn, cn->size + need - free + 1))
		goto again;

	return -ENOMEM;
}

static __printf(2, 3) int cn_printf(struct core_name *cn, const char *fmt, ...)
{
	va_list arg;
	int ret;

	va_start(arg, fmt);
	ret = cn_vprintf(cn, fmt, arg);
	va_end(arg);

	return ret;
}
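/*
 * The helpers above implement the standard vsnprintf() measure-and-retry
 * idiom: vsnprintf() returns the length it needed (excluding the NUL)
 * regardless of how much buffer it got, so a failed attempt tells us
 * exactly how much to grow by. A minimal sketch of the same pattern
 * (hypothetical userspace names, error handling elided):
 *
 *	va_list cp;
 *	va_copy(cp, args);	// a failed attempt consumes the va_list
 *	int need = vsnprintf(buf + used, size - used, fmt, cp);
 *	va_end(cp);
 *	if (need >= size - used) {
 *		buf = realloc(buf, used + need + 1);	// grow, then retry
 *		need = vsnprintf(buf + used, need + 1, fmt, args);
 *	}
 *	used += need;
 *
 * This is why cn_vprintf() must va_copy() before each attempt.
 */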
static __printf(2, 3)
int cn_esc_printf(struct core_name *cn, const char *fmt, ...)
{
	int cur = cn->used;
	va_list arg;
	int ret;

	va_start(arg, fmt);
	ret = cn_vprintf(cn, fmt, arg);
	va_end(arg);

	if (ret == 0) {
		/*
		 * Ensure that this coredump name component can't cause the
		 * resulting corefile path to consist of a ".." or ".".
		 */
		if ((cn->used - cur == 1 && cn->corename[cur] == '.') ||
		    (cn->used - cur == 2 && cn->corename[cur] == '.'
				&& cn->corename[cur+1] == '.'))
			cn->corename[cur] = '!';

		/*
		 * Empty names are fishy and could be used to create a "//" in a
		 * corefile name, causing the coredump to happen one directory
		 * level too high. Enforce that all components of the core
		 * pattern are at least one character long.
		 */
		if (cn->used == cur)
			ret = cn_printf(cn, "!");
	}

	for (; cur < cn->used; ++cur) {
		if (cn->corename[cur] == '/')
			cn->corename[cur] = '!';
	}
	return ret;
}

static int cn_print_exe_file(struct core_name *cn, bool name_only)
{
	struct file *exe_file;
	char *pathbuf, *path, *ptr;
	int ret;

	exe_file = get_mm_exe_file(current->mm);
	if (!exe_file)
		return cn_esc_printf(cn, "%s (path unknown)", current->comm);

	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!pathbuf) {
		ret = -ENOMEM;
		goto put_exe_file;
	}

	path = file_path(exe_file, pathbuf, PATH_MAX);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		goto free_buf;
	}

	if (name_only) {
		ptr = strrchr(path, '/');
		if (ptr)
			path = ptr + 1;
	}
	ret = cn_esc_printf(cn, "%s", path);

free_buf:
	kfree(pathbuf);
put_exe_file:
	fput(exe_file);
	return ret;
}
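/*
 * Worked examples of the escaping above (illustrative values): an %e
 * expansion of "worker" is copied as-is; an exact "." becomes "!"; an
 * exact ".." becomes "!."; an empty component becomes "!"; and "a/b"
 * becomes "a!b" since every '/' is rewritten afterwards. A component can
 * therefore never add, remove, or traverse a directory level in the
 * final corefile path.
 */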
/*
 * coredump_parse will inspect the pattern parameter, and output a name
 * into corename, which must have space for at least CORENAME_MAX_SIZE
 * bytes plus one byte for the zero terminator.
 */
static bool coredump_parse(struct core_name *cn, struct coredump_params *cprm,
			   size_t **argv, int *argc)
{
	const struct cred *cred = current_cred();
	const char *pat_ptr = core_pattern;
	bool was_space = false;
	int pid_in_pattern = 0;
	int err = 0;

	cn->mask = COREDUMP_KERNEL;
	if (core_pipe_limit)
		cn->mask |= COREDUMP_WAIT;
	cn->used = 0;
	cn->corename = NULL;
	cn->core_pipe_limit = 0;
	cn->core_dumped = false;
	if (*pat_ptr == '|')
		cn->core_type = COREDUMP_PIPE;
	else if (*pat_ptr == '@')
		cn->core_type = COREDUMP_SOCK;
	else
		cn->core_type = COREDUMP_FILE;
	if (expand_corename(cn, core_name_size))
		return false;
	cn->corename[0] = '\0';

	switch (cn->core_type) {
	case COREDUMP_PIPE: {
		int argvs = sizeof(core_pattern) / 2;

		(*argv) = kmalloc_array(argvs, sizeof(**argv), GFP_KERNEL);
		if (!(*argv))
			return false;
		(*argv)[(*argc)++] = 0;
		++pat_ptr;
		if (!(*pat_ptr))
			return false;
		break;
	}
	case COREDUMP_SOCK: {
		/* skip the @ */
		pat_ptr++;
		if (!(*pat_ptr))
			return false;
		if (*pat_ptr == '@') {
			pat_ptr++;
			if (!(*pat_ptr))
				return false;

			cn->core_type = COREDUMP_SOCK_REQ;
		}

		err = cn_printf(cn, "%s", pat_ptr);
		if (err)
			return false;

		/* Require absolute paths. */
		if (cn->corename[0] != '/')
			return false;

		/*
		 * Ensure we can use spaces to indicate additional
		 * parameters in the future.
		 */
		if (strchr(cn->corename, ' ')) {
			coredump_report_failure("Coredump socket path %s may not contain spaces", cn->corename);
			return false;
		}

		/* Must not contain ".." in the path. */
		if (name_contains_dotdot(cn->corename)) {
			coredump_report_failure("Coredump socket path %s may not contain '..'", cn->corename);
			return false;
		}

		if (strlen(cn->corename) >= UNIX_PATH_MAX) {
			coredump_report_failure("Coredump socket path %s too long", cn->corename);
			return false;
		}

		/*
		 * Currently no need to parse any other options.
		 * Relevant information can be retrieved by the receiver
		 * from the peer pidfd (via SO_PEERPIDFD) or via
		 * /proc/<pid>, using the pidfd to guard against pid
		 * recycling when opening /proc/<pid>.
		 */
		return true;
	}
	case COREDUMP_FILE:
		break;
	default:
		WARN_ON_ONCE(true);
		return false;
	}

	/*
	 * Repeat as long as we have more pattern to process and more
	 * output space.
	 */
	while (*pat_ptr) {
		/*
		 * Split on spaces before doing template expansion so that
		 * %e and %E don't get split if they have spaces in them.
		 */
		if (cn->core_type == COREDUMP_PIPE) {
			if (isspace(*pat_ptr)) {
				if (cn->used != 0)
					was_space = true;
				pat_ptr++;
				continue;
			} else if (was_space) {
				was_space = false;
				err = cn_printf(cn, "%c", '\0');
				if (err)
					return false;
				(*argv)[(*argc)++] = cn->used;
			}
		}
		if (*pat_ptr != '%') {
			err = cn_printf(cn, "%c", *pat_ptr++);
		} else {
			switch (*++pat_ptr) {
			/* single % at the end, drop that */
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				err = cn_printf(cn, "%c", '%');
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				err = cn_printf(cn, "%d",
						task_tgid_vnr(current));
				break;
			/* global pid */
			case 'P':
				err = cn_printf(cn, "%d",
						task_tgid_nr(current));
				break;
			case 'i':
				err = cn_printf(cn, "%d",
						task_pid_vnr(current));
				break;
			case 'I':
				err = cn_printf(cn, "%d",
						task_pid_nr(current));
				break;
			/* uid */
			case 'u':
				err = cn_printf(cn, "%u",
						from_kuid(&init_user_ns,
							  cred->uid));
				break;
			/* gid */
			case 'g':
				err = cn_printf(cn, "%u",
						from_kgid(&init_user_ns,
							  cred->gid));
				break;
			case 'd':
				err = cn_printf(cn, "%d",
						__get_dumpable(cprm->mm_flags));
				break;
			/* signal that caused the coredump */
			case 's':
				err = cn_printf(cn, "%d",
						cprm->siginfo->si_signo);
				break;
			/* UNIX time of coredump */
			case 't': {
				time64_t time;

				time = ktime_get_real_seconds();
				err = cn_printf(cn, "%lld", time);
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				err = cn_esc_printf(cn, "%s",
						    utsname()->nodename);
				up_read(&uts_sem);
				break;
			/* executable, could be changed by prctl PR_SET_NAME etc */
			case 'e':
				err = cn_esc_printf(cn, "%s", current->comm);
				break;
			/* file name of executable */
			case 'f':
				err = cn_print_exe_file(cn, true);
				break;
			case 'E':
				err = cn_print_exe_file(cn, false);
				break;
			/* core limit size */
			case 'c':
				err = cn_printf(cn, "%lu",
						rlimit(RLIMIT_CORE));
				break;
			/* CPU the task ran on */
			case 'C':
				err = cn_printf(cn, "%d", cprm->cpu);
				break;
			/* pidfd number */
			case 'F': {
				/*
				 * Installing a pidfd only makes sense if
				 * we actually spawn a usermode helper.
				 */
				if (cn->core_type != COREDUMP_PIPE)
					break;

				/*
				 * Note that we'll install a pidfd for the
				 * thread-group leader. We know that task
				 * linkage hasn't been removed yet and even if
				 * this @current isn't the actual thread-group
				 * leader we know that the thread-group leader
				 * cannot be reaped until @current has exited.
				 */
				cprm->pid = task_tgid(current);
				err = cn_printf(cn, "%d", COREDUMP_PIDFD_NUMBER);
				break;
			}
			default:
				break;
			}
			++pat_ptr;
		}

		if (err)
			return false;
	}

out:
	/*
	 * Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands.
	 */
	if (cn->core_type == COREDUMP_FILE && !pid_in_pattern && core_uses_pid)
		return cn_printf(cn, ".%d", task_tgid_vnr(current)) == 0;

	return true;
}
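/*
 * Illustrative expansions (values hypothetical): with
 * core_pattern = "core.%e.%p.%t", a crash of a task named "worker" with
 * tgid 1234 at UNIX time 1700000000 yields "core.worker.1234.1700000000".
 * With core_pattern = "|/usr/bin/helper %p %s", argv becomes
 * { "/usr/bin/helper", "<tgid>", "<signal>", NULL }; splitting happens on
 * the pattern's spaces before template expansion, so expanded values
 * (e.g. an %E path containing spaces) never get split into extra
 * arguments.
 */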
static int zap_process(struct signal_struct *signal, int exit_code)
{
	struct task_struct *t;
	int nr = 0;

	signal->flags = SIGNAL_GROUP_EXIT;
	signal->group_exit_code = exit_code;
	signal->group_stop_count = 0;

	__for_each_thread(signal, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		if (t != current && !(t->flags & PF_POSTCOREDUMP)) {
			sigaddset(&t->pending.signal, SIGKILL);
			signal_wake_up(t, 1);
			nr++;
		}
	}

	return nr;
}

static int zap_threads(struct task_struct *tsk,
		       struct core_state *core_state, int exit_code)
{
	struct signal_struct *signal = tsk->signal;
	int nr = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!(signal->flags & SIGNAL_GROUP_EXIT) && !signal->group_exec_task) {
		/* Allow SIGKILL, see prepare_signal() */
		signal->core_state = core_state;
		nr = zap_process(signal, exit_code);
		clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
		tsk->flags |= PF_DUMPCORE;
		atomic_set(&core_state->nr_threads, nr);
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	return nr;
}

static int coredump_wait(int exit_code, struct core_state *core_state)
{
	struct task_struct *tsk = current;
	int core_waiters = -EBUSY;

	init_completion(&core_state->startup);
	core_state->dumper.task = tsk;
	core_state->dumper.next = NULL;

	core_waiters = zap_threads(tsk, core_state, exit_code);
	if (core_waiters > 0) {
		struct core_thread *ptr;

		wait_for_completion_state(&core_state->startup,
					  TASK_UNINTERRUPTIBLE|TASK_FREEZABLE);
		/*
		 * Wait for all the threads to become inactive, so that
		 * all the thread context (extended register state, like
		 * fpu etc) gets copied to the memory.
		 */
		ptr = core_state->dumper.next;
		while (ptr != NULL) {
			wait_task_inactive(ptr->task, TASK_ANY);
			ptr = ptr->next;
		}
	}

	return core_waiters;
}

static void coredump_finish(bool core_dumped)
{
	struct core_thread *curr, *next;
	struct task_struct *task;

	spin_lock_irq(&current->sighand->siglock);
	if (core_dumped && !__fatal_signal_pending(current))
		current->signal->group_exit_code |= 0x80;
	next = current->signal->core_state->dumper.next;
	current->signal->core_state = NULL;
	spin_unlock_irq(&current->sighand->siglock);

	while ((curr = next) != NULL) {
		next = curr->next;
		task = curr->task;
		/*
		 * see coredump_task_exit(), curr->task must not see
		 * ->task == NULL before we read ->next.
		 */
		smp_mb();
		curr->task = NULL;
		wake_up_process(task);
	}
}
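/*
 * Lifecycle of the handshake above, in brief: zap_threads() points
 * signal->core_state at our core_state and SIGKILLs every sibling
 * thread; each sibling enters coredump_task_exit(), links itself into
 * core_state->dumper, and completes ->startup when the last one arrives;
 * coredump_wait() then waits for each sibling to go inactive so its
 * register state has been saved; coredump_finish() finally clears
 * ->core_state and wakes the parked threads so they can finish exiting.
 */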
static bool dump_interrupted(void)
{
	/*
	 * SIGKILL or freezing() interrupt the coredumping. Perhaps we
	 * can do try_to_freeze() and check __fatal_signal_pending(),
	 * but then we need to teach dump_write() to restart and clear
	 * TIF_SIGPENDING.
	 */
	return fatal_signal_pending(current) || freezing(current);
}

static void wait_for_dump_helpers(struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	pipe_lock(pipe);
	pipe->readers++;
	pipe->writers--;
	wake_up_interruptible_sync(&pipe->rd_wait);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	pipe_unlock(pipe);

	/*
	 * We actually want wait_event_freezable() but then we need
	 * to clear TIF_SIGPENDING and improve dump_interrupted().
	 */
	wait_event_interruptible(pipe->rd_wait, pipe->readers == 1);

	pipe_lock(pipe);
	pipe->readers--;
	pipe->writers++;
	pipe_unlock(pipe);
}

/*
 * umh_coredump_setup
 * helper function to customize the process used
 * to collect the core in userspace. Specifically
 * it sets up a pipe and installs it as fd 0 (stdin)
 * for the process. Returns 0 on success, or a
 * negative error code on failure.
 * Note that it also sets the core limit to 1. This
 * is a special value that we use to trap recursive
 * core dumps.
 */
static int umh_coredump_setup(struct subprocess_info *info, struct cred *new)
{
	struct file *files[2];
	struct coredump_params *cp = (struct coredump_params *)info->data;
	int err;

	if (cp->pid) {
		struct file *pidfs_file __free(fput) = NULL;

		pidfs_file = pidfs_alloc_file(cp->pid, 0);
		if (IS_ERR(pidfs_file))
			return PTR_ERR(pidfs_file);

		pidfs_coredump(cp);

		/*
		 * Usermode helpers are children of either
		 * system_dfl_wq or of kthreadd, so we know that
		 * we're starting off with a clean file descriptor
		 * table and should always be able to use
		 * COREDUMP_PIDFD_NUMBER as our file descriptor value.
		 */
		err = replace_fd(COREDUMP_PIDFD_NUMBER, pidfs_file, 0);
		if (err < 0)
			return err;
	}

	err = create_pipe_files(files, 0);
	if (err)
		return err;

	cp->file = files[1];

	err = replace_fd(0, files[0], 0);
	fput(files[0]);
	if (err < 0)
		return err;

	/* and disallow core files too */
	current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};

	return 0;
}
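/*
 * Minimal sketch of a matching usermode helper (not kernel code; the
 * output path is hypothetical). It reads the core image from the pipe
 * installed as stdin above:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		size_t n;
 *		FILE *out = fopen("/var/crash/core", "w");
 *
 *		if (!out)
 *			return 1;
 *		while ((n = fread(buf, 1, sizeof(buf), stdin)) > 0)
 *			fwrite(buf, 1, n, out);
 *		fclose(out);
 *		return 0;
 *	}
 *
 * The helper itself runs with RLIMIT_CORE == 1, so a crash inside the
 * helper cannot recursively trigger another pipe dump.
 */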
#ifdef CONFIG_UNIX
static bool coredump_sock_connect(struct core_name *cn, struct coredump_params *cprm)
{
	struct file *file __free(fput) = NULL;
	struct sockaddr_un addr = {
		.sun_family = AF_UNIX,
	};
	ssize_t addr_len;
	int retval;
	struct socket *socket;

	addr_len = strscpy(addr.sun_path, cn->corename);
	if (addr_len < 0)
		return false;
	addr_len += offsetof(struct sockaddr_un, sun_path) + 1;

	/*
	 * It is possible that the userspace process which is supposed
	 * to handle the coredump and is listening on the AF_UNIX socket
	 * coredumps. Userspace should just mark itself non-dumpable.
	 */

	retval = sock_create_kern(&init_net, AF_UNIX, SOCK_STREAM, 0, &socket);
	if (retval < 0)
		return false;

	file = sock_alloc_file(socket, 0, NULL);
	if (IS_ERR(file))
		return false;

	/*
	 * Set the thread-group leader pid which is used for the peer
	 * credentials during connect() below. Then immediately register
	 * it in pidfs...
	 */
	cprm->pid = task_tgid(current);
	retval = pidfs_register_pid(cprm->pid);
	if (retval)
		return false;

	/*
	 * ... and set the coredump information so userspace has it
	 * available after connect()...
	 */
	pidfs_coredump(cprm);

	retval = kernel_connect(socket, (struct sockaddr_unsized *)(&addr), addr_len,
				O_NONBLOCK | SOCK_COREDUMP);
	if (retval) {
		if (retval == -EAGAIN)
			coredump_report_failure("Coredump socket %s receive queue full", addr.sun_path);
		else
			coredump_report_failure("Coredump socket connection %s failed %d", addr.sun_path, retval);
		return false;
	}

	/* ... and validate that @sk_peer_pid matches @cprm.pid. */
	if (WARN_ON_ONCE(unix_peer(socket->sk)->sk_peer_pid != cprm->pid))
		return false;

	cprm->limit = RLIM_INFINITY;
	cprm->file = no_free_ptr(file);

	return true;
}

static inline bool coredump_sock_recv(struct file *file, struct coredump_ack *ack, size_t size, int flags)
{
	struct msghdr msg = {};
	struct kvec iov = { .iov_base = ack, .iov_len = size };
	ssize_t ret;

	memset(ack, 0, size);
	ret = kernel_recvmsg(sock_from_file(file), &msg, &iov, 1, size, flags);
	return ret == size;
}

static inline bool coredump_sock_send(struct file *file, struct coredump_req *req)
{
	struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
	struct kvec iov = { .iov_base = req, .iov_len = sizeof(*req) };
	ssize_t ret;

	ret = kernel_sendmsg(sock_from_file(file), &msg, &iov, 1, sizeof(*req));
	return ret == sizeof(*req);
}

static_assert(sizeof(enum coredump_mark) == sizeof(__u32));

static inline bool coredump_sock_mark(struct file *file, enum coredump_mark mark)
{
	struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
	struct kvec iov = { .iov_base = &mark, .iov_len = sizeof(mark) };
	ssize_t ret;

	ret = kernel_sendmsg(sock_from_file(file), &msg, &iov, 1, sizeof(mark));
	return ret == sizeof(mark);
}

static inline void coredump_sock_wait(struct file *file)
{
	ssize_t n;

	/*
	 * We use a simple read to wait for the coredump processing to
	 * finish. Either the socket is closed or we get sent unexpected
	 * data. In both cases, we're done.
	 */
	n = __kernel_read(file, &(char){ 0 }, 1, NULL);
	if (n > 0)
		coredump_report_failure("Coredump socket had unexpected data");
	else if (n < 0)
		coredump_report_failure("Coredump socket failed");
}

static inline void coredump_sock_shutdown(struct file *file)
{
	struct socket *socket;

	socket = sock_from_file(file);
	if (!socket)
		return;

	/* Let userspace know we're done processing the coredump. */
	kernel_sock_shutdown(socket, SHUT_WR);
}
static bool coredump_sock_request(struct core_name *cn, struct coredump_params *cprm)
{
	struct coredump_req req = {
		.size		= sizeof(struct coredump_req),
		.mask		= COREDUMP_KERNEL | COREDUMP_USERSPACE |
				  COREDUMP_REJECT | COREDUMP_WAIT,
		.size_ack	= sizeof(struct coredump_ack),
	};
	struct coredump_ack ack = {};
	ssize_t usize;

	if (cn->core_type != COREDUMP_SOCK_REQ)
		return true;

	/* Let userspace know what we support. */
	if (!coredump_sock_send(cprm->file, &req))
		return false;

	/* Peek the size of the coredump_ack. */
	if (!coredump_sock_recv(cprm->file, &ack, sizeof(ack.size),
				MSG_PEEK | MSG_WAITALL))
		return false;

	/* Refuse unknown coredump_ack sizes. */
	usize = ack.size;
	if (usize < COREDUMP_ACK_SIZE_VER0) {
		coredump_sock_mark(cprm->file, COREDUMP_MARK_MINSIZE);
		return false;
	}

	if (usize > sizeof(ack)) {
		coredump_sock_mark(cprm->file, COREDUMP_MARK_MAXSIZE);
		return false;
	}

	/* Now retrieve the coredump_ack. */
	if (!coredump_sock_recv(cprm->file, &ack, usize, MSG_WAITALL))
		return false;
	if (ack.size != usize)
		return false;

	/* Refuse unknown coredump_ack flags. */
	if (ack.mask & ~req.mask) {
		coredump_sock_mark(cprm->file, COREDUMP_MARK_UNSUPPORTED);
		return false;
	}

	/* Refuse mutually exclusive options. */
	if (hweight64(ack.mask & (COREDUMP_USERSPACE | COREDUMP_KERNEL |
				  COREDUMP_REJECT)) != 1) {
		coredump_sock_mark(cprm->file, COREDUMP_MARK_CONFLICTING);
		return false;
	}

	if (ack.spare) {
		coredump_sock_mark(cprm->file, COREDUMP_MARK_UNSUPPORTED);
		return false;
	}

	cn->mask = ack.mask;
	return coredump_sock_mark(cprm->file, COREDUMP_MARK_REQACK);
}

static bool coredump_socket(struct core_name *cn, struct coredump_params *cprm)
{
	if (!coredump_sock_connect(cn, cprm))
		return false;

	return coredump_sock_request(cn, cprm);
}
#else
static inline void coredump_sock_wait(struct file *file) { }
static inline void coredump_sock_shutdown(struct file *file) { }
static inline bool coredump_socket(struct core_name *cn, struct coredump_params *cprm) { return false; }
#endif
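/*
 * Illustrative coredump server sketch for the request protocol above
 * (not kernel code), assuming core_pattern = "@@/run/dumper.sock" and a
 * connected stream socket fd handed out by accept():
 *
 *	#include <linux/coredump.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static void handle(int fd)
 *	{
 *		struct coredump_req req;
 *		struct coredump_ack ack;
 *		__u32 mark;
 *		char buf[65536];
 *
 *		read(fd, &req, sizeof(req));	// kernel advertises its capabilities
 *
 *		memset(&ack, 0, sizeof(ack));
 *		ack.size = sizeof(ack);
 *		// ask the kernel to write the core itself and to wait for us
 *		ack.mask = COREDUMP_KERNEL | COREDUMP_WAIT;
 *		write(fd, &ack, sizeof(ack));
 *
 *		read(fd, &mark, sizeof(mark));	// expect COREDUMP_MARK_REQACK
 *
 *		// the core image follows; EOF once the kernel shuts down writes
 *		while (read(fd, buf, sizeof(buf)) > 0)
 *			;	// consume (or store) the core image
 *		close(fd);	// lets coredump_sock_wait() return
 *	}
 *
 * Peer credentials of the crashing task are available via SO_PEERPIDFD
 * on the accepted socket.
 */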
/* cprm->mm_flags contains a stable snapshot of dumpability flags. */
static inline bool coredump_force_suid_safe(const struct coredump_params *cprm)
{
	/* Require nonrelative corefile path and be extra careful. */
	return __get_dumpable(cprm->mm_flags) == SUID_DUMP_ROOT;
}

static bool coredump_file(struct core_name *cn, struct coredump_params *cprm,
			  const struct linux_binfmt *binfmt)
{
	struct mnt_idmap *idmap;
	struct inode *inode;
	struct file *file __free(fput) = NULL;
	int open_flags = O_CREAT | O_WRONLY | O_NOFOLLOW | O_LARGEFILE | O_EXCL;

	if (cprm->limit < binfmt->min_coredump)
		return false;

	if (coredump_force_suid_safe(cprm) && cn->corename[0] != '/') {
		coredump_report_failure("this process can only dump core to a fully qualified path, skipping core dump");
		return false;
	}

	/*
	 * Unlink the file if it exists unless this is a SUID
	 * binary - in that case, we're running around with root
	 * privs and don't want to unlink another user's coredump.
	 */
	if (!coredump_force_suid_safe(cprm)) {
		CLASS(filename_kernel, name)(cn->corename);

		/*
		 * If it doesn't exist, that's fine. If there's some
		 * other problem, we'll catch it at the filp_open().
		 */
		filename_unlinkat(AT_FDCWD, name);
	}

	/*
	 * There is a race between unlinking and creating the
	 * file, but if that causes an EEXIST here, that's
	 * fine - another process raced with us while creating
	 * the corefile, and the other process won. To userspace,
	 * what matters is that at least one of the two processes
	 * writes its coredump successfully, not which one.
	 */
	if (coredump_force_suid_safe(cprm)) {
		/*
		 * Using user namespaces, normal user tasks can change
		 * their current->fs->root to point to arbitrary
		 * directories. Since the intention of the "only dump
		 * with a fully qualified path" rule is to control where
		 * coredumps may be placed using root privileges,
		 * current->fs->root must not be used. Instead, use the
		 * root directory of init_task.
		 */
		struct path root;

		task_lock(&init_task);
		get_fs_root(init_task.fs, &root);
		task_unlock(&init_task);
		file = file_open_root(&root, cn->corename, open_flags, 0600);
		path_put(&root);
	} else {
		file = filp_open(cn->corename, open_flags, 0600);
	}
	if (IS_ERR(file))
		return false;

	inode = file_inode(file);
	if (inode->i_nlink > 1)
		return false;
	if (d_unhashed(file->f_path.dentry))
		return false;
	/*
	 * AK: actually I see no reason to not allow this for named
	 * pipes etc, but keep the previous behaviour for now.
	 */
	if (!S_ISREG(inode->i_mode))
		return false;
	/*
	 * Don't dump core if the filesystem changed owner or mode
	 * of the file during file creation. This is an issue when
	 * a process dumps core while its cwd is e.g. on a vfat
	 * filesystem.
	 */
	idmap = file_mnt_idmap(file);
	if (!vfsuid_eq_kuid(i_uid_into_vfsuid(idmap, inode), current_fsuid())) {
		coredump_report_failure("Core dump to %s aborted: cannot preserve file owner", cn->corename);
		return false;
	}
	if ((inode->i_mode & 0677) != 0600) {
		coredump_report_failure("Core dump to %s aborted: cannot preserve file permissions", cn->corename);
		return false;
	}
	if (!(file->f_mode & FMODE_CAN_WRITE))
		return false;
	if (do_truncate(idmap, file->f_path.dentry, 0, 0, file))
		return false;

	cprm->file = no_free_ptr(file);
	return true;
}
static bool coredump_pipe(struct core_name *cn, struct coredump_params *cprm,
			  size_t *argv, int argc)
{
	int argi;
	char **helper_argv __free(kfree) = NULL;
	struct subprocess_info *sub_info;

	if (cprm->limit == 1) {
		/* See umh_coredump_setup() which sets RLIMIT_CORE = 1.
		 *
		 * Normally core limits are irrelevant to pipes, since
		 * we're not writing to the file system, but we use
		 * cprm.limit of 1 here as a special value: this is a
		 * consistent way to catch recursive crashes.
		 * We can still crash if the core_pattern binary sets
		 * RLIMIT_CORE != 1, but it runs as root, and can do
		 * lots of stupid things.
		 *
		 * Note that we use task_tgid_vnr here to grab the pid
		 * of the thread group leader. That way we get the
		 * right pid if a thread in a multi-threaded
		 * core_pattern process dies.
		 */
		coredump_report_failure("RLIMIT_CORE is set to 1, aborting core");
		return false;
	}
	cprm->limit = RLIM_INFINITY;

	cn->core_pipe_limit = atomic_inc_return(&core_pipe_count);
	if (core_pipe_limit && (core_pipe_limit < cn->core_pipe_limit)) {
		coredump_report_failure("over core_pipe_limit, skipping core dump");
		return false;
	}

	helper_argv = kmalloc_array(argc + 1, sizeof(*helper_argv), GFP_KERNEL);
	if (!helper_argv) {
		coredump_report_failure("%s failed to allocate memory", __func__);
		return false;
	}
	for (argi = 0; argi < argc; argi++)
		helper_argv[argi] = cn->corename + argv[argi];
	helper_argv[argi] = NULL;

	sub_info = call_usermodehelper_setup(helper_argv[0], helper_argv, NULL,
					     GFP_KERNEL, umh_coredump_setup,
					     NULL, cprm);
	if (!sub_info)
		return false;

	if (call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC)) {
		coredump_report_failure("|%s pipe failed", cn->corename);
		return false;
	}

	/*
	 * umh disabled with CONFIG_STATIC_USERMODEHELPER_PATH="" would
	 * have this set to NULL.
	 */
	if (!cprm->file) {
		coredump_report_failure("Core dump to |%s disabled", cn->corename);
		return false;
	}

	return true;
}

static bool coredump_write(struct core_name *cn,
			   struct coredump_params *cprm,
			   const struct linux_binfmt *binfmt)
{
	if (dump_interrupted())
		return true;

	if (!dump_vma_snapshot(cprm))
		return false;

	file_start_write(cprm->file);
	cn->core_dumped = binfmt->core_dump(cprm);
	/*
	 * Ensure that the file size is big enough to contain the current
	 * file position. This prevents gdb from complaining about
	 * a truncated file if the last "write" to the file was
	 * dump_skip.
	 */
	if (cprm->to_skip) {
		cprm->to_skip--;
		dump_emit(cprm, "", 1);
	}
	file_end_write(cprm->file);
	free_vma_snapshot(cprm);
	return true;
}

static void coredump_cleanup(struct core_name *cn, struct coredump_params *cprm)
{
	if (cprm->file)
		filp_close(cprm->file, NULL);
	if (cn->core_pipe_limit) {
		VFS_WARN_ON_ONCE(cn->core_type != COREDUMP_PIPE);
		atomic_dec(&core_pipe_count);
	}
	kfree(cn->corename);
	coredump_finish(cn->core_dumped);
}

static inline bool coredump_skip(const struct coredump_params *cprm,
				 const struct linux_binfmt *binfmt)
{
	if (!binfmt)
		return true;
	if (!binfmt->core_dump)
		return true;
	if (!__get_dumpable(cprm->mm_flags))
		return true;
	return false;
}
static void do_coredump(struct core_name *cn, struct coredump_params *cprm,
			size_t **argv, int *argc, const struct linux_binfmt *binfmt)
{
	if (!coredump_parse(cn, cprm, argv, argc)) {
		coredump_report_failure("format_corename failed, aborting core");
		return;
	}

	switch (cn->core_type) {
	case COREDUMP_FILE:
		if (!coredump_file(cn, cprm, binfmt))
			return;
		break;
	case COREDUMP_PIPE:
		if (!coredump_pipe(cn, cprm, *argv, *argc))
			return;
		break;
	case COREDUMP_SOCK_REQ:
		fallthrough;
	case COREDUMP_SOCK:
		if (!coredump_socket(cn, cprm))
			return;
		break;
	default:
		WARN_ON_ONCE(true);
		return;
	}

	/* Don't even generate the coredump. */
	if (cn->mask & COREDUMP_REJECT)
		return;

	/*
	 * Get us an unshared descriptor table; almost always a no-op.
	 * The cell spufs coredump code reads the file descriptor tables.
	 */
	if (unshare_files())
		return;

	if ((cn->mask & COREDUMP_KERNEL) && !coredump_write(cn, cprm, binfmt))
		return;

	coredump_sock_shutdown(cprm->file);

	/* Let the parent know that a coredump was generated. */
	if (cn->mask & COREDUMP_USERSPACE)
		cn->core_dumped = true;

	/*
	 * When core_pipe_limit is set we wait for the coredump server
	 * or usermodehelper to finish before exiting so it can e.g.,
	 * inspect /proc/<pid>.
	 */
	if (cn->mask & COREDUMP_WAIT) {
		switch (cn->core_type) {
		case COREDUMP_PIPE:
			wait_for_dump_helpers(cprm->file);
			break;
		case COREDUMP_SOCK_REQ:
			fallthrough;
		case COREDUMP_SOCK:
			coredump_sock_wait(cprm->file);
			break;
		default:
			break;
		}
	}
}

void vfs_coredump(const kernel_siginfo_t *siginfo)
{
	size_t *argv __free(kfree) = NULL;
	struct core_state core_state;
	struct core_name cn;
	const struct mm_struct *mm = current->mm;
	const struct linux_binfmt *binfmt = mm->binfmt;
	int argc = 0;
	struct coredump_params cprm = {
		.siginfo = siginfo,
		.limit = rlimit(RLIMIT_CORE),
		/*
		 * We must use the same mm->flags while dumping core to avoid
		 * inconsistency of bit flags, since this flag is not protected
		 * by any locks.
		 *
		 * Note that we only care about MMF_DUMP* flags.
		 */
		.mm_flags = __mm_flags_get_dumpable(mm),
		.vma_meta = NULL,
		.cpu = raw_smp_processor_id(),
	};

	audit_core_dumps(siginfo->si_signo);

	if (coredump_skip(&cprm, binfmt))
		return;

	CLASS(prepare_creds, cred)();
	if (!cred)
		return;
	/*
	 * We cannot trust fsuid as being the "true" uid of the process
	 * nor do we know its entire history. We only know it was tainted
	 * so we dump it as root in mode 2, and only into a controlled
	 * environment (pipe handler or fully qualified path).
	 */
	if (coredump_force_suid_safe(&cprm))
		cred->fsuid = GLOBAL_ROOT_UID;

	if (coredump_wait(siginfo->si_signo, &core_state) < 0)
		return;

	scoped_with_creds(cred)
		do_coredump(&cn, &cprm, &argv, &argc, binfmt);
	coredump_cleanup(&cn, &cprm);
}
/*
 * Core dumping helper functions. These are the only things you should
 * do on a core-file: use only these functions to write out all the
 * necessary info.
 */
static int __dump_emit(struct coredump_params *cprm, const void *addr, int nr)
{
	struct file *file = cprm->file;
	loff_t pos = file->f_pos;
	ssize_t n;

	if (cprm->written + nr > cprm->limit)
		return 0;
	if (dump_interrupted())
		return 0;
	n = __kernel_write(file, addr, nr, &pos);
	if (n != nr)
		return 0;
	file->f_pos = pos;
	cprm->written += n;
	cprm->pos += n;

	return 1;
}

static int __dump_skip(struct coredump_params *cprm, size_t nr)
{
	static char zeroes[PAGE_SIZE];
	struct file *file = cprm->file;

	if (file->f_mode & FMODE_LSEEK) {
		if (dump_interrupted() || vfs_llseek(file, nr, SEEK_CUR) < 0)
			return 0;
		cprm->pos += nr;
		return 1;
	}

	while (nr > PAGE_SIZE) {
		if (!__dump_emit(cprm, zeroes, PAGE_SIZE))
			return 0;
		nr -= PAGE_SIZE;
	}

	return __dump_emit(cprm, zeroes, nr);
}

int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
{
	if (cprm->to_skip) {
		if (!__dump_skip(cprm, cprm->to_skip))
			return 0;
		cprm->to_skip = 0;
	}
	return __dump_emit(cprm, addr, nr);
}
EXPORT_SYMBOL(dump_emit);

void dump_skip_to(struct coredump_params *cprm, unsigned long pos)
{
	cprm->to_skip = pos - cprm->pos;
}
EXPORT_SYMBOL(dump_skip_to);

void dump_skip(struct coredump_params *cprm, size_t nr)
{
	cprm->to_skip += nr;
}
EXPORT_SYMBOL(dump_skip);
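/*
 * Bookkeeping example: after dump_emit(cprm, hdr, 64), cprm->pos == 64.
 * A subsequent dump_skip_to(cprm, 4096) merely records to_skip = 4032;
 * nothing hits the file until the next dump_emit(), which first
 * materializes the gap, either via vfs_llseek() (leaving a sparse hole)
 * or by writing pages of zeroes when the target cannot seek, e.g. a
 * pipe or socket.
 */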
#ifdef CONFIG_ELF_CORE
static int dump_emit_page(struct coredump_params *cprm, struct page *page)
{
	struct bio_vec bvec;
	struct iov_iter iter;
	struct file *file = cprm->file;
	loff_t pos;
	ssize_t n;

	if (!page)
		return 0;

	if (cprm->to_skip) {
		if (!__dump_skip(cprm, cprm->to_skip))
			return 0;
		cprm->to_skip = 0;
	}
	if (cprm->written + PAGE_SIZE > cprm->limit)
		return 0;
	if (dump_interrupted())
		return 0;
	pos = file->f_pos;
	bvec_set_page(&bvec, page, PAGE_SIZE, 0);
	iov_iter_bvec(&iter, ITER_SOURCE, &bvec, 1, PAGE_SIZE);
	n = __kernel_write_iter(cprm->file, &iter, &pos);
	if (n != PAGE_SIZE)
		return 0;
	file->f_pos = pos;
	cprm->written += PAGE_SIZE;
	cprm->pos += PAGE_SIZE;

	return 1;
}

/*
 * If we might get machine checks from kernel accesses during the
 * core dump, let's get those errors early rather than during the
 * IO. This is not performance-critical enough to warrant having
 * all the machine check logic in the iovec paths.
 */
#ifdef copy_mc_to_kernel

#define dump_page_alloc() alloc_page(GFP_KERNEL)
#define dump_page_free(x) __free_page(x)
static struct page *dump_page_copy(struct page *src, struct page *dst)
{
	void *buf = kmap_local_page(src);
	size_t left = copy_mc_to_kernel(page_address(dst), buf, PAGE_SIZE);
	kunmap_local(buf);
	return left ? NULL : dst;
}

#else

/* We just want to return non-NULL; it's never used. */
#define dump_page_alloc() ERR_PTR(-EINVAL)
#define dump_page_free(x) ((void)(x))
static inline struct page *dump_page_copy(struct page *src, struct page *dst)
{
	return src;
}
#endif

int dump_user_range(struct coredump_params *cprm, unsigned long start,
		    unsigned long len)
{
	unsigned long addr;
	struct page *dump_page;
	int locked, ret;

	dump_page = dump_page_alloc();
	if (!dump_page)
		return 0;

	ret = 0;
	locked = 0;
	for (addr = start; addr < start + len; addr += PAGE_SIZE) {
		struct page *page;

		if (!locked) {
			if (mmap_read_lock_killable(current->mm))
				goto out;
			locked = 1;
		}

		/*
		 * To avoid having to allocate page tables for virtual address
		 * ranges that have never been used yet, and also to make it
		 * easy to generate sparse core files, use a helper that returns
		 * NULL when encountering an empty page table entry that would
		 * otherwise have been filled with the zero page.
		 */
		page = get_dump_page(addr, &locked);
		if (page) {
			if (locked) {
				mmap_read_unlock(current->mm);
				locked = 0;
			}
			int stop = !dump_emit_page(cprm, dump_page_copy(page, dump_page));
			put_page(page);
			if (stop)
				goto out;
		} else {
			dump_skip(cprm, PAGE_SIZE);
		}

		if (dump_interrupted())
			goto out;

		if (!need_resched())
			continue;
		if (locked) {
			mmap_read_unlock(current->mm);
			locked = 0;
		}
		cond_resched();
	}
	ret = 1;
out:
	if (locked)
		mmap_read_unlock(current->mm);

	dump_page_free(dump_page);
	return ret;
}
#endif

int dump_align(struct coredump_params *cprm, int align)
{
	unsigned int mod = (cprm->pos + cprm->to_skip) & (align - 1);

	if (align & (align - 1))
		return 0;
	if (mod)
		cprm->to_skip += align - mod;
	return 1;
}
EXPORT_SYMBOL(dump_align);
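/*
 * Worked example: with cprm->pos + cprm->to_skip == 0x1234,
 * dump_align(cprm, 0x1000) computes mod == 0x234 and adds
 * 0x1000 - 0x234 == 0xdcc to to_skip, so the next emitted byte lands at
 * offset 0x2000. The alignment must be a power of two or the call fails.
 */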
#ifdef CONFIG_SYSCTL

void validate_coredump_safety(void)
{
	if (suid_dumpable == SUID_DUMP_ROOT &&
	    core_pattern[0] != '/' && core_pattern[0] != '|' && core_pattern[0] != '@') {
		coredump_report_failure("Unsafe core_pattern used with fs.suid_dumpable=2: "
			"pipe handler or fully qualified core dump path required. "
			"Set kernel.core_pattern before fs.suid_dumpable.");
	}
}

static inline bool check_coredump_socket(void)
{
	const char *p;

	if (core_pattern[0] != '@')
		return true;

	/*
	 * Coredump socket must be located in the initial mount
	 * namespace. Don't give the impression that anything else is
	 * supported right now.
	 */
	if (current->nsproxy->mnt_ns != init_task.nsproxy->mnt_ns)
		return false;

	/* Must be an absolute path... */
	if (core_pattern[1] != '/') {
		/* ... or the socket request protocol... */
		if (core_pattern[1] != '@')
			return false;
		/* ... and if so must be an absolute path. */
		if (core_pattern[2] != '/')
			return false;
		p = &core_pattern[2];
	} else {
		p = &core_pattern[1];
	}

	/* The path obviously cannot exceed UNIX_PATH_MAX. */
	if (strlen(p) >= UNIX_PATH_MAX)
		return false;

	/* Must not contain ".." in the path. */
	if (name_contains_dotdot(core_pattern))
		return false;

	return true;
}

static int proc_dostring_coredump(const struct ctl_table *table, int write,
				  void *buffer, size_t *lenp, loff_t *ppos)
{
	int error;
	ssize_t retval;
	char old_core_pattern[CORENAME_MAX_SIZE];

	if (!write)
		return proc_dostring(table, write, buffer, lenp, ppos);

	retval = strscpy(old_core_pattern, core_pattern, CORENAME_MAX_SIZE);

	error = proc_dostring(table, write, buffer, lenp, ppos);
	if (error)
		return error;

	if (!check_coredump_socket()) {
		strscpy(core_pattern, old_core_pattern, retval + 1);
		return -EINVAL;
	}

	validate_coredump_safety();
	return error;
}

static const unsigned int core_file_note_size_min = CORE_FILE_NOTE_SIZE_DEFAULT;
static const unsigned int core_file_note_size_max = CORE_FILE_NOTE_SIZE_MAX;
static char core_modes[] = {
	"file\npipe"
#ifdef CONFIG_UNIX
	"\nsocket"
#endif
};

static const struct ctl_table coredump_sysctls[] = {
	{
		.procname	= "core_uses_pid",
		.data		= &core_uses_pid,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "core_pattern",
		.data		= core_pattern,
		.maxlen		= CORENAME_MAX_SIZE,
		.mode		= 0644,
		.proc_handler	= proc_dostring_coredump,
	},
	{
		.procname	= "core_pipe_limit",
		.data		= &core_pipe_limit,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_INT_MAX,
	},
	{
		.procname	= "core_file_note_size_limit",
		.data		= &core_file_note_size_limit,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_douintvec_minmax,
		.extra1		= (unsigned int *)&core_file_note_size_min,
		.extra2		= (unsigned int *)&core_file_note_size_max,
	},
	{
		.procname	= "core_sort_vma",
		.data		= &core_sort_vma,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_douintvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{
		.procname	= "core_modes",
		.data		= core_modes,
		.maxlen		= sizeof(core_modes) - 1,
		.mode		= 0444,
		.proc_handler	= proc_dostring,
	},
};

static int __init init_fs_coredump_sysctls(void)
{
	register_sysctl_init("kernel", coredump_sysctls);
	return 0;
}
fs_initcall(init_fs_coredump_sysctls);
#endif /* CONFIG_SYSCTL */
/*
 * The purpose of always_dump_vma() is to make sure that special kernel
 * mappings that are useful for post-mortem analysis are included in
 * every core dump. That way we ensure that the core dump is fully
 * interpretable later without matching up the same kernel and hardware
 * config to see what PC values meant. These special mappings include
 * the vDSO, vsyscall, and other architecture-specific mappings.
 */
static bool always_dump_vma(struct vm_area_struct *vma)
{
	/* Any vsyscall mappings? */
	if (vma == get_gate_vma(vma->vm_mm))
		return true;

	/*
	 * Assume that all vmas with a .name op should always be dumped.
	 * If this changes, a new vm_ops field can easily be added.
	 */
	if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
		return true;

	/*
	 * arch_vma_name() returns non-NULL for special architecture mappings,
	 * such as vDSO sections.
	 */
	if (arch_vma_name(vma))
		return true;

	return false;
}

#define DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER 1

/*
 * Decide how much of @vma's contents should be included in a core dump.
 */
static unsigned long vma_dump_size(struct vm_area_struct *vma,
				   unsigned long mm_flags)
{
#define FILTER(type)	(mm_flags & (1UL << MMF_DUMP_##type))

	/* always dump the vdso and vsyscall sections */
	if (always_dump_vma(vma))
		goto whole;

	if (vma->vm_flags & VM_DONTDUMP)
		return 0;

	/* support for DAX */
	if (vma_is_dax(vma)) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE))
			goto whole;
		return 0;
	}

	/* Hugetlb memory check */
	if (is_vm_hugetlb_page(vma)) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
			goto whole;
		return 0;
	}

	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & VM_IO)
		return 0;

	/* By default, dump shared memory if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED) {
		if (file_inode(vma->vm_file)->i_nlink == 0 ?
		    FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
			goto whole;
		return 0;
	}

	/* Dump segments that have been written to. */
	if ((!IS_ENABLED(CONFIG_MMU) || vma->anon_vma) && FILTER(ANON_PRIVATE))
		goto whole;
	if (vma->vm_file == NULL)
		return 0;

	if (FILTER(MAPPED_PRIVATE))
		goto whole;

	/*
	 * If this is the beginning of an executable file mapping,
	 * dump the first page to aid in determining what was mapped here.
	 */
	if (FILTER(ELF_HEADERS) &&
	    vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
		if ((READ_ONCE(file_inode(vma->vm_file)->i_mode) & 0111) != 0)
			return PAGE_SIZE;

		/*
		 * ELF libraries aren't always executable.
		 * We'll want to check whether the mapping starts with the ELF
		 * magic, but not now - we're holding the mmap lock,
		 * so copy_from_user() doesn't work here.
		 * Use a placeholder instead, and fix it up later in
		 * dump_vma_snapshot().
		 */
		return DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER;
	}

#undef FILTER

	return 0;

whole:
	return vma->vm_end - vma->vm_start;
}
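/*
 * The MMF_DUMP_* filter bits checked above are controlled from
 * userspace via /proc/<pid>/coredump_filter: bit 0 anonymous private,
 * bit 1 anonymous shared, bit 2 file-backed private, bit 3 file-backed
 * shared, bit 4 ELF headers, bits 5/6 hugetlb private/shared, bits 7/8
 * DAX private/shared. For example, writing 0x3 limits the dump to
 * anonymous memory.
 */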
/*
 * Helper function for iterating across a vma list. It ensures that the
 * caller will visit `gate_vma' prior to terminating the search.
 */
static struct vm_area_struct *coredump_next_vma(struct vma_iterator *vmi,
						struct vm_area_struct *vma,
						struct vm_area_struct *gate_vma)
{
	if (gate_vma && (vma == gate_vma))
		return NULL;

	vma = vma_next(vmi);
	if (vma)
		return vma;
	return gate_vma;
}

static void free_vma_snapshot(struct coredump_params *cprm)
{
	if (cprm->vma_meta) {
		int i;

		for (i = 0; i < cprm->vma_count; i++) {
			struct file *file = cprm->vma_meta[i].file;

			if (file)
				fput(file);
		}
		kvfree(cprm->vma_meta);
		cprm->vma_meta = NULL;
	}
}

static int cmp_vma_size(const void *vma_meta_lhs_ptr, const void *vma_meta_rhs_ptr)
{
	const struct core_vma_metadata *vma_meta_lhs = vma_meta_lhs_ptr;
	const struct core_vma_metadata *vma_meta_rhs = vma_meta_rhs_ptr;

	if (vma_meta_lhs->dump_size < vma_meta_rhs->dump_size)
		return -1;
	if (vma_meta_lhs->dump_size > vma_meta_rhs->dump_size)
		return 1;
	return 0;
}

/*
 * Under the mmap_lock, take a snapshot of relevant information about the
 * task's VMAs.
 */
static bool dump_vma_snapshot(struct coredump_params *cprm)
{
	struct vm_area_struct *gate_vma, *vma = NULL;
	struct mm_struct *mm = current->mm;
	VMA_ITERATOR(vmi, mm, 0);
	int i = 0;

	/*
	 * Once the stack expansion code is fixed to not change VMA bounds
	 * under mmap_lock in read mode, this can be changed to take the
	 * mmap_lock in read mode.
	 */
	if (mmap_write_lock_killable(mm))
		return false;

	cprm->vma_data_size = 0;
	gate_vma = get_gate_vma(mm);
	cprm->vma_count = mm->map_count + (gate_vma ? 1 : 0);

	cprm->vma_meta = kvmalloc_array(cprm->vma_count, sizeof(*cprm->vma_meta), GFP_KERNEL);
	if (!cprm->vma_meta) {
		mmap_write_unlock(mm);
		return false;
	}

	while ((vma = coredump_next_vma(&vmi, vma, gate_vma)) != NULL) {
		struct core_vma_metadata *m = cprm->vma_meta + i;

		m->start = vma->vm_start;
		m->end = vma->vm_end;
		m->flags = vma->vm_flags;
		m->dump_size = vma_dump_size(vma, cprm->mm_flags);
		m->pgoff = vma->vm_pgoff;
		m->file = vma->vm_file;
		if (m->file)
			get_file(m->file);
		i++;
	}

	mmap_write_unlock(mm);

	for (i = 0; i < cprm->vma_count; i++) {
		struct core_vma_metadata *m = cprm->vma_meta + i;

		if (m->dump_size == DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER) {
			char elfmag[SELFMAG];

			if (copy_from_user(elfmag, (void __user *)m->start, SELFMAG) ||
			    memcmp(elfmag, ELFMAG, SELFMAG) != 0)
				m->dump_size = 0;
			else
				m->dump_size = PAGE_SIZE;
		}

		cprm->vma_data_size += m->dump_size;
	}

	if (core_sort_vma)
		sort(cprm->vma_meta, cprm->vma_count, sizeof(*cprm->vma_meta),
		     cmp_vma_size, NULL);

	return true;
}