// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/sort.h>
#include <linux/sched/coredump.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/path.h>
#include <linux/timekeeping.h>
#include <linux/sysctl.h>
#include <linux/elf.h>
#include <linux/pidfs.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <net/af_unix.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <uapi/linux/pidfd.h>
#include <uapi/linux/un.h>
#include <uapi/linux/coredump.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/exec.h>

#include <trace/events/task.h>
#include "internal.h"

#include <trace/events/sched.h>

static bool dump_vma_snapshot(struct coredump_params *cprm);
static void free_vma_snapshot(struct coredump_params *cprm);

#define CORE_FILE_NOTE_SIZE_DEFAULT (4*1024*1024)
/* Define a reasonable max cap */
#define CORE_FILE_NOTE_SIZE_MAX (16*1024*1024)

/*
 * File descriptor number for the pidfd for the thread-group leader of
 * the coredumping task installed into the usermode helper's file
 * descriptor table.
 */
#define COREDUMP_PIDFD_NUMBER 3
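
/*
 * Illustrative sketch (not part of the kernel): a usermode helper spawned
 * for a core_pattern of "|/usr/local/sbin/core-helper %F" reads the core
 * image from stdin (fd 0, see umh_coredump_setup()) and finds a pidfd for
 * the crashing thread-group leader at COREDUMP_PIDFD_NUMBER. The helper
 * path and output file here are hypothetical:
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		ssize_t n;
 *		FILE *out = fopen("/var/crash/core", "w");
 *
 *		if (!out)
 *			return 1;
 *		// fd 0 is the pipe carrying the core image.
 *		while ((n = read(0, buf, sizeof(buf))) > 0)
 *			fwrite(buf, 1, n, out);
 *		fclose(out);
 *		// fd 3 is the pidfd; unlike a raw pid it cannot be
 *		// recycled, so it is safe for waitid(P_PIDFD, ...) or
 *		// as a guard when opening /proc/<pid>.
 *		close(3);
 *		return 0;
 *	}
 */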

static int core_uses_pid;
static unsigned int core_pipe_limit;
static unsigned int core_sort_vma;
static char core_pattern[CORENAME_MAX_SIZE] = "core";
static int core_name_size = CORENAME_MAX_SIZE;
unsigned int core_file_note_size_limit = CORE_FILE_NOTE_SIZE_DEFAULT;
static atomic_t core_pipe_count = ATOMIC_INIT(0);

enum coredump_type_t {
	COREDUMP_FILE = 1,
	COREDUMP_PIPE = 2,
	COREDUMP_SOCK = 3,
	COREDUMP_SOCK_REQ = 4,
};

struct core_name {
	char *corename;
	int used, size;
	unsigned int core_pipe_limit;
	bool core_dumped;
	enum coredump_type_t core_type;
	u64 mask;
};
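
/*
 * Worked example of how the first character(s) of core_pattern select the
 * core_type in coredump_parse() below (paths are illustrative):
 *
 *	"core"				-> COREDUMP_FILE (relative to cwd)
 *	"/var/crash/core.%p"		-> COREDUMP_FILE (absolute path)
 *	"|/usr/bin/core-helper %p"	-> COREDUMP_PIPE (usermode helper)
 *	"@/run/coredump.socket"		-> COREDUMP_SOCK (AF_UNIX socket)
 *	"@@/run/coredump.socket"	-> COREDUMP_SOCK_REQ (socket with
 *					   request/ack protocol)
 */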

static int expand_corename(struct core_name *cn, int size)
{
	char *corename;

	size = kmalloc_size_roundup(size);
	corename = krealloc(cn->corename, size, GFP_KERNEL);

	if (!corename)
		return -ENOMEM;

	if (size > core_name_size) /* racy but harmless */
		core_name_size = size;

	cn->size = size;
	cn->corename = corename;
	return 0;
}

static __printf(2, 0) int cn_vprintf(struct core_name *cn, const char *fmt,
				     va_list arg)
{
	int free, need;
	va_list arg_copy;

again:
	free = cn->size - cn->used;

	va_copy(arg_copy, arg);
	need = vsnprintf(cn->corename + cn->used, free, fmt, arg_copy);
	va_end(arg_copy);

	if (need < free) {
		cn->used += need;
		return 0;
	}

	if (!expand_corename(cn, cn->size + need - free + 1))
		goto again;

	return -ENOMEM;
}

static __printf(2, 3) int cn_printf(struct core_name *cn, const char *fmt, ...)
{
	va_list arg;
	int ret;

	va_start(arg, fmt);
	ret = cn_vprintf(cn, fmt, arg);
	va_end(arg);

	return ret;
}

static __printf(2, 3)
int cn_esc_printf(struct core_name *cn, const char *fmt, ...)
{
	int cur = cn->used;
	va_list arg;
	int ret;

	va_start(arg, fmt);
	ret = cn_vprintf(cn, fmt, arg);
	va_end(arg);

	if (ret == 0) {
		/*
		 * Ensure that this coredump name component can't cause the
		 * resulting corefile path to consist of a ".." or ".".
		 */
		if ((cn->used - cur == 1 && cn->corename[cur] == '.') ||
		    (cn->used - cur == 2 && cn->corename[cur] == '.'
				&& cn->corename[cur+1] == '.'))
			cn->corename[cur] = '!';

		/*
		 * Empty names are fishy and could be used to create a "//" in a
		 * corefile name, causing the coredump to happen one directory
		 * level too high. Enforce that all components of the core
		 * pattern are at least one character long.
		 */
		if (cn->used == cur)
			ret = cn_printf(cn, "!");
	}

	for (; cur < cn->used; ++cur) {
		if (cn->corename[cur] == '/')
			cn->corename[cur] = '!';
	}
	return ret;
}
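
/*
 * Example of the escaping done by cn_esc_printf(): a component of
 * "my/comm" is emitted as "my!comm", "." becomes "!", ".." becomes "!.",
 * and an empty component becomes "!", so no template expansion can add a
 * path separator or a dot-directory to the corefile path.
 */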

static int cn_print_exe_file(struct core_name *cn, bool name_only)
{
	struct file *exe_file;
	char *pathbuf, *path, *ptr;
	int ret;

	exe_file = get_mm_exe_file(current->mm);
	if (!exe_file)
		return cn_esc_printf(cn, "%s (path unknown)", current->comm);

	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!pathbuf) {
		ret = -ENOMEM;
		goto put_exe_file;
	}

	path = file_path(exe_file, pathbuf, PATH_MAX);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		goto free_buf;
	}

	if (name_only) {
		ptr = strrchr(path, '/');
		if (ptr)
			path = ptr + 1;
	}
	ret = cn_esc_printf(cn, "%s", path);

free_buf:
	kfree(pathbuf);
put_exe_file:
	fput(exe_file);
	return ret;
}

/*
 * coredump_parse will inspect the pattern parameter, and output a name
 * into corename, which must have space for at least CORENAME_MAX_SIZE
 * bytes plus one byte for the zero terminator.
 */
static bool coredump_parse(struct core_name *cn, struct coredump_params *cprm,
			   size_t **argv, int *argc)
{
	const struct cred *cred = current_cred();
	const char *pat_ptr = core_pattern;
	bool was_space = false;
	int pid_in_pattern = 0;
	int err = 0;

	cn->mask = COREDUMP_KERNEL;
	if (core_pipe_limit)
		cn->mask |= COREDUMP_WAIT;
	cn->used = 0;
	cn->corename = NULL;
	cn->core_pipe_limit = 0;
	cn->core_dumped = false;
	if (*pat_ptr == '|')
		cn->core_type = COREDUMP_PIPE;
	else if (*pat_ptr == '@')
		cn->core_type = COREDUMP_SOCK;
	else
		cn->core_type = COREDUMP_FILE;
	if (expand_corename(cn, core_name_size))
		return false;
	cn->corename[0] = '\0';

	switch (cn->core_type) {
	case COREDUMP_PIPE: {
		int argvs = sizeof(core_pattern) / 2;

		(*argv) = kmalloc_array(argvs, sizeof(**argv), GFP_KERNEL);
		if (!(*argv))
			return false;
		(*argv)[(*argc)++] = 0;
		++pat_ptr;
		if (!(*pat_ptr))
			return false;
		break;
	}
	case COREDUMP_SOCK: {
		/* skip the @ */
		pat_ptr++;
		if (!(*pat_ptr))
			return false;
		if (*pat_ptr == '@') {
			pat_ptr++;
			if (!(*pat_ptr))
				return false;

			cn->core_type = COREDUMP_SOCK_REQ;
		}

		err = cn_printf(cn, "%s", pat_ptr);
		if (err)
			return false;

		/* Require absolute paths. */
		if (cn->corename[0] != '/')
			return false;

		/*
		 * Ensure we can use spaces to indicate additional
		 * parameters in the future.
		 */
		if (strchr(cn->corename, ' ')) {
			coredump_report_failure("Coredump socket path %s must not contain spaces", cn->corename);
			return false;
		}

		/* Must not contain ".." in the path. */
		if (name_contains_dotdot(cn->corename)) {
			coredump_report_failure("Coredump socket path %s must not contain '..'", cn->corename);
			return false;
		}

		if (strlen(cn->corename) >= UNIX_PATH_MAX) {
			coredump_report_failure("Coredump socket path %s too long", cn->corename);
			return false;
		}

		/*
		 * Currently no need to parse any other options.
		 * Relevant information can be retrieved from the peer
		 * pidfd retrievable via SO_PEERPIDFD by the receiver or
		 * via /proc/<pid>, using the SO_PEERPIDFD to guard
		 * against pid recycling when opening /proc/<pid>.
		 */
		return true;
	}
	case COREDUMP_FILE:
		break;
	default:
		WARN_ON_ONCE(true);
		return false;
	}

	/*
	 * Repeat as long as we have more pattern to process and more
	 * output space.
	 */
	while (*pat_ptr) {
		/*
		 * Split on spaces before doing template expansion so that
		 * %e and %E don't get split if they have spaces in them.
		 */
		if (cn->core_type == COREDUMP_PIPE) {
			if (isspace(*pat_ptr)) {
				if (cn->used != 0)
					was_space = true;
				pat_ptr++;
				continue;
			} else if (was_space) {
				was_space = false;
				err = cn_printf(cn, "%c", '\0');
				if (err)
					return false;
				(*argv)[(*argc)++] = cn->used;
			}
		}
		if (*pat_ptr != '%') {
			err = cn_printf(cn, "%c", *pat_ptr++);
		} else {
			switch (*++pat_ptr) {
			/* single % at the end, drop that */
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				err = cn_printf(cn, "%c", '%');
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				err = cn_printf(cn, "%d",
						task_tgid_vnr(current));
				break;
			/* global pid */
			case 'P':
				err = cn_printf(cn, "%d",
						task_tgid_nr(current));
				break;
			case 'i':
				err = cn_printf(cn, "%d",
						task_pid_vnr(current));
				break;
			case 'I':
				err = cn_printf(cn, "%d",
						task_pid_nr(current));
				break;
			/* uid */
			case 'u':
				err = cn_printf(cn, "%u",
						from_kuid(&init_user_ns,
							  cred->uid));
				break;
			/* gid */
			case 'g':
				err = cn_printf(cn, "%u",
						from_kgid(&init_user_ns,
							  cred->gid));
				break;
			case 'd':
				err = cn_printf(cn, "%d",
						__get_dumpable(cprm->mm_flags));
				break;
			/* signal that caused the coredump */
			case 's':
				err = cn_printf(cn, "%d",
						cprm->siginfo->si_signo);
				break;
			/* UNIX time of coredump */
			case 't': {
				time64_t time;

				time = ktime_get_real_seconds();
				err = cn_printf(cn, "%lld", time);
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				err = cn_esc_printf(cn, "%s",
						    utsname()->nodename);
				up_read(&uts_sem);
				break;
			/* executable, could be changed by prctl PR_SET_NAME etc */
			case 'e':
				err = cn_esc_printf(cn, "%s", current->comm);
				break;
			/* file name of executable */
			case 'f':
				err = cn_print_exe_file(cn, true);
				break;
			case 'E':
				err = cn_print_exe_file(cn, false);
				break;
			/* core limit size */
			case 'c':
				err = cn_printf(cn, "%lu",
						rlimit(RLIMIT_CORE));
				break;
			/* CPU the task ran on */
			case 'C':
				err = cn_printf(cn, "%d", cprm->cpu);
				break;
			/* pidfd number */
			case 'F': {
				/*
				 * Installing a pidfd only makes sense if
				 * we actually spawn a usermode helper.
				 */
				if (cn->core_type != COREDUMP_PIPE)
					break;

				/*
				 * Note that we'll install a pidfd for the
				 * thread-group leader. We know that task
				 * linkage hasn't been removed yet and even if
				 * this @current isn't the actual thread-group
				 * leader we know that the thread-group leader
				 * cannot be reaped until @current has exited.
				 */
				cprm->pid = task_tgid(current);
				err = cn_printf(cn, "%d", COREDUMP_PIDFD_NUMBER);
				break;
			}
			default:
				break;
			}
			++pat_ptr;
		}

		if (err)
			return false;
	}

out:
	/*
	 * Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands.
	 */
	if (cn->core_type == COREDUMP_FILE && !pid_in_pattern && core_uses_pid)
		return cn_printf(cn, ".%d", task_tgid_vnr(current)) == 0;

	return true;
}

static int zap_process(struct signal_struct *signal, int exit_code)
{
	struct task_struct *t;
	int nr = 0;

	signal->flags = SIGNAL_GROUP_EXIT;
	signal->group_exit_code = exit_code;
	signal->group_stop_count = 0;

	__for_each_thread(signal, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		if (t != current && !(t->flags & PF_POSTCOREDUMP)) {
			sigaddset(&t->pending.signal, SIGKILL);
			signal_wake_up(t, 1);
			nr++;
		}
	}

	return nr;
}

static int zap_threads(struct task_struct *tsk,
		       struct core_state *core_state, int exit_code)
{
	struct signal_struct *signal = tsk->signal;
	int nr = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!(signal->flags & SIGNAL_GROUP_EXIT) && !signal->group_exec_task) {
		/* Allow SIGKILL, see prepare_signal() */
		signal->core_state = core_state;
		nr = zap_process(signal, exit_code);
		clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
		tsk->flags |= PF_DUMPCORE;
		atomic_set(&core_state->nr_threads, nr);
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	return nr;
}

static int coredump_wait(int exit_code, struct core_state *core_state)
{
	struct task_struct *tsk = current;
	int core_waiters = -EBUSY;

	init_completion(&core_state->startup);
	core_state->dumper.task = tsk;
	core_state->dumper.next = NULL;

	core_waiters = zap_threads(tsk, core_state, exit_code);
	if (core_waiters > 0) {
		struct core_thread *ptr;

		wait_for_completion_state(&core_state->startup,
					  TASK_UNINTERRUPTIBLE|TASK_FREEZABLE);
		/*
		 * Wait for all the threads to become inactive, so that
		 * all the thread context (extended register state, like
		 * fpu etc) gets copied to the memory.
		 */
		ptr = core_state->dumper.next;
		while (ptr != NULL) {
			wait_task_inactive(ptr->task, TASK_ANY);
			ptr = ptr->next;
		}
	}

	return core_waiters;
}

static void coredump_finish(bool core_dumped)
{
	struct core_thread *curr, *next;
	struct task_struct *task;

	spin_lock_irq(&current->sighand->siglock);
	if (core_dumped && !__fatal_signal_pending(current))
		current->signal->group_exit_code |= 0x80;
	next = current->signal->core_state->dumper.next;
	current->signal->core_state = NULL;
	spin_unlock_irq(&current->sighand->siglock);

	while ((curr = next) != NULL) {
		next = curr->next;
		task = curr->task;
		/*
		 * see coredump_task_exit(), curr->task must not see
		 * ->task == NULL before we read ->next.
		 */
		smp_mb();
		curr->task = NULL;
		wake_up_process(task);
	}
}

static bool dump_interrupted(void)
{
	/*
	 * SIGKILL or freezing() interrupt the coredumping. Perhaps we
	 * can do try_to_freeze() and check __fatal_signal_pending(),
	 * but then we need to teach dump_write() to restart and clear
	 * TIF_SIGPENDING.
	 */
	return fatal_signal_pending(current) || freezing(current);
}
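
/*
 * Illustrative expansion (values are made up): with core_pattern set to
 * "/var/crash/core.%e.%p.%t", a crash of a task with comm "myapp" and
 * tgid 1234 at UNIX time 1700000000 parses into
 *
 *	/var/crash/core.myapp.1234.1700000000
 *
 * With a plain "core" pattern and core_uses_pid set, ".1234" is appended,
 * giving "core.1234".
 */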

static void wait_for_dump_helpers(struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	pipe_lock(pipe);
	pipe->readers++;
	pipe->writers--;
	wake_up_interruptible_sync(&pipe->rd_wait);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	pipe_unlock(pipe);

	/*
	 * We actually want wait_event_freezable() but then we need
	 * to clear TIF_SIGPENDING and improve dump_interrupted().
	 */
	wait_event_interruptible(pipe->rd_wait, pipe->readers == 1);

	pipe_lock(pipe);
	pipe->readers--;
	pipe->writers++;
	pipe_unlock(pipe);
}

/*
 * umh_coredump_setup
 * helper function to customize the process used
 * to collect the core in userspace. Specifically
 * it sets up a pipe and installs it as fd 0 (stdin)
 * for the process. Returns 0 on success, or a
 * negative error code on failure.
 * Note that it also sets the core limit to 1. This
 * is a special value that we use to trap recursive
 * core dumps.
 */
static int umh_coredump_setup(struct subprocess_info *info, struct cred *new)
{
	struct file *files[2];
	struct coredump_params *cp = (struct coredump_params *)info->data;
	int err;

	if (cp->pid) {
		struct file *pidfs_file __free(fput) = NULL;

		pidfs_file = pidfs_alloc_file(cp->pid, 0);
		if (IS_ERR(pidfs_file))
			return PTR_ERR(pidfs_file);

		pidfs_coredump(cp);

		/*
		 * Usermode helpers are children of either
		 * system_unbound_wq or of kthreadd. So we know that
		 * we're starting off with a clean file descriptor
		 * table. So we should always be able to use
		 * COREDUMP_PIDFD_NUMBER as our file descriptor value.
		 */
		err = replace_fd(COREDUMP_PIDFD_NUMBER, pidfs_file, 0);
		if (err < 0)
			return err;
	}

	err = create_pipe_files(files, 0);
	if (err)
		return err;

	cp->file = files[1];

	err = replace_fd(0, files[0], 0);
	fput(files[0]);
	if (err < 0)
		return err;

	/* and disallow core files too */
	current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};

	return 0;
}

#ifdef CONFIG_UNIX
static bool coredump_sock_connect(struct core_name *cn, struct coredump_params *cprm)
{
	struct file *file __free(fput) = NULL;
	struct sockaddr_un addr = {
		.sun_family = AF_UNIX,
	};
	ssize_t addr_len;
	int retval;
	struct socket *socket;

	addr_len = strscpy(addr.sun_path, cn->corename);
	if (addr_len < 0)
		return false;
	addr_len += offsetof(struct sockaddr_un, sun_path) + 1;

	/*
	 * It is possible that the userspace process which is supposed
	 * to handle the coredump and is listening on the AF_UNIX socket
	 * coredumps. Userspace should just mark itself non-dumpable.
	 */

	retval = sock_create_kern(&init_net, AF_UNIX, SOCK_STREAM, 0, &socket);
	if (retval < 0)
		return false;

	file = sock_alloc_file(socket, 0, NULL);
	if (IS_ERR(file))
		return false;

	/*
	 * Set the thread-group leader pid which is used for the peer
	 * credentials during connect() below. Then immediately register
	 * it in pidfs...
	 */
	cprm->pid = task_tgid(current);
	retval = pidfs_register_pid(cprm->pid);
	if (retval)
		return false;

	/*
	 * ... and set the coredump information so userspace has it
	 * available after connect()...
	 */
	pidfs_coredump(cprm);

	retval = kernel_connect(socket, (struct sockaddr *)(&addr), addr_len,
				O_NONBLOCK | SOCK_COREDUMP);

	if (retval) {
		if (retval == -EAGAIN)
			coredump_report_failure("Coredump socket %s receive queue full", addr.sun_path);
		else
			coredump_report_failure("Coredump socket connection %s failed %d", addr.sun_path, retval);
		return false;
	}

	/* ... and validate that @sk_peer_pid matches @cprm.pid. */
	if (WARN_ON_ONCE(unix_peer(socket->sk)->sk_peer_pid != cprm->pid))
		return false;

	cprm->limit = RLIM_INFINITY;
	cprm->file = no_free_ptr(file);

	return true;
}
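
/*
 * Minimal sketch of the userspace side (hypothetical, error handling
 * omitted): a coredump server listens on the AF_UNIX socket named by
 * core_pattern ("@/run/coredump.socket"), accepts one connection per
 * crash, and may fetch a pidfd for the crashing task via SO_PEERPIDFD:
 *
 *	#include <sys/socket.h>
 *	#include <sys/un.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int serve(void)
 *	{
 *		struct sockaddr_un addr = { .sun_family = AF_UNIX };
 *		int srv = socket(AF_UNIX, SOCK_STREAM, 0);
 *		int con, pidfd;
 *		socklen_t len = sizeof(pidfd);
 *		char buf[4096];
 *
 *		strcpy(addr.sun_path, "/run/coredump.socket");
 *		bind(srv, (struct sockaddr *)&addr, sizeof(addr));
 *		listen(srv, 128);
 *		con = accept(srv, NULL, NULL);
 *		getsockopt(con, SOL_SOCKET, SO_PEERPIDFD, &pidfd, &len);
 *		while (read(con, buf, sizeof(buf)) > 0)
 *			;	// consume the core image
 *		close(con);
 *		return 0;
 *	}
 */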

static inline bool coredump_sock_recv(struct file *file, struct coredump_ack *ack, size_t size, int flags)
{
	struct msghdr msg = {};
	struct kvec iov = { .iov_base = ack, .iov_len = size };
	ssize_t ret;

	memset(ack, 0, size);
	ret = kernel_recvmsg(sock_from_file(file), &msg, &iov, 1, size, flags);
	return ret == size;
}

static inline bool coredump_sock_send(struct file *file, struct coredump_req *req)
{
	struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
	struct kvec iov = { .iov_base = req, .iov_len = sizeof(*req) };
	ssize_t ret;

	ret = kernel_sendmsg(sock_from_file(file), &msg, &iov, 1, sizeof(*req));
	return ret == sizeof(*req);
}

static_assert(sizeof(enum coredump_mark) == sizeof(__u32));

static inline bool coredump_sock_mark(struct file *file, enum coredump_mark mark)
{
	struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
	struct kvec iov = { .iov_base = &mark, .iov_len = sizeof(mark) };
	ssize_t ret;

	ret = kernel_sendmsg(sock_from_file(file), &msg, &iov, 1, sizeof(mark));
	return ret == sizeof(mark);
}

static inline void coredump_sock_wait(struct file *file)
{
	ssize_t n;

	/*
	 * We use a simple read to wait for the coredump processing to
	 * finish. Either the socket is closed or we get sent unexpected
	 * data. In both cases, we're done.
	 */
	n = __kernel_read(file, &(char){ 0 }, 1, NULL);
	if (n > 0)
		coredump_report_failure("Coredump socket had unexpected data");
	else if (n < 0)
		coredump_report_failure("Coredump socket failed");
}

static inline void coredump_sock_shutdown(struct file *file)
{
	struct socket *socket;

	socket = sock_from_file(file);
	if (!socket)
		return;

	/* Let userspace know we're done processing the coredump. */
	kernel_sock_shutdown(socket, SHUT_WR);
}
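
/*
 * Sketch of the userspace half of the request protocol handled by
 * coredump_sock_request() below, assuming the uapi definitions from
 * <linux/coredump.h> (error handling elided; "con" is the accepted
 * connection from the server sketch above):
 *
 *	struct coredump_req req;
 *	struct coredump_ack ack = {};
 *	__u32 mark;
 *
 *	recv(con, &req, sizeof(req), MSG_WAITALL);
 *	ack.size = sizeof(ack);
 *	// Pick exactly one of COREDUMP_KERNEL/COREDUMP_USERSPACE/
 *	// COREDUMP_REJECT; COREDUMP_WAIT may be or'ed in.
 *	ack.mask = COREDUMP_KERNEL | COREDUMP_WAIT;
 *	send(con, &ack, sizeof(ack), MSG_NOSIGNAL);
 *	recv(con, &mark, sizeof(mark), MSG_WAITALL);
 *	// Expect COREDUMP_MARK_REQACK before the core image follows.
 */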

static bool coredump_sock_request(struct core_name *cn, struct coredump_params *cprm)
{
	struct coredump_req req = {
		.size = sizeof(struct coredump_req),
		.mask = COREDUMP_KERNEL | COREDUMP_USERSPACE |
			COREDUMP_REJECT | COREDUMP_WAIT,
		.size_ack = sizeof(struct coredump_ack),
	};
	struct coredump_ack ack = {};
	ssize_t usize;

	if (cn->core_type != COREDUMP_SOCK_REQ)
		return true;

	/* Let userspace know what we support. */
	if (!coredump_sock_send(cprm->file, &req))
		return false;

	/* Peek the size of the coredump_ack. */
	if (!coredump_sock_recv(cprm->file, &ack, sizeof(ack.size),
				MSG_PEEK | MSG_WAITALL))
		return false;

	/* Refuse unknown coredump_ack sizes. */
	usize = ack.size;
	if (usize < COREDUMP_ACK_SIZE_VER0) {
		coredump_sock_mark(cprm->file, COREDUMP_MARK_MINSIZE);
		return false;
	}

	if (usize > sizeof(ack)) {
		coredump_sock_mark(cprm->file, COREDUMP_MARK_MAXSIZE);
		return false;
	}

	/* Now retrieve the coredump_ack. */
	if (!coredump_sock_recv(cprm->file, &ack, usize, MSG_WAITALL))
		return false;
	if (ack.size != usize)
		return false;

	/* Refuse unknown coredump_ack flags. */
	if (ack.mask & ~req.mask) {
		coredump_sock_mark(cprm->file, COREDUMP_MARK_UNSUPPORTED);
		return false;
	}

	/* Refuse mutually exclusive options. */
	if (hweight64(ack.mask & (COREDUMP_USERSPACE | COREDUMP_KERNEL |
				  COREDUMP_REJECT)) != 1) {
		coredump_sock_mark(cprm->file, COREDUMP_MARK_CONFLICTING);
		return false;
	}

	if (ack.spare) {
		coredump_sock_mark(cprm->file, COREDUMP_MARK_UNSUPPORTED);
		return false;
	}

	cn->mask = ack.mask;
	return coredump_sock_mark(cprm->file, COREDUMP_MARK_REQACK);
}

static bool coredump_socket(struct core_name *cn, struct coredump_params *cprm)
{
	if (!coredump_sock_connect(cn, cprm))
		return false;

	return coredump_sock_request(cn, cprm);
}
#else
static inline void coredump_sock_wait(struct file *file) { }
static inline void coredump_sock_shutdown(struct file *file) { }
static inline bool coredump_socket(struct core_name *cn, struct coredump_params *cprm) { return false; }
#endif

/* cprm->mm_flags contains a stable snapshot of dumpability flags. */
static inline bool coredump_force_suid_safe(const struct coredump_params *cprm)
{
	/* Require nonrelative corefile path and be extra careful. */
	return __get_dumpable(cprm->mm_flags) == SUID_DUMP_ROOT;
}

static bool coredump_file(struct core_name *cn, struct coredump_params *cprm,
			  const struct linux_binfmt *binfmt)
{
	struct mnt_idmap *idmap;
	struct inode *inode;
	struct file *file __free(fput) = NULL;
	int open_flags = O_CREAT | O_WRONLY | O_NOFOLLOW | O_LARGEFILE | O_EXCL;

	if (cprm->limit < binfmt->min_coredump)
		return false;

	if (coredump_force_suid_safe(cprm) && cn->corename[0] != '/') {
		coredump_report_failure("this process can only dump core to a fully qualified path, skipping core dump");
		return false;
	}

	/*
	 * Unlink the file if it exists unless this is a SUID
	 * binary - in that case, we're running around with root
	 * privs and don't want to unlink another user's coredump.
	 */
	if (!coredump_force_suid_safe(cprm)) {
		/*
		 * If it doesn't exist, that's fine. If there's some
		 * other problem, we'll catch it at the filp_open().
		 */
		do_unlinkat(AT_FDCWD, getname_kernel(cn->corename));
	}

	/*
	 * There is a race between unlinking and creating the
	 * file, but if that causes an EEXIST here, that's
	 * fine - another process raced with us while creating
	 * the corefile, and the other process won. To userspace,
	 * what matters is that at least one of the two processes
	 * writes its coredump successfully, not which one.
	 */
	if (coredump_force_suid_safe(cprm)) {
		/*
		 * Using user namespaces, normal user tasks can change
		 * their current->fs->root to point to arbitrary
		 * directories. Since the intention of the "only dump
		 * with a fully qualified path" rule is to control where
		 * coredumps may be placed using root privileges,
		 * current->fs->root must not be used. Instead, use the
		 * root directory of init_task.
		 */
		struct path root;

		task_lock(&init_task);
		get_fs_root(init_task.fs, &root);
		task_unlock(&init_task);
		file = file_open_root(&root, cn->corename, open_flags, 0600);
		path_put(&root);
	} else {
		file = filp_open(cn->corename, open_flags, 0600);
	}
	if (IS_ERR(file))
		return false;

	inode = file_inode(file);
	if (inode->i_nlink > 1)
		return false;
	if (d_unhashed(file->f_path.dentry))
		return false;
	/*
	 * AK: actually i see no reason to not allow this for named
	 * pipes etc, but keep the previous behaviour for now.
	 */
	if (!S_ISREG(inode->i_mode))
		return false;
	/*
	 * Don't dump core if the filesystem changed owner or mode
	 * of the file during file creation. This is an issue when
	 * a process dumps core while its cwd is e.g. on a vfat
	 * filesystem.
	 */
	idmap = file_mnt_idmap(file);
	if (!vfsuid_eq_kuid(i_uid_into_vfsuid(idmap, inode), current_fsuid())) {
		coredump_report_failure("Core dump to %s aborted: cannot preserve file owner", cn->corename);
		return false;
	}
	if ((inode->i_mode & 0677) != 0600) {
		coredump_report_failure("Core dump to %s aborted: cannot preserve file permissions", cn->corename);
		return false;
	}
	if (!(file->f_mode & FMODE_CAN_WRITE))
		return false;
	if (do_truncate(idmap, file->f_path.dentry, 0, 0, file))
		return false;

	cprm->file = no_free_ptr(file);
	return true;
}

static bool coredump_pipe(struct core_name *cn, struct coredump_params *cprm,
			  size_t *argv, int argc)
{
	int argi;
	char **helper_argv __free(kfree) = NULL;
	struct subprocess_info *sub_info;

	if (cprm->limit == 1) {
		/* See umh_coredump_setup() which sets RLIMIT_CORE = 1.
		 *
		 * Normally core limits are irrelevant to pipes, since
		 * we're not writing to the file system, but we use
		 * cprm.limit of 1 here as a special value, this is a
		 * consistent way to catch recursive crashes.
		 * We can still crash if the core_pattern binary sets
		 * RLIM_CORE = !1, but it runs as root, and can do
		 * lots of stupid things.
		 *
		 * Note that we use task_tgid_vnr here to grab the pid
		 * of the process group leader. That way we get the
		 * right pid if a thread in a multi-threaded
		 * core_pattern process dies.
		 */
		coredump_report_failure("RLIMIT_CORE is set to 1, aborting core");
		return false;
	}
	cprm->limit = RLIM_INFINITY;

	cn->core_pipe_limit = atomic_inc_return(&core_pipe_count);
	if (core_pipe_limit && (core_pipe_limit < cn->core_pipe_limit)) {
		coredump_report_failure("over core_pipe_limit, skipping core dump");
		return false;
	}

	helper_argv = kmalloc_array(argc + 1, sizeof(*helper_argv), GFP_KERNEL);
	if (!helper_argv) {
		coredump_report_failure("%s failed to allocate memory", __func__);
		return false;
	}
	for (argi = 0; argi < argc; argi++)
		helper_argv[argi] = cn->corename + argv[argi];
	helper_argv[argi] = NULL;

	sub_info = call_usermodehelper_setup(helper_argv[0], helper_argv, NULL,
					     GFP_KERNEL, umh_coredump_setup,
					     NULL, cprm);
	if (!sub_info)
		return false;

	if (call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC)) {
		coredump_report_failure("|%s pipe failed", cn->corename);
		return false;
	}

	/*
	 * umh disabled with CONFIG_STATIC_USERMODEHELPER_PATH="" would
	 * have this set to NULL.
	 */
	if (!cprm->file) {
		coredump_report_failure("Core dump to |%s disabled", cn->corename);
		return false;
	}

	return true;
}
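
/*
 * Worked example of the accounting above: with kernel.core_pipe_limit set
 * to 2, a third crash arriving while two helpers are still running sees
 * atomic_inc_return() yield 3 > 2 and the dump is skipped. A limit of 0
 * disables the cap, but then coredump_parse() also does not set
 * COREDUMP_WAIT, so the kernel does not wait for the helper to finish.
 */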

static bool coredump_write(struct core_name *cn,
			   struct coredump_params *cprm,
			   struct linux_binfmt *binfmt)
{
	if (dump_interrupted())
		return true;

	if (!dump_vma_snapshot(cprm))
		return false;

	file_start_write(cprm->file);
	cn->core_dumped = binfmt->core_dump(cprm);
	/*
	 * Ensure that the file size is big enough to contain the current
	 * file position. This prevents gdb from complaining about
	 * a truncated file if the last "write" to the file was
	 * dump_skip.
	 */
	if (cprm->to_skip) {
		cprm->to_skip--;
		dump_emit(cprm, "", 1);
	}
	file_end_write(cprm->file);
	free_vma_snapshot(cprm);
	return true;
}

static void coredump_cleanup(struct core_name *cn, struct coredump_params *cprm)
{
	if (cprm->file)
		filp_close(cprm->file, NULL);
	if (cn->core_pipe_limit) {
		VFS_WARN_ON_ONCE(cn->core_type != COREDUMP_PIPE);
		atomic_dec(&core_pipe_count);
	}
	kfree(cn->corename);
	coredump_finish(cn->core_dumped);
}

static inline bool coredump_skip(const struct coredump_params *cprm,
				 const struct linux_binfmt *binfmt)
{
	if (!binfmt)
		return true;
	if (!binfmt->core_dump)
		return true;
	if (!__get_dumpable(cprm->mm_flags))
		return true;
	return false;
}

void vfs_coredump(const kernel_siginfo_t *siginfo)
{
	struct cred *cred __free(put_cred) = NULL;
	size_t *argv __free(kfree) = NULL;
	struct core_state core_state;
	struct core_name cn;
	struct mm_struct *mm = current->mm;
	struct linux_binfmt *binfmt = mm->binfmt;
	const struct cred *old_cred;
	int argc = 0;
	struct coredump_params cprm = {
		.siginfo = siginfo,
		.limit = rlimit(RLIMIT_CORE),
		/*
		 * We must use the same mm->flags while dumping core to avoid
		 * inconsistency of bit flags, since this flag is not protected
		 * by any locks.
		 *
		 * Note that we only care about MMF_DUMP* flags.
		 */
		.mm_flags = __mm_flags_get_dumpable(mm),
		.vma_meta = NULL,
		.cpu = raw_smp_processor_id(),
	};

	audit_core_dumps(siginfo->si_signo);

	if (coredump_skip(&cprm, binfmt))
		return;

	cred = prepare_creds();
	if (!cred)
		return;
	/*
	 * We cannot trust fsuid as being the "true" uid of the process
	 * nor do we know its entire history. We only know it was tainted
	 * so we dump it as root in mode 2, and only into a controlled
	 * environment (pipe handler or fully qualified path).
	 */
	if (coredump_force_suid_safe(&cprm))
		cred->fsuid = GLOBAL_ROOT_UID;

	if (coredump_wait(siginfo->si_signo, &core_state) < 0)
		return;

	old_cred = override_creds(cred);

	if (!coredump_parse(&cn, &cprm, &argv, &argc)) {
		coredump_report_failure("format_corename failed, aborting core");
		goto close_fail;
	}

	switch (cn.core_type) {
	case COREDUMP_FILE:
		if (!coredump_file(&cn, &cprm, binfmt))
			goto close_fail;
		break;
	case COREDUMP_PIPE:
		if (!coredump_pipe(&cn, &cprm, argv, argc))
			goto close_fail;
		break;
	case COREDUMP_SOCK_REQ:
		fallthrough;
	case COREDUMP_SOCK:
		if (!coredump_socket(&cn, &cprm))
			goto close_fail;
		break;
	default:
		WARN_ON_ONCE(true);
		goto close_fail;
	}

	/* Don't even generate the coredump. */
	if (cn.mask & COREDUMP_REJECT)
		goto close_fail;

	/* get us an unshared descriptor table; almost always a no-op */
	/* The cell spufs coredump code reads the file descriptor tables */
	if (unshare_files())
		goto close_fail;

	if ((cn.mask & COREDUMP_KERNEL) && !coredump_write(&cn, &cprm, binfmt))
		goto close_fail;

	coredump_sock_shutdown(cprm.file);

	/* Let the parent know that a coredump was generated. */
	if (cn.mask & COREDUMP_USERSPACE)
		cn.core_dumped = true;

	/*
	 * When core_pipe_limit is set we wait for the coredump server
	 * or usermodehelper to finish before exiting so it can e.g.,
	 * inspect /proc/<pid>.
	 */
	if (cn.mask & COREDUMP_WAIT) {
		switch (cn.core_type) {
		case COREDUMP_PIPE:
			wait_for_dump_helpers(cprm.file);
			break;
		case COREDUMP_SOCK_REQ:
			fallthrough;
		case COREDUMP_SOCK:
			coredump_sock_wait(cprm.file);
			break;
		default:
			break;
		}
	}

close_fail:
	coredump_cleanup(&cn, &cprm);
	revert_creds(old_cred);
	return;
}

/*
 * Core dumping helper functions. These are the only things you should
 * do on a core-file: use only these functions to write out all the
 * necessary info.
 */
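
/*
 * Hypothetical sketch of a binfmt ->core_dump() implementation using these
 * helpers (the magic value and the vma_start/vma_len placeholders are made
 * up; the real users are the ELF binfmt writers):
 *
 *	static int example_core_dump(struct coredump_params *cprm)
 *	{
 *		u32 magic = 0xc0dedead;
 *
 *		// Emit a small header, then align the data that follows.
 *		if (!dump_emit(cprm, &magic, sizeof(magic)))
 *			return 0;
 *		if (!dump_align(cprm, PAGE_SIZE))
 *			return 0;
 *		// dump_skip()/dump_skip_to() only record the gap; the
 *		// seek or zero-fill happens on the next dump_emit().
 *		return dump_user_range(cprm, vma_start, vma_len);
 *	}
 */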

static int __dump_emit(struct coredump_params *cprm, const void *addr, int nr)
{
	struct file *file = cprm->file;
	loff_t pos = file->f_pos;
	ssize_t n;

	if (cprm->written + nr > cprm->limit)
		return 0;
	if (dump_interrupted())
		return 0;
	n = __kernel_write(file, addr, nr, &pos);
	if (n != nr)
		return 0;
	file->f_pos = pos;
	cprm->written += n;
	cprm->pos += n;

	return 1;
}

static int __dump_skip(struct coredump_params *cprm, size_t nr)
{
	static char zeroes[PAGE_SIZE];
	struct file *file = cprm->file;

	if (file->f_mode & FMODE_LSEEK) {
		if (dump_interrupted() || vfs_llseek(file, nr, SEEK_CUR) < 0)
			return 0;
		cprm->pos += nr;
		return 1;
	}

	while (nr > PAGE_SIZE) {
		if (!__dump_emit(cprm, zeroes, PAGE_SIZE))
			return 0;
		nr -= PAGE_SIZE;
	}

	return __dump_emit(cprm, zeroes, nr);
}

int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
{
	if (cprm->to_skip) {
		if (!__dump_skip(cprm, cprm->to_skip))
			return 0;
		cprm->to_skip = 0;
	}
	return __dump_emit(cprm, addr, nr);
}
EXPORT_SYMBOL(dump_emit);

void dump_skip_to(struct coredump_params *cprm, unsigned long pos)
{
	cprm->to_skip = pos - cprm->pos;
}
EXPORT_SYMBOL(dump_skip_to);

void dump_skip(struct coredump_params *cprm, size_t nr)
{
	cprm->to_skip += nr;
}
EXPORT_SYMBOL(dump_skip);

#ifdef CONFIG_ELF_CORE
static int dump_emit_page(struct coredump_params *cprm, struct page *page)
{
	struct bio_vec bvec;
	struct iov_iter iter;
	struct file *file = cprm->file;
	loff_t pos;
	ssize_t n;

	if (!page)
		return 0;

	if (cprm->to_skip) {
		if (!__dump_skip(cprm, cprm->to_skip))
			return 0;
		cprm->to_skip = 0;
	}
	if (cprm->written + PAGE_SIZE > cprm->limit)
		return 0;
	if (dump_interrupted())
		return 0;
	pos = file->f_pos;
	bvec_set_page(&bvec, page, PAGE_SIZE, 0);
	iov_iter_bvec(&iter, ITER_SOURCE, &bvec, 1, PAGE_SIZE);
	n = __kernel_write_iter(cprm->file, &iter, &pos);
	if (n != PAGE_SIZE)
		return 0;
	file->f_pos = pos;
	cprm->written += PAGE_SIZE;
	cprm->pos += PAGE_SIZE;

	return 1;
}

/*
 * If we might get machine checks from kernel accesses during the
 * core dump, let's get those errors early rather than during the
 * IO. This is not performance-critical enough to warrant having
 * all the machine check logic in the iovec paths.
 */
#ifdef copy_mc_to_kernel

#define dump_page_alloc() alloc_page(GFP_KERNEL)
#define dump_page_free(x) __free_page(x)
static struct page *dump_page_copy(struct page *src, struct page *dst)
{
	void *buf = kmap_local_page(src);
	size_t left = copy_mc_to_kernel(page_address(dst), buf, PAGE_SIZE);
	kunmap_local(buf);
	return left ? NULL : dst;
}

#else

/* We just want to return non-NULL; it's never used. */
#define dump_page_alloc() ERR_PTR(-EINVAL)
#define dump_page_free(x) ((void)(x))
static inline struct page *dump_page_copy(struct page *src, struct page *dst)
{
	return src;
}
#endif

int dump_user_range(struct coredump_params *cprm, unsigned long start,
		    unsigned long len)
{
	unsigned long addr;
	struct page *dump_page;
	int locked, ret;

	dump_page = dump_page_alloc();
	if (!dump_page)
		return 0;

	ret = 0;
	locked = 0;
	for (addr = start; addr < start + len; addr += PAGE_SIZE) {
		struct page *page;

		if (!locked) {
			if (mmap_read_lock_killable(current->mm))
				goto out;
			locked = 1;
		}

		/*
		 * To avoid having to allocate page tables for virtual address
		 * ranges that have never been used yet, and also to make it
		 * easy to generate sparse core files, use a helper that returns
		 * NULL when encountering an empty page table entry that would
		 * otherwise have been filled with the zero page.
		 */
		page = get_dump_page(addr, &locked);
		if (page) {
			if (locked) {
				mmap_read_unlock(current->mm);
				locked = 0;
			}
			int stop = !dump_emit_page(cprm, dump_page_copy(page, dump_page));
			put_page(page);
			if (stop)
				goto out;
		} else {
			dump_skip(cprm, PAGE_SIZE);
		}

		if (dump_interrupted())
			goto out;

		if (!need_resched())
			continue;
		if (locked) {
			mmap_read_unlock(current->mm);
			locked = 0;
		}
		cond_resched();
	}
	ret = 1;
out:
	if (locked)
		mmap_read_unlock(current->mm);

	dump_page_free(dump_page);
	return ret;
}
#endif

int dump_align(struct coredump_params *cprm, int align)
{
	unsigned mod = (cprm->pos + cprm->to_skip) & (align - 1);

	if (align & (align - 1))
		return 0;
	if (mod)
		cprm->to_skip += align - mod;
	return 1;
}
EXPORT_SYMBOL(dump_align);
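
/*
 * Worked example for dump_align(): with cprm->pos + cprm->to_skip at 0x1003
 * and align == 4, mod is 3, so one byte of skip is added and the next
 * dump_emit() lands on a 4-byte boundary. A non-power-of-two align makes
 * (align & (align - 1)) nonzero and the function reports failure instead.
 */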

#ifdef CONFIG_SYSCTL

void validate_coredump_safety(void)
{
	if (suid_dumpable == SUID_DUMP_ROOT &&
	    core_pattern[0] != '/' && core_pattern[0] != '|' && core_pattern[0] != '@') {

		coredump_report_failure("Unsafe core_pattern used with fs.suid_dumpable=2: "
			"pipe handler or fully qualified core dump path required. "
			"Set kernel.core_pattern before fs.suid_dumpable.");
	}
}

static inline bool check_coredump_socket(void)
{
	const char *p;

	if (core_pattern[0] != '@')
		return true;

	/*
	 * Coredump socket must be located in the initial mount
	 * namespace. Don't give the impression that anything else is
	 * supported right now.
	 */
	if (current->nsproxy->mnt_ns != init_task.nsproxy->mnt_ns)
		return false;

	/* Must be an absolute path... */
	if (core_pattern[1] != '/') {
		/* ... or the socket request protocol... */
		if (core_pattern[1] != '@')
			return false;
		/* ... and if so must be an absolute path. */
		if (core_pattern[2] != '/')
			return false;
		p = &core_pattern[2];
	} else {
		p = &core_pattern[1];
	}

	/* The path obviously cannot exceed UNIX_PATH_MAX. */
	if (strlen(p) >= UNIX_PATH_MAX)
		return false;

	/* Must not contain ".." in the path. */
	if (name_contains_dotdot(core_pattern))
		return false;

	return true;
}

static int proc_dostring_coredump(const struct ctl_table *table, int write,
				  void *buffer, size_t *lenp, loff_t *ppos)
{
	int error;
	ssize_t retval;
	char old_core_pattern[CORENAME_MAX_SIZE];

	retval = strscpy(old_core_pattern, core_pattern, CORENAME_MAX_SIZE);

	error = proc_dostring(table, write, buffer, lenp, ppos);
	if (error)
		return error;
	if (!check_coredump_socket()) {
		strscpy(core_pattern, old_core_pattern, retval + 1);
		return -EINVAL;
	}

	validate_coredump_safety();
	return error;
}

static const unsigned int core_file_note_size_min = CORE_FILE_NOTE_SIZE_DEFAULT;
static const unsigned int core_file_note_size_max = CORE_FILE_NOTE_SIZE_MAX;
static char core_modes[] = {
	"file\npipe"
#ifdef CONFIG_UNIX
	"\nsocket"
#endif
};

static const struct ctl_table coredump_sysctls[] = {
	{
		.procname	= "core_uses_pid",
		.data		= &core_uses_pid,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "core_pattern",
		.data		= core_pattern,
		.maxlen		= CORENAME_MAX_SIZE,
		.mode		= 0644,
		.proc_handler	= proc_dostring_coredump,
	},
	{
		.procname	= "core_pipe_limit",
		.data		= &core_pipe_limit,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_INT_MAX,
	},
	{
		.procname	= "core_file_note_size_limit",
		.data		= &core_file_note_size_limit,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_douintvec_minmax,
		.extra1		= (unsigned int *)&core_file_note_size_min,
		.extra2		= (unsigned int *)&core_file_note_size_max,
	},
	{
		.procname	= "core_sort_vma",
		.data		= &core_sort_vma,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_douintvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{
		.procname	= "core_modes",
		.data		= core_modes,
		.maxlen		= sizeof(core_modes) - 1,
		.mode		= 0444,
		.proc_handler	= proc_dostring,
	},
};

static int __init init_fs_coredump_sysctls(void)
{
	register_sysctl_init("kernel", coredump_sysctls);
	return 0;
}
fs_initcall(init_fs_coredump_sysctls);
#endif /* CONFIG_SYSCTL */
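
/*
 * Example sysctl usage (paths and values are illustrative):
 *
 *	# echo '|/usr/bin/core-helper %p %s' > /proc/sys/kernel/core_pattern
 *	# echo 4 > /proc/sys/kernel/core_pipe_limit
 *
 * caps the system at four concurrent pipe helpers and makes the kernel
 * wait for each helper to exit, while
 *
 *	# echo '@@/run/coredump.socket' > /proc/sys/kernel/core_pattern
 *
 * selects the socket request protocol. proc_dostring_coredump() rolls the
 * pattern back and returns -EINVAL if check_coredump_socket() rejects the
 * new value (wrong mount namespace, relative path, or ".." in the path).
 */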

/*
 * The purpose of always_dump_vma() is to make sure that special kernel mappings
 * that are useful for post-mortem analysis are included in every core dump.
 * In that way we ensure that the core dump is fully interpretable later
 * without matching up the same kernel and hardware config to see what PC values
 * meant. These special mappings include the vDSO, vsyscall, and other
 * architecture-specific mappings.
 */
static bool always_dump_vma(struct vm_area_struct *vma)
{
	/* Any vsyscall mappings? */
	if (vma == get_gate_vma(vma->vm_mm))
		return true;

	/*
	 * Assume that all vmas with a .name op should always be dumped.
	 * If this changes, a new vm_ops field can easily be added.
	 */
	if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
		return true;

	/*
	 * arch_vma_name() returns non-NULL for special architecture mappings,
	 * such as vDSO sections.
	 */
	if (arch_vma_name(vma))
		return true;

	return false;
}

#define DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER 1

/*
 * Decide how much of @vma's contents should be included in a core dump.
 */
static unsigned long vma_dump_size(struct vm_area_struct *vma,
				   unsigned long mm_flags)
{
#define FILTER(type)	(mm_flags & (1UL << MMF_DUMP_##type))

	/* always dump the vdso and vsyscall sections */
	if (always_dump_vma(vma))
		goto whole;

	if (vma->vm_flags & VM_DONTDUMP)
		return 0;

	/* support for DAX */
	if (vma_is_dax(vma)) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE))
			goto whole;
		return 0;
	}

	/* Hugetlb memory check */
	if (is_vm_hugetlb_page(vma)) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
			goto whole;
		return 0;
	}

	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & VM_IO)
		return 0;

	/* By default, dump shared memory if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED) {
		if (file_inode(vma->vm_file)->i_nlink == 0 ?
		    FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
			goto whole;
		return 0;
	}

	/* Dump segments that have been written to. */
	if ((!IS_ENABLED(CONFIG_MMU) || vma->anon_vma) && FILTER(ANON_PRIVATE))
		goto whole;
	if (vma->vm_file == NULL)
		return 0;

	if (FILTER(MAPPED_PRIVATE))
		goto whole;

	/*
	 * If this is the beginning of an executable file mapping,
	 * dump the first page to aid in determining what was mapped here.
	 */
	if (FILTER(ELF_HEADERS) &&
	    vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
		if ((READ_ONCE(file_inode(vma->vm_file)->i_mode) & 0111) != 0)
			return PAGE_SIZE;

		/*
		 * ELF libraries aren't always executable.
		 * We'll want to check whether the mapping starts with the ELF
		 * magic, but not now - we're holding the mmap lock,
		 * so copy_from_user() doesn't work here.
		 * Use a placeholder instead, and fix it up later in
		 * dump_vma_snapshot().
		 */
		return DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER;
	}

#undef FILTER

	return 0;

whole:
	return vma->vm_end - vma->vm_start;
}
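
/*
 * The FILTER() checks above are driven by /proc/<pid>/coredump_filter
 * (see Documentation/filesystems/proc.rst for the authoritative list):
 *
 *	bit 0: anonymous private	bit 1: anonymous shared
 *	bit 2: file-backed private	bit 3: file-backed shared
 *	bit 4: ELF headers		bit 5: hugetlb private
 *	bit 6: hugetlb shared		bit 7: DAX private
 *	bit 8: DAX shared
 *
 * The default of 0x33 dumps anonymous memory, ELF headers and private
 * hugetlb mappings. For example:
 *
 *	$ echo 0x37 > /proc/self/coredump_filter	# also dump private
 *							# file-backed mappings
 */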

/*
 * Helper function for iterating across a vma list. It ensures that the caller
 * will visit `gate_vma' prior to terminating the search.
 */
static struct vm_area_struct *coredump_next_vma(struct vma_iterator *vmi,
						struct vm_area_struct *vma,
						struct vm_area_struct *gate_vma)
{
	if (gate_vma && (vma == gate_vma))
		return NULL;

	vma = vma_next(vmi);
	if (vma)
		return vma;
	return gate_vma;
}

static void free_vma_snapshot(struct coredump_params *cprm)
{
	if (cprm->vma_meta) {
		int i;

		for (i = 0; i < cprm->vma_count; i++) {
			struct file *file = cprm->vma_meta[i].file;

			if (file)
				fput(file);
		}
		kvfree(cprm->vma_meta);
		cprm->vma_meta = NULL;
	}
}

static int cmp_vma_size(const void *vma_meta_lhs_ptr, const void *vma_meta_rhs_ptr)
{
	const struct core_vma_metadata *vma_meta_lhs = vma_meta_lhs_ptr;
	const struct core_vma_metadata *vma_meta_rhs = vma_meta_rhs_ptr;

	if (vma_meta_lhs->dump_size < vma_meta_rhs->dump_size)
		return -1;
	if (vma_meta_lhs->dump_size > vma_meta_rhs->dump_size)
		return 1;
	return 0;
}

/*
 * Under the mmap_lock, take a snapshot of relevant information about the
 * task's VMAs.
 */
static bool dump_vma_snapshot(struct coredump_params *cprm)
{
	struct vm_area_struct *gate_vma, *vma = NULL;
	struct mm_struct *mm = current->mm;
	VMA_ITERATOR(vmi, mm, 0);
	int i = 0;

	/*
	 * Once the stack expansion code is fixed to not change VMA bounds
	 * under mmap_lock in read mode, this can be changed to take the
	 * mmap_lock in read mode.
	 */
	if (mmap_write_lock_killable(mm))
		return false;

	cprm->vma_data_size = 0;
	gate_vma = get_gate_vma(mm);
	cprm->vma_count = mm->map_count + (gate_vma ? 1 : 0);

	cprm->vma_meta = kvmalloc_array(cprm->vma_count, sizeof(*cprm->vma_meta), GFP_KERNEL);
	if (!cprm->vma_meta) {
		mmap_write_unlock(mm);
		return false;
	}

	while ((vma = coredump_next_vma(&vmi, vma, gate_vma)) != NULL) {
		struct core_vma_metadata *m = cprm->vma_meta + i;

		m->start = vma->vm_start;
		m->end = vma->vm_end;
		m->flags = vma->vm_flags;
		m->dump_size = vma_dump_size(vma, cprm->mm_flags);
		m->pgoff = vma->vm_pgoff;
		m->file = vma->vm_file;
		if (m->file)
			get_file(m->file);
		i++;
	}

	mmap_write_unlock(mm);

	for (i = 0; i < cprm->vma_count; i++) {
		struct core_vma_metadata *m = cprm->vma_meta + i;

		if (m->dump_size == DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER) {
			char elfmag[SELFMAG];

			if (copy_from_user(elfmag, (void __user *)m->start, SELFMAG) ||
					memcmp(elfmag, ELFMAG, SELFMAG) != 0) {
				m->dump_size = 0;
			} else {
				m->dump_size = PAGE_SIZE;
			}
		}

		cprm->vma_data_size += m->dump_size;
	}

	if (core_sort_vma)
		sort(cprm->vma_meta, cprm->vma_count, sizeof(*cprm->vma_meta),
		     cmp_vma_size, NULL);

	return true;
}