// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/sort.h>
#include <linux/sched/coredump.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/path.h>
#include <linux/timekeeping.h>
#include <linux/sysctl.h>
#include <linux/elf.h>
#include <linux/pidfs.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <net/af_unix.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <uapi/linux/pidfd.h>
#include <uapi/linux/un.h>
#include <uapi/linux/coredump.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/exec.h>

#include <trace/events/task.h>
#include "internal.h"

#include <trace/events/sched.h>

static bool dump_vma_snapshot(struct coredump_params *cprm);
static void free_vma_snapshot(struct coredump_params *cprm);

#define CORE_FILE_NOTE_SIZE_DEFAULT (4*1024*1024)
/* Define a reasonable max cap */
#define CORE_FILE_NOTE_SIZE_MAX (16*1024*1024)
/*
 * File descriptor number for the pidfd for the thread-group leader of
 * the coredumping task installed into the usermode helper's file
 * descriptor table.
 */
#define COREDUMP_PIDFD_NUMBER 3

static int core_uses_pid;
static unsigned int core_pipe_limit;
static unsigned int core_sort_vma;
static char core_pattern[CORENAME_MAX_SIZE] = "core";
static int core_name_size = CORENAME_MAX_SIZE;
unsigned int core_file_note_size_limit = CORE_FILE_NOTE_SIZE_DEFAULT;
static atomic_t core_pipe_count = ATOMIC_INIT(0);

enum coredump_type_t {
	COREDUMP_FILE		= 1,
	COREDUMP_PIPE		= 2,
	COREDUMP_SOCK		= 3,
	COREDUMP_SOCK_REQ	= 4,
};

struct core_name {
	char *corename;
	int used, size;
	unsigned int core_pipe_limit;
	bool core_dumped;
	enum coredump_type_t core_type;
	u64 mask;
};

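/*
 * Grow the corename buffer to at least @size bytes. On success the new
 * size is also published to core_name_size so later callers start out
 * with a large enough allocation (racy but harmless, see below).
 */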
static int expand_corename(struct core_name *cn, int size)
{
	char *corename;

	size = kmalloc_size_roundup(size);
	corename = krealloc(cn->corename, size, GFP_KERNEL);

	if (!corename)
		return -ENOMEM;

	if (size > core_name_size) /* racy but harmless */
		core_name_size = size;

	cn->size = size;
	cn->corename = corename;
	return 0;
}

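/*
 * Append the formatted string to cn->corename, expanding the buffer and
 * retrying when the remaining space is too small.
 */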
static __printf(2, 0) int cn_vprintf(struct core_name *cn, const char *fmt,
				     va_list arg)
{
	int free, need;
	va_list arg_copy;

again:
	free = cn->size - cn->used;

	va_copy(arg_copy, arg);
	need = vsnprintf(cn->corename + cn->used, free, fmt, arg_copy);
	va_end(arg_copy);

	if (need < free) {
		cn->used += need;
		return 0;
	}

	if (!expand_corename(cn, cn->size + need - free + 1))
		goto again;

	return -ENOMEM;
}

static __printf(2, 3) int cn_printf(struct core_name *cn, const char *fmt, ...)
{
	va_list arg;
	int ret;

	va_start(arg, fmt);
	ret = cn_vprintf(cn, fmt, arg);
	va_end(arg);

	return ret;
}

static __printf(2, 3)
int cn_esc_printf(struct core_name *cn, const char *fmt, ...)
{
	int cur = cn->used;
	va_list arg;
	int ret;

	va_start(arg, fmt);
	ret = cn_vprintf(cn, fmt, arg);
	va_end(arg);

	if (ret == 0) {
		/*
		 * Ensure that this coredump name component can't cause the
		 * resulting corefile path to consist of a ".." or ".".
		 */
		if ((cn->used - cur == 1 && cn->corename[cur] == '.') ||
				(cn->used - cur == 2 && cn->corename[cur] == '.'
				&& cn->corename[cur+1] == '.'))
			cn->corename[cur] = '!';

		/*
		 * Empty names are fishy and could be used to create a "//" in a
		 * corefile name, causing the coredump to happen one directory
		 * level too high. Enforce that all components of the core
		 * pattern are at least one character long.
		 */
		if (cn->used == cur)
			ret = cn_printf(cn, "!");
	}

	for (; cur < cn->used; ++cur) {
		if (cn->corename[cur] == '/')
			cn->corename[cur] = '!';
	}
	return ret;
}

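/*
 * Append the path of the task's executable (or, with @name_only, just
 * its final component), escaped via cn_esc_printf() so it cannot
 * introduce '/' separators into the corefile name.
 */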
static int cn_print_exe_file(struct core_name *cn, bool name_only)
{
	struct file *exe_file;
	char *pathbuf, *path, *ptr;
	int ret;

	exe_file = get_mm_exe_file(current->mm);
	if (!exe_file)
		return cn_esc_printf(cn, "%s (path unknown)", current->comm);

	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!pathbuf) {
		ret = -ENOMEM;
		goto put_exe_file;
	}

	path = file_path(exe_file, pathbuf, PATH_MAX);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		goto free_buf;
	}

	if (name_only) {
		ptr = strrchr(path, '/');
		if (ptr)
			path = ptr + 1;
	}
	ret = cn_esc_printf(cn, "%s", path);

free_buf:
	kfree(pathbuf);
put_exe_file:
	fput(exe_file);
	return ret;
}

/*
 * coredump_parse will inspect the core_pattern string and build the
 * matching core name in cn->corename, growing the buffer as needed.
 * For pipe patterns it also records the offsets of the usermode
 * helper's arguments in *argv.
 */
static bool coredump_parse(struct core_name *cn, struct coredump_params *cprm,
			   size_t **argv, int *argc)
{
	const struct cred *cred = current_cred();
	const char *pat_ptr = core_pattern;
	bool was_space = false;
	int pid_in_pattern = 0;
	int err = 0;

	cn->mask = COREDUMP_KERNEL;
	if (core_pipe_limit)
		cn->mask |= COREDUMP_WAIT;
	cn->used = 0;
	cn->corename = NULL;
	cn->core_pipe_limit = 0;
	cn->core_dumped = false;
	if (*pat_ptr == '|')
		cn->core_type = COREDUMP_PIPE;
	else if (*pat_ptr == '@')
		cn->core_type = COREDUMP_SOCK;
	else
		cn->core_type = COREDUMP_FILE;
	if (expand_corename(cn, core_name_size))
		return false;
	cn->corename[0] = '\0';

	switch (cn->core_type) {
	case COREDUMP_PIPE: {
		int argvs = sizeof(core_pattern) / 2;
		(*argv) = kmalloc_array(argvs, sizeof(**argv), GFP_KERNEL);
		if (!(*argv))
			return false;
		(*argv)[(*argc)++] = 0;
		++pat_ptr;
		if (!(*pat_ptr))
			return false;
		break;
	}
	case COREDUMP_SOCK: {
		/* skip the @ */
		pat_ptr++;
		if (!(*pat_ptr))
			return false;
		if (*pat_ptr == '@') {
			pat_ptr++;
			if (!(*pat_ptr))
				return false;

			cn->core_type = COREDUMP_SOCK_REQ;
		}

		err = cn_printf(cn, "%s", pat_ptr);
		if (err)
			return false;

		/* Require absolute paths. */
		if (cn->corename[0] != '/')
			return false;

		/*
		 * Ensure we can use spaces to indicate additional
		 * parameters in the future.
		 */
		if (strchr(cn->corename, ' ')) {
			coredump_report_failure("Coredump socket path %s may not contain spaces", cn->corename);
			return false;
		}

		/* Must not contain ".." in the path. */
		if (name_contains_dotdot(cn->corename)) {
			coredump_report_failure("Coredump socket path %s may not contain '..'", cn->corename);
			return false;
		}

		if (strlen(cn->corename) >= UNIX_PATH_MAX) {
			coredump_report_failure("Coredump socket path %s too long", cn->corename);
			return false;
		}

		/*
		 * Currently no need to parse any other options.
		 * Relevant information can be retrieved from the peer
		 * pidfd retrievable via SO_PEERPIDFD by the receiver or
		 * via /proc/<pid>, using the SO_PEERPIDFD to guard
		 * against pid recycling when opening /proc/<pid>.
		 */
		return true;
	}
	case COREDUMP_FILE:
		break;
	default:
		WARN_ON_ONCE(true);
		return false;
	}

	/*
	 * Repeat as long as we have more pattern to process and more
	 * output space.
	 */
	while (*pat_ptr) {
		/*
		 * Split on spaces before doing template expansion so that
		 * %e and %E don't get split if they have spaces in them
		 */
		if (cn->core_type == COREDUMP_PIPE) {
			if (isspace(*pat_ptr)) {
				if (cn->used != 0)
					was_space = true;
				pat_ptr++;
				continue;
			} else if (was_space) {
				was_space = false;
				err = cn_printf(cn, "%c", '\0');
				if (err)
					return false;
				(*argv)[(*argc)++] = cn->used;
			}
		}
		if (*pat_ptr != '%') {
			err = cn_printf(cn, "%c", *pat_ptr++);
		} else {
			switch (*++pat_ptr) {
			/* single % at the end, drop that */
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				err = cn_printf(cn, "%c", '%');
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				err = cn_printf(cn, "%d",
					      task_tgid_vnr(current));
				break;
			/* global pid */
			case 'P':
				err = cn_printf(cn, "%d",
					      task_tgid_nr(current));
				break;
			case 'i':
				err = cn_printf(cn, "%d",
					      task_pid_vnr(current));
				break;
			case 'I':
				err = cn_printf(cn, "%d",
					      task_pid_nr(current));
				break;
			/* uid */
			case 'u':
				err = cn_printf(cn, "%u",
						from_kuid(&init_user_ns,
							  cred->uid));
				break;
			/* gid */
			case 'g':
				err = cn_printf(cn, "%u",
						from_kgid(&init_user_ns,
							  cred->gid));
				break;
			case 'd':
				err = cn_printf(cn, "%d",
					__get_dumpable(cprm->mm_flags));
				break;
			/* signal that caused the coredump */
			case 's':
				err = cn_printf(cn, "%d",
						cprm->siginfo->si_signo);
				break;
			/* UNIX time of coredump */
			case 't': {
				time64_t time;

				time = ktime_get_real_seconds();
				err = cn_printf(cn, "%lld", time);
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				err = cn_esc_printf(cn, "%s",
					      utsname()->nodename);
				up_read(&uts_sem);
				break;
			/* executable, could be changed by prctl PR_SET_NAME etc */
			case 'e':
				err = cn_esc_printf(cn, "%s", current->comm);
				break;
			/* file name of executable */
			case 'f':
				err = cn_print_exe_file(cn, true);
				break;
			case 'E':
				err = cn_print_exe_file(cn, false);
				break;
			/* core limit size */
			case 'c':
				err = cn_printf(cn, "%lu",
					      rlimit(RLIMIT_CORE));
				break;
			/* CPU the task ran on */
			case 'C':
				err = cn_printf(cn, "%d", cprm->cpu);
				break;
			/* pidfd number */
			case 'F': {
				/*
				 * Installing a pidfd only makes sense if
				 * we actually spawn a usermode helper.
				 */
				if (cn->core_type != COREDUMP_PIPE)
					break;

				/*
				 * Note that we'll install a pidfd for the
				 * thread-group leader. We know that task
				 * linkage hasn't been removed yet and even if
				 * this @current isn't the actual thread-group
				 * leader we know that the thread-group leader
				 * cannot be reaped until @current has exited.
				 */
				cprm->pid = task_tgid(current);
				err = cn_printf(cn, "%d", COREDUMP_PIDFD_NUMBER);
				break;
			}
			default:
				break;
			}
			++pat_ptr;
		}

		if (err)
			return false;
	}

out:
	/*
	 * Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands.
	 */
	if (cn->core_type == COREDUMP_FILE && !pid_in_pattern && core_uses_pid)
		return cn_printf(cn, ".%d", task_tgid_vnr(current)) == 0;

	return true;
}

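/*
 * Mark the thread group as exiting and send SIGKILL to every other
 * thread that has not yet passed coredump_task_exit(). Returns the
 * number of threads the dumper still has to wait for.
 */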
static int zap_process(struct signal_struct *signal, int exit_code)
{
	struct task_struct *t;
	int nr = 0;

	signal->flags = SIGNAL_GROUP_EXIT;
	signal->group_exit_code = exit_code;
	signal->group_stop_count = 0;

	__for_each_thread(signal, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		if (t != current && !(t->flags & PF_POSTCOREDUMP)) {
			sigaddset(&t->pending.signal, SIGKILL);
			signal_wake_up(t, 1);
			nr++;
		}
	}

	return nr;
}

static int zap_threads(struct task_struct *tsk,
			struct core_state *core_state, int exit_code)
{
	struct signal_struct *signal = tsk->signal;
	int nr = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!(signal->flags & SIGNAL_GROUP_EXIT) && !signal->group_exec_task) {
		/* Allow SIGKILL, see prepare_signal() */
		signal->core_state = core_state;
		nr = zap_process(signal, exit_code);
		clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
		tsk->flags |= PF_DUMPCORE;
		atomic_set(&core_state->nr_threads, nr);
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	return nr;
}

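/*
 * Kill the other threads in the group and wait until they have parked
 * in coredump_task_exit() and become inactive, so their register state
 * is stable in memory for the dump.
 */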
static int coredump_wait(int exit_code, struct core_state *core_state)
{
	struct task_struct *tsk = current;
	int core_waiters = -EBUSY;

	init_completion(&core_state->startup);
	core_state->dumper.task = tsk;
	core_state->dumper.next = NULL;

	core_waiters = zap_threads(tsk, core_state, exit_code);
	if (core_waiters > 0) {
		struct core_thread *ptr;

		wait_for_completion_state(&core_state->startup,
					  TASK_UNINTERRUPTIBLE|TASK_FREEZABLE);
		/*
		 * Wait for all the threads to become inactive, so that
		 * all the thread context (extended register state, like
		 * fpu etc) gets copied to the memory.
		 */
		ptr = core_state->dumper.next;
		while (ptr != NULL) {
			wait_task_inactive(ptr->task, TASK_ANY);
			ptr = ptr->next;
		}
	}

	return core_waiters;
}

static void coredump_finish(bool core_dumped)
{
	struct core_thread *curr, *next;
	struct task_struct *task;

	spin_lock_irq(&current->sighand->siglock);
	if (core_dumped && !__fatal_signal_pending(current))
		current->signal->group_exit_code |= 0x80;
	next = current->signal->core_state->dumper.next;
	current->signal->core_state = NULL;
	spin_unlock_irq(&current->sighand->siglock);

	while ((curr = next) != NULL) {
		next = curr->next;
		task = curr->task;
		/*
		 * see coredump_task_exit(), curr->task must not see
		 * ->task == NULL before we read ->next.
		 */
		smp_mb();
		curr->task = NULL;
		wake_up_process(task);
	}
}

static bool dump_interrupted(void)
{
	/*
	 * SIGKILL or freezing() interrupt the coredumping. Perhaps we
	 * can do try_to_freeze() and check __fatal_signal_pending(),
	 * but then we need to teach dump_write() to restart and clear
	 * TIF_SIGPENDING.
	 */
	return fatal_signal_pending(current) || freezing(current);
}

static void wait_for_dump_helpers(struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	pipe_lock(pipe);
	pipe->readers++;
	pipe->writers--;
	wake_up_interruptible_sync(&pipe->rd_wait);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	pipe_unlock(pipe);

	/*
	 * We actually want wait_event_freezable() but then we need
	 * to clear TIF_SIGPENDING and improve dump_interrupted().
	 */
	wait_event_interruptible(pipe->rd_wait, pipe->readers == 1);

	pipe_lock(pipe);
	pipe->readers--;
	pipe->writers++;
	pipe_unlock(pipe);
}

/*
 * umh_coredump_setup
 * helper function to customize the process used to collect the core in
 * userspace. Specifically it sets up a pipe and installs it as fd 0
 * (stdin) for the process. Returns 0 on success, or a negative error
 * code on failure. Note that it also sets the core limit to 1; this is
 * a special value that we use to trap recursive core dumps.
 */
static int umh_coredump_setup(struct subprocess_info *info, struct cred *new)
{
	struct file *files[2];
	struct coredump_params *cp = (struct coredump_params *)info->data;
	int err;

	if (cp->pid) {
		struct file *pidfs_file __free(fput) = NULL;

		pidfs_file = pidfs_alloc_file(cp->pid, 0);
		if (IS_ERR(pidfs_file))
			return PTR_ERR(pidfs_file);

		pidfs_coredump(cp);

		/*
		 * Usermode helpers are children of either
		 * system_unbound_wq or of kthreadd. So we know that
		 * we're starting off with a clean file descriptor
		 * table. So we should always be able to use
		 * COREDUMP_PIDFD_NUMBER as our file descriptor value.
		 */
		err = replace_fd(COREDUMP_PIDFD_NUMBER, pidfs_file, 0);
		if (err < 0)
			return err;
	}

	err = create_pipe_files(files, 0);
	if (err)
		return err;

	cp->file = files[1];

	err = replace_fd(0, files[0], 0);
	fput(files[0]);
	if (err < 0)
		return err;

	/* and disallow core files too */
	current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};

	return 0;
}

#ifdef CONFIG_UNIX
static bool coredump_sock_connect(struct core_name *cn, struct coredump_params *cprm)
{
	struct file *file __free(fput) = NULL;
	struct sockaddr_un addr = {
		.sun_family = AF_UNIX,
	};
	ssize_t addr_len;
	int retval;
	struct socket *socket;

	addr_len = strscpy(addr.sun_path, cn->corename);
	if (addr_len < 0)
		return false;
	addr_len += offsetof(struct sockaddr_un, sun_path) + 1;

	/*
	 * It is possible that the userspace process which is supposed
	 * to handle the coredump and is listening on the AF_UNIX socket
	 * coredumps. Userspace should just mark itself non-dumpable.
	 */

	retval = sock_create_kern(&init_net, AF_UNIX, SOCK_STREAM, 0, &socket);
	if (retval < 0)
		return false;

	file = sock_alloc_file(socket, 0, NULL);
	if (IS_ERR(file))
		return false;

	/*
	 * Set the thread-group leader pid which is used for the peer
	 * credentials during connect() below. Then immediately register
	 * it in pidfs...
	 */
	cprm->pid = task_tgid(current);
	retval = pidfs_register_pid(cprm->pid);
	if (retval)
		return false;

	/*
	 * ... and set the coredump information so userspace has it
	 * available after connect()...
	 */
	pidfs_coredump(cprm);

	retval = kernel_connect(socket, (struct sockaddr *)(&addr), addr_len,
				O_NONBLOCK | SOCK_COREDUMP);

	if (retval) {
		if (retval == -EAGAIN)
			coredump_report_failure("Coredump socket %s receive queue full", addr.sun_path);
		else
			coredump_report_failure("Coredump socket connection %s failed %d", addr.sun_path, retval);
		return false;
	}

	/* ... and validate that @sk_peer_pid matches @cprm.pid. */
	if (WARN_ON_ONCE(unix_peer(socket->sk)->sk_peer_pid != cprm->pid))
		return false;

	cprm->limit = RLIM_INFINITY;
	cprm->file = no_free_ptr(file);

	return true;
}

static inline bool coredump_sock_recv(struct file *file, struct coredump_ack *ack, size_t size, int flags)
{
	struct msghdr msg = {};
	struct kvec iov = { .iov_base = ack, .iov_len = size };
	ssize_t ret;

	memset(ack, 0, size);
	ret = kernel_recvmsg(sock_from_file(file), &msg, &iov, 1, size, flags);
	return ret == size;
}

static inline bool coredump_sock_send(struct file *file, struct coredump_req *req)
{
	struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
	struct kvec iov = { .iov_base = req, .iov_len = sizeof(*req) };
	ssize_t ret;

	ret = kernel_sendmsg(sock_from_file(file), &msg, &iov, 1, sizeof(*req));
	return ret == sizeof(*req);
}

static_assert(sizeof(enum coredump_mark) == sizeof(__u32));

static inline bool coredump_sock_mark(struct file *file, enum coredump_mark mark)
{
	struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
	struct kvec iov = { .iov_base = &mark, .iov_len = sizeof(mark) };
	ssize_t ret;

	ret = kernel_sendmsg(sock_from_file(file), &msg, &iov, 1, sizeof(mark));
	return ret == sizeof(mark);
}

static inline void coredump_sock_wait(struct file *file)
{
	ssize_t n;

	/*
	 * We use a simple read to wait for the coredump processing to
	 * finish. Either the socket is closed or we get sent unexpected
	 * data. In both cases, we're done.
	 */
	n = __kernel_read(file, &(char){ 0 }, 1, NULL);
	if (n > 0)
		coredump_report_failure("Coredump socket had unexpected data");
	else if (n < 0)
		coredump_report_failure("Coredump socket failed");
}

static inline void coredump_sock_shutdown(struct file *file)
{
	struct socket *socket;

	socket = sock_from_file(file);
	if (!socket)
		return;

	/* Let userspace know we're done processing the coredump. */
	kernel_sock_shutdown(socket, SHUT_WR);
}

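/*
 * Negotiate the COREDUMP_SOCK_REQ protocol: send a coredump_req
 * advertising what the kernel supports, then receive and validate the
 * coredump_ack from the server before acknowledging it with
 * COREDUMP_MARK_REQACK.
 */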
static bool coredump_sock_request(struct core_name *cn, struct coredump_params *cprm)
{
	struct coredump_req req = {
		.size		= sizeof(struct coredump_req),
		.mask		= COREDUMP_KERNEL | COREDUMP_USERSPACE |
				  COREDUMP_REJECT | COREDUMP_WAIT,
		.size_ack	= sizeof(struct coredump_ack),
	};
	struct coredump_ack ack = {};
	ssize_t usize;

	if (cn->core_type != COREDUMP_SOCK_REQ)
		return true;

	/* Let userspace know what we support. */
	if (!coredump_sock_send(cprm->file, &req))
		return false;

	/* Peek the size of the coredump_ack. */
	if (!coredump_sock_recv(cprm->file, &ack, sizeof(ack.size),
				MSG_PEEK | MSG_WAITALL))
		return false;

	/* Refuse unknown coredump_ack sizes. */
	usize = ack.size;
	if (usize < COREDUMP_ACK_SIZE_VER0) {
		coredump_sock_mark(cprm->file, COREDUMP_MARK_MINSIZE);
		return false;
	}

	if (usize > sizeof(ack)) {
		coredump_sock_mark(cprm->file, COREDUMP_MARK_MAXSIZE);
		return false;
	}

	/* Now retrieve the coredump_ack. */
	if (!coredump_sock_recv(cprm->file, &ack, usize, MSG_WAITALL))
		return false;
	if (ack.size != usize)
		return false;

	/* Refuse unknown coredump_ack flags. */
	if (ack.mask & ~req.mask) {
		coredump_sock_mark(cprm->file, COREDUMP_MARK_UNSUPPORTED);
		return false;
	}

	/* Refuse mutually exclusive options. */
	if (hweight64(ack.mask & (COREDUMP_USERSPACE | COREDUMP_KERNEL |
				  COREDUMP_REJECT)) != 1) {
		coredump_sock_mark(cprm->file, COREDUMP_MARK_CONFLICTING);
		return false;
	}

	if (ack.spare) {
		coredump_sock_mark(cprm->file, COREDUMP_MARK_UNSUPPORTED);
		return false;
	}

	cn->mask = ack.mask;
	return coredump_sock_mark(cprm->file, COREDUMP_MARK_REQACK);
}

static bool coredump_socket(struct core_name *cn, struct coredump_params *cprm)
{
	if (!coredump_sock_connect(cn, cprm))
		return false;

	return coredump_sock_request(cn, cprm);
}
#else
static inline void coredump_sock_wait(struct file *file) { }
static inline void coredump_sock_shutdown(struct file *file) { }
static inline bool coredump_socket(struct core_name *cn, struct coredump_params *cprm) { return false; }
#endif

/* cprm->mm_flags contains a stable snapshot of dumpability flags. */
static inline bool coredump_force_suid_safe(const struct coredump_params *cprm)
{
	/* Require nonrelative corefile path and be extra careful. */
	return __get_dumpable(cprm->mm_flags) == SUID_DUMP_ROOT;
}

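/*
 * Open the target corefile. Enforces the "fully qualified path" rule
 * for suid-safe dumps, refuses to follow symlinks or reuse an existing
 * file (O_NOFOLLOW | O_EXCL), and verifies that the filesystem
 * preserved our owner and mode before truncating and handing the file
 * to the dumper.
 */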
static bool coredump_file(struct core_name *cn, struct coredump_params *cprm,
			  const struct linux_binfmt *binfmt)
{
	struct mnt_idmap *idmap;
	struct inode *inode;
	struct file *file __free(fput) = NULL;
	int open_flags = O_CREAT | O_WRONLY | O_NOFOLLOW | O_LARGEFILE | O_EXCL;

	if (cprm->limit < binfmt->min_coredump)
		return false;

	if (coredump_force_suid_safe(cprm) && cn->corename[0] != '/') {
		coredump_report_failure("this process can only dump core to a fully qualified path, skipping core dump");
		return false;
	}

	/*
	 * Unlink the file if it exists unless this is a SUID
	 * binary - in that case, we're running around with root
	 * privs and don't want to unlink another user's coredump.
	 */
	if (!coredump_force_suid_safe(cprm)) {
		/*
		 * If it doesn't exist, that's fine. If there's some
		 * other problem, we'll catch it at the filp_open().
		 */
		do_unlinkat(AT_FDCWD, getname_kernel(cn->corename));
	}

	/*
	 * There is a race between unlinking and creating the
	 * file, but if that causes an EEXIST here, that's
	 * fine - another process raced with us while creating
	 * the corefile, and the other process won. To userspace,
	 * what matters is that at least one of the two processes
	 * writes its coredump successfully, not which one.
	 */
	if (coredump_force_suid_safe(cprm)) {
		/*
		 * Using user namespaces, normal user tasks can change
		 * their current->fs->root to point to arbitrary
		 * directories. Since the intention of the "only dump
		 * with a fully qualified path" rule is to control where
		 * coredumps may be placed using root privileges,
		 * current->fs->root must not be used. Instead, use the
		 * root directory of init_task.
		 */
		struct path root;

		task_lock(&init_task);
		get_fs_root(init_task.fs, &root);
		task_unlock(&init_task);
		file = file_open_root(&root, cn->corename, open_flags, 0600);
		path_put(&root);
	} else {
		file = filp_open(cn->corename, open_flags, 0600);
	}
	if (IS_ERR(file))
		return false;

	inode = file_inode(file);
	if (inode->i_nlink > 1)
		return false;
	if (d_unhashed(file->f_path.dentry))
		return false;
	/*
	 * AK: actually i see no reason to not allow this for named
	 * pipes etc, but keep the previous behaviour for now.
	 */
	if (!S_ISREG(inode->i_mode))
		return false;
	/*
	 * Don't dump core if the filesystem changed owner or mode
	 * of the file during file creation. This is an issue when
	 * a process dumps core while its cwd is e.g. on a vfat
	 * filesystem.
	 */
	idmap = file_mnt_idmap(file);
	if (!vfsuid_eq_kuid(i_uid_into_vfsuid(idmap, inode), current_fsuid())) {
		coredump_report_failure("Core dump to %s aborted: cannot preserve file owner", cn->corename);
		return false;
	}
	if ((inode->i_mode & 0677) != 0600) {
		coredump_report_failure("Core dump to %s aborted: cannot preserve file permissions", cn->corename);
		return false;
	}
	if (!(file->f_mode & FMODE_CAN_WRITE))
		return false;
	if (do_truncate(idmap, file->f_path.dentry, 0, 0, file))
		return false;

	cprm->file = no_free_ptr(file);
	return true;
}

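/*
 * Spawn the "|program" usermode helper with the write end of a pipe as
 * cprm->file, honouring the core_pipe_limit cap on concurrent helpers.
 */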
static bool coredump_pipe(struct core_name *cn, struct coredump_params *cprm,
			  size_t *argv, int argc)
{
	int argi;
	char **helper_argv __free(kfree) = NULL;
	struct subprocess_info *sub_info;

	if (cprm->limit == 1) {
		/*
		 * See umh_coredump_setup() which sets RLIMIT_CORE = 1.
		 *
		 * Normally core limits are irrelevant to pipes, since
		 * we're not writing to the file system, but we use
		 * cprm.limit of 1 here as a special value, this is a
		 * consistent way to catch recursive crashes.
		 * We can still crash if the core_pattern binary sets
		 * RLIMIT_CORE to something other than 1, but it runs as
		 * root, and can do lots of stupid things.
		 *
		 * Note that we use task_tgid_vnr here to grab the pid
		 * of the process group leader.  That way we get the
		 * right pid if a thread in a multi-threaded
		 * core_pattern process dies.
		 */
		coredump_report_failure("RLIMIT_CORE is set to 1, aborting core");
		return false;
	}
	cprm->limit = RLIM_INFINITY;

	cn->core_pipe_limit = atomic_inc_return(&core_pipe_count);
	if (core_pipe_limit && (core_pipe_limit < cn->core_pipe_limit)) {
		coredump_report_failure("over core_pipe_limit, skipping core dump");
		return false;
	}

	helper_argv = kmalloc_array(argc + 1, sizeof(*helper_argv), GFP_KERNEL);
	if (!helper_argv) {
		coredump_report_failure("%s failed to allocate memory", __func__);
		return false;
	}
	for (argi = 0; argi < argc; argi++)
		helper_argv[argi] = cn->corename + argv[argi];
	helper_argv[argi] = NULL;

	sub_info = call_usermodehelper_setup(helper_argv[0], helper_argv, NULL,
					     GFP_KERNEL, umh_coredump_setup,
					     NULL, cprm);
	if (!sub_info)
		return false;

	if (call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC)) {
		coredump_report_failure("|%s pipe failed", cn->corename);
		return false;
	}

	/*
	 * umh disabled with CONFIG_STATIC_USERMODEHELPER_PATH="" would
	 * have this set to NULL.
	 */
	if (!cprm->file) {
		coredump_report_failure("Core dump to |%s disabled", cn->corename);
		return false;
	}

	return true;
}

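/*
 * Snapshot the VMAs and hand off to the binfmt writer. A trailing byte
 * is emitted if the dump ended in dump_skip() so the file size covers
 * the final file position.
 */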
static bool coredump_write(struct core_name *cn,
			  struct coredump_params *cprm,
			  struct linux_binfmt *binfmt)
{
	if (dump_interrupted())
		return true;

	if (!dump_vma_snapshot(cprm))
		return false;

	file_start_write(cprm->file);
	cn->core_dumped = binfmt->core_dump(cprm);
	/*
	 * Ensures that file size is big enough to contain the current
	 * file position. This prevents gdb from complaining about
	 * a truncated file if the last "write" to the file was
	 * dump_skip.
	 */
	if (cprm->to_skip) {
		cprm->to_skip--;
		dump_emit(cprm, "", 1);
	}
	file_end_write(cprm->file);
	free_vma_snapshot(cprm);
	return true;
}

static void coredump_cleanup(struct core_name *cn, struct coredump_params *cprm)
{
	if (cprm->file)
		filp_close(cprm->file, NULL);
	if (cn->core_pipe_limit) {
		VFS_WARN_ON_ONCE(cn->core_type != COREDUMP_PIPE);
		atomic_dec(&core_pipe_count);
	}
	kfree(cn->corename);
	coredump_finish(cn->core_dumped);
}

static inline bool coredump_skip(const struct coredump_params *cprm,
				 const struct linux_binfmt *binfmt)
{
	if (!binfmt)
		return true;
	if (!binfmt->core_dump)
		return true;
	if (!__get_dumpable(cprm->mm_flags))
		return true;
	return false;
}

void vfs_coredump(const kernel_siginfo_t *siginfo)
{
	struct cred *cred __free(put_cred) = NULL;
	size_t *argv __free(kfree) = NULL;
	struct core_state core_state;
	struct core_name cn;
	struct mm_struct *mm = current->mm;
	struct linux_binfmt *binfmt = mm->binfmt;
	const struct cred *old_cred;
	int argc = 0;
	struct coredump_params cprm = {
		.siginfo = siginfo,
		.limit = rlimit(RLIMIT_CORE),
		/*
		 * We must use the same mm->flags while dumping core to avoid
		 * inconsistency of bit flags, since this flag is not protected
		 * by any locks.
		 */
		.mm_flags = mm->flags,
		.vma_meta = NULL,
		.cpu = raw_smp_processor_id(),
	};

	audit_core_dumps(siginfo->si_signo);

	if (coredump_skip(&cprm, binfmt))
		return;

	cred = prepare_creds();
	if (!cred)
		return;
	/*
	 * We cannot trust fsuid as being the "true" uid of the process
	 * nor do we know its entire history. We only know it was tainted
	 * so we dump it as root in mode 2, and only into a controlled
	 * environment (pipe handler or fully qualified path).
	 */
	if (coredump_force_suid_safe(&cprm))
		cred->fsuid = GLOBAL_ROOT_UID;

	if (coredump_wait(siginfo->si_signo, &core_state) < 0)
		return;

	old_cred = override_creds(cred);

	if (!coredump_parse(&cn, &cprm, &argv, &argc)) {
		coredump_report_failure("coredump_parse failed, aborting core");
		goto close_fail;
	}

	switch (cn.core_type) {
	case COREDUMP_FILE:
		if (!coredump_file(&cn, &cprm, binfmt))
			goto close_fail;
		break;
	case COREDUMP_PIPE:
		if (!coredump_pipe(&cn, &cprm, argv, argc))
			goto close_fail;
		break;
	case COREDUMP_SOCK_REQ:
		fallthrough;
	case COREDUMP_SOCK:
		if (!coredump_socket(&cn, &cprm))
			goto close_fail;
		break;
	default:
		WARN_ON_ONCE(true);
		goto close_fail;
	}

	/* Don't even generate the coredump. */
	if (cn.mask & COREDUMP_REJECT)
		goto close_fail;

	/*
	 * Get us an unshared descriptor table; almost always a no-op.
	 * The cell spufs coredump code reads the file descriptor tables.
	 */
	if (unshare_files())
		goto close_fail;

	if ((cn.mask & COREDUMP_KERNEL) && !coredump_write(&cn, &cprm, binfmt))
		goto close_fail;

	coredump_sock_shutdown(cprm.file);

	/* Let the parent know that a coredump was generated. */
	if (cn.mask & COREDUMP_USERSPACE)
		cn.core_dumped = true;

	/*
	 * When core_pipe_limit is set we wait for the coredump server
	 * or usermode helper to finish before exiting so it can e.g.,
	 * inspect /proc/<pid>.
	 */
	if (cn.mask & COREDUMP_WAIT) {
		switch (cn.core_type) {
		case COREDUMP_PIPE:
			wait_for_dump_helpers(cprm.file);
			break;
		case COREDUMP_SOCK_REQ:
			fallthrough;
		case COREDUMP_SOCK:
			coredump_sock_wait(cprm.file);
			break;
		default:
			break;
		}
	}

close_fail:
	coredump_cleanup(&cn, &cprm);
	revert_creds(old_cred);
}

/*
 * Core dumping helper functions.  These are the only things you should
 * do on a core-file: use only these functions to write out all the
 * necessary info.
 */
static int __dump_emit(struct coredump_params *cprm, const void *addr, int nr)
{
	struct file *file = cprm->file;
	loff_t pos = file->f_pos;
	ssize_t n;

	if (cprm->written + nr > cprm->limit)
		return 0;
	if (dump_interrupted())
		return 0;
	n = __kernel_write(file, addr, nr, &pos);
	if (n != nr)
		return 0;
	file->f_pos = pos;
	cprm->written += n;
	cprm->pos += n;

	return 1;
}

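/*
 * Skip @nr bytes of output. Seekable files are lseek'd so the corefile
 * stays sparse; non-seekable targets (pipes, sockets) get explicit
 * zeroes, one page at a time.
 */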
static int __dump_skip(struct coredump_params *cprm, size_t nr)
{
	static char zeroes[PAGE_SIZE];
	struct file *file = cprm->file;

	if (file->f_mode & FMODE_LSEEK) {
		if (dump_interrupted() || vfs_llseek(file, nr, SEEK_CUR) < 0)
			return 0;
		cprm->pos += nr;
		return 1;
	}

	while (nr > PAGE_SIZE) {
		if (!__dump_emit(cprm, zeroes, PAGE_SIZE))
			return 0;
		nr -= PAGE_SIZE;
	}

	return __dump_emit(cprm, zeroes, nr);
}

int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
{
	if (cprm->to_skip) {
		if (!__dump_skip(cprm, cprm->to_skip))
			return 0;
		cprm->to_skip = 0;
	}
	return __dump_emit(cprm, addr, nr);
}
EXPORT_SYMBOL(dump_emit);

void dump_skip_to(struct coredump_params *cprm, unsigned long pos)
{
	cprm->to_skip = pos - cprm->pos;
}
EXPORT_SYMBOL(dump_skip_to);

void dump_skip(struct coredump_params *cprm, size_t nr)
{
	cprm->to_skip += nr;
}
EXPORT_SYMBOL(dump_skip);

#ifdef CONFIG_ELF_CORE
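/*
 * Write out a whole page, flushing any pending skip first. Uses a
 * bvec-backed iov_iter so the page itself is handed to the filesystem
 * without an intermediate copy.
 */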
static int dump_emit_page(struct coredump_params *cprm, struct page *page)
{
	struct bio_vec bvec;
	struct iov_iter iter;
	struct file *file = cprm->file;
	loff_t pos;
	ssize_t n;

	if (!page)
		return 0;

	if (cprm->to_skip) {
		if (!__dump_skip(cprm, cprm->to_skip))
			return 0;
		cprm->to_skip = 0;
	}
	if (cprm->written + PAGE_SIZE > cprm->limit)
		return 0;
	if (dump_interrupted())
		return 0;
	pos = file->f_pos;
	bvec_set_page(&bvec, page, PAGE_SIZE, 0);
	iov_iter_bvec(&iter, ITER_SOURCE, &bvec, 1, PAGE_SIZE);
	n = __kernel_write_iter(cprm->file, &iter, &pos);
	if (n != PAGE_SIZE)
		return 0;
	file->f_pos = pos;
	cprm->written += PAGE_SIZE;
	cprm->pos += PAGE_SIZE;

	return 1;
}

/*
 * If we might get machine checks from kernel accesses during the
 * core dump, let's get those errors early rather than during the
 * IO. This is not performance-critical enough to warrant having
 * all the machine check logic in the iovec paths.
 */
#ifdef copy_mc_to_kernel

#define dump_page_alloc() alloc_page(GFP_KERNEL)
#define dump_page_free(x) __free_page(x)
static struct page *dump_page_copy(struct page *src, struct page *dst)
{
	void *buf = kmap_local_page(src);
	size_t left = copy_mc_to_kernel(page_address(dst), buf, PAGE_SIZE);
	kunmap_local(buf);
	return left ? NULL : dst;
}

#else

/* We just want to return non-NULL; it's never used. */
#define dump_page_alloc() ERR_PTR(-EINVAL)
#define dump_page_free(x) ((void)(x))
static inline struct page *dump_page_copy(struct page *src, struct page *dst)
{
	return src;
}
#endif

int dump_user_range(struct coredump_params *cprm, unsigned long start,
		    unsigned long len)
{
	unsigned long addr;
	struct page *dump_page;
	int locked, ret;

	dump_page = dump_page_alloc();
	if (!dump_page)
		return 0;

	ret = 0;
	locked = 0;
	for (addr = start; addr < start + len; addr += PAGE_SIZE) {
		struct page *page;

		if (!locked) {
			if (mmap_read_lock_killable(current->mm))
				goto out;
			locked = 1;
		}

		/*
		 * To avoid having to allocate page tables for virtual address
		 * ranges that have never been used yet, and also to make it
		 * easy to generate sparse core files, use a helper that returns
		 * NULL when encountering an empty page table entry that would
		 * otherwise have been filled with the zero page.
		 */
		page = get_dump_page(addr, &locked);
		if (page) {
			if (locked) {
				mmap_read_unlock(current->mm);
				locked = 0;
			}
			int stop = !dump_emit_page(cprm, dump_page_copy(page, dump_page));
			put_page(page);
			if (stop)
				goto out;
		} else {
			dump_skip(cprm, PAGE_SIZE);
		}

		if (dump_interrupted())
			goto out;

		if (!need_resched())
			continue;
		if (locked) {
			mmap_read_unlock(current->mm);
			locked = 0;
		}
		cond_resched();
	}
	ret = 1;
out:
	if (locked)
		mmap_read_unlock(current->mm);

	dump_page_free(dump_page);
	return ret;
}
#endif

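/*
 * Queue up enough skipped bytes to bring the output position to the
 * requested power-of-two alignment. Returns 0 if @align is not a power
 * of two.
 */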
int dump_align(struct coredump_params *cprm, int align)
{
	unsigned mod = (cprm->pos + cprm->to_skip) & (align - 1);

	if (align & (align - 1))
		return 0;
	if (mod)
		cprm->to_skip += align - mod;
	return 1;
}
EXPORT_SYMBOL(dump_align);

#ifdef CONFIG_SYSCTL

void validate_coredump_safety(void)
{
	if (suid_dumpable == SUID_DUMP_ROOT &&
	    core_pattern[0] != '/' && core_pattern[0] != '|' && core_pattern[0] != '@') {
		coredump_report_failure("Unsafe core_pattern used with fs.suid_dumpable=2: "
			"pipe handler or fully qualified core dump path required. "
			"Set kernel.core_pattern before fs.suid_dumpable.");
	}
}

static inline bool check_coredump_socket(void)
{
	const char *p;

	if (core_pattern[0] != '@')
		return true;

	/*
	 * Coredump socket must be located in the initial mount
	 * namespace. Don't give the impression that anything else is
	 * supported right now.
	 */
	if (current->nsproxy->mnt_ns != init_task.nsproxy->mnt_ns)
		return false;

	/* Must be an absolute path... */
	if (core_pattern[1] != '/') {
		/* ... or the socket request protocol... */
		if (core_pattern[1] != '@')
			return false;
		/* ... and if so must be an absolute path. */
		if (core_pattern[2] != '/')
			return false;
		p = &core_pattern[2];
	} else {
		p = &core_pattern[1];
	}

	/* The path obviously cannot exceed UNIX_PATH_MAX. */
	if (strlen(p) >= UNIX_PATH_MAX)
		return false;

	/* Must not contain ".." in the path. */
	if (name_contains_dotdot(core_pattern))
		return false;

	return true;
}

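/*
 * Sysctl handler for kernel.core_pattern: accept the write, but roll
 * back to the previous pattern if the new value is not a valid coredump
 * socket specification.
 */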
static int proc_dostring_coredump(const struct ctl_table *table, int write,
		  void *buffer, size_t *lenp, loff_t *ppos)
{
	int error;
	ssize_t retval;
	char old_core_pattern[CORENAME_MAX_SIZE];

	retval = strscpy(old_core_pattern, core_pattern, CORENAME_MAX_SIZE);

	error = proc_dostring(table, write, buffer, lenp, ppos);
	if (error)
		return error;
	if (!check_coredump_socket()) {
		strscpy(core_pattern, old_core_pattern, retval + 1);
		return -EINVAL;
	}

	validate_coredump_safety();
	return error;
}

static const unsigned int core_file_note_size_min = CORE_FILE_NOTE_SIZE_DEFAULT;
static const unsigned int core_file_note_size_max = CORE_FILE_NOTE_SIZE_MAX;
static char core_modes[] = {
	"file\npipe"
#ifdef CONFIG_UNIX
	"\nsocket"
#endif
};

static const struct ctl_table coredump_sysctls[] = {
	{
		.procname	= "core_uses_pid",
		.data		= &core_uses_pid,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "core_pattern",
		.data		= core_pattern,
		.maxlen		= CORENAME_MAX_SIZE,
		.mode		= 0644,
		.proc_handler	= proc_dostring_coredump,
	},
	{
		.procname	= "core_pipe_limit",
		.data		= &core_pipe_limit,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_INT_MAX,
	},
	{
		.procname       = "core_file_note_size_limit",
		.data           = &core_file_note_size_limit,
		.maxlen         = sizeof(unsigned int),
		.mode           = 0644,
		.proc_handler	= proc_douintvec_minmax,
		.extra1		= (unsigned int *)&core_file_note_size_min,
		.extra2		= (unsigned int *)&core_file_note_size_max,
	},
	{
		.procname	= "core_sort_vma",
		.data		= &core_sort_vma,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_douintvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{
		.procname	= "core_modes",
		.data		= core_modes,
		.maxlen		= sizeof(core_modes) - 1,
		.mode		= 0444,
		.proc_handler	= proc_dostring,
	},
};

static int __init init_fs_coredump_sysctls(void)
{
	register_sysctl_init("kernel", coredump_sysctls);
	return 0;
}
fs_initcall(init_fs_coredump_sysctls);
#endif /* CONFIG_SYSCTL */

/*
 * The purpose of always_dump_vma() is to make sure that special kernel
 * mappings that are useful for post-mortem analysis are included in
 * every core dump. That way we ensure that the core dump is fully
 * interpretable later without matching up the same kernel and hardware
 * config to see what PC values meant. These special mappings include
 * the vDSO, vsyscall, and other architecture-specific mappings.
 */
static bool always_dump_vma(struct vm_area_struct *vma)
{
	/* Any vsyscall mappings? */
	if (vma == get_gate_vma(vma->vm_mm))
		return true;

	/*
	 * Assume that all vmas with a .name op should always be dumped.
	 * If this changes, a new vm_ops field can easily be added.
	 */
	if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
		return true;

	/*
	 * arch_vma_name() returns non-NULL for special architecture mappings,
	 * such as vDSO sections.
	 */
	if (arch_vma_name(vma))
		return true;

	return false;
}

#define DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER 1

/*
 * Decide how much of @vma's contents should be included in a core dump.
 */
static unsigned long vma_dump_size(struct vm_area_struct *vma,
				   unsigned long mm_flags)
{
#define FILTER(type)	(mm_flags & (1UL << MMF_DUMP_##type))

	/* always dump the vdso and vsyscall sections */
	if (always_dump_vma(vma))
		goto whole;

	if (vma->vm_flags & VM_DONTDUMP)
		return 0;

	/* support for DAX */
	if (vma_is_dax(vma)) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE))
			goto whole;
		return 0;
	}

	/* Hugetlb memory check */
	if (is_vm_hugetlb_page(vma)) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
			goto whole;
		return 0;
	}

	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & VM_IO)
		return 0;

	/* By default, dump shared memory if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED) {
		if (file_inode(vma->vm_file)->i_nlink == 0 ?
		    FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
			goto whole;
		return 0;
	}

	/* Dump segments that have been written to.  */
	if ((!IS_ENABLED(CONFIG_MMU) || vma->anon_vma) && FILTER(ANON_PRIVATE))
		goto whole;
	if (vma->vm_file == NULL)
		return 0;

	if (FILTER(MAPPED_PRIVATE))
		goto whole;

	/*
	 * If this is the beginning of an executable file mapping,
	 * dump the first page to aid in determining what was mapped here.
	 */
	if (FILTER(ELF_HEADERS) &&
	    vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
		if ((READ_ONCE(file_inode(vma->vm_file)->i_mode) & 0111) != 0)
			return PAGE_SIZE;

		/*
		 * ELF libraries aren't always executable.
		 * We'll want to check whether the mapping starts with the ELF
		 * magic, but not now - we're holding the mmap lock,
		 * so copy_from_user() doesn't work here.
		 * Use a placeholder instead, and fix it up later in
		 * dump_vma_snapshot().
		 */
		return DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER;
	}

#undef	FILTER

	return 0;

whole:
	return vma->vm_end - vma->vm_start;
}

/*
 * Helper function for iterating across a vma list.  It ensures that the caller
 * will visit `gate_vma' prior to terminating the search.
 */
static struct vm_area_struct *coredump_next_vma(struct vma_iterator *vmi,
				       struct vm_area_struct *vma,
				       struct vm_area_struct *gate_vma)
{
	if (gate_vma && (vma == gate_vma))
		return NULL;

	vma = vma_next(vmi);
	if (vma)
		return vma;
	return gate_vma;
}

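/*
 * Drop the file references taken in dump_vma_snapshot() and free the
 * VMA metadata array.
 */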
static void free_vma_snapshot(struct coredump_params *cprm)
{
	if (cprm->vma_meta) {
		int i;

		for (i = 0; i < cprm->vma_count; i++) {
			struct file *file = cprm->vma_meta[i].file;

			if (file)
				fput(file);
		}
		kvfree(cprm->vma_meta);
		cprm->vma_meta = NULL;
	}
}

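/*
 * sort() comparator used when the core_sort_vma sysctl is enabled:
 * orders the snapshot by ascending dump size.
 */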
static int cmp_vma_size(const void *vma_meta_lhs_ptr, const void *vma_meta_rhs_ptr)
{
	const struct core_vma_metadata *vma_meta_lhs = vma_meta_lhs_ptr;
	const struct core_vma_metadata *vma_meta_rhs = vma_meta_rhs_ptr;

	if (vma_meta_lhs->dump_size < vma_meta_rhs->dump_size)
		return -1;
	if (vma_meta_lhs->dump_size > vma_meta_rhs->dump_size)
		return 1;
	return 0;
}

/*
 * Under the mmap_lock, take a snapshot of relevant information about the
 * task's VMAs.
 */
static bool dump_vma_snapshot(struct coredump_params *cprm)
{
	struct vm_area_struct *gate_vma, *vma = NULL;
	struct mm_struct *mm = current->mm;
	VMA_ITERATOR(vmi, mm, 0);
	int i = 0;

	/*
	 * Once the stack expansion code is fixed to not change VMA bounds
	 * under mmap_lock in read mode, this can be changed to take the
	 * mmap_lock in read mode.
	 */
	if (mmap_write_lock_killable(mm))
		return false;

	cprm->vma_data_size = 0;
	gate_vma = get_gate_vma(mm);
	cprm->vma_count = mm->map_count + (gate_vma ? 1 : 0);

	cprm->vma_meta = kvmalloc_array(cprm->vma_count, sizeof(*cprm->vma_meta), GFP_KERNEL);
	if (!cprm->vma_meta) {
		mmap_write_unlock(mm);
		return false;
	}

	while ((vma = coredump_next_vma(&vmi, vma, gate_vma)) != NULL) {
		struct core_vma_metadata *m = cprm->vma_meta + i;

		m->start = vma->vm_start;
		m->end = vma->vm_end;
		m->flags = vma->vm_flags;
		m->dump_size = vma_dump_size(vma, cprm->mm_flags);
		m->pgoff = vma->vm_pgoff;
		m->file = vma->vm_file;
		if (m->file)
			get_file(m->file);
		i++;
	}

	mmap_write_unlock(mm);

	for (i = 0; i < cprm->vma_count; i++) {
		struct core_vma_metadata *m = cprm->vma_meta + i;

		if (m->dump_size == DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER) {
			char elfmag[SELFMAG];

			if (copy_from_user(elfmag, (void __user *)m->start, SELFMAG) ||
					memcmp(elfmag, ELFMAG, SELFMAG) != 0) {
				m->dump_size = 0;
			} else {
				m->dump_size = PAGE_SIZE;
			}
		}

		cprm->vma_data_size += m->dump_size;
	}

	if (core_sort_vma)
		sort(cprm->vma_meta, cprm->vma_count, sizeof(*cprm->vma_meta),
		     cmp_vma_size, NULL);

	return true;
}