xref: /linux/kernel/seccomp.c (revision 4246b92cf9fb32da8d8b060c92d8302797c6fbea)
1 /*
2  * linux/kernel/seccomp.c
3  *
4  * Copyright 2004-2005  Andrea Arcangeli <andrea@cpushare.com>
5  *
6  * Copyright (C) 2012 Google, Inc.
7  * Will Drewry <wad@chromium.org>
8  *
9  * This defines a simple but solid secure-computing facility.
10  *
11  * Mode 1 uses a fixed list of allowed system calls.
12  * Mode 2 allows user-defined system call filters in the form
13  *        of Berkeley Packet Filters/Linux Socket Filters.
14  */
15 
16 #include <linux/refcount.h>
17 #include <linux/audit.h>
18 #include <linux/compat.h>
19 #include <linux/coredump.h>
20 #include <linux/kmemleak.h>
21 #include <linux/sched.h>
22 #include <linux/sched/task_stack.h>
23 #include <linux/seccomp.h>
24 #include <linux/slab.h>
25 #include <linux/syscalls.h>
26 #include <linux/sysctl.h>
27 
28 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
29 #include <asm/syscall.h>
30 #endif
31 
32 #ifdef CONFIG_SECCOMP_FILTER
33 #include <linux/filter.h>
34 #include <linux/pid.h>
35 #include <linux/ptrace.h>
36 #include <linux/security.h>
37 #include <linux/tracehook.h>
38 #include <linux/uaccess.h>
39 
40 /**
41  * struct seccomp_filter - container for seccomp BPF programs
42  *
43  * @usage: reference count to manage the object lifetime.
44  *         get/put helpers should be used when accessing an instance
45  *         outside of a lifetime-guarded section.  In general, this
46  *         is only needed for handling filters shared across tasks.
47  * @log: true if all actions except for SECCOMP_RET_ALLOW should be logged
48  * @prev: points to a previously installed, or inherited, filter
49  * @prog: the BPF program to evaluate
50  *
51  * seccomp_filter objects are organized in a tree linked via the @prev
52  * pointer.  For any task, it appears to be a singly-linked list starting
53  * with current->seccomp.filter, the most recently attached or inherited filter.
54  * However, multiple filters may share a @prev node, by way of fork(), which
55  * results in a unidirectional tree existing in memory.  This is similar to
56  * how namespaces work.
57  *
58  * seccomp_filter objects should never be modified after being attached
59  * to a task_struct (other than @usage).
60  */
61 struct seccomp_filter {
62 	refcount_t usage;
63 	bool log;
64 	struct seccomp_filter *prev;
65 	struct bpf_prog *prog;
66 };
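/*
 * Illustrative sketch (editorial, not part of the original source): after a
 * task with filter F1 forks, both tasks initially share F1.  If each task
 * then attaches its own filter, the @prev pointers form a tree even though
 * every task still sees a simple list:
 *
 *                  F1          (shared ancestor, usage == 2)
 *                 /  \
 *   parent ---> F2    F3 <--- child     (F2->prev == F3->prev == F1)
 *
 * Only @usage may change on F1 after it has been attached.
 */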
67 
68 /* Limit any path through the tree to 256KB worth of instructions. */
69 #define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))
70 
71 /*
72  * Endianness is explicitly ignored and left for BPF program authors to manage
73  * as per the specific architecture.
74  */
75 static void populate_seccomp_data(struct seccomp_data *sd)
76 {
77 	struct task_struct *task = current;
78 	struct pt_regs *regs = task_pt_regs(task);
79 	unsigned long args[6];
80 
81 	sd->nr = syscall_get_nr(task, regs);
82 	sd->arch = syscall_get_arch();
83 	syscall_get_arguments(task, regs, 0, 6, args);
84 	sd->args[0] = args[0];
85 	sd->args[1] = args[1];
86 	sd->args[2] = args[2];
87 	sd->args[3] = args[3];
88 	sd->args[4] = args[4];
89 	sd->args[5] = args[5];
90 	sd->instruction_pointer = KSTK_EIP(task);
91 }
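/*
 * For reference (editorial note): the fields filled in above are what a
 * classic BPF filter reads with BPF_LD|BPF_W|BPF_ABS, e.g.
 * offsetof(struct seccomp_data, nr) == 0, arch == 4,
 * instruction_pointer == 8 and args[0] == 16, so 64-bit fields are
 * accessed as two 32-bit loads.
 */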
92 
93 /**
94  *	seccomp_check_filter - verify seccomp filter code
95  *	@filter: filter to verify
96  *	@flen: length of filter
97  *
98  * Takes a previously checked filter (by bpf_check_classic) and
99  * rewrites all filter code that would load struct sk_buff data so
100  * that it instead reads from the struct seccomp_data argument.  It also
101  * enforces length and alignment checking of those loads.
102  *
103  * Returns 0 if the rule set is legal or -EINVAL if not.
104  */
105 static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
106 {
107 	int pc;
108 	for (pc = 0; pc < flen; pc++) {
109 		struct sock_filter *ftest = &filter[pc];
110 		u16 code = ftest->code;
111 		u32 k = ftest->k;
112 
113 		switch (code) {
114 		case BPF_LD | BPF_W | BPF_ABS:
115 			ftest->code = BPF_LDX | BPF_W | BPF_ABS;
116 			/* 32-bit aligned and not out of bounds. */
117 			if (k >= sizeof(struct seccomp_data) || k & 3)
118 				return -EINVAL;
119 			continue;
120 		case BPF_LD | BPF_W | BPF_LEN:
121 			ftest->code = BPF_LD | BPF_IMM;
122 			ftest->k = sizeof(struct seccomp_data);
123 			continue;
124 		case BPF_LDX | BPF_W | BPF_LEN:
125 			ftest->code = BPF_LDX | BPF_IMM;
126 			ftest->k = sizeof(struct seccomp_data);
127 			continue;
128 		/* Explicitly include allowed calls. */
129 		case BPF_RET | BPF_K:
130 		case BPF_RET | BPF_A:
131 		case BPF_ALU | BPF_ADD | BPF_K:
132 		case BPF_ALU | BPF_ADD | BPF_X:
133 		case BPF_ALU | BPF_SUB | BPF_K:
134 		case BPF_ALU | BPF_SUB | BPF_X:
135 		case BPF_ALU | BPF_MUL | BPF_K:
136 		case BPF_ALU | BPF_MUL | BPF_X:
137 		case BPF_ALU | BPF_DIV | BPF_K:
138 		case BPF_ALU | BPF_DIV | BPF_X:
139 		case BPF_ALU | BPF_AND | BPF_K:
140 		case BPF_ALU | BPF_AND | BPF_X:
141 		case BPF_ALU | BPF_OR | BPF_K:
142 		case BPF_ALU | BPF_OR | BPF_X:
143 		case BPF_ALU | BPF_XOR | BPF_K:
144 		case BPF_ALU | BPF_XOR | BPF_X:
145 		case BPF_ALU | BPF_LSH | BPF_K:
146 		case BPF_ALU | BPF_LSH | BPF_X:
147 		case BPF_ALU | BPF_RSH | BPF_K:
148 		case BPF_ALU | BPF_RSH | BPF_X:
149 		case BPF_ALU | BPF_NEG:
150 		case BPF_LD | BPF_IMM:
151 		case BPF_LDX | BPF_IMM:
152 		case BPF_MISC | BPF_TAX:
153 		case BPF_MISC | BPF_TXA:
154 		case BPF_LD | BPF_MEM:
155 		case BPF_LDX | BPF_MEM:
156 		case BPF_ST:
157 		case BPF_STX:
158 		case BPF_JMP | BPF_JA:
159 		case BPF_JMP | BPF_JEQ | BPF_K:
160 		case BPF_JMP | BPF_JEQ | BPF_X:
161 		case BPF_JMP | BPF_JGE | BPF_K:
162 		case BPF_JMP | BPF_JGE | BPF_X:
163 		case BPF_JMP | BPF_JGT | BPF_K:
164 		case BPF_JMP | BPF_JGT | BPF_X:
165 		case BPF_JMP | BPF_JSET | BPF_K:
166 		case BPF_JMP | BPF_JSET | BPF_X:
167 			continue;
168 		default:
169 			return -EINVAL;
170 		}
171 	}
172 	return 0;
173 }
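/*
 * Illustrative userspace sketch (editorial, not part of the kernel source):
 * a minimal classic BPF program that passes the checks above.  It loads the
 * syscall number (offset 0 of struct seccomp_data), allows getpid(2) and
 * fails everything else with ENOSYS:
 *
 *	struct sock_filter prog[] = {
 *		BPF_STMT(BPF_LD | BPF_W | BPF_ABS, 0),
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | ENOSYS),
 *	};
 */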
174 
175 /**
176  * seccomp_run_filters - evaluates all seccomp filters against @sd
177  * @sd: optional seccomp data to be passed to filters
178  * @match: stores struct seccomp_filter that resulted in the return value,
179  *         unless every filter returned SECCOMP_RET_ALLOW, in which case it will
180  *         be unchanged.
181  *
182  * Returns valid seccomp BPF response codes.
183  */
184 #define ACTION_ONLY(ret) ((s32)((ret) & (SECCOMP_RET_ACTION_FULL)))
185 static u32 seccomp_run_filters(const struct seccomp_data *sd,
186 			       struct seccomp_filter **match)
187 {
188 	struct seccomp_data sd_local;
189 	u32 ret = SECCOMP_RET_ALLOW;
190 	/* Make sure cross-thread synced filter points somewhere sane. */
191 	struct seccomp_filter *f =
192 			lockless_dereference(current->seccomp.filter);
193 
194 	/* Ensure unexpected behavior doesn't result in failing open. */
195 	if (unlikely(WARN_ON(f == NULL)))
196 		return SECCOMP_RET_KILL_PROCESS;
197 
198 	if (!sd) {
199 		populate_seccomp_data(&sd_local);
200 		sd = &sd_local;
201 	}
202 
203 	/*
204 	 * All filters in the list are evaluated and the lowest BPF return
205 	 * value always takes priority (ignoring the DATA).
206 	 */
207 	for (; f; f = f->prev) {
208 		u32 cur_ret = BPF_PROG_RUN(f->prog, sd);
209 
210 		if (ACTION_ONLY(cur_ret) < ACTION_ONLY(ret)) {
211 			ret = cur_ret;
212 			*match = f;
213 		}
214 	}
215 	return ret;
216 }
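/*
 * Editorial note: because ACTION_ONLY() casts to s32, SECCOMP_RET_KILL_PROCESS
 * (0x80000000) compares as the most negative value and therefore always wins,
 * giving the precedence KILL_PROCESS < KILL_THREAD < TRAP < ERRNO < TRACE <
 * LOG < ALLOW when multiple filters disagree.
 */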
217 #endif /* CONFIG_SECCOMP_FILTER */
218 
219 static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
220 {
221 	assert_spin_locked(&current->sighand->siglock);
222 
223 	if (current->seccomp.mode && current->seccomp.mode != seccomp_mode)
224 		return false;
225 
226 	return true;
227 }
228 
229 static inline void seccomp_assign_mode(struct task_struct *task,
230 				       unsigned long seccomp_mode)
231 {
232 	assert_spin_locked(&task->sighand->siglock);
233 
234 	task->seccomp.mode = seccomp_mode;
235 	/*
236 	 * Make sure TIF_SECCOMP cannot be set before the mode (and
237 	 * filter) is set.
238 	 */
239 	smp_mb__before_atomic();
240 	set_tsk_thread_flag(task, TIF_SECCOMP);
241 }
242 
243 #ifdef CONFIG_SECCOMP_FILTER
244 /* Returns 1 if the parent is an ancestor of the child. */
245 static int is_ancestor(struct seccomp_filter *parent,
246 		       struct seccomp_filter *child)
247 {
248 	/* NULL is the root ancestor. */
249 	if (parent == NULL)
250 		return 1;
251 	for (; child; child = child->prev)
252 		if (child == parent)
253 			return 1;
254 	return 0;
255 }
256 
257 /**
258  * seccomp_can_sync_threads: checks if all threads can be synchronized
259  *
260  * Expects sighand and cred_guard_mutex locks to be held.
261  *
262  * Returns 0 on success, -ve on error, or the pid of a thread which was
263  * either not in the correct seccomp mode or did not have an ancestral
264  * seccomp filter.
265  */
266 static inline pid_t seccomp_can_sync_threads(void)
267 {
268 	struct task_struct *thread, *caller;
269 
270 	BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
271 	assert_spin_locked(&current->sighand->siglock);
272 
273 	/* Validate that all threads are eligible for synchronization. */
274 	caller = current;
275 	for_each_thread(caller, thread) {
276 		pid_t failed;
277 
278 		/* Skip current, since it is initiating the sync. */
279 		if (thread == caller)
280 			continue;
281 
282 		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED ||
283 		    (thread->seccomp.mode == SECCOMP_MODE_FILTER &&
284 		     is_ancestor(thread->seccomp.filter,
285 				 caller->seccomp.filter)))
286 			continue;
287 
288 		/* Return the first thread that cannot be synchronized. */
289 		failed = task_pid_vnr(thread);
290 		/* If the pid cannot be resolved, then return -ESRCH */
291 		if (unlikely(WARN_ON(failed == 0)))
292 			failed = -ESRCH;
293 		return failed;
294 	}
295 
296 	return 0;
297 }
298 
299 /**
300  * seccomp_sync_threads: sets all threads to use current's filter
301  *
302  * Expects sighand and cred_guard_mutex locks to be held, and for
303  * seccomp_can_sync_threads() to have returned success already
304  * without dropping the locks.
305  *
306  */
307 static inline void seccomp_sync_threads(void)
308 {
309 	struct task_struct *thread, *caller;
310 
311 	BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
312 	assert_spin_locked(&current->sighand->siglock);
313 
314 	/* Synchronize all threads. */
315 	caller = current;
316 	for_each_thread(caller, thread) {
317 		/* Skip current, since it needs no changes. */
318 		if (thread == caller)
319 			continue;
320 
321 		/* Get a task reference for the new leaf node. */
322 		get_seccomp_filter(caller);
323 		/*
324 		 * Drop the task reference to the shared ancestor since
325 		 * current's path will hold a reference.  (This also
326 		 * allows a put before the assignment.)
327 		 */
328 		put_seccomp_filter(thread);
329 		smp_store_release(&thread->seccomp.filter,
330 				  caller->seccomp.filter);
331 
332 		/*
333 		 * Don't let an unprivileged task work around
334 		 * the no_new_privs restriction by creating
335 		 * a thread that sets it up, enters seccomp,
336 		 * then dies.
337 		 */
338 		if (task_no_new_privs(caller))
339 			task_set_no_new_privs(thread);
340 
341 		/*
342 		 * Opt the other thread into seccomp if needed.
343 		 * As threads are considered to be trust-realm
344 		 * equivalent (see ptrace_may_access), it is safe to
345 		 * allow one thread to transition the other.
346 		 */
347 		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
348 			seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
349 	}
350 }
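/*
 * Editorial note: once this returns, every sibling thread's seccomp.filter
 * points at the caller's leaf filter, so any filter a thread attaches later
 * stacks on top of that shared node instead of diverging from an older
 * ancestor.
 */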
351 
352 /**
353  * seccomp_prepare_filter: Prepares a seccomp filter for use.
354  * @fprog: BPF program to install
355  *
356  * Returns filter on success or an ERR_PTR on failure.
357  */
358 static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
359 {
360 	struct seccomp_filter *sfilter;
361 	int ret;
362 	const bool save_orig = IS_ENABLED(CONFIG_CHECKPOINT_RESTORE);
363 
364 	if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
365 		return ERR_PTR(-EINVAL);
366 
367 	BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));
368 
369 	/*
370 	 * Installing a seccomp filter requires that the task either has
371 	 * CAP_SYS_ADMIN in its namespace or is running with no_new_privs.
372 	 * This avoids scenarios where unprivileged tasks can affect the
373 	 * behavior of privileged children.
374 	 */
375 	if (!task_no_new_privs(current) &&
376 	    security_capable_noaudit(current_cred(), current_user_ns(),
377 				     CAP_SYS_ADMIN) != 0)
378 		return ERR_PTR(-EACCES);
379 
380 	/* Allocate a new seccomp_filter */
381 	sfilter = kzalloc(sizeof(*sfilter), GFP_KERNEL | __GFP_NOWARN);
382 	if (!sfilter)
383 		return ERR_PTR(-ENOMEM);
384 
385 	ret = bpf_prog_create_from_user(&sfilter->prog, fprog,
386 					seccomp_check_filter, save_orig);
387 	if (ret < 0) {
388 		kfree(sfilter);
389 		return ERR_PTR(ret);
390 	}
391 
392 	refcount_set(&sfilter->usage, 1);
393 
394 	return sfilter;
395 }
396 
397 /**
398  * seccomp_prepare_user_filter - prepares a user-supplied sock_fprog
399  * @user_filter: pointer to the user data containing a sock_fprog.
400  *
401  * Returns the prepared filter on success, or an ERR_PTR on failure.
402  */
403 static struct seccomp_filter *
404 seccomp_prepare_user_filter(const char __user *user_filter)
405 {
406 	struct sock_fprog fprog;
407 	struct seccomp_filter *filter = ERR_PTR(-EFAULT);
408 
409 #ifdef CONFIG_COMPAT
410 	if (in_compat_syscall()) {
411 		struct compat_sock_fprog fprog32;
412 		if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
413 			goto out;
414 		fprog.len = fprog32.len;
415 		fprog.filter = compat_ptr(fprog32.filter);
416 	} else /* falls through to the if below. */
417 #endif
418 	if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
419 		goto out;
420 	filter = seccomp_prepare_filter(&fprog);
421 out:
422 	return filter;
423 }
424 
425 /**
426  * seccomp_attach_filter: validate and attach filter
427  * @flags:  flags to change filter behavior
428  * @filter: seccomp filter to add to the current process
429  *
430  * Caller must be holding current->sighand->siglock.
431  *
432  * Returns 0 on success, -ve on error.
433  */
434 static long seccomp_attach_filter(unsigned int flags,
435 				  struct seccomp_filter *filter)
436 {
437 	unsigned long total_insns;
438 	struct seccomp_filter *walker;
439 
440 	assert_spin_locked(&current->sighand->siglock);
441 
442 	/* Validate resulting filter length. */
443 	total_insns = filter->prog->len;
444 	for (walker = current->seccomp.filter; walker; walker = walker->prev)
445 		total_insns += walker->prog->len + 4;  /* 4 instr penalty */
446 	if (total_insns > MAX_INSNS_PER_PATH)
447 		return -ENOMEM;
448 
449 	/* If thread sync has been requested, check that it is possible. */
450 	if (flags & SECCOMP_FILTER_FLAG_TSYNC) {
451 		int ret;
452 
453 		ret = seccomp_can_sync_threads();
454 		if (ret)
455 			return ret;
456 	}
457 
458 	/* Set log flag, if present. */
459 	if (flags & SECCOMP_FILTER_FLAG_LOG)
460 		filter->log = true;
461 
462 	/*
463 	 * If there is an existing filter, make it the prev and don't drop its
464 	 * task reference.
465 	 */
466 	filter->prev = current->seccomp.filter;
467 	current->seccomp.filter = filter;
468 
469 	/* Now that the new filter is in place, synchronize to all threads. */
470 	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
471 		seccomp_sync_threads();
472 
473 	return 0;
474 }
475 
476 static void __get_seccomp_filter(struct seccomp_filter *filter)
477 {
478 	/* Reference count is bounded by the number of total processes. */
479 	refcount_inc(&filter->usage);
480 }
481 
482 /* get_seccomp_filter - increments the reference count of the filter on @tsk */
483 void get_seccomp_filter(struct task_struct *tsk)
484 {
485 	struct seccomp_filter *orig = tsk->seccomp.filter;
486 	if (!orig)
487 		return;
488 	__get_seccomp_filter(orig);
489 }
490 
491 static inline void seccomp_filter_free(struct seccomp_filter *filter)
492 {
493 	if (filter) {
494 		bpf_prog_destroy(filter->prog);
495 		kfree(filter);
496 	}
497 }
498 
499 static void __put_seccomp_filter(struct seccomp_filter *orig)
500 {
501 	/* Clean up single-reference branches iteratively. */
502 	while (orig && refcount_dec_and_test(&orig->usage)) {
503 		struct seccomp_filter *freeme = orig;
504 		orig = orig->prev;
505 		seccomp_filter_free(freeme);
506 	}
507 }
508 
509 /* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
510 void put_seccomp_filter(struct task_struct *tsk)
511 {
512 	__put_seccomp_filter(tsk->seccomp.filter);
513 }
514 
515 static void seccomp_init_siginfo(siginfo_t *info, int syscall, int reason)
516 {
517 	memset(info, 0, sizeof(*info));
518 	info->si_signo = SIGSYS;
519 	info->si_code = SYS_SECCOMP;
520 	info->si_call_addr = (void __user *)KSTK_EIP(current);
521 	info->si_errno = reason;
522 	info->si_arch = syscall_get_arch();
523 	info->si_syscall = syscall;
524 }
525 
526 /**
527  * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
528  * @syscall: syscall number to send to userland
529  * @reason: filter-supplied reason code to send to userland (via si_errno)
530  *
531  * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
532  */
533 static void seccomp_send_sigsys(int syscall, int reason)
534 {
535 	struct siginfo info;
536 	seccomp_init_siginfo(&info, syscall, reason);
537 	force_sig_info(SIGSYS, &info, current);
538 }
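/*
 * Illustrative userspace sketch (editorial, not part of the kernel source):
 * a handler installed with SA_SIGINFO can recover the fields filled in by
 * seccomp_init_siginfo() above:
 *
 *	static void sigsys_handler(int sig, siginfo_t *si, void *ucontext)
 *	{
 *		int nr = si->si_syscall;
 *		int reason = si->si_errno;
 *	}
 *
 * si_syscall is the blocked syscall number, si_errno carries the 16 bits of
 * SECCOMP_RET_DATA chosen by the filter, and si_call_addr/si_arch give the
 * faulting instruction pointer and audit architecture.
 */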
539 #endif	/* CONFIG_SECCOMP_FILTER */
540 
541 /* For use with seccomp_actions_logged */
542 #define SECCOMP_LOG_KILL_PROCESS	(1 << 0)
543 #define SECCOMP_LOG_KILL_THREAD		(1 << 1)
544 #define SECCOMP_LOG_TRAP		(1 << 2)
545 #define SECCOMP_LOG_ERRNO		(1 << 3)
546 #define SECCOMP_LOG_TRACE		(1 << 4)
547 #define SECCOMP_LOG_LOG			(1 << 5)
548 #define SECCOMP_LOG_ALLOW		(1 << 6)
549 
550 static u32 seccomp_actions_logged = SECCOMP_LOG_KILL_PROCESS |
551 				    SECCOMP_LOG_KILL_THREAD  |
552 				    SECCOMP_LOG_TRAP  |
553 				    SECCOMP_LOG_ERRNO |
554 				    SECCOMP_LOG_TRACE |
555 				    SECCOMP_LOG_LOG;
556 
557 static inline void seccomp_log(unsigned long syscall, long signr, u32 action,
558 			       bool requested)
559 {
560 	bool log = false;
561 
562 	switch (action) {
563 	case SECCOMP_RET_ALLOW:
564 		break;
565 	case SECCOMP_RET_TRAP:
566 		log = requested && seccomp_actions_logged & SECCOMP_LOG_TRAP;
567 		break;
568 	case SECCOMP_RET_ERRNO:
569 		log = requested && seccomp_actions_logged & SECCOMP_LOG_ERRNO;
570 		break;
571 	case SECCOMP_RET_TRACE:
572 		log = requested && seccomp_actions_logged & SECCOMP_LOG_TRACE;
573 		break;
574 	case SECCOMP_RET_LOG:
575 		log = seccomp_actions_logged & SECCOMP_LOG_LOG;
576 		break;
577 	case SECCOMP_RET_KILL_THREAD:
578 		log = seccomp_actions_logged & SECCOMP_LOG_KILL_THREAD;
579 		break;
580 	case SECCOMP_RET_KILL_PROCESS:
581 	default:
582 		log = seccomp_actions_logged & SECCOMP_LOG_KILL_PROCESS;
583 	}
584 
585 	/*
586 	 * Force an audit message to be emitted when the action is RET_KILL_*,
587 	 * RET_LOG, or the FILTER_FLAG_LOG bit was set and the action is
588 	 * allowed to be logged by the admin.
589 	 */
590 	if (log)
591 		return __audit_seccomp(syscall, signr, action);
592 
593 	/*
594 	 * Let the audit subsystem decide if the action should be audited based
595 	 * on whether the current task itself is being audited.
596 	 */
597 	return audit_seccomp(syscall, signr, action);
598 }
599 
600 /*
601  * Secure computing mode 1 allows only read/write/exit/sigreturn.
602  * To be fully secure this must be combined with rlimit
603  * to limit the stack allocations too.
604  */
605 static const int mode1_syscalls[] = {
606 	__NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
607 	0, /* null terminated */
608 };
609 
610 static void __secure_computing_strict(int this_syscall)
611 {
612 	const int *syscall_whitelist = mode1_syscalls;
613 #ifdef CONFIG_COMPAT
614 	if (in_compat_syscall())
615 		syscall_whitelist = get_compat_mode1_syscalls();
616 #endif
617 	do {
618 		if (*syscall_whitelist == this_syscall)
619 			return;
620 	} while (*++syscall_whitelist);
621 
622 #ifdef SECCOMP_DEBUG
623 	dump_stack();
624 #endif
625 	seccomp_log(this_syscall, SIGKILL, SECCOMP_RET_KILL_THREAD, true);
626 	do_exit(SIGKILL);
627 }
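/*
 * Illustrative userspace sketch (editorial, not part of the kernel source):
 * strict mode is entered with
 *
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT);
 *
 * after which any syscall other than read(2), write(2), exit(2) and
 * sigreturn(2) kills the calling thread with SIGKILL, as implemented above.
 */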
628 
629 #ifndef CONFIG_HAVE_ARCH_SECCOMP_FILTER
630 void secure_computing_strict(int this_syscall)
631 {
632 	int mode = current->seccomp.mode;
633 
634 	if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
635 	    unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
636 		return;
637 
638 	if (mode == SECCOMP_MODE_DISABLED)
639 		return;
640 	else if (mode == SECCOMP_MODE_STRICT)
641 		__secure_computing_strict(this_syscall);
642 	else
643 		BUG();
644 }
645 #else
646 
647 #ifdef CONFIG_SECCOMP_FILTER
648 static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
649 			    const bool recheck_after_trace)
650 {
651 	u32 filter_ret, action;
652 	struct seccomp_filter *match = NULL;
653 	int data;
654 
655 	/*
656 	 * Make sure that any changes to mode from another thread have
657 	 * been seen after TIF_SECCOMP was seen.
658 	 */
659 	rmb();
660 
661 	filter_ret = seccomp_run_filters(sd, &match);
662 	data = filter_ret & SECCOMP_RET_DATA;
663 	action = filter_ret & SECCOMP_RET_ACTION_FULL;
664 
665 	switch (action) {
666 	case SECCOMP_RET_ERRNO:
667 		/* Set low-order bits as an errno, capped at MAX_ERRNO. */
668 		if (data > MAX_ERRNO)
669 			data = MAX_ERRNO;
670 		syscall_set_return_value(current, task_pt_regs(current),
671 					 -data, 0);
672 		goto skip;
673 
674 	case SECCOMP_RET_TRAP:
675 		/* Show the handler the original registers. */
676 		syscall_rollback(current, task_pt_regs(current));
677 		/* Let the filter pass back 16 bits of data. */
678 		seccomp_send_sigsys(this_syscall, data);
679 		goto skip;
680 
681 	case SECCOMP_RET_TRACE:
682 		/* We've been put in this state by the ptracer already. */
683 		if (recheck_after_trace)
684 			return 0;
685 
686 		/* ENOSYS these calls if there is no tracer attached. */
687 		if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) {
688 			syscall_set_return_value(current,
689 						 task_pt_regs(current),
690 						 -ENOSYS, 0);
691 			goto skip;
692 		}
693 
694 		/* Allow the BPF to provide the event message */
695 		ptrace_event(PTRACE_EVENT_SECCOMP, data);
696 		/*
697 		 * The delivery of a fatal signal during event
698 		 * notification may silently skip tracer notification,
699 		 * which could leave us with a potentially unmodified
700 		 * syscall that the tracer would have liked to have
701 		 * changed. Since the process is about to die, we just
702 		 * force the syscall to be skipped and let the signal
703 		 * kill the process and correctly handle any tracer exit
704 		 * notifications.
705 		 */
706 		if (fatal_signal_pending(current))
707 			goto skip;
708 		/* Check if the tracer forced the syscall to be skipped. */
709 		this_syscall = syscall_get_nr(current, task_pt_regs(current));
710 		if (this_syscall < 0)
711 			goto skip;
712 
713 		/*
714 		 * Recheck the syscall, since it may have changed. This
715 		 * intentionally uses a NULL struct seccomp_data to force
716 		 * a reload of all registers. This does not goto skip since
717 		 * a skip would have already been reported.
718 		 */
719 		if (__seccomp_filter(this_syscall, NULL, true))
720 			return -1;
721 
722 		return 0;
723 
724 	case SECCOMP_RET_LOG:
725 		seccomp_log(this_syscall, 0, action, true);
726 		return 0;
727 
728 	case SECCOMP_RET_ALLOW:
729 		/*
730 		 * Note that the "match" filter will always be NULL for
731 		 * this action since SECCOMP_RET_ALLOW is the starting
732 		 * state in seccomp_run_filters().
733 		 */
734 		return 0;
735 
736 	case SECCOMP_RET_KILL_THREAD:
737 	case SECCOMP_RET_KILL_PROCESS:
738 	default:
739 		seccomp_log(this_syscall, SIGSYS, action, true);
740 		/* Dump core only if this is the last remaining thread. */
741 		if (action == SECCOMP_RET_KILL_PROCESS ||
742 		    get_nr_threads(current) == 1) {
743 			siginfo_t info;
744 
745 			/* Show the original registers in the dump. */
746 			syscall_rollback(current, task_pt_regs(current));
747 			/* Trigger a manual coredump since do_exit skips it. */
748 			seccomp_init_siginfo(&info, this_syscall, data);
749 			do_coredump(&info);
750 		}
751 		if (action == SECCOMP_RET_KILL_PROCESS)
752 			do_group_exit(SIGSYS);
753 		else
754 			do_exit(SIGSYS);
755 	}
756 
757 	unreachable();
758 
759 skip:
760 	seccomp_log(this_syscall, 0, action, match ? match->log : false);
761 	return -1;
762 }
763 #else
764 static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
765 			    const bool recheck_after_trace)
766 {
767 	BUG();
768 }
769 #endif
770 
771 int __secure_computing(const struct seccomp_data *sd)
772 {
773 	int mode = current->seccomp.mode;
774 	int this_syscall;
775 
776 	if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
777 	    unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
778 		return 0;
779 
780 	this_syscall = sd ? sd->nr :
781 		syscall_get_nr(current, task_pt_regs(current));
782 
783 	switch (mode) {
784 	case SECCOMP_MODE_STRICT:
785 		__secure_computing_strict(this_syscall);  /* may call do_exit */
786 		return 0;
787 	case SECCOMP_MODE_FILTER:
788 		return __seccomp_filter(this_syscall, sd, false);
789 	default:
790 		BUG();
791 	}
792 }
793 #endif /* CONFIG_HAVE_ARCH_SECCOMP_FILTER */
794 
795 long prctl_get_seccomp(void)
796 {
797 	return current->seccomp.mode;
798 }
799 
800 /**
801  * seccomp_set_mode_strict: internal function for setting strict seccomp
802  *
803  * Once current->seccomp.mode is non-zero, it may not be changed.
804  *
805  * Returns 0 on success or -EINVAL on failure.
806  */
807 static long seccomp_set_mode_strict(void)
808 {
809 	const unsigned long seccomp_mode = SECCOMP_MODE_STRICT;
810 	long ret = -EINVAL;
811 
812 	spin_lock_irq(&current->sighand->siglock);
813 
814 	if (!seccomp_may_assign_mode(seccomp_mode))
815 		goto out;
816 
817 #ifdef TIF_NOTSC
818 	disable_TSC();
819 #endif
820 	seccomp_assign_mode(current, seccomp_mode);
821 	ret = 0;
822 
823 out:
824 	spin_unlock_irq(&current->sighand->siglock);
825 
826 	return ret;
827 }
828 
829 #ifdef CONFIG_SECCOMP_FILTER
830 /**
831  * seccomp_set_mode_filter: internal function for setting seccomp filter
832  * @flags:  flags to change filter behavior
833  * @filter: struct sock_fprog containing filter
834  *
835  * This function may be called repeatedly to install additional filters.
836  * Every filter successfully installed will be evaluated (in reverse order)
837  * for each system call the task makes.
838  *
839  * Once current->seccomp.mode is non-zero, it may not be changed.
840  *
841  * Returns 0 on success or a negative errno on failure.
842  */
843 static long seccomp_set_mode_filter(unsigned int flags,
844 				    const char __user *filter)
845 {
846 	const unsigned long seccomp_mode = SECCOMP_MODE_FILTER;
847 	struct seccomp_filter *prepared = NULL;
848 	long ret = -EINVAL;
849 
850 	/* Validate flags. */
851 	if (flags & ~SECCOMP_FILTER_FLAG_MASK)
852 		return -EINVAL;
853 
854 	/* Prepare the new filter before holding any locks. */
855 	prepared = seccomp_prepare_user_filter(filter);
856 	if (IS_ERR(prepared))
857 		return PTR_ERR(prepared);
858 
859 	/*
860 	 * Make sure we cannot change seccomp or nnp state via TSYNC
861 	 * while another thread is in the middle of calling exec.
862 	 */
863 	if (flags & SECCOMP_FILTER_FLAG_TSYNC &&
864 	    mutex_lock_killable(&current->signal->cred_guard_mutex))
865 		goto out_free;
866 
867 	spin_lock_irq(&current->sighand->siglock);
868 
869 	if (!seccomp_may_assign_mode(seccomp_mode))
870 		goto out;
871 
872 	ret = seccomp_attach_filter(flags, prepared);
873 	if (ret)
874 		goto out;
875 	/* Do not free the successfully attached filter. */
876 	prepared = NULL;
877 
878 	seccomp_assign_mode(current, seccomp_mode);
879 out:
880 	spin_unlock_irq(&current->sighand->siglock);
881 	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
882 		mutex_unlock(&current->signal->cred_guard_mutex);
883 out_free:
884 	seccomp_filter_free(prepared);
885 	return ret;
886 }
887 #else
888 static inline long seccomp_set_mode_filter(unsigned int flags,
889 					   const char __user *filter)
890 {
891 	return -EINVAL;
892 }
893 #endif
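/*
 * Illustrative userspace sketch (editorial, not part of the kernel source):
 * seccomp_set_mode_filter() is reached via the seccomp(2) syscall (or the
 * legacy prctl path further below), e.g.
 *
 *	struct sock_fprog fprog = {
 *		.len = sizeof(prog) / sizeof(prog[0]),
 *		.filter = prog,
 *	};
 *
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
 *		SECCOMP_FILTER_FLAG_TSYNC, &fprog);
 */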
894 
895 static long seccomp_get_action_avail(const char __user *uaction)
896 {
897 	u32 action;
898 
899 	if (copy_from_user(&action, uaction, sizeof(action)))
900 		return -EFAULT;
901 
902 	switch (action) {
903 	case SECCOMP_RET_KILL_PROCESS:
904 	case SECCOMP_RET_KILL_THREAD:
905 	case SECCOMP_RET_TRAP:
906 	case SECCOMP_RET_ERRNO:
907 	case SECCOMP_RET_TRACE:
908 	case SECCOMP_RET_LOG:
909 	case SECCOMP_RET_ALLOW:
910 		break;
911 	default:
912 		return -EOPNOTSUPP;
913 	}
914 
915 	return 0;
916 }
917 
918 /* Common entry point for both prctl and syscall. */
919 static long do_seccomp(unsigned int op, unsigned int flags,
920 		       const char __user *uargs)
921 {
922 	switch (op) {
923 	case SECCOMP_SET_MODE_STRICT:
924 		if (flags != 0 || uargs != NULL)
925 			return -EINVAL;
926 		return seccomp_set_mode_strict();
927 	case SECCOMP_SET_MODE_FILTER:
928 		return seccomp_set_mode_filter(flags, uargs);
929 	case SECCOMP_GET_ACTION_AVAIL:
930 		if (flags != 0)
931 			return -EINVAL;
932 
933 		return seccomp_get_action_avail(uargs);
934 	default:
935 		return -EINVAL;
936 	}
937 }
938 
939 SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,
940 			 const char __user *, uargs)
941 {
942 	return do_seccomp(op, flags, uargs);
943 }
944 
945 /**
946  * prctl_set_seccomp: configures current->seccomp.mode
947  * @seccomp_mode: requested mode to use
948  * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
949  *
950  * Returns 0 on success or -EINVAL on failure.
951  */
952 long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
953 {
954 	unsigned int op;
955 	char __user *uargs;
956 
957 	switch (seccomp_mode) {
958 	case SECCOMP_MODE_STRICT:
959 		op = SECCOMP_SET_MODE_STRICT;
960 		/*
961 		 * Setting strict mode through prctl has always ignored the filter,
962 		 * so make sure it is always NULL here to pass the internal
963 		 * check in do_seccomp().
964 		 */
965 		uargs = NULL;
966 		break;
967 	case SECCOMP_MODE_FILTER:
968 		op = SECCOMP_SET_MODE_FILTER;
969 		uargs = filter;
970 		break;
971 	default:
972 		return -EINVAL;
973 	}
974 
975 	/* prctl interface doesn't have flags, so they are always zero. */
976 	return do_seccomp(op, 0, uargs);
977 }
978 
979 #if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
980 long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
981 			void __user *data)
982 {
983 	struct seccomp_filter *filter;
984 	struct sock_fprog_kern *fprog;
985 	long ret;
986 	unsigned long count = 0;
987 
988 	if (!capable(CAP_SYS_ADMIN) ||
989 	    current->seccomp.mode != SECCOMP_MODE_DISABLED) {
990 		return -EACCES;
991 	}
992 
993 	spin_lock_irq(&task->sighand->siglock);
994 	if (task->seccomp.mode != SECCOMP_MODE_FILTER) {
995 		ret = -EINVAL;
996 		goto out;
997 	}
998 
999 	filter = task->seccomp.filter;
1000 	while (filter) {
1001 		filter = filter->prev;
1002 		count++;
1003 	}
1004 
1005 	if (filter_off >= count) {
1006 		ret = -ENOENT;
1007 		goto out;
1008 	}
1009 	count -= filter_off;
1010 
1011 	filter = task->seccomp.filter;
1012 	while (filter && count > 1) {
1013 		filter = filter->prev;
1014 		count--;
1015 	}
1016 
1017 	if (WARN_ON(count != 1 || !filter)) {
1018 		/* The filter tree shouldn't shrink while we're using it. */
1019 		ret = -ENOENT;
1020 		goto out;
1021 	}
1022 
1023 	fprog = filter->prog->orig_prog;
1024 	if (!fprog) {
1025 		/* This must be a new non-cBPF filter, since we save
1026 		 * every cBPF filter's orig_prog above when
1027 		 * CONFIG_CHECKPOINT_RESTORE is enabled.
1028 		 */
1029 		ret = -EMEDIUMTYPE;
1030 		goto out;
1031 	}
1032 
1033 	ret = fprog->len;
1034 	if (!data)
1035 		goto out;
1036 
1037 	__get_seccomp_filter(filter);
1038 	spin_unlock_irq(&task->sighand->siglock);
1039 
1040 	if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
1041 		ret = -EFAULT;
1042 
1043 	__put_seccomp_filter(filter);
1044 	return ret;
1045 
1046 out:
1047 	spin_unlock_irq(&task->sighand->siglock);
1048 	return ret;
1049 }
1050 #endif
1051 
1052 #ifdef CONFIG_SYSCTL
1053 
1054 /* Human readable action names for friendly sysctl interaction */
1055 #define SECCOMP_RET_KILL_PROCESS_NAME	"kill_process"
1056 #define SECCOMP_RET_KILL_THREAD_NAME	"kill_thread"
1057 #define SECCOMP_RET_TRAP_NAME		"trap"
1058 #define SECCOMP_RET_ERRNO_NAME		"errno"
1059 #define SECCOMP_RET_TRACE_NAME		"trace"
1060 #define SECCOMP_RET_LOG_NAME		"log"
1061 #define SECCOMP_RET_ALLOW_NAME		"allow"
1062 
1063 static const char seccomp_actions_avail[] =
1064 				SECCOMP_RET_KILL_PROCESS_NAME	" "
1065 				SECCOMP_RET_KILL_THREAD_NAME	" "
1066 				SECCOMP_RET_TRAP_NAME		" "
1067 				SECCOMP_RET_ERRNO_NAME		" "
1068 				SECCOMP_RET_TRACE_NAME		" "
1069 				SECCOMP_RET_LOG_NAME		" "
1070 				SECCOMP_RET_ALLOW_NAME;
1071 
1072 struct seccomp_log_name {
1073 	u32		log;
1074 	const char	*name;
1075 };
1076 
1077 static const struct seccomp_log_name seccomp_log_names[] = {
1078 	{ SECCOMP_LOG_KILL_PROCESS, SECCOMP_RET_KILL_PROCESS_NAME },
1079 	{ SECCOMP_LOG_KILL_THREAD, SECCOMP_RET_KILL_THREAD_NAME },
1080 	{ SECCOMP_LOG_TRAP, SECCOMP_RET_TRAP_NAME },
1081 	{ SECCOMP_LOG_ERRNO, SECCOMP_RET_ERRNO_NAME },
1082 	{ SECCOMP_LOG_TRACE, SECCOMP_RET_TRACE_NAME },
1083 	{ SECCOMP_LOG_LOG, SECCOMP_RET_LOG_NAME },
1084 	{ SECCOMP_LOG_ALLOW, SECCOMP_RET_ALLOW_NAME },
1085 	{ }
1086 };
1087 
1088 static bool seccomp_names_from_actions_logged(char *names, size_t size,
1089 					      u32 actions_logged)
1090 {
1091 	const struct seccomp_log_name *cur;
1092 	bool append_space = false;
1093 
1094 	for (cur = seccomp_log_names; cur->name && size; cur++) {
1095 		ssize_t ret;
1096 
1097 		if (!(actions_logged & cur->log))
1098 			continue;
1099 
1100 		if (append_space) {
1101 			ret = strscpy(names, " ", size);
1102 			if (ret < 0)
1103 				return false;
1104 
1105 			names += ret;
1106 			size -= ret;
1107 		} else
1108 			append_space = true;
1109 
1110 		ret = strscpy(names, cur->name, size);
1111 		if (ret < 0)
1112 			return false;
1113 
1114 		names += ret;
1115 		size -= ret;
1116 	}
1117 
1118 	return true;
1119 }
1120 
1121 static bool seccomp_action_logged_from_name(u32 *action_logged,
1122 					    const char *name)
1123 {
1124 	const struct seccomp_log_name *cur;
1125 
1126 	for (cur = seccomp_log_names; cur->name; cur++) {
1127 		if (!strcmp(cur->name, name)) {
1128 			*action_logged = cur->log;
1129 			return true;
1130 		}
1131 	}
1132 
1133 	return false;
1134 }
1135 
1136 static bool seccomp_actions_logged_from_names(u32 *actions_logged, char *names)
1137 {
1138 	char *name;
1139 
1140 	*actions_logged = 0;
1141 	while ((name = strsep(&names, " ")) && *name) {
1142 		u32 action_logged = 0;
1143 
1144 		if (!seccomp_action_logged_from_name(&action_logged, name))
1145 			return false;
1146 
1147 		*actions_logged |= action_logged;
1148 	}
1149 
1150 	return true;
1151 }
1152 
1153 static int seccomp_actions_logged_handler(struct ctl_table *ro_table, int write,
1154 					  void __user *buffer, size_t *lenp,
1155 					  loff_t *ppos)
1156 {
1157 	char names[sizeof(seccomp_actions_avail)];
1158 	struct ctl_table table;
1159 	int ret;
1160 
1161 	if (write && !capable(CAP_SYS_ADMIN))
1162 		return -EPERM;
1163 
1164 	memset(names, 0, sizeof(names));
1165 
1166 	if (!write) {
1167 		if (!seccomp_names_from_actions_logged(names, sizeof(names),
1168 						       seccomp_actions_logged))
1169 			return -EINVAL;
1170 	}
1171 
1172 	table = *ro_table;
1173 	table.data = names;
1174 	table.maxlen = sizeof(names);
1175 	ret = proc_dostring(&table, write, buffer, lenp, ppos);
1176 	if (ret)
1177 		return ret;
1178 
1179 	if (write) {
1180 		u32 actions_logged;
1181 
1182 		if (!seccomp_actions_logged_from_names(&actions_logged,
1183 						       table.data))
1184 			return -EINVAL;
1185 
1186 		if (actions_logged & SECCOMP_LOG_ALLOW)
1187 			return -EINVAL;
1188 
1189 		seccomp_actions_logged = actions_logged;
1190 	}
1191 
1192 	return 0;
1193 }
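/*
 * Illustrative usage (editorial, not part of the kernel source): this handler
 * backs /proc/sys/kernel/seccomp/actions_logged, registered below, e.g.
 *
 *	# cat /proc/sys/kernel/seccomp/actions_avail
 *	kill_process kill_thread trap errno trace log allow
 *	# echo "kill_process kill_thread errno" > \
 *		/proc/sys/kernel/seccomp/actions_logged
 *
 * Writing "allow" is rejected with -EINVAL, per the check above.
 */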
1194 
1195 static struct ctl_path seccomp_sysctl_path[] = {
1196 	{ .procname = "kernel", },
1197 	{ .procname = "seccomp", },
1198 	{ }
1199 };
1200 
1201 static struct ctl_table seccomp_sysctl_table[] = {
1202 	{
1203 		.procname	= "actions_avail",
1204 		.data		= (void *) &seccomp_actions_avail,
1205 		.maxlen		= sizeof(seccomp_actions_avail),
1206 		.mode		= 0444,
1207 		.proc_handler	= proc_dostring,
1208 	},
1209 	{
1210 		.procname	= "actions_logged",
1211 		.mode		= 0644,
1212 		.proc_handler	= seccomp_actions_logged_handler,
1213 	},
1214 	{ }
1215 };
1216 
1217 static int __init seccomp_sysctl_init(void)
1218 {
1219 	struct ctl_table_header *hdr;
1220 
1221 	hdr = register_sysctl_paths(seccomp_sysctl_path, seccomp_sysctl_table);
1222 	if (!hdr)
1223 		pr_warn("seccomp: sysctl registration failed\n");
1224 	else
1225 		kmemleak_not_leak(hdr);
1226 
1227 	return 0;
1228 }
1229 
1230 device_initcall(seccomp_sysctl_init)
1231 
1232 #endif /* CONFIG_SYSCTL */
1233