// SPDX-License-Identifier: GPL-2.0
/*
 * linux/kernel/seccomp.c
 *
 * Copyright 2004-2005 Andrea Arcangeli <andrea@cpushare.com>
 *
 * Copyright (C) 2012 Google, Inc.
 * Will Drewry <wad@chromium.org>
 *
 * This defines a simple but solid secure-computing facility.
 *
 * Mode 1 uses a fixed list of allowed system calls.
 * Mode 2 allows user-defined system call filters in the form
 * of Berkeley Packet Filters/Linux Socket Filters.
 */

#include <linux/refcount.h>
#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/coredump.h>
#include <linux/kmemleak.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seccomp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/sysctl.h>

#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
#include <asm/syscall.h>
#endif

#ifdef CONFIG_SECCOMP_FILTER
#include <linux/file.h>
#include <linux/filter.h>
#include <linux/pid.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>

enum notify_state {
        SECCOMP_NOTIFY_INIT,
        SECCOMP_NOTIFY_SENT,
        SECCOMP_NOTIFY_REPLIED,
};

struct seccomp_knotif {
        /* The task whose filter triggered the notification */
        struct task_struct *task;

        /* The "cookie" for this request; this is unique for this filter. */
        u64 id;

        /*
         * The seccomp data. This pointer is valid the entire time this
         * notification is active, since it comes from __seccomp_filter which
         * eclipses the entire lifecycle here.
         */
        const struct seccomp_data *data;

        /*
         * Notification states. When SECCOMP_RET_USER_NOTIF is returned, a
         * struct seccomp_knotif is created and starts out in INIT. Once the
         * handler reads the notification off of an FD, it transitions to SENT.
         * If a signal is received the state transitions back to INIT and
         * another message is sent. When the userspace handler replies, state
         * transitions to REPLIED.
         */
        enum notify_state state;

        /* The return values, only valid when in SECCOMP_NOTIFY_REPLIED */
        int error;
        long val;
        u32 flags;

        /* Signals when this has entered SECCOMP_NOTIFY_REPLIED */
        struct completion ready;

        struct list_head list;
};

/**
 * struct notification - container for seccomp userspace notifications. Since
 * most seccomp filters will not have notification listeners attached and this
 * structure is fairly large, we store the notification-specific stuff in a
 * separate structure.
 *
 * @request: A semaphore that users of this notification can wait on for
 *           changes. Actual reads and writes are still controlled with
 *           filter->notify_lock.
 * @next_id: The id of the next request.
 * @notifications: A list of struct seccomp_knotif elements.
 * @wqh: A wait queue for poll.
 */
struct notification {
        struct semaphore request;
        u64 next_id;
        struct list_head notifications;
        wait_queue_head_t wqh;
};
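
/*
 * Editorial aside, not from the original file: an illustrative userspace
 * sketch of the round trip these structures support. A supervisor holding
 * the listener fd receives a request with SECCOMP_IOCTL_NOTIF_RECV and
 * answers it with SECCOMP_IOCTL_NOTIF_SEND (names from <linux/seccomp.h>):
 *
 *        struct seccomp_notif req;
 *        struct seccomp_notif_resp resp = {};
 *
 *        memset(&req, 0, sizeof(req));        // RECV requires a zeroed buffer
 *        ioctl(listener_fd, SECCOMP_IOCTL_NOTIF_RECV, &req);
 *        resp.id = req.id;                    // echo the cookie back
 *        resp.error = -EPERM;                 // deny the syscall with EPERM
 *        ioctl(listener_fd, SECCOMP_IOCTL_NOTIF_SEND, &resp);
 */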

/**
 * struct seccomp_filter - container for seccomp BPF programs
 *
 * @usage: reference count to manage the object lifetime.
 *         get/put helpers should be used when accessing an instance
 *         outside of a lifetime-guarded section. In general, this
 *         is only needed for handling filters shared across tasks.
 * @log: true if all actions except for SECCOMP_RET_ALLOW should be logged
 * @prev: points to a previously installed, or inherited, filter
 * @prog: the BPF program to evaluate
 * @notif: the struct that holds all notification related information
 * @notify_lock: A lock for all notification-related accesses.
 *
 * seccomp_filter objects are organized in a tree linked via the @prev
 * pointer. For any task, it appears to be a singly-linked list starting
 * with current->seccomp.filter, the most recently attached or inherited filter.
 * However, multiple filters may share a @prev node, by way of fork(), which
 * results in a unidirectional tree existing in memory. This is similar to
 * how namespaces work.
 *
 * seccomp_filter objects should never be modified after being attached
 * to a task_struct (other than @usage).
 */
struct seccomp_filter {
        refcount_t usage;
        bool log;
        struct seccomp_filter *prev;
        struct bpf_prog *prog;
        struct notification *notif;
        struct mutex notify_lock;
};

/* Limit any path through the tree to 256KB worth of instructions. */
#define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))

/*
 * Endianness is explicitly ignored and left for BPF program authors to manage
 * as per the specific architecture.
 */
static void populate_seccomp_data(struct seccomp_data *sd)
{
        struct task_struct *task = current;
        struct pt_regs *regs = task_pt_regs(task);
        unsigned long args[6];

        sd->nr = syscall_get_nr(task, regs);
        sd->arch = syscall_get_arch(task);
        syscall_get_arguments(task, regs, args);
        sd->args[0] = args[0];
        sd->args[1] = args[1];
        sd->args[2] = args[2];
        sd->args[3] = args[3];
        sd->args[4] = args[4];
        sd->args[5] = args[5];
        sd->instruction_pointer = KSTK_EIP(task);
}
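
/*
 * Editorial aside, not from the original file: a classic BPF filter sees the
 * structure filled in above at fixed offsets. An illustrative fragment
 * (macros from <linux/filter.h> and <linux/seccomp.h>):
 *
 *        struct sock_filter insns[] = {
 *                BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *                         offsetof(struct seccomp_data, nr)),
 *                BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
 *                BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | EPERM),
 *                BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *        };
 *
 * Robust filters also load and check sd->arch before trusting sd->nr.
 */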

/**
 * seccomp_check_filter - verify seccomp filter code
 * @filter: filter to verify
 * @flen: length of filter
 *
 * Takes a previously checked filter (by bpf_check_classic) and
 * redirects all filter code that loads struct sk_buff data
 * and related data through seccomp_bpf_load. It also
 * enforces length and alignment checking of those loads.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
{
        int pc;
        for (pc = 0; pc < flen; pc++) {
                struct sock_filter *ftest = &filter[pc];
                u16 code = ftest->code;
                u32 k = ftest->k;

                switch (code) {
                case BPF_LD | BPF_W | BPF_ABS:
                        ftest->code = BPF_LDX | BPF_W | BPF_ABS;
                        /* 32-bit aligned and not out of bounds. */
                        if (k >= sizeof(struct seccomp_data) || k & 3)
                                return -EINVAL;
                        continue;
                case BPF_LD | BPF_W | BPF_LEN:
                        ftest->code = BPF_LD | BPF_IMM;
                        ftest->k = sizeof(struct seccomp_data);
                        continue;
                case BPF_LDX | BPF_W | BPF_LEN:
                        ftest->code = BPF_LDX | BPF_IMM;
                        ftest->k = sizeof(struct seccomp_data);
                        continue;
                /* Explicitly include allowed calls. */
                case BPF_RET | BPF_K:
                case BPF_RET | BPF_A:
                case BPF_ALU | BPF_ADD | BPF_K:
                case BPF_ALU | BPF_ADD | BPF_X:
                case BPF_ALU | BPF_SUB | BPF_K:
                case BPF_ALU | BPF_SUB | BPF_X:
                case BPF_ALU | BPF_MUL | BPF_K:
                case BPF_ALU | BPF_MUL | BPF_X:
                case BPF_ALU | BPF_DIV | BPF_K:
                case BPF_ALU | BPF_DIV | BPF_X:
                case BPF_ALU | BPF_AND | BPF_K:
                case BPF_ALU | BPF_AND | BPF_X:
                case BPF_ALU | BPF_OR | BPF_K:
                case BPF_ALU | BPF_OR | BPF_X:
                case BPF_ALU | BPF_XOR | BPF_K:
                case BPF_ALU | BPF_XOR | BPF_X:
                case BPF_ALU | BPF_LSH | BPF_K:
                case BPF_ALU | BPF_LSH | BPF_X:
                case BPF_ALU | BPF_RSH | BPF_K:
                case BPF_ALU | BPF_RSH | BPF_X:
                case BPF_ALU | BPF_NEG:
                case BPF_LD | BPF_IMM:
                case BPF_LDX | BPF_IMM:
                case BPF_MISC | BPF_TAX:
                case BPF_MISC | BPF_TXA:
                case BPF_LD | BPF_MEM:
                case BPF_LDX | BPF_MEM:
                case BPF_ST:
                case BPF_STX:
                case BPF_JMP | BPF_JA:
                case BPF_JMP | BPF_JEQ | BPF_K:
                case BPF_JMP | BPF_JEQ | BPF_X:
                case BPF_JMP | BPF_JGE | BPF_K:
                case BPF_JMP | BPF_JGE | BPF_X:
                case BPF_JMP | BPF_JGT | BPF_K:
                case BPF_JMP | BPF_JGT | BPF_X:
                case BPF_JMP | BPF_JSET | BPF_K:
                case BPF_JMP | BPF_JSET | BPF_X:
                        continue;
                default:
                        return -EINVAL;
                }
        }
        return 0;
}

/**
 * seccomp_run_filters - evaluates all seccomp filters against @sd
 * @sd: optional seccomp data to be passed to filters
 * @match: stores struct seccomp_filter that resulted in the return value,
 *         unless filter returned SECCOMP_RET_ALLOW, in which case it will
 *         be unchanged.
 *
 * Returns valid seccomp BPF response codes.
 */
#define ACTION_ONLY(ret) ((s32)((ret) & (SECCOMP_RET_ACTION_FULL)))
static u32 seccomp_run_filters(const struct seccomp_data *sd,
                               struct seccomp_filter **match)
{
        u32 ret = SECCOMP_RET_ALLOW;
        /* Make sure cross-thread synced filter points somewhere sane. */
        struct seccomp_filter *f =
                        READ_ONCE(current->seccomp.filter);

        /* Ensure unexpected behavior doesn't result in failing open. */
        if (WARN_ON(f == NULL))
                return SECCOMP_RET_KILL_PROCESS;

        /*
         * All filters in the list are evaluated and the lowest BPF return
         * value always takes priority (ignoring the DATA).
         */
        for (; f; f = f->prev) {
                u32 cur_ret = bpf_prog_run_pin_on_cpu(f->prog, sd);

                if (ACTION_ONLY(cur_ret) < ACTION_ONLY(ret)) {
                        ret = cur_ret;
                        *match = f;
                }
        }
        return ret;
}
#endif /* CONFIG_SECCOMP_FILTER */
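
/*
 * Editorial aside, not from the original file: a worked example of the
 * precedence rule in seccomp_run_filters(). With two attached filters, one
 * returning SECCOMP_RET_LOG (action 0x7ffc0000) and an older one returning
 * SECCOMP_RET_ERRNO | EPERM (action 0x00050000), the signed ACTION_ONLY()
 * comparison picks the numerically lower ERRNO action, so the syscall fails
 * with EPERM and *match points at the filter that returned it.
 */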

static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
{
        assert_spin_locked(&current->sighand->siglock);

        if (current->seccomp.mode && current->seccomp.mode != seccomp_mode)
                return false;

        return true;
}

void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { }

static inline void seccomp_assign_mode(struct task_struct *task,
                                       unsigned long seccomp_mode,
                                       unsigned long flags)
{
        assert_spin_locked(&task->sighand->siglock);

        task->seccomp.mode = seccomp_mode;
        /*
         * Make sure TIF_SECCOMP cannot be set before the mode (and
         * filter) is set.
         */
        smp_mb__before_atomic();
        /* Assume default seccomp processes want spec flaw mitigation. */
        if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
                arch_seccomp_spec_mitigate(task);
        set_tsk_thread_flag(task, TIF_SECCOMP);
}

#ifdef CONFIG_SECCOMP_FILTER
/* Returns 1 if the parent is an ancestor of the child. */
static int is_ancestor(struct seccomp_filter *parent,
                       struct seccomp_filter *child)
{
        /* NULL is the root ancestor. */
        if (parent == NULL)
                return 1;
        for (; child; child = child->prev)
                if (child == parent)
                        return 1;
        return 0;
}

/**
 * seccomp_can_sync_threads: checks if all threads can be synchronized
 *
 * Expects sighand and cred_guard_mutex locks to be held.
 *
 * Returns 0 on success, -ve on error, or the pid of a thread which was
 * either not in the correct seccomp mode or did not have an ancestral
 * seccomp filter.
 */
static inline pid_t seccomp_can_sync_threads(void)
{
        struct task_struct *thread, *caller;

        BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
        assert_spin_locked(&current->sighand->siglock);

        /* Validate all threads being eligible for synchronization. */
        caller = current;
        for_each_thread(caller, thread) {
                pid_t failed;

                /* Skip current, since it is initiating the sync. */
                if (thread == caller)
                        continue;

                if (thread->seccomp.mode == SECCOMP_MODE_DISABLED ||
                    (thread->seccomp.mode == SECCOMP_MODE_FILTER &&
                     is_ancestor(thread->seccomp.filter,
                                 caller->seccomp.filter)))
                        continue;

                /* Return the first thread that cannot be synchronized. */
                failed = task_pid_vnr(thread);
                /* If the pid cannot be resolved, then return -ESRCH */
                if (WARN_ON(failed == 0))
                        failed = -ESRCH;
                return failed;
        }

        return 0;
}

/**
 * seccomp_sync_threads: sets all threads to use current's filter
 *
 * Expects sighand and cred_guard_mutex locks to be held, and for
 * seccomp_can_sync_threads() to have returned success already
 * without dropping the locks.
 */
static inline void seccomp_sync_threads(unsigned long flags)
{
        struct task_struct *thread, *caller;

        BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
        assert_spin_locked(&current->sighand->siglock);

        /* Synchronize all threads. */
        caller = current;
        for_each_thread(caller, thread) {
                /* Skip current, since it needs no changes. */
                if (thread == caller)
                        continue;

                /* Get a task reference for the new leaf node. */
                get_seccomp_filter(caller);
                /*
                 * Drop the task reference to the shared ancestor since
                 * current's path will hold a reference. (This also
                 * allows a put before the assignment.)
                 */
                put_seccomp_filter(thread);
                smp_store_release(&thread->seccomp.filter,
                                  caller->seccomp.filter);

                /*
                 * Don't let an unprivileged task work around
                 * the no_new_privs restriction by creating
                 * a thread that sets it up, enters seccomp,
                 * then dies.
                 */
                if (task_no_new_privs(caller))
                        task_set_no_new_privs(thread);

                /*
                 * Opt the other thread into seccomp if needed.
                 * As threads are considered to be trust-realm
                 * equivalent (see ptrace_may_access), it is safe to
                 * allow one thread to transition the other.
                 */
                if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
                        seccomp_assign_mode(thread, SECCOMP_MODE_FILTER,
                                            flags);
        }
}
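
/*
 * Editorial aside, not from the original file: an illustrative userspace
 * sketch of triggering the synchronization above (flag names from
 * <linux/seccomp.h>):
 *
 *        prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *        ret = syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
 *                      SECCOMP_FILTER_FLAG_TSYNC, &prog);
 *        // ret > 0 names a thread that could not be synchronized
 */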

/**
 * seccomp_prepare_filter: Prepares a seccomp filter for use.
 * @fprog: BPF program to install
 *
 * Returns filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
{
        struct seccomp_filter *sfilter;
        int ret;
        const bool save_orig = IS_ENABLED(CONFIG_CHECKPOINT_RESTORE);

        if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
                return ERR_PTR(-EINVAL);

        BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));

        /*
         * Installing a seccomp filter requires that the task has
         * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
         * This avoids scenarios where unprivileged tasks can affect the
         * behavior of privileged children.
         */
        if (!task_no_new_privs(current) &&
            security_capable(current_cred(), current_user_ns(),
                             CAP_SYS_ADMIN, CAP_OPT_NOAUDIT) != 0)
                return ERR_PTR(-EACCES);

        /* Allocate a new seccomp_filter */
        sfilter = kzalloc(sizeof(*sfilter), GFP_KERNEL | __GFP_NOWARN);
        if (!sfilter)
                return ERR_PTR(-ENOMEM);

        mutex_init(&sfilter->notify_lock);
        ret = bpf_prog_create_from_user(&sfilter->prog, fprog,
                                        seccomp_check_filter, save_orig);
        if (ret < 0) {
                kfree(sfilter);
                return ERR_PTR(ret);
        }

        refcount_set(&sfilter->usage, 1);

        return sfilter;
}

/**
 * seccomp_prepare_user_filter - prepares a user-supplied sock_fprog
 * @user_filter: pointer to the user data containing a sock_fprog.
 *
 * Returns the prepared filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *
seccomp_prepare_user_filter(const char __user *user_filter)
{
        struct sock_fprog fprog;
        struct seccomp_filter *filter = ERR_PTR(-EFAULT);

#ifdef CONFIG_COMPAT
        if (in_compat_syscall()) {
                struct compat_sock_fprog fprog32;
                if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
                        goto out;
                fprog.len = fprog32.len;
                fprog.filter = compat_ptr(fprog32.filter);
        } else /* falls through to the if below. */
#endif
        if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
                goto out;
        filter = seccomp_prepare_filter(&fprog);
out:
        return filter;
}
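
/*
 * Editorial aside, not from the original file: the user pointer consumed
 * above is typically built like this (an illustrative sketch, continuing
 * the insns[] example sketched earlier):
 *
 *        struct sock_fprog prog = {
 *                .len = sizeof(insns) / sizeof(insns[0]),
 *                .filter = insns,        // array of struct sock_filter
 *        };
 *        syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER, 0, &prog);
 */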

/**
 * seccomp_attach_filter: validate and attach filter
 * @flags:  flags to change filter behavior
 * @filter: seccomp filter to add to the current process
 *
 * Caller must be holding current->sighand->siglock lock.
 *
 * Returns 0 on success, -ve on error, or
 *   - in TSYNC mode: the pid of a thread which was either not in the correct
 *     seccomp mode or did not have an ancestral seccomp filter
 *   - in NEW_LISTENER mode: the fd of the new listener
 */
static long seccomp_attach_filter(unsigned int flags,
                                  struct seccomp_filter *filter)
{
        unsigned long total_insns;
        struct seccomp_filter *walker;

        assert_spin_locked(&current->sighand->siglock);

        /* Validate resulting filter length. */
        total_insns = filter->prog->len;
        for (walker = current->seccomp.filter; walker; walker = walker->prev)
                total_insns += walker->prog->len + 4;  /* 4 instr penalty */
        if (total_insns > MAX_INSNS_PER_PATH)
                return -ENOMEM;

        /* If thread sync has been requested, check that it is possible. */
        if (flags & SECCOMP_FILTER_FLAG_TSYNC) {
                int ret;

                ret = seccomp_can_sync_threads();
                if (ret) {
                        if (flags & SECCOMP_FILTER_FLAG_TSYNC_ESRCH)
                                return -ESRCH;
                        else
                                return ret;
                }
        }

        /* Set log flag, if present. */
        if (flags & SECCOMP_FILTER_FLAG_LOG)
                filter->log = true;

        /*
         * If there is an existing filter, make it the prev and don't drop its
         * task reference.
         */
        filter->prev = current->seccomp.filter;
        current->seccomp.filter = filter;

        /* Now that the new filter is in place, synchronize to all threads. */
        if (flags & SECCOMP_FILTER_FLAG_TSYNC)
                seccomp_sync_threads(flags);

        return 0;
}

static void __get_seccomp_filter(struct seccomp_filter *filter)
{
        refcount_inc(&filter->usage);
}

/* get_seccomp_filter - increments the reference count of the filter on @tsk */
void get_seccomp_filter(struct task_struct *tsk)
{
        struct seccomp_filter *orig = tsk->seccomp.filter;
        if (!orig)
                return;
        __get_seccomp_filter(orig);
}

static inline void seccomp_filter_free(struct seccomp_filter *filter)
{
        if (filter) {
                bpf_prog_destroy(filter->prog);
                kfree(filter);
        }
}

static void __put_seccomp_filter(struct seccomp_filter *orig)
{
        /* Clean up single-reference branches iteratively. */
        while (orig && refcount_dec_and_test(&orig->usage)) {
                struct seccomp_filter *freeme = orig;
                orig = orig->prev;
                seccomp_filter_free(freeme);
        }
}

/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
void put_seccomp_filter(struct task_struct *tsk)
{
        __put_seccomp_filter(tsk->seccomp.filter);
}

static void seccomp_init_siginfo(kernel_siginfo_t *info, int syscall, int reason)
{
        clear_siginfo(info);
        info->si_signo = SIGSYS;
        info->si_code = SYS_SECCOMP;
        info->si_call_addr = (void __user *)KSTK_EIP(current);
        info->si_errno = reason;
        info->si_arch = syscall_get_arch(current);
        info->si_syscall = syscall;
}
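
/*
 * Editorial aside, not from the original file: an illustrative userspace
 * SIGSYS handler consuming the fields filled in above, installed with
 * sigaction() and SA_SIGINFO:
 *
 *        static void sigsys_handler(int sig, siginfo_t *info, void *ctx)
 *        {
 *                if (info->si_code != SYS_SECCOMP)
 *                        return;
 *                // info->si_syscall: number of the trapped syscall
 *                // info->si_errno: the filter's 16 bits of SECCOMP_RET_DATA
 *        }
 */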
610 */ 611 static void seccomp_send_sigsys(int syscall, int reason) 612 { 613 struct kernel_siginfo info; 614 seccomp_init_siginfo(&info, syscall, reason); 615 force_sig_info(&info); 616 } 617 #endif /* CONFIG_SECCOMP_FILTER */ 618 619 /* For use with seccomp_actions_logged */ 620 #define SECCOMP_LOG_KILL_PROCESS (1 << 0) 621 #define SECCOMP_LOG_KILL_THREAD (1 << 1) 622 #define SECCOMP_LOG_TRAP (1 << 2) 623 #define SECCOMP_LOG_ERRNO (1 << 3) 624 #define SECCOMP_LOG_TRACE (1 << 4) 625 #define SECCOMP_LOG_LOG (1 << 5) 626 #define SECCOMP_LOG_ALLOW (1 << 6) 627 #define SECCOMP_LOG_USER_NOTIF (1 << 7) 628 629 static u32 seccomp_actions_logged = SECCOMP_LOG_KILL_PROCESS | 630 SECCOMP_LOG_KILL_THREAD | 631 SECCOMP_LOG_TRAP | 632 SECCOMP_LOG_ERRNO | 633 SECCOMP_LOG_USER_NOTIF | 634 SECCOMP_LOG_TRACE | 635 SECCOMP_LOG_LOG; 636 637 static inline void seccomp_log(unsigned long syscall, long signr, u32 action, 638 bool requested) 639 { 640 bool log = false; 641 642 switch (action) { 643 case SECCOMP_RET_ALLOW: 644 break; 645 case SECCOMP_RET_TRAP: 646 log = requested && seccomp_actions_logged & SECCOMP_LOG_TRAP; 647 break; 648 case SECCOMP_RET_ERRNO: 649 log = requested && seccomp_actions_logged & SECCOMP_LOG_ERRNO; 650 break; 651 case SECCOMP_RET_TRACE: 652 log = requested && seccomp_actions_logged & SECCOMP_LOG_TRACE; 653 break; 654 case SECCOMP_RET_USER_NOTIF: 655 log = requested && seccomp_actions_logged & SECCOMP_LOG_USER_NOTIF; 656 break; 657 case SECCOMP_RET_LOG: 658 log = seccomp_actions_logged & SECCOMP_LOG_LOG; 659 break; 660 case SECCOMP_RET_KILL_THREAD: 661 log = seccomp_actions_logged & SECCOMP_LOG_KILL_THREAD; 662 break; 663 case SECCOMP_RET_KILL_PROCESS: 664 default: 665 log = seccomp_actions_logged & SECCOMP_LOG_KILL_PROCESS; 666 } 667 668 /* 669 * Emit an audit message when the action is RET_KILL_*, RET_LOG, or the 670 * FILTER_FLAG_LOG bit was set. The admin has the ability to silence 671 * any action from being logged by removing the action name from the 672 * seccomp_actions_logged sysctl. 673 */ 674 if (!log) 675 return; 676 677 audit_seccomp(syscall, signr, action); 678 } 679 680 /* 681 * Secure computing mode 1 allows only read/write/exit/sigreturn. 682 * To be fully secure this must be combined with rlimit 683 * to limit the stack allocations too. 
684 */ 685 static const int mode1_syscalls[] = { 686 __NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn, 687 0, /* null terminated */ 688 }; 689 690 static void __secure_computing_strict(int this_syscall) 691 { 692 const int *syscall_whitelist = mode1_syscalls; 693 #ifdef CONFIG_COMPAT 694 if (in_compat_syscall()) 695 syscall_whitelist = get_compat_mode1_syscalls(); 696 #endif 697 do { 698 if (*syscall_whitelist == this_syscall) 699 return; 700 } while (*++syscall_whitelist); 701 702 #ifdef SECCOMP_DEBUG 703 dump_stack(); 704 #endif 705 seccomp_log(this_syscall, SIGKILL, SECCOMP_RET_KILL_THREAD, true); 706 do_exit(SIGKILL); 707 } 708 709 #ifndef CONFIG_HAVE_ARCH_SECCOMP_FILTER 710 void secure_computing_strict(int this_syscall) 711 { 712 int mode = current->seccomp.mode; 713 714 if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) && 715 unlikely(current->ptrace & PT_SUSPEND_SECCOMP)) 716 return; 717 718 if (mode == SECCOMP_MODE_DISABLED) 719 return; 720 else if (mode == SECCOMP_MODE_STRICT) 721 __secure_computing_strict(this_syscall); 722 else 723 BUG(); 724 } 725 #else 726 727 #ifdef CONFIG_SECCOMP_FILTER 728 static u64 seccomp_next_notify_id(struct seccomp_filter *filter) 729 { 730 /* 731 * Note: overflow is ok here, the id just needs to be unique per 732 * filter. 733 */ 734 lockdep_assert_held(&filter->notify_lock); 735 return filter->notif->next_id++; 736 } 737 738 static int seccomp_do_user_notification(int this_syscall, 739 struct seccomp_filter *match, 740 const struct seccomp_data *sd) 741 { 742 int err; 743 u32 flags = 0; 744 long ret = 0; 745 struct seccomp_knotif n = {}; 746 747 mutex_lock(&match->notify_lock); 748 err = -ENOSYS; 749 if (!match->notif) 750 goto out; 751 752 n.task = current; 753 n.state = SECCOMP_NOTIFY_INIT; 754 n.data = sd; 755 n.id = seccomp_next_notify_id(match); 756 init_completion(&n.ready); 757 list_add(&n.list, &match->notif->notifications); 758 759 up(&match->notif->request); 760 wake_up_poll(&match->notif->wqh, EPOLLIN | EPOLLRDNORM); 761 mutex_unlock(&match->notify_lock); 762 763 /* 764 * This is where we wait for a reply from userspace. 765 */ 766 err = wait_for_completion_interruptible(&n.ready); 767 mutex_lock(&match->notify_lock); 768 if (err == 0) { 769 ret = n.val; 770 err = n.error; 771 flags = n.flags; 772 } 773 774 /* 775 * Note that it's possible the listener died in between the time when 776 * we were notified of a respons (or a signal) and when we were able to 777 * re-acquire the lock, so only delete from the list if the 778 * notification actually exists. 779 * 780 * Also note that this test is only valid because there's no way to 781 * *reattach* to a notifier right now. If one is added, we'll need to 782 * keep track of the notif itself and make sure they match here. 783 */ 784 if (match->notif) 785 list_del(&n.list); 786 out: 787 mutex_unlock(&match->notify_lock); 788 789 /* Userspace requests to continue the syscall. */ 790 if (flags & SECCOMP_USER_NOTIF_FLAG_CONTINUE) 791 return 0; 792 793 syscall_set_return_value(current, task_pt_regs(current), 794 err, ret); 795 return -1; 796 } 797 798 static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd, 799 const bool recheck_after_trace) 800 { 801 u32 filter_ret, action; 802 struct seccomp_filter *match = NULL; 803 int data; 804 struct seccomp_data sd_local; 805 806 /* 807 * Make sure that any changes to mode from another thread have 808 * been seen after TIF_SECCOMP was seen. 
809 */ 810 rmb(); 811 812 if (!sd) { 813 populate_seccomp_data(&sd_local); 814 sd = &sd_local; 815 } 816 817 filter_ret = seccomp_run_filters(sd, &match); 818 data = filter_ret & SECCOMP_RET_DATA; 819 action = filter_ret & SECCOMP_RET_ACTION_FULL; 820 821 switch (action) { 822 case SECCOMP_RET_ERRNO: 823 /* Set low-order bits as an errno, capped at MAX_ERRNO. */ 824 if (data > MAX_ERRNO) 825 data = MAX_ERRNO; 826 syscall_set_return_value(current, task_pt_regs(current), 827 -data, 0); 828 goto skip; 829 830 case SECCOMP_RET_TRAP: 831 /* Show the handler the original registers. */ 832 syscall_rollback(current, task_pt_regs(current)); 833 /* Let the filter pass back 16 bits of data. */ 834 seccomp_send_sigsys(this_syscall, data); 835 goto skip; 836 837 case SECCOMP_RET_TRACE: 838 /* We've been put in this state by the ptracer already. */ 839 if (recheck_after_trace) 840 return 0; 841 842 /* ENOSYS these calls if there is no tracer attached. */ 843 if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) { 844 syscall_set_return_value(current, 845 task_pt_regs(current), 846 -ENOSYS, 0); 847 goto skip; 848 } 849 850 /* Allow the BPF to provide the event message */ 851 ptrace_event(PTRACE_EVENT_SECCOMP, data); 852 /* 853 * The delivery of a fatal signal during event 854 * notification may silently skip tracer notification, 855 * which could leave us with a potentially unmodified 856 * syscall that the tracer would have liked to have 857 * changed. Since the process is about to die, we just 858 * force the syscall to be skipped and let the signal 859 * kill the process and correctly handle any tracer exit 860 * notifications. 861 */ 862 if (fatal_signal_pending(current)) 863 goto skip; 864 /* Check if the tracer forced the syscall to be skipped. */ 865 this_syscall = syscall_get_nr(current, task_pt_regs(current)); 866 if (this_syscall < 0) 867 goto skip; 868 869 /* 870 * Recheck the syscall, since it may have changed. This 871 * intentionally uses a NULL struct seccomp_data to force 872 * a reload of all registers. This does not goto skip since 873 * a skip would have already been reported. 874 */ 875 if (__seccomp_filter(this_syscall, NULL, true)) 876 return -1; 877 878 return 0; 879 880 case SECCOMP_RET_USER_NOTIF: 881 if (seccomp_do_user_notification(this_syscall, match, sd)) 882 goto skip; 883 884 return 0; 885 886 case SECCOMP_RET_LOG: 887 seccomp_log(this_syscall, 0, action, true); 888 return 0; 889 890 case SECCOMP_RET_ALLOW: 891 /* 892 * Note that the "match" filter will always be NULL for 893 * this action since SECCOMP_RET_ALLOW is the starting 894 * state in seccomp_run_filters(). 895 */ 896 return 0; 897 898 case SECCOMP_RET_KILL_THREAD: 899 case SECCOMP_RET_KILL_PROCESS: 900 default: 901 seccomp_log(this_syscall, SIGSYS, action, true); 902 /* Dump core only if this is the last remaining thread. */ 903 if (action == SECCOMP_RET_KILL_PROCESS || 904 get_nr_threads(current) == 1) { 905 kernel_siginfo_t info; 906 907 /* Show the original registers in the dump. */ 908 syscall_rollback(current, task_pt_regs(current)); 909 /* Trigger a manual coredump since do_exit skips it. */ 910 seccomp_init_siginfo(&info, this_syscall, data); 911 do_coredump(&info); 912 } 913 if (action == SECCOMP_RET_KILL_PROCESS) 914 do_group_exit(SIGSYS); 915 else 916 do_exit(SIGSYS); 917 } 918 919 unreachable(); 920 921 skip: 922 seccomp_log(this_syscall, 0, action, match ? 

#else
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
                            const bool recheck_after_trace)
{
        BUG();
}
#endif

int __secure_computing(const struct seccomp_data *sd)
{
        int mode = current->seccomp.mode;
        int this_syscall;

        if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
            unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
                return 0;

        this_syscall = sd ? sd->nr :
                syscall_get_nr(current, task_pt_regs(current));

        switch (mode) {
        case SECCOMP_MODE_STRICT:
                __secure_computing_strict(this_syscall);  /* may call do_exit */
                return 0;
        case SECCOMP_MODE_FILTER:
                return __seccomp_filter(this_syscall, sd, false);
        default:
                BUG();
        }
}
#endif /* CONFIG_HAVE_ARCH_SECCOMP_FILTER */

long prctl_get_seccomp(void)
{
        return current->seccomp.mode;
}

/**
 * seccomp_set_mode_strict: internal function for setting strict seccomp
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_strict(void)
{
        const unsigned long seccomp_mode = SECCOMP_MODE_STRICT;
        long ret = -EINVAL;

        spin_lock_irq(&current->sighand->siglock);

        if (!seccomp_may_assign_mode(seccomp_mode))
                goto out;

#ifdef TIF_NOTSC
        disable_TSC();
#endif
        seccomp_assign_mode(current, seccomp_mode, 0);
        ret = 0;

out:
        spin_unlock_irq(&current->sighand->siglock);

        return ret;
}

#ifdef CONFIG_SECCOMP_FILTER
static int seccomp_notify_release(struct inode *inode, struct file *file)
{
        struct seccomp_filter *filter = file->private_data;
        struct seccomp_knotif *knotif;

        if (!filter)
                return 0;

        mutex_lock(&filter->notify_lock);

        /*
         * If this file is being closed because e.g. the task who owned it
         * died, let's wake everyone up who was waiting on us.
         */
        list_for_each_entry(knotif, &filter->notif->notifications, list) {
                if (knotif->state == SECCOMP_NOTIFY_REPLIED)
                        continue;

                knotif->state = SECCOMP_NOTIFY_REPLIED;
                knotif->error = -ENOSYS;
                knotif->val = 0;

                complete(&knotif->ready);
        }

        kfree(filter->notif);
        filter->notif = NULL;
        mutex_unlock(&filter->notify_lock);
        __put_seccomp_filter(filter);
        return 0;
}
1056 */ 1057 if (!knotif) { 1058 ret = -ENOENT; 1059 goto out; 1060 } 1061 1062 unotif.id = knotif->id; 1063 unotif.pid = task_pid_vnr(knotif->task); 1064 unotif.data = *(knotif->data); 1065 1066 knotif->state = SECCOMP_NOTIFY_SENT; 1067 wake_up_poll(&filter->notif->wqh, EPOLLOUT | EPOLLWRNORM); 1068 ret = 0; 1069 out: 1070 mutex_unlock(&filter->notify_lock); 1071 1072 if (ret == 0 && copy_to_user(buf, &unotif, sizeof(unotif))) { 1073 ret = -EFAULT; 1074 1075 /* 1076 * Userspace screwed up. To make sure that we keep this 1077 * notification alive, let's reset it back to INIT. It 1078 * may have died when we released the lock, so we need to make 1079 * sure it's still around. 1080 */ 1081 knotif = NULL; 1082 mutex_lock(&filter->notify_lock); 1083 list_for_each_entry(cur, &filter->notif->notifications, list) { 1084 if (cur->id == unotif.id) { 1085 knotif = cur; 1086 break; 1087 } 1088 } 1089 1090 if (knotif) { 1091 knotif->state = SECCOMP_NOTIFY_INIT; 1092 up(&filter->notif->request); 1093 } 1094 mutex_unlock(&filter->notify_lock); 1095 } 1096 1097 return ret; 1098 } 1099 1100 static long seccomp_notify_send(struct seccomp_filter *filter, 1101 void __user *buf) 1102 { 1103 struct seccomp_notif_resp resp = {}; 1104 struct seccomp_knotif *knotif = NULL, *cur; 1105 long ret; 1106 1107 if (copy_from_user(&resp, buf, sizeof(resp))) 1108 return -EFAULT; 1109 1110 if (resp.flags & ~SECCOMP_USER_NOTIF_FLAG_CONTINUE) 1111 return -EINVAL; 1112 1113 if ((resp.flags & SECCOMP_USER_NOTIF_FLAG_CONTINUE) && 1114 (resp.error || resp.val)) 1115 return -EINVAL; 1116 1117 ret = mutex_lock_interruptible(&filter->notify_lock); 1118 if (ret < 0) 1119 return ret; 1120 1121 list_for_each_entry(cur, &filter->notif->notifications, list) { 1122 if (cur->id == resp.id) { 1123 knotif = cur; 1124 break; 1125 } 1126 } 1127 1128 if (!knotif) { 1129 ret = -ENOENT; 1130 goto out; 1131 } 1132 1133 /* Allow exactly one reply. 

static long seccomp_notify_send(struct seccomp_filter *filter,
                                void __user *buf)
{
        struct seccomp_notif_resp resp = {};
        struct seccomp_knotif *knotif = NULL, *cur;
        long ret;

        if (copy_from_user(&resp, buf, sizeof(resp)))
                return -EFAULT;

        if (resp.flags & ~SECCOMP_USER_NOTIF_FLAG_CONTINUE)
                return -EINVAL;

        if ((resp.flags & SECCOMP_USER_NOTIF_FLAG_CONTINUE) &&
            (resp.error || resp.val))
                return -EINVAL;

        ret = mutex_lock_interruptible(&filter->notify_lock);
        if (ret < 0)
                return ret;

        list_for_each_entry(cur, &filter->notif->notifications, list) {
                if (cur->id == resp.id) {
                        knotif = cur;
                        break;
                }
        }

        if (!knotif) {
                ret = -ENOENT;
                goto out;
        }

        /* Allow exactly one reply. */
        if (knotif->state != SECCOMP_NOTIFY_SENT) {
                ret = -EINPROGRESS;
                goto out;
        }

        ret = 0;
        knotif->state = SECCOMP_NOTIFY_REPLIED;
        knotif->error = resp.error;
        knotif->val = resp.val;
        knotif->flags = resp.flags;
        complete(&knotif->ready);
out:
        mutex_unlock(&filter->notify_lock);
        return ret;
}

static long seccomp_notify_id_valid(struct seccomp_filter *filter,
                                    void __user *buf)
{
        struct seccomp_knotif *knotif = NULL;
        u64 id;
        long ret;

        if (copy_from_user(&id, buf, sizeof(id)))
                return -EFAULT;

        ret = mutex_lock_interruptible(&filter->notify_lock);
        if (ret < 0)
                return ret;

        ret = -ENOENT;
        list_for_each_entry(knotif, &filter->notif->notifications, list) {
                if (knotif->id == id) {
                        if (knotif->state == SECCOMP_NOTIFY_SENT)
                                ret = 0;
                        goto out;
                }
        }

out:
        mutex_unlock(&filter->notify_lock);
        return ret;
}

static long seccomp_notify_ioctl(struct file *file, unsigned int cmd,
                                 unsigned long arg)
{
        struct seccomp_filter *filter = file->private_data;
        void __user *buf = (void __user *)arg;

        switch (cmd) {
        case SECCOMP_IOCTL_NOTIF_RECV:
                return seccomp_notify_recv(filter, buf);
        case SECCOMP_IOCTL_NOTIF_SEND:
                return seccomp_notify_send(filter, buf);
        case SECCOMP_IOCTL_NOTIF_ID_VALID:
                return seccomp_notify_id_valid(filter, buf);
        default:
                return -EINVAL;
        }
}

static __poll_t seccomp_notify_poll(struct file *file,
                                    struct poll_table_struct *poll_tab)
{
        struct seccomp_filter *filter = file->private_data;
        __poll_t ret = 0;
        struct seccomp_knotif *cur;

        poll_wait(file, &filter->notif->wqh, poll_tab);

        if (mutex_lock_interruptible(&filter->notify_lock) < 0)
                return EPOLLERR;

        list_for_each_entry(cur, &filter->notif->notifications, list) {
                if (cur->state == SECCOMP_NOTIFY_INIT)
                        ret |= EPOLLIN | EPOLLRDNORM;
                if (cur->state == SECCOMP_NOTIFY_SENT)
                        ret |= EPOLLOUT | EPOLLWRNORM;
                if ((ret & EPOLLIN) && (ret & EPOLLOUT))
                        break;
        }

        mutex_unlock(&filter->notify_lock);

        return ret;
}

static const struct file_operations seccomp_notify_ops = {
        .poll = seccomp_notify_poll,
        .release = seccomp_notify_release,
        .unlocked_ioctl = seccomp_notify_ioctl,
        .compat_ioctl = seccomp_notify_ioctl,
};

static struct file *init_listener(struct seccomp_filter *filter)
{
        struct file *ret = ERR_PTR(-EBUSY);
        struct seccomp_filter *cur;

        for (cur = current->seccomp.filter; cur; cur = cur->prev) {
                if (cur->notif)
                        goto out;
        }

        ret = ERR_PTR(-ENOMEM);
        filter->notif = kzalloc(sizeof(*(filter->notif)), GFP_KERNEL);
        if (!filter->notif)
                goto out;

        sema_init(&filter->notif->request, 0);
        filter->notif->next_id = get_random_u64();
        INIT_LIST_HEAD(&filter->notif->notifications);
        init_waitqueue_head(&filter->notif->wqh);

        ret = anon_inode_getfile("seccomp notify", &seccomp_notify_ops,
                                 filter, O_RDWR);
        if (IS_ERR(ret))
                goto out_notif;

        /* The file has a reference to it now */
        __get_seccomp_filter(filter);

out_notif:
        if (IS_ERR(ret))
                kfree(filter->notif);
out:
        return ret;
}
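
/*
 * Editorial aside, not from the original file: an illustrative sketch of
 * obtaining and waiting on this file from userspace:
 *
 *        int fd = syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
 *                         SECCOMP_FILTER_FLAG_NEW_LISTENER, &prog);
 *        struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *        poll(&pfd, 1, -1);  // readable: a task hit SECCOMP_RET_USER_NOTIF
 */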

/**
 * seccomp_set_mode_filter: internal function for setting seccomp filter
 * @flags:  flags to change filter behavior
 * @filter: struct sock_fprog containing filter
 *
 * This function may be called repeatedly to install additional filters.
 * Every filter successfully installed will be evaluated (in reverse order)
 * for each system call the task makes.
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_filter(unsigned int flags,
                                    const char __user *filter)
{
        const unsigned long seccomp_mode = SECCOMP_MODE_FILTER;
        struct seccomp_filter *prepared = NULL;
        long ret = -EINVAL;
        int listener = -1;
        struct file *listener_f = NULL;

        /* Validate flags. */
        if (flags & ~SECCOMP_FILTER_FLAG_MASK)
                return -EINVAL;

        /*
         * In the successful case, NEW_LISTENER returns the new listener fd.
         * But in the failure case, TSYNC returns the thread that died. If you
         * combine these two flags, there's no way to tell whether something
         * succeeded or failed. So, let's disallow this combination if the user
         * has not explicitly requested no errors from TSYNC.
         */
        if ((flags & SECCOMP_FILTER_FLAG_TSYNC) &&
            (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) &&
            ((flags & SECCOMP_FILTER_FLAG_TSYNC_ESRCH) == 0))
                return -EINVAL;

        /* Prepare the new filter before holding any locks. */
        prepared = seccomp_prepare_user_filter(filter);
        if (IS_ERR(prepared))
                return PTR_ERR(prepared);

        if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) {
                listener = get_unused_fd_flags(O_CLOEXEC);
                if (listener < 0) {
                        ret = listener;
                        goto out_free;
                }

                listener_f = init_listener(prepared);
                if (IS_ERR(listener_f)) {
                        put_unused_fd(listener);
                        ret = PTR_ERR(listener_f);
                        goto out_free;
                }
        }

        /*
         * Make sure we cannot change seccomp or nnp state via TSYNC
         * while another thread is in the middle of calling exec.
         */
        if (flags & SECCOMP_FILTER_FLAG_TSYNC &&
            mutex_lock_killable(&current->signal->cred_guard_mutex))
                goto out_put_fd;

        spin_lock_irq(&current->sighand->siglock);

        if (!seccomp_may_assign_mode(seccomp_mode))
                goto out;

        ret = seccomp_attach_filter(flags, prepared);
        if (ret)
                goto out;
        /* Do not free the successfully attached filter. */
        prepared = NULL;

        seccomp_assign_mode(current, seccomp_mode, flags);
out:
        spin_unlock_irq(&current->sighand->siglock);
        if (flags & SECCOMP_FILTER_FLAG_TSYNC)
                mutex_unlock(&current->signal->cred_guard_mutex);
out_put_fd:
        if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) {
                if (ret) {
                        listener_f->private_data = NULL;
                        fput(listener_f);
                        put_unused_fd(listener);
                } else {
                        fd_install(listener, listener_f);
                        ret = listener;
                }
        }
out_free:
        seccomp_filter_free(prepared);
        return ret;
}
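
/*
 * Editorial aside, not from the original file: given the flag check above,
 * combining TSYNC with NEW_LISTENER requires TSYNC_ESRCH so that a failure
 * is distinguishable from a returned fd (an illustrative sketch):
 *
 *        int fd = syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
 *                         SECCOMP_FILTER_FLAG_TSYNC |
 *                         SECCOMP_FILTER_FLAG_TSYNC_ESRCH |
 *                         SECCOMP_FILTER_FLAG_NEW_LISTENER, &prog);
 *        // fd >= 0: listener fd; fd < 0: plain error, never a thread id
 */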

#else
static inline long seccomp_set_mode_filter(unsigned int flags,
                                           const char __user *filter)
{
        return -EINVAL;
}
#endif

static long seccomp_get_action_avail(const char __user *uaction)
{
        u32 action;

        if (copy_from_user(&action, uaction, sizeof(action)))
                return -EFAULT;

        switch (action) {
        case SECCOMP_RET_KILL_PROCESS:
        case SECCOMP_RET_KILL_THREAD:
        case SECCOMP_RET_TRAP:
        case SECCOMP_RET_ERRNO:
        case SECCOMP_RET_USER_NOTIF:
        case SECCOMP_RET_TRACE:
        case SECCOMP_RET_LOG:
        case SECCOMP_RET_ALLOW:
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static long seccomp_get_notif_sizes(void __user *usizes)
{
        struct seccomp_notif_sizes sizes = {
                .seccomp_notif = sizeof(struct seccomp_notif),
                .seccomp_notif_resp = sizeof(struct seccomp_notif_resp),
                .seccomp_data = sizeof(struct seccomp_data),
        };

        if (copy_to_user(usizes, &sizes, sizeof(sizes)))
                return -EFAULT;

        return 0;
}

/* Common entry point for both prctl and syscall. */
static long do_seccomp(unsigned int op, unsigned int flags,
                       void __user *uargs)
{
        switch (op) {
        case SECCOMP_SET_MODE_STRICT:
                if (flags != 0 || uargs != NULL)
                        return -EINVAL;
                return seccomp_set_mode_strict();
        case SECCOMP_SET_MODE_FILTER:
                return seccomp_set_mode_filter(flags, uargs);
        case SECCOMP_GET_ACTION_AVAIL:
                if (flags != 0)
                        return -EINVAL;

                return seccomp_get_action_avail(uargs);
        case SECCOMP_GET_NOTIF_SIZES:
                if (flags != 0)
                        return -EINVAL;

                return seccomp_get_notif_sizes(uargs);
        default:
                return -EINVAL;
        }
}

SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,
                void __user *, uargs)
{
        return do_seccomp(op, flags, uargs);
}

/**
 * prctl_set_seccomp: configures current->seccomp.mode
 * @seccomp_mode: requested mode to use
 * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
 *
 * Returns 0 on success or -EINVAL on failure.
 */
long prctl_set_seccomp(unsigned long seccomp_mode, void __user *filter)
{
        unsigned int op;
        void __user *uargs;

        switch (seccomp_mode) {
        case SECCOMP_MODE_STRICT:
                op = SECCOMP_SET_MODE_STRICT;
                /*
                 * Setting strict mode through prctl always ignored filter,
                 * so make sure it is always NULL here to pass the internal
                 * check in do_seccomp().
                 */
                uargs = NULL;
                break;
        case SECCOMP_MODE_FILTER:
                op = SECCOMP_SET_MODE_FILTER;
                uargs = filter;
                break;
        default:
                return -EINVAL;
        }

        /* prctl interface doesn't have flags, so they are always zero. */
        return do_seccomp(op, 0, uargs);
}
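
/*
 * Editorial aside, not from the original file: the mapping above makes the
 * legacy prctl an exact alias, i.e.
 *
 *        prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
 *
 * behaves like
 *
 *        syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER, 0, &prog);
 */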

#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
static struct seccomp_filter *get_nth_filter(struct task_struct *task,
                                             unsigned long filter_off)
{
        struct seccomp_filter *orig, *filter;
        unsigned long count;

        /*
         * Note: this is only correct because the caller should be the (ptrace)
         * tracer of the task, otherwise lock_task_sighand is needed.
         */
        spin_lock_irq(&task->sighand->siglock);

        if (task->seccomp.mode != SECCOMP_MODE_FILTER) {
                spin_unlock_irq(&task->sighand->siglock);
                return ERR_PTR(-EINVAL);
        }

        orig = task->seccomp.filter;
        __get_seccomp_filter(orig);
        spin_unlock_irq(&task->sighand->siglock);

        count = 0;
        for (filter = orig; filter; filter = filter->prev)
                count++;

        if (filter_off >= count) {
                filter = ERR_PTR(-ENOENT);
                goto out;
        }

        count -= filter_off;
        for (filter = orig; filter && count > 1; filter = filter->prev)
                count--;

        if (WARN_ON(count != 1 || !filter)) {
                filter = ERR_PTR(-ENOENT);
                goto out;
        }

        __get_seccomp_filter(filter);

out:
        __put_seccomp_filter(orig);
        return filter;
}
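
/*
 * Editorial aside, not from the original file: checkpoint/restore tools
 * reach the two accessors below via ptrace, walking filter_off from 0
 * until ENOENT (an illustrative sketch):
 *
 *        long n = ptrace(PTRACE_SECCOMP_GET_FILTER, pid, 0, NULL); // count
 *        struct sock_filter *insns = calloc(n, sizeof(*insns));
 *        ptrace(PTRACE_SECCOMP_GET_FILTER, pid, 0, insns);         // copy
 */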

long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
                        void __user *data)
{
        struct seccomp_filter *filter;
        struct sock_fprog_kern *fprog;
        long ret;

        if (!capable(CAP_SYS_ADMIN) ||
            current->seccomp.mode != SECCOMP_MODE_DISABLED) {
                return -EACCES;
        }

        filter = get_nth_filter(task, filter_off);
        if (IS_ERR(filter))
                return PTR_ERR(filter);

        fprog = filter->prog->orig_prog;
        if (!fprog) {
                /* This must be a new non-cBPF filter, since we save
                 * every cBPF filter's orig_prog above when
                 * CONFIG_CHECKPOINT_RESTORE is enabled.
                 */
                ret = -EMEDIUMTYPE;
                goto out;
        }

        ret = fprog->len;
        if (!data)
                goto out;

        if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
                ret = -EFAULT;

out:
        __put_seccomp_filter(filter);
        return ret;
}

long seccomp_get_metadata(struct task_struct *task,
                          unsigned long size, void __user *data)
{
        long ret;
        struct seccomp_filter *filter;
        struct seccomp_metadata kmd = {};

        if (!capable(CAP_SYS_ADMIN) ||
            current->seccomp.mode != SECCOMP_MODE_DISABLED) {
                return -EACCES;
        }

        size = min_t(unsigned long, size, sizeof(kmd));

        if (size < sizeof(kmd.filter_off))
                return -EINVAL;

        if (copy_from_user(&kmd.filter_off, data, sizeof(kmd.filter_off)))
                return -EFAULT;

        filter = get_nth_filter(task, kmd.filter_off);
        if (IS_ERR(filter))
                return PTR_ERR(filter);

        if (filter->log)
                kmd.flags |= SECCOMP_FILTER_FLAG_LOG;

        ret = size;
        if (copy_to_user(data, &kmd, size))
                ret = -EFAULT;

        __put_seccomp_filter(filter);
        return ret;
}
#endif
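
/*
 * Editorial aside, not from the original file: an illustrative sketch of
 * the metadata accessor above from a tracer:
 *
 *        struct seccomp_metadata md = { .filter_off = 0 };
 *
 *        ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md);
 *        // md.flags & SECCOMP_FILTER_FLAG_LOG: filter was attached with LOG
 */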

#ifdef CONFIG_SYSCTL

/* Human readable action names for friendly sysctl interaction */
#define SECCOMP_RET_KILL_PROCESS_NAME   "kill_process"
#define SECCOMP_RET_KILL_THREAD_NAME    "kill_thread"
#define SECCOMP_RET_TRAP_NAME           "trap"
#define SECCOMP_RET_ERRNO_NAME          "errno"
#define SECCOMP_RET_USER_NOTIF_NAME     "user_notif"
#define SECCOMP_RET_TRACE_NAME          "trace"
#define SECCOMP_RET_LOG_NAME            "log"
#define SECCOMP_RET_ALLOW_NAME          "allow"

static const char seccomp_actions_avail[] =
                                SECCOMP_RET_KILL_PROCESS_NAME " "
                                SECCOMP_RET_KILL_THREAD_NAME " "
                                SECCOMP_RET_TRAP_NAME " "
                                SECCOMP_RET_ERRNO_NAME " "
                                SECCOMP_RET_USER_NOTIF_NAME " "
                                SECCOMP_RET_TRACE_NAME " "
                                SECCOMP_RET_LOG_NAME " "
                                SECCOMP_RET_ALLOW_NAME;

struct seccomp_log_name {
        u32 log;
        const char *name;
};

static const struct seccomp_log_name seccomp_log_names[] = {
        { SECCOMP_LOG_KILL_PROCESS, SECCOMP_RET_KILL_PROCESS_NAME },
        { SECCOMP_LOG_KILL_THREAD, SECCOMP_RET_KILL_THREAD_NAME },
        { SECCOMP_LOG_TRAP, SECCOMP_RET_TRAP_NAME },
        { SECCOMP_LOG_ERRNO, SECCOMP_RET_ERRNO_NAME },
        { SECCOMP_LOG_USER_NOTIF, SECCOMP_RET_USER_NOTIF_NAME },
        { SECCOMP_LOG_TRACE, SECCOMP_RET_TRACE_NAME },
        { SECCOMP_LOG_LOG, SECCOMP_RET_LOG_NAME },
        { SECCOMP_LOG_ALLOW, SECCOMP_RET_ALLOW_NAME },
        { }
};

static bool seccomp_names_from_actions_logged(char *names, size_t size,
                                              u32 actions_logged,
                                              const char *sep)
{
        const struct seccomp_log_name *cur;
        bool append_sep = false;

        for (cur = seccomp_log_names; cur->name && size; cur++) {
                ssize_t ret;

                if (!(actions_logged & cur->log))
                        continue;

                if (append_sep) {
                        ret = strscpy(names, sep, size);
                        if (ret < 0)
                                return false;

                        names += ret;
                        size -= ret;
                } else
                        append_sep = true;

                ret = strscpy(names, cur->name, size);
                if (ret < 0)
                        return false;

                names += ret;
                size -= ret;
        }

        return true;
}

static bool seccomp_action_logged_from_name(u32 *action_logged,
                                            const char *name)
{
        const struct seccomp_log_name *cur;

        for (cur = seccomp_log_names; cur->name; cur++) {
                if (!strcmp(cur->name, name)) {
                        *action_logged = cur->log;
                        return true;
                }
        }

        return false;
}

static bool seccomp_actions_logged_from_names(u32 *actions_logged, char *names)
{
        char *name;

        *actions_logged = 0;
        while ((name = strsep(&names, " ")) && *name) {
                u32 action_logged = 0;

                if (!seccomp_action_logged_from_name(&action_logged, name))
                        return false;

                *actions_logged |= action_logged;
        }

        return true;
}

static int read_actions_logged(struct ctl_table *ro_table, void __user *buffer,
                               size_t *lenp, loff_t *ppos)
{
        char names[sizeof(seccomp_actions_avail)];
        struct ctl_table table;

        memset(names, 0, sizeof(names));

        if (!seccomp_names_from_actions_logged(names, sizeof(names),
                                               seccomp_actions_logged, " "))
                return -EINVAL;

        table = *ro_table;
        table.data = names;
        table.maxlen = sizeof(names);
        return proc_dostring(&table, 0, buffer, lenp, ppos);
}

static int write_actions_logged(struct ctl_table *ro_table, void __user *buffer,
                                size_t *lenp, loff_t *ppos, u32 *actions_logged)
{
        char names[sizeof(seccomp_actions_avail)];
        struct ctl_table table;
        int ret;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        memset(names, 0, sizeof(names));

        table = *ro_table;
        table.data = names;
        table.maxlen = sizeof(names);
        ret = proc_dostring(&table, 1, buffer, lenp, ppos);
        if (ret)
                return ret;

        if (!seccomp_actions_logged_from_names(actions_logged, table.data))
                return -EINVAL;

        if (*actions_logged & SECCOMP_LOG_ALLOW)
                return -EINVAL;

        seccomp_actions_logged = *actions_logged;
        return 0;
}

static void audit_actions_logged(u32 actions_logged, u32 old_actions_logged,
                                 int ret)
{
        char names[sizeof(seccomp_actions_avail)];
        char old_names[sizeof(seccomp_actions_avail)];
        const char *new = names;
        const char *old = old_names;

        if (!audit_enabled)
                return;

        memset(names, 0, sizeof(names));
        memset(old_names, 0, sizeof(old_names));

        if (ret)
                new = "?";
        else if (!actions_logged)
                new = "(none)";
        else if (!seccomp_names_from_actions_logged(names, sizeof(names),
                                                    actions_logged, ","))
                new = "?";

        if (!old_actions_logged)
                old = "(none)";
        else if (!seccomp_names_from_actions_logged(old_names,
                                                    sizeof(old_names),
                                                    old_actions_logged, ","))
                old = "?";

        return audit_seccomp_actions_logged(new, old, !ret);
}

static int seccomp_actions_logged_handler(struct ctl_table *ro_table, int write,
                                          void __user *buffer, size_t *lenp,
                                          loff_t *ppos)
{
        int ret;

        if (write) {
                u32 actions_logged = 0;
                u32 old_actions_logged = seccomp_actions_logged;

                ret = write_actions_logged(ro_table, buffer, lenp, ppos,
                                           &actions_logged);
                audit_actions_logged(actions_logged, old_actions_logged, ret);
        } else
                ret = read_actions_logged(ro_table, buffer, lenp, ppos);

        return ret;
}

static struct ctl_path seccomp_sysctl_path[] = {
        { .procname = "kernel", },
        { .procname = "seccomp", },
        { }
};

static struct ctl_table seccomp_sysctl_table[] = {
        {
                .procname       = "actions_avail",
                .data           = (void *) &seccomp_actions_avail,
                .maxlen         = sizeof(seccomp_actions_avail),
                .mode           = 0444,
                .proc_handler   = proc_dostring,
        },
        {
                .procname       = "actions_logged",
                .mode           = 0644,
                .proc_handler   = seccomp_actions_logged_handler,
        },
        { }
};
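
/*
 * Editorial aside, not from the original file: the tables above surface as
 * /proc/sys/kernel/seccomp/actions_avail (a read-only, space-separated list
 * of the action names defined above) and /proc/sys/kernel/seccomp/
 * actions_logged (a read-write subset an admin may trim, except "allow",
 * which write_actions_logged() rejects).
 */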

static int __init seccomp_sysctl_init(void)
{
        struct ctl_table_header *hdr;

        hdr = register_sysctl_paths(seccomp_sysctl_path, seccomp_sysctl_table);
        if (!hdr)
                pr_warn("seccomp: sysctl registration failed\n");
        else
                kmemleak_not_leak(hdr);

        return 0;
}

device_initcall(seccomp_sysctl_init)

#endif /* CONFIG_SYSCTL */