// SPDX-License-Identifier: GPL-2.0
/*
 * linux/kernel/seccomp.c
 *
 * Copyright 2004-2005  Andrea Arcangeli <andrea@cpushare.com>
 *
 * Copyright (C) 2012 Google, Inc.
 * Will Drewry <wad@chromium.org>
 *
 * This defines a simple but solid secure-computing facility.
 *
 * Mode 1 uses a fixed list of allowed system calls.
 * Mode 2 allows user-defined system call filters in the form
 * of Berkeley Packet Filters/Linux Socket Filters.
 */

#include <linux/refcount.h>
#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/coredump.h>
#include <linux/kmemleak.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seccomp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/sysctl.h>

#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
#include <asm/syscall.h>
#endif

#ifdef CONFIG_SECCOMP_FILTER
#include <linux/file.h>
#include <linux/filter.h>
#include <linux/pid.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>

enum notify_state {
        SECCOMP_NOTIFY_INIT,
        SECCOMP_NOTIFY_SENT,
        SECCOMP_NOTIFY_REPLIED,
};

struct seccomp_knotif {
        /* The task_struct of the task whose filter triggered the notification */
        struct task_struct *task;

        /* The "cookie" for this request; this is unique for this filter. */
        u64 id;

        /*
         * The seccomp data. This pointer is valid the entire time this
         * notification is active, since it comes from __seccomp_filter which
         * eclipses the entire lifecycle here.
         */
        const struct seccomp_data *data;

        /*
         * Notification states. When SECCOMP_RET_USER_NOTIF is returned, a
         * struct seccomp_knotif is created and starts out in INIT. Once the
         * handler reads the notification off of an FD, it transitions to SENT.
         * If a signal is received the state transitions back to INIT and
         * another message is sent. When the userspace handler replies, state
         * transitions to REPLIED.
         */
        enum notify_state state;

        /* The return values, only valid when in SECCOMP_NOTIFY_REPLIED */
        int error;
        long val;
        u32 flags;

        /* Signals when this has entered SECCOMP_NOTIFY_REPLIED */
        struct completion ready;

        struct list_head list;
};

/**
 * struct notification - container for seccomp userspace notifications. Since
 * most seccomp filters will not have notification listeners attached and this
 * structure is fairly large, we store the notification-specific stuff in a
 * separate structure.
 *
 * @request: A semaphore that users of this notification can wait on for
 *           changes. Actual reads and writes are still controlled with
 *           filter->notify_lock.
 * @next_id: The id of the next request.
 * @notifications: A list of struct seccomp_knotif elements.
 * @wqh: A wait queue for poll.
 */
struct notification {
        struct semaphore request;
        u64 next_id;
        struct list_head notifications;
        wait_queue_head_t wqh;
};

/**
 * struct seccomp_filter - container for seccomp BPF programs
 *
 * @usage: reference count to manage the object lifetime.
 *         get/put helpers should be used when accessing an instance
 *         outside of a lifetime-guarded section. In general, this
 *         is only needed for handling filters shared across tasks.
 * @log: true if all actions except for SECCOMP_RET_ALLOW should be logged
 * @prev: points to a previously installed, or inherited, filter
 * @prog: the BPF program to evaluate
 * @notif: the struct that holds all notification related information
 * @notify_lock: A lock for all notification-related accesses.
 *
 * seccomp_filter objects are organized in a tree linked via the @prev
 * pointer. For any task, it appears to be a singly-linked list starting
 * with current->seccomp.filter, the most recently attached or inherited filter.
 * However, multiple filters may share a @prev node, by way of fork(), which
 * results in a unidirectional tree existing in memory. This is similar to
 * how namespaces work.
 *
 * seccomp_filter objects should never be modified after being attached
 * to a task_struct (other than @usage).
 */
struct seccomp_filter {
        refcount_t usage;
        bool log;
        struct seccomp_filter *prev;
        struct bpf_prog *prog;
        struct notification *notif;
        struct mutex notify_lock;
};

/* Limit any path through the tree to 256KB worth of instructions. */
#define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))

/*
 * Endianness is explicitly ignored and left for BPF program authors to manage
 * as per the specific architecture.
 */
static void populate_seccomp_data(struct seccomp_data *sd)
{
        struct task_struct *task = current;
        struct pt_regs *regs = task_pt_regs(task);
        unsigned long args[6];

        sd->nr = syscall_get_nr(task, regs);
        sd->arch = syscall_get_arch(task);
        syscall_get_arguments(task, regs, args);
        sd->args[0] = args[0];
        sd->args[1] = args[1];
        sd->args[2] = args[2];
        sd->args[3] = args[3];
        sd->args[4] = args[4];
        sd->args[5] = args[5];
        sd->instruction_pointer = KSTK_EIP(task);
}

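/*
 * For illustration only (userspace, not part of the kernel build): a filter
 * sees the struct seccomp_data filled in above as its "packet". A minimal
 * classic-BPF sketch that rejects a hypothetical syscall number 999 and
 * allows everything else could look like:
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *			 offsetof(struct seccomp_data, nr)),
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 999, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | EPERM),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *	};
 *
 * A real filter should also check sd->arch before interpreting sd->nr.
 * Loads like these are validated and rewritten by seccomp_check_filter()
 * below.
 */
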
/**
 * seccomp_check_filter - verify seccomp filter code
 * @filter: filter to verify
 * @flen: length of filter
 *
 * Takes a previously checked filter (by bpf_check_classic) and
 * redirects all filter code that loads struct sk_buff data
 * and related data through seccomp_bpf_load. It also
 * enforces length and alignment checking of those loads.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
{
        int pc;

        for (pc = 0; pc < flen; pc++) {
                struct sock_filter *ftest = &filter[pc];
                u16 code = ftest->code;
                u32 k = ftest->k;

                switch (code) {
                case BPF_LD | BPF_W | BPF_ABS:
                        ftest->code = BPF_LDX | BPF_W | BPF_ABS;
                        /* 32-bit aligned and not out of bounds. */
                        if (k >= sizeof(struct seccomp_data) || k & 3)
                                return -EINVAL;
                        continue;
                case BPF_LD | BPF_W | BPF_LEN:
                        ftest->code = BPF_LD | BPF_IMM;
                        ftest->k = sizeof(struct seccomp_data);
                        continue;
                case BPF_LDX | BPF_W | BPF_LEN:
                        ftest->code = BPF_LDX | BPF_IMM;
                        ftest->k = sizeof(struct seccomp_data);
                        continue;
                /* Explicitly include allowed calls. */
                case BPF_RET | BPF_K:
                case BPF_RET | BPF_A:
                case BPF_ALU | BPF_ADD | BPF_K:
                case BPF_ALU | BPF_ADD | BPF_X:
                case BPF_ALU | BPF_SUB | BPF_K:
                case BPF_ALU | BPF_SUB | BPF_X:
                case BPF_ALU | BPF_MUL | BPF_K:
                case BPF_ALU | BPF_MUL | BPF_X:
                case BPF_ALU | BPF_DIV | BPF_K:
                case BPF_ALU | BPF_DIV | BPF_X:
                case BPF_ALU | BPF_AND | BPF_K:
                case BPF_ALU | BPF_AND | BPF_X:
                case BPF_ALU | BPF_OR | BPF_K:
                case BPF_ALU | BPF_OR | BPF_X:
                case BPF_ALU | BPF_XOR | BPF_K:
                case BPF_ALU | BPF_XOR | BPF_X:
                case BPF_ALU | BPF_LSH | BPF_K:
                case BPF_ALU | BPF_LSH | BPF_X:
                case BPF_ALU | BPF_RSH | BPF_K:
                case BPF_ALU | BPF_RSH | BPF_X:
                case BPF_ALU | BPF_NEG:
                case BPF_LD | BPF_IMM:
                case BPF_LDX | BPF_IMM:
                case BPF_MISC | BPF_TAX:
                case BPF_MISC | BPF_TXA:
                case BPF_LD | BPF_MEM:
                case BPF_LDX | BPF_MEM:
                case BPF_ST:
                case BPF_STX:
                case BPF_JMP | BPF_JA:
                case BPF_JMP | BPF_JEQ | BPF_K:
                case BPF_JMP | BPF_JEQ | BPF_X:
                case BPF_JMP | BPF_JGE | BPF_K:
                case BPF_JMP | BPF_JGE | BPF_X:
                case BPF_JMP | BPF_JGT | BPF_K:
                case BPF_JMP | BPF_JGT | BPF_X:
                case BPF_JMP | BPF_JSET | BPF_K:
                case BPF_JMP | BPF_JSET | BPF_X:
                        continue;
                default:
                        return -EINVAL;
                }
        }

        return 0;
}

/**
 * seccomp_run_filters - evaluates all seccomp filters against @sd
 * @sd: optional seccomp data to be passed to filters
 * @match: stores struct seccomp_filter that resulted in the return value,
 *         unless filter returned SECCOMP_RET_ALLOW, in which case it will
 *         be unchanged.
 *
 * Returns valid seccomp BPF response codes.
 */
#define ACTION_ONLY(ret) ((s32)((ret) & (SECCOMP_RET_ACTION_FULL)))
static u32 seccomp_run_filters(const struct seccomp_data *sd,
                               struct seccomp_filter **match)
{
        u32 ret = SECCOMP_RET_ALLOW;
        /* Make sure cross-thread synced filter points somewhere sane. */
        struct seccomp_filter *f =
                        READ_ONCE(current->seccomp.filter);

        /* Ensure unexpected behavior doesn't result in failing open. */
        if (WARN_ON(f == NULL))
                return SECCOMP_RET_KILL_PROCESS;

        /*
         * All filters in the list are evaluated and the lowest BPF return
         * value always takes priority (ignoring the DATA).
         */
        for (; f; f = f->prev) {
                u32 cur_ret = bpf_prog_run_pin_on_cpu(f->prog, sd);

                if (ACTION_ONLY(cur_ret) < ACTION_ONLY(ret)) {
                        ret = cur_ret;
                        *match = f;
                }
        }
        return ret;
}
#endif /* CONFIG_SECCOMP_FILTER */

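/*
 * A note on precedence, for illustration: because ACTION_ONLY() compares the
 * action bits as a signed 32-bit value, SECCOMP_RET_KILL_PROCESS (0x80000000U)
 * sorts below everything else, giving the ordering (most to least
 * restrictive):
 *
 *	KILL_PROCESS < KILL_THREAD < TRAP < ERRNO < USER_NOTIF
 *		< TRACE < LOG < ALLOW
 *
 * So when several stacked filters disagree about a syscall, the most
 * restrictive action wins regardless of attach order.
 */
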
static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
{
        assert_spin_locked(&current->sighand->siglock);

        if (current->seccomp.mode && current->seccomp.mode != seccomp_mode)
                return false;

        return true;
}

void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { }

static inline void seccomp_assign_mode(struct task_struct *task,
                                       unsigned long seccomp_mode,
                                       unsigned long flags)
{
        assert_spin_locked(&task->sighand->siglock);

        task->seccomp.mode = seccomp_mode;
        /*
         * Make sure TIF_SECCOMP cannot be set before the mode (and
         * filter) is set.
         */
        smp_mb__before_atomic();
        /* Assume default seccomp processes want spec flaw mitigation. */
        if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
                arch_seccomp_spec_mitigate(task);
        set_tsk_thread_flag(task, TIF_SECCOMP);
}

#ifdef CONFIG_SECCOMP_FILTER
/* Returns 1 if the parent is an ancestor of the child. */
static int is_ancestor(struct seccomp_filter *parent,
                       struct seccomp_filter *child)
{
        /* NULL is the root ancestor. */
        if (parent == NULL)
                return 1;
        for (; child; child = child->prev)
                if (child == parent)
                        return 1;
        return 0;
}

/**
 * seccomp_can_sync_threads: checks if all threads can be synchronized
 *
 * Expects sighand and cred_guard_mutex locks to be held.
 *
 * Returns 0 on success, -ve on error, or the pid of a thread which was
 * either not in the correct seccomp mode or did not have an ancestral
 * seccomp filter.
 */
static inline pid_t seccomp_can_sync_threads(void)
{
        struct task_struct *thread, *caller;

        BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
        assert_spin_locked(&current->sighand->siglock);

        /* Validate all threads being eligible for synchronization. */
        caller = current;
        for_each_thread(caller, thread) {
                pid_t failed;

                /* Skip current, since it is initiating the sync. */
                if (thread == caller)
                        continue;

                if (thread->seccomp.mode == SECCOMP_MODE_DISABLED ||
                    (thread->seccomp.mode == SECCOMP_MODE_FILTER &&
                     is_ancestor(thread->seccomp.filter,
                                 caller->seccomp.filter)))
                        continue;

                /* Return the first thread that cannot be synchronized. */
                failed = task_pid_vnr(thread);
                /* If the pid cannot be resolved, then return -ESRCH */
                if (WARN_ON(failed == 0))
                        failed = -ESRCH;
                return failed;
        }

        return 0;
}

/**
 * seccomp_sync_threads: sets all threads to use current's filter
 *
 * Expects sighand and cred_guard_mutex locks to be held, and for
 * seccomp_can_sync_threads() to have returned success already
 * without dropping the locks.
 */
static inline void seccomp_sync_threads(unsigned long flags)
{
        struct task_struct *thread, *caller;

        BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
        assert_spin_locked(&current->sighand->siglock);

        /* Synchronize all threads. */
        caller = current;
        for_each_thread(caller, thread) {
                /* Skip current, since it needs no changes. */
                if (thread == caller)
                        continue;

                /* Get a task reference for the new leaf node. */
                get_seccomp_filter(caller);
                /*
                 * Drop the task reference to the shared ancestor since
                 * current's path will hold a reference. (This also
                 * allows a put before the assignment.)
                 */
                put_seccomp_filter(thread);
                smp_store_release(&thread->seccomp.filter,
                                  caller->seccomp.filter);

                /*
                 * Don't let an unprivileged task work around
                 * the no_new_privs restriction by creating
                 * a thread that sets it up, enters seccomp,
                 * then dies.
                 */
                if (task_no_new_privs(caller))
                        task_set_no_new_privs(thread);

                /*
                 * Opt the other thread into seccomp if needed.
                 * As threads are considered to be trust-realm
                 * equivalent (see ptrace_may_access), it is safe to
                 * allow one thread to transition the other.
                 */
                if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
                        seccomp_assign_mode(thread, SECCOMP_MODE_FILTER,
                                            flags);
        }
}

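/*
 * Illustrative only (userspace, not part of this file): a thread can pull
 * its siblings into the same filter tree with the TSYNC flag, assuming a
 * previously built prog:
 *
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	ret = syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
 *		      SECCOMP_FILTER_FLAG_TSYNC, &prog);
 *
 * A positive return is the thread ID of the first thread that could not be
 * synchronized, per seccomp_can_sync_threads() above; a negative return is
 * an ordinary error.
 */
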
/**
 * seccomp_prepare_filter: Prepares a seccomp filter for use.
 * @fprog: BPF program to install
 *
 * Returns filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
{
        struct seccomp_filter *sfilter;
        int ret;
        const bool save_orig = IS_ENABLED(CONFIG_CHECKPOINT_RESTORE);

        if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
                return ERR_PTR(-EINVAL);

        BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));

        /*
         * Installing a seccomp filter requires that the task has
         * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
         * This avoids scenarios where unprivileged tasks can affect the
         * behavior of privileged children.
         */
        if (!task_no_new_privs(current) &&
            security_capable(current_cred(), current_user_ns(),
                             CAP_SYS_ADMIN, CAP_OPT_NOAUDIT) != 0)
                return ERR_PTR(-EACCES);

        /* Allocate a new seccomp_filter */
        sfilter = kzalloc(sizeof(*sfilter), GFP_KERNEL | __GFP_NOWARN);
        if (!sfilter)
                return ERR_PTR(-ENOMEM);

        mutex_init(&sfilter->notify_lock);
        ret = bpf_prog_create_from_user(&sfilter->prog, fprog,
                                        seccomp_check_filter, save_orig);
        if (ret < 0) {
                kfree(sfilter);
                return ERR_PTR(ret);
        }

        refcount_set(&sfilter->usage, 1);

        return sfilter;
}

/**
 * seccomp_prepare_user_filter - prepares a user-supplied sock_fprog
 * @user_filter: pointer to the user data containing a sock_fprog.
 *
 * Returns the prepared filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *
seccomp_prepare_user_filter(const char __user *user_filter)
{
        struct sock_fprog fprog;
        struct seccomp_filter *filter = ERR_PTR(-EFAULT);

#ifdef CONFIG_COMPAT
        if (in_compat_syscall()) {
                struct compat_sock_fprog fprog32;
                if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
                        goto out;
                fprog.len = fprog32.len;
                fprog.filter = compat_ptr(fprog32.filter);
        } else /* falls through to the if below. */
#endif
        if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
                goto out;
        filter = seccomp_prepare_filter(&fprog);
out:
        return filter;
}

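/*
 * For illustration (userspace, not compiled here): the blob copied in above
 * is just a struct sock_fprog pointing at classic BPF instructions, e.g.
 * wrapping the hypothetical insns[] sketched near populate_seccomp_data():
 *
 *	struct sock_fprog prog = {
 *		.len = sizeof(insns) / sizeof(insns[0]),
 *		.filter = insns,
 *	};
 */
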
/**
 * seccomp_attach_filter: validate and attach filter
 * @flags:  flags to change filter behavior
 * @filter: seccomp filter to add to the current process
 *
 * Caller must be holding current->sighand->siglock lock.
 *
 * Returns 0 on success, -ve on error, or
 *   - in TSYNC mode: the pid of a thread which was either not in the correct
 *     seccomp mode or did not have an ancestral seccomp filter
 *   - in NEW_LISTENER mode: the fd of the new listener
 */
static long seccomp_attach_filter(unsigned int flags,
                                  struct seccomp_filter *filter)
{
        unsigned long total_insns;
        struct seccomp_filter *walker;

        assert_spin_locked(&current->sighand->siglock);

        /* Validate resulting filter length. */
        total_insns = filter->prog->len;
        for (walker = current->seccomp.filter; walker; walker = walker->prev)
                total_insns += walker->prog->len + 4;  /* 4 instr penalty */
        if (total_insns > MAX_INSNS_PER_PATH)
                return -ENOMEM;

        /* If thread sync has been requested, check that it is possible. */
        if (flags & SECCOMP_FILTER_FLAG_TSYNC) {
                int ret;

                ret = seccomp_can_sync_threads();
                if (ret)
                        return ret;
        }

        /* Set log flag, if present. */
        if (flags & SECCOMP_FILTER_FLAG_LOG)
                filter->log = true;

        /*
         * If there is an existing filter, make it the prev and don't drop its
         * task reference.
         */
        filter->prev = current->seccomp.filter;
        current->seccomp.filter = filter;

        /* Now that the new filter is in place, synchronize to all threads. */
        if (flags & SECCOMP_FILTER_FLAG_TSYNC)
                seccomp_sync_threads(flags);

        return 0;
}

static void __get_seccomp_filter(struct seccomp_filter *filter)
{
        refcount_inc(&filter->usage);
}

/* get_seccomp_filter - increments the reference count of the filter on @tsk */
void get_seccomp_filter(struct task_struct *tsk)
{
        struct seccomp_filter *orig = tsk->seccomp.filter;
        if (!orig)
                return;
        __get_seccomp_filter(orig);
}

static inline void seccomp_filter_free(struct seccomp_filter *filter)
{
        if (filter) {
                bpf_prog_destroy(filter->prog);
                kfree(filter);
        }
}

static void __put_seccomp_filter(struct seccomp_filter *orig)
{
        /* Clean up single-reference branches iteratively. */
        while (orig && refcount_dec_and_test(&orig->usage)) {
                struct seccomp_filter *freeme = orig;
                orig = orig->prev;
                seccomp_filter_free(freeme);
        }
}

/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
void put_seccomp_filter(struct task_struct *tsk)
{
        __put_seccomp_filter(tsk->seccomp.filter);
}

static void seccomp_init_siginfo(kernel_siginfo_t *info, int syscall, int reason)
{
        clear_siginfo(info);
        info->si_signo = SIGSYS;
        info->si_code = SYS_SECCOMP;
        info->si_call_addr = (void __user *)KSTK_EIP(current);
        info->si_errno = reason;
        info->si_arch = syscall_get_arch(current);
        info->si_syscall = syscall;
}

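/*
 * Illustrative only (userspace, not part of this file): a process using
 * SECCOMP_RET_TRAP typically installs a SIGSYS handler and inspects the
 * fields filled in by seccomp_init_siginfo() above, e.g.:
 *
 *	static void sigsys_handler(int sig, siginfo_t *si, void *ucontext)
 *	{
 *		if (si->si_code == SYS_SECCOMP)
 *			emulate_syscall(si->si_syscall, si->si_errno);
 *	}
 *
 * where emulate_syscall() is a hypothetical helper; si_errno carries the
 * 16 bits of SECCOMP_RET_DATA chosen by the filter.
 */
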
/**
 * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
static void seccomp_send_sigsys(int syscall, int reason)
{
        struct kernel_siginfo info;
        seccomp_init_siginfo(&info, syscall, reason);
        force_sig_info(&info);
}
#endif	/* CONFIG_SECCOMP_FILTER */

/* For use with seccomp_actions_logged */
#define SECCOMP_LOG_KILL_PROCESS	(1 << 0)
#define SECCOMP_LOG_KILL_THREAD		(1 << 1)
#define SECCOMP_LOG_TRAP		(1 << 2)
#define SECCOMP_LOG_ERRNO		(1 << 3)
#define SECCOMP_LOG_TRACE		(1 << 4)
#define SECCOMP_LOG_LOG			(1 << 5)
#define SECCOMP_LOG_ALLOW		(1 << 6)
#define SECCOMP_LOG_USER_NOTIF		(1 << 7)

static u32 seccomp_actions_logged = SECCOMP_LOG_KILL_PROCESS |
                                    SECCOMP_LOG_KILL_THREAD  |
                                    SECCOMP_LOG_TRAP  |
                                    SECCOMP_LOG_ERRNO |
                                    SECCOMP_LOG_USER_NOTIF |
                                    SECCOMP_LOG_TRACE |
                                    SECCOMP_LOG_LOG;

static inline void seccomp_log(unsigned long syscall, long signr, u32 action,
                               bool requested)
{
        bool log = false;

        switch (action) {
        case SECCOMP_RET_ALLOW:
                break;
        case SECCOMP_RET_TRAP:
                log = requested && seccomp_actions_logged & SECCOMP_LOG_TRAP;
                break;
        case SECCOMP_RET_ERRNO:
                log = requested && seccomp_actions_logged & SECCOMP_LOG_ERRNO;
                break;
        case SECCOMP_RET_TRACE:
                log = requested && seccomp_actions_logged & SECCOMP_LOG_TRACE;
                break;
        case SECCOMP_RET_USER_NOTIF:
                log = requested && seccomp_actions_logged & SECCOMP_LOG_USER_NOTIF;
                break;
        case SECCOMP_RET_LOG:
                log = seccomp_actions_logged & SECCOMP_LOG_LOG;
                break;
        case SECCOMP_RET_KILL_THREAD:
                log = seccomp_actions_logged & SECCOMP_LOG_KILL_THREAD;
                break;
        case SECCOMP_RET_KILL_PROCESS:
        default:
                log = seccomp_actions_logged & SECCOMP_LOG_KILL_PROCESS;
        }

        /*
         * Emit an audit message when the action is RET_KILL_*, RET_LOG, or the
         * FILTER_FLAG_LOG bit was set. The admin has the ability to silence
         * any action from being logged by removing the action name from the
         * seccomp_actions_logged sysctl.
         */
        if (!log)
                return;

        audit_seccomp(syscall, signr, action);
}

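/*
 * For illustration: the mask above is exposed as a space-separated list of
 * action names in /proc/sys/kernel/seccomp/actions_logged (see the sysctl
 * handlers at the bottom of this file). An administrator could, say, stop
 * auditing SECCOMP_RET_TRAP events by writing a list that omits "trap":
 *
 *	# echo "kill_process kill_thread errno user_notif trace log" \
 *		> /proc/sys/kernel/seccomp/actions_logged
 */
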
/*
 * Secure computing mode 1 allows only read/write/exit/sigreturn.
 * To be fully secure this must be combined with rlimit
 * to limit the stack allocations too.
 */
static const int mode1_syscalls[] = {
        __NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
        0, /* null terminated */
};

static void __secure_computing_strict(int this_syscall)
{
        const int *syscall_whitelist = mode1_syscalls;
#ifdef CONFIG_COMPAT
        if (in_compat_syscall())
                syscall_whitelist = get_compat_mode1_syscalls();
#endif
        do {
                if (*syscall_whitelist == this_syscall)
                        return;
        } while (*++syscall_whitelist);

#ifdef SECCOMP_DEBUG
        dump_stack();
#endif
        seccomp_log(this_syscall, SIGKILL, SECCOMP_RET_KILL_THREAD, true);
        do_exit(SIGKILL);
}

#ifndef CONFIG_HAVE_ARCH_SECCOMP_FILTER
void secure_computing_strict(int this_syscall)
{
        int mode = current->seccomp.mode;

        if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
            unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
                return;

        if (mode == SECCOMP_MODE_DISABLED)
                return;
        else if (mode == SECCOMP_MODE_STRICT)
                __secure_computing_strict(this_syscall);
        else
                BUG();
}
#else

#ifdef CONFIG_SECCOMP_FILTER
static u64 seccomp_next_notify_id(struct seccomp_filter *filter)
{
        /*
         * Note: overflow is ok here, the id just needs to be unique per
         * filter.
         */
        lockdep_assert_held(&filter->notify_lock);
        return filter->notif->next_id++;
}

static int seccomp_do_user_notification(int this_syscall,
                                        struct seccomp_filter *match,
                                        const struct seccomp_data *sd)
{
        int err;
        u32 flags = 0;
        long ret = 0;
        struct seccomp_knotif n = {};

        mutex_lock(&match->notify_lock);
        err = -ENOSYS;
        if (!match->notif)
                goto out;

        n.task = current;
        n.state = SECCOMP_NOTIFY_INIT;
        n.data = sd;
        n.id = seccomp_next_notify_id(match);
        init_completion(&n.ready);
        list_add(&n.list, &match->notif->notifications);

        up(&match->notif->request);
        wake_up_poll(&match->notif->wqh, EPOLLIN | EPOLLRDNORM);
        mutex_unlock(&match->notify_lock);

        /*
         * This is where we wait for a reply from userspace.
         */
        err = wait_for_completion_interruptible(&n.ready);
        mutex_lock(&match->notify_lock);
        if (err == 0) {
                ret = n.val;
                err = n.error;
                flags = n.flags;
        }

        /*
         * Note that it's possible the listener died in between the time when
         * we were notified of a response (or a signal) and when we were able
         * to re-acquire the lock, so only delete from the list if the
         * notification actually exists.
         *
         * Also note that this test is only valid because there's no way to
         * *reattach* to a notifier right now. If one is added, we'll need to
         * keep track of the notif itself and make sure they match here.
         */
        if (match->notif)
                list_del(&n.list);
out:
        mutex_unlock(&match->notify_lock);

        /* Userspace requests to continue the syscall. */
        if (flags & SECCOMP_USER_NOTIF_FLAG_CONTINUE)
                return 0;

        syscall_set_return_value(current, task_pt_regs(current),
                                 err, ret);
        return -1;
}

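/*
 * Illustrative only (userspace supervisor, not part of this file): the other
 * half of the handshake above reads a request from the listener fd and
 * answers it, roughly:
 *
 *	struct seccomp_notif req = {};
 *	struct seccomp_notif_resp resp = {};
 *
 *	ioctl(listener_fd, SECCOMP_IOCTL_NOTIF_RECV, &req);
 *	resp.id = req.id;
 *	resp.error = -EPERM;	// or resp.flags =
 *				//    SECCOMP_USER_NOTIF_FLAG_CONTINUE
 *	ioctl(listener_fd, SECCOMP_IOCTL_NOTIF_SEND, &resp);
 *
 * listener_fd here is the fd returned by SECCOMP_FILTER_FLAG_NEW_LISTENER
 * (see seccomp_set_mode_filter() below); error handling is omitted.
 */
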
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
                            const bool recheck_after_trace)
{
        u32 filter_ret, action;
        struct seccomp_filter *match = NULL;
        int data;
        struct seccomp_data sd_local;

        /*
         * Make sure that any changes to mode from another thread have
         * been seen after TIF_SECCOMP was seen.
         */
        rmb();

        if (!sd) {
                populate_seccomp_data(&sd_local);
                sd = &sd_local;
        }

        filter_ret = seccomp_run_filters(sd, &match);
        data = filter_ret & SECCOMP_RET_DATA;
        action = filter_ret & SECCOMP_RET_ACTION_FULL;

        switch (action) {
        case SECCOMP_RET_ERRNO:
                /* Set low-order bits as an errno, capped at MAX_ERRNO. */
                if (data > MAX_ERRNO)
                        data = MAX_ERRNO;
                syscall_set_return_value(current, task_pt_regs(current),
                                         -data, 0);
                goto skip;

        case SECCOMP_RET_TRAP:
                /* Show the handler the original registers. */
                syscall_rollback(current, task_pt_regs(current));
                /* Let the filter pass back 16 bits of data. */
                seccomp_send_sigsys(this_syscall, data);
                goto skip;

        case SECCOMP_RET_TRACE:
                /* We've been put in this state by the ptracer already. */
                if (recheck_after_trace)
                        return 0;

                /* ENOSYS these calls if there is no tracer attached. */
                if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) {
                        syscall_set_return_value(current,
                                                 task_pt_regs(current),
                                                 -ENOSYS, 0);
                        goto skip;
                }

                /* Allow the BPF to provide the event message */
                ptrace_event(PTRACE_EVENT_SECCOMP, data);
                /*
                 * The delivery of a fatal signal during event
                 * notification may silently skip tracer notification,
                 * which could leave us with a potentially unmodified
                 * syscall that the tracer would have liked to have
                 * changed. Since the process is about to die, we just
                 * force the syscall to be skipped and let the signal
                 * kill the process and correctly handle any tracer exit
                 * notifications.
                 */
                if (fatal_signal_pending(current))
                        goto skip;
                /* Check if the tracer forced the syscall to be skipped. */
                this_syscall = syscall_get_nr(current, task_pt_regs(current));
                if (this_syscall < 0)
                        goto skip;

                /*
                 * Recheck the syscall, since it may have changed. This
                 * intentionally uses a NULL struct seccomp_data to force
                 * a reload of all registers. This does not goto skip since
                 * a skip would have already been reported.
                 */
                if (__seccomp_filter(this_syscall, NULL, true))
                        return -1;

                return 0;

        case SECCOMP_RET_USER_NOTIF:
                if (seccomp_do_user_notification(this_syscall, match, sd))
                        goto skip;

                return 0;

        case SECCOMP_RET_LOG:
                seccomp_log(this_syscall, 0, action, true);
                return 0;

        case SECCOMP_RET_ALLOW:
                /*
                 * Note that the "match" filter will always be NULL for
                 * this action since SECCOMP_RET_ALLOW is the starting
                 * state in seccomp_run_filters().
                 */
                return 0;

        case SECCOMP_RET_KILL_THREAD:
        case SECCOMP_RET_KILL_PROCESS:
        default:
                seccomp_log(this_syscall, SIGSYS, action, true);
                /* Dump core only if this is the last remaining thread. */
                if (action == SECCOMP_RET_KILL_PROCESS ||
                    get_nr_threads(current) == 1) {
                        kernel_siginfo_t info;

                        /* Show the original registers in the dump. */
                        syscall_rollback(current, task_pt_regs(current));
                        /* Trigger a manual coredump since do_exit skips it. */
                        seccomp_init_siginfo(&info, this_syscall, data);
                        do_coredump(&info);
                }
                if (action == SECCOMP_RET_KILL_PROCESS)
                        do_group_exit(SIGSYS);
                else
                        do_exit(SIGSYS);
        }

        unreachable();

skip:
        seccomp_log(this_syscall, 0, action, match ? match->log : false);
        return -1;
}
#else
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
                            const bool recheck_after_trace)
{
        BUG();
}
#endif

int __secure_computing(const struct seccomp_data *sd)
{
        int mode = current->seccomp.mode;
        int this_syscall;

        if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
            unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
                return 0;

        this_syscall = sd ? sd->nr :
                syscall_get_nr(current, task_pt_regs(current));

        switch (mode) {
        case SECCOMP_MODE_STRICT:
                __secure_computing_strict(this_syscall);  /* may call do_exit */
                return 0;
        case SECCOMP_MODE_FILTER:
                return __seccomp_filter(this_syscall, sd, false);
        default:
                BUG();
        }
}
#endif /* CONFIG_HAVE_ARCH_SECCOMP_FILTER */

long prctl_get_seccomp(void)
{
        return current->seccomp.mode;
}

/**
 * seccomp_set_mode_strict: internal function for setting strict seccomp
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_strict(void)
{
        const unsigned long seccomp_mode = SECCOMP_MODE_STRICT;
        long ret = -EINVAL;

        spin_lock_irq(&current->sighand->siglock);

        if (!seccomp_may_assign_mode(seccomp_mode))
                goto out;

#ifdef TIF_NOTSC
        disable_TSC();
#endif
        seccomp_assign_mode(current, seccomp_mode, 0);
        ret = 0;

out:
        spin_unlock_irq(&current->sighand->siglock);

        return ret;
}

#ifdef CONFIG_SECCOMP_FILTER
static int seccomp_notify_release(struct inode *inode, struct file *file)
{
        struct seccomp_filter *filter = file->private_data;
        struct seccomp_knotif *knotif;

        if (!filter)
                return 0;

        mutex_lock(&filter->notify_lock);

        /*
         * If this file is being closed because e.g. the task who owned it
         * died, let's wake everyone up who was waiting on us.
         */
        list_for_each_entry(knotif, &filter->notif->notifications, list) {
                if (knotif->state == SECCOMP_NOTIFY_REPLIED)
                        continue;

                knotif->state = SECCOMP_NOTIFY_REPLIED;
                knotif->error = -ENOSYS;
                knotif->val = 0;

                complete(&knotif->ready);
        }

        kfree(filter->notif);
        filter->notif = NULL;
        mutex_unlock(&filter->notify_lock);
        __put_seccomp_filter(filter);
        return 0;
}

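/*
 * A consequence worth noting, for illustration: once the listener fd is
 * gone, every task still blocked in seccomp_do_user_notification() above
 * has its syscall fail with -ENOSYS, and any later syscall hitting
 * SECCOMP_RET_USER_NOTIF fails the same way, since match->notif is now
 * NULL. A supervisor that must not strand its tracees should therefore
 * keep the listener fd alive for the life of the filtered process.
 */
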
static long seccomp_notify_recv(struct seccomp_filter *filter,
                                void __user *buf)
{
        struct seccomp_knotif *knotif = NULL, *cur;
        struct seccomp_notif unotif;
        ssize_t ret;

        /* Verify that we're not given garbage to keep struct extensible. */
        ret = check_zeroed_user(buf, sizeof(unotif));
        if (ret < 0)
                return ret;
        if (!ret)
                return -EINVAL;

        memset(&unotif, 0, sizeof(unotif));

        ret = down_interruptible(&filter->notif->request);
        if (ret < 0)
                return ret;

        mutex_lock(&filter->notify_lock);
        list_for_each_entry(cur, &filter->notif->notifications, list) {
                if (cur->state == SECCOMP_NOTIFY_INIT) {
                        knotif = cur;
                        break;
                }
        }

        /*
         * If we didn't find a notification, it could be that the task was
         * interrupted by a fatal signal between the time we were woken and
         * when we were able to acquire the notify_lock mutex.
         */
        if (!knotif) {
                ret = -ENOENT;
                goto out;
        }

        unotif.id = knotif->id;
        unotif.pid = task_pid_vnr(knotif->task);
        unotif.data = *(knotif->data);

        knotif->state = SECCOMP_NOTIFY_SENT;
        wake_up_poll(&filter->notif->wqh, EPOLLOUT | EPOLLWRNORM);
        ret = 0;
out:
        mutex_unlock(&filter->notify_lock);

        if (ret == 0 && copy_to_user(buf, &unotif, sizeof(unotif))) {
                ret = -EFAULT;

                /*
                 * Userspace screwed up. To make sure that we keep this
                 * notification alive, let's reset it back to INIT. It
                 * may have died when we released the lock, so we need to make
                 * sure it's still around.
                 */
                knotif = NULL;
                mutex_lock(&filter->notify_lock);
                list_for_each_entry(cur, &filter->notif->notifications, list) {
                        if (cur->id == unotif.id) {
                                knotif = cur;
                                break;
                        }
                }

                if (knotif) {
                        knotif->state = SECCOMP_NOTIFY_INIT;
                        up(&filter->notif->request);
                }
                mutex_unlock(&filter->notify_lock);
        }

        return ret;
}

static long seccomp_notify_send(struct seccomp_filter *filter,
                                void __user *buf)
{
        struct seccomp_notif_resp resp = {};
        struct seccomp_knotif *knotif = NULL, *cur;
        long ret;

        if (copy_from_user(&resp, buf, sizeof(resp)))
                return -EFAULT;

        if (resp.flags & ~SECCOMP_USER_NOTIF_FLAG_CONTINUE)
                return -EINVAL;

        if ((resp.flags & SECCOMP_USER_NOTIF_FLAG_CONTINUE) &&
            (resp.error || resp.val))
                return -EINVAL;

        ret = mutex_lock_interruptible(&filter->notify_lock);
        if (ret < 0)
                return ret;

        list_for_each_entry(cur, &filter->notif->notifications, list) {
                if (cur->id == resp.id) {
                        knotif = cur;
                        break;
                }
        }

        if (!knotif) {
                ret = -ENOENT;
                goto out;
        }

        /* Allow exactly one reply. */
        if (knotif->state != SECCOMP_NOTIFY_SENT) {
                ret = -EINPROGRESS;
                goto out;
        }

        ret = 0;
        knotif->state = SECCOMP_NOTIFY_REPLIED;
        knotif->error = resp.error;
        knotif->val = resp.val;
        knotif->flags = resp.flags;
        complete(&knotif->ready);
out:
        mutex_unlock(&filter->notify_lock);
        return ret;
}

static long seccomp_notify_id_valid(struct seccomp_filter *filter,
                                    void __user *buf)
{
        struct seccomp_knotif *knotif = NULL;
        u64 id;
        long ret;

        if (copy_from_user(&id, buf, sizeof(id)))
                return -EFAULT;

        ret = mutex_lock_interruptible(&filter->notify_lock);
        if (ret < 0)
                return ret;

        ret = -ENOENT;
        list_for_each_entry(knotif, &filter->notif->notifications, list) {
                if (knotif->id == id) {
                        if (knotif->state == SECCOMP_NOTIFY_SENT)
                                ret = 0;
                        goto out;
                }
        }

out:
        mutex_unlock(&filter->notify_lock);
        return ret;
}

static long seccomp_notify_ioctl(struct file *file, unsigned int cmd,
                                 unsigned long arg)
{
        struct seccomp_filter *filter = file->private_data;
        void __user *buf = (void __user *)arg;

        switch (cmd) {
        case SECCOMP_IOCTL_NOTIF_RECV:
                return seccomp_notify_recv(filter, buf);
        case SECCOMP_IOCTL_NOTIF_SEND:
                return seccomp_notify_send(filter, buf);
        case SECCOMP_IOCTL_NOTIF_ID_VALID:
                return seccomp_notify_id_valid(filter, buf);
        default:
                return -EINVAL;
        }
}

static __poll_t seccomp_notify_poll(struct file *file,
                                    struct poll_table_struct *poll_tab)
{
        struct seccomp_filter *filter = file->private_data;
        __poll_t ret = 0;
        struct seccomp_knotif *cur;

        poll_wait(file, &filter->notif->wqh, poll_tab);

        if (mutex_lock_interruptible(&filter->notify_lock) < 0)
                return EPOLLERR;

        list_for_each_entry(cur, &filter->notif->notifications, list) {
                if (cur->state == SECCOMP_NOTIFY_INIT)
                        ret |= EPOLLIN | EPOLLRDNORM;
                if (cur->state == SECCOMP_NOTIFY_SENT)
                        ret |= EPOLLOUT | EPOLLWRNORM;
                if ((ret & EPOLLIN) && (ret & EPOLLOUT))
                        break;
        }

        mutex_unlock(&filter->notify_lock);

        return ret;
}

static const struct file_operations seccomp_notify_ops = {
        .poll = seccomp_notify_poll,
        .release = seccomp_notify_release,
        .unlocked_ioctl = seccomp_notify_ioctl,
};

static struct file *init_listener(struct seccomp_filter *filter)
{
        struct file *ret = ERR_PTR(-EBUSY);
        struct seccomp_filter *cur;

        for (cur = current->seccomp.filter; cur; cur = cur->prev) {
                if (cur->notif)
                        goto out;
        }

        ret = ERR_PTR(-ENOMEM);
        filter->notif = kzalloc(sizeof(*(filter->notif)), GFP_KERNEL);
        if (!filter->notif)
                goto out;

        sema_init(&filter->notif->request, 0);
        filter->notif->next_id = get_random_u64();
        INIT_LIST_HEAD(&filter->notif->notifications);
        init_waitqueue_head(&filter->notif->wqh);

        ret = anon_inode_getfile("seccomp notify", &seccomp_notify_ops,
                                 filter, O_RDWR);
        if (IS_ERR(ret))
                goto out_notif;

        /* The file has a reference to it now */
        __get_seccomp_filter(filter);

out_notif:
        if (IS_ERR(ret))
                kfree(filter->notif);
out:
        return ret;
}

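/*
 * Illustrative only (userspace): a supervisor obtains the listener fd served
 * by the file_operations above in one shot when installing the filter,
 * assuming a prepared prog:
 *
 *	int fd = syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
 *			 SECCOMP_FILTER_FLAG_NEW_LISTENER, &prog);
 *
 * The fd can then be handed to a more privileged process (e.g. over a unix
 * socket) and driven with the SECCOMP_IOCTL_NOTIF_* ioctls and poll() as
 * implemented above.
 */
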
/**
 * seccomp_set_mode_filter: internal function for setting seccomp filter
 * @flags:  flags to change filter behavior
 * @filter: struct sock_fprog containing filter
 *
 * This function may be called repeatedly to install additional filters.
 * Every filter successfully installed will be evaluated (in reverse order)
 * for each system call the task makes.
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_filter(unsigned int flags,
                                    const char __user *filter)
{
        const unsigned long seccomp_mode = SECCOMP_MODE_FILTER;
        struct seccomp_filter *prepared = NULL;
        long ret = -EINVAL;
        int listener = -1;
        struct file *listener_f = NULL;

        /* Validate flags. */
        if (flags & ~SECCOMP_FILTER_FLAG_MASK)
                return -EINVAL;

        /*
         * In the successful case, NEW_LISTENER returns the new listener fd.
         * But in the failure case, TSYNC returns the thread that died. If you
         * combine these two flags, there's no way to tell whether something
         * succeeded or failed. So, let's disallow this combination.
         */
        if ((flags & SECCOMP_FILTER_FLAG_TSYNC) &&
            (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER))
                return -EINVAL;

        /* Prepare the new filter before holding any locks. */
        prepared = seccomp_prepare_user_filter(filter);
        if (IS_ERR(prepared))
                return PTR_ERR(prepared);

        if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) {
                listener = get_unused_fd_flags(O_CLOEXEC);
                if (listener < 0) {
                        ret = listener;
                        goto out_free;
                }

                listener_f = init_listener(prepared);
                if (IS_ERR(listener_f)) {
                        put_unused_fd(listener);
                        ret = PTR_ERR(listener_f);
                        goto out_free;
                }
        }

        /*
         * Make sure we cannot change seccomp or nnp state via TSYNC
         * while another thread is in the middle of calling exec.
         */
        if (flags & SECCOMP_FILTER_FLAG_TSYNC &&
            mutex_lock_killable(&current->signal->cred_guard_mutex))
                goto out_put_fd;

        spin_lock_irq(&current->sighand->siglock);

        if (!seccomp_may_assign_mode(seccomp_mode))
                goto out;

        ret = seccomp_attach_filter(flags, prepared);
        if (ret)
                goto out;
        /* Do not free the successfully attached filter. */
        prepared = NULL;

        seccomp_assign_mode(current, seccomp_mode, flags);
out:
        spin_unlock_irq(&current->sighand->siglock);
        if (flags & SECCOMP_FILTER_FLAG_TSYNC)
                mutex_unlock(&current->signal->cred_guard_mutex);
out_put_fd:
        if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) {
                if (ret) {
                        listener_f->private_data = NULL;
                        fput(listener_f);
                        put_unused_fd(listener);
                } else {
                        fd_install(listener, listener_f);
                        ret = listener;
                }
        }
out_free:
        seccomp_filter_free(prepared);
        return ret;
}
#else
static inline long seccomp_set_mode_filter(unsigned int flags,
                                           const char __user *filter)
{
        return -EINVAL;
}
#endif

static long seccomp_get_action_avail(const char __user *uaction)
{
        u32 action;

        if (copy_from_user(&action, uaction, sizeof(action)))
                return -EFAULT;

        switch (action) {
        case SECCOMP_RET_KILL_PROCESS:
        case SECCOMP_RET_KILL_THREAD:
        case SECCOMP_RET_TRAP:
        case SECCOMP_RET_ERRNO:
        case SECCOMP_RET_USER_NOTIF:
        case SECCOMP_RET_TRACE:
        case SECCOMP_RET_LOG:
        case SECCOMP_RET_ALLOW:
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static long seccomp_get_notif_sizes(void __user *usizes)
{
        struct seccomp_notif_sizes sizes = {
                .seccomp_notif = sizeof(struct seccomp_notif),
                .seccomp_notif_resp = sizeof(struct seccomp_notif_resp),
                .seccomp_data = sizeof(struct seccomp_data),
        };

        if (copy_to_user(usizes, &sizes, sizeof(sizes)))
                return -EFAULT;

        return 0;
}

/* Common entry point for both prctl and syscall. */
static long do_seccomp(unsigned int op, unsigned int flags,
                       void __user *uargs)
{
        switch (op) {
        case SECCOMP_SET_MODE_STRICT:
                if (flags != 0 || uargs != NULL)
                        return -EINVAL;
                return seccomp_set_mode_strict();
        case SECCOMP_SET_MODE_FILTER:
                return seccomp_set_mode_filter(flags, uargs);
        case SECCOMP_GET_ACTION_AVAIL:
                if (flags != 0)
                        return -EINVAL;

                return seccomp_get_action_avail(uargs);
        case SECCOMP_GET_NOTIF_SIZES:
                if (flags != 0)
                        return -EINVAL;

                return seccomp_get_notif_sizes(uargs);
        default:
                return -EINVAL;
        }
}

SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,
                void __user *, uargs)
{
        return do_seccomp(op, flags, uargs);
}

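/*
 * For illustration: the two user-visible entry points funnel into
 * do_seccomp() above. These calls are equivalent for plain filter
 * attachment (only seccomp(2) can pass flags):
 *
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
 *	syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER, 0, &prog);
 */
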
/**
 * prctl_set_seccomp: configures current->seccomp.mode
 * @seccomp_mode: requested mode to use
 * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
 *
 * Returns 0 on success or -EINVAL on failure.
 */
long prctl_set_seccomp(unsigned long seccomp_mode, void __user *filter)
{
        unsigned int op;
        void __user *uargs;

        switch (seccomp_mode) {
        case SECCOMP_MODE_STRICT:
                op = SECCOMP_SET_MODE_STRICT;
                /*
                 * Setting strict mode through prctl always ignored filter,
                 * so make sure it is always NULL here to pass the internal
                 * check in do_seccomp().
                 */
                uargs = NULL;
                break;
        case SECCOMP_MODE_FILTER:
                op = SECCOMP_SET_MODE_FILTER;
                uargs = filter;
                break;
        default:
                return -EINVAL;
        }

        /* prctl interface doesn't have flags, so they are always zero. */
        return do_seccomp(op, 0, uargs);
}

#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
static struct seccomp_filter *get_nth_filter(struct task_struct *task,
                                             unsigned long filter_off)
{
        struct seccomp_filter *orig, *filter;
        unsigned long count;

        /*
         * Note: this is only correct because the caller should be the (ptrace)
         * tracer of the task, otherwise lock_task_sighand is needed.
         */
        spin_lock_irq(&task->sighand->siglock);

        if (task->seccomp.mode != SECCOMP_MODE_FILTER) {
                spin_unlock_irq(&task->sighand->siglock);
                return ERR_PTR(-EINVAL);
        }

        orig = task->seccomp.filter;
        __get_seccomp_filter(orig);
        spin_unlock_irq(&task->sighand->siglock);

        count = 0;
        for (filter = orig; filter; filter = filter->prev)
                count++;

        if (filter_off >= count) {
                filter = ERR_PTR(-ENOENT);
                goto out;
        }

        count -= filter_off;
        for (filter = orig; filter && count > 1; filter = filter->prev)
                count--;

        if (WARN_ON(count != 1 || !filter)) {
                filter = ERR_PTR(-ENOENT);
                goto out;
        }

        __get_seccomp_filter(filter);

out:
        __put_seccomp_filter(orig);
        return filter;
}

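/*
 * Illustrative only (userspace): seccomp_get_filter() below backs the
 * PTRACE_SECCOMP_GET_FILTER request used by checkpoint/restore tools; a
 * tracer first asks for the length, then the instructions:
 *
 *	long len = ptrace(PTRACE_SECCOMP_GET_FILTER, pid, n, NULL);
 *	struct sock_filter *insns = calloc(len, sizeof(*insns));
 *	ptrace(PTRACE_SECCOMP_GET_FILTER, pid, n, insns);
 *
 * where n selects the nth filter as counted by get_nth_filter() above;
 * error handling is omitted.
 */
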
long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
                        void __user *data)
{
        struct seccomp_filter *filter;
        struct sock_fprog_kern *fprog;
        long ret;

        if (!capable(CAP_SYS_ADMIN) ||
            current->seccomp.mode != SECCOMP_MODE_DISABLED) {
                return -EACCES;
        }

        filter = get_nth_filter(task, filter_off);
        if (IS_ERR(filter))
                return PTR_ERR(filter);

        fprog = filter->prog->orig_prog;
        if (!fprog) {
                /* This must be a new non-cBPF filter, since we save
                 * every cBPF filter's orig_prog above when
                 * CONFIG_CHECKPOINT_RESTORE is enabled.
                 */
                ret = -EMEDIUMTYPE;
                goto out;
        }

        ret = fprog->len;
        if (!data)
                goto out;

        if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
                ret = -EFAULT;

out:
        __put_seccomp_filter(filter);
        return ret;
}

long seccomp_get_metadata(struct task_struct *task,
                          unsigned long size, void __user *data)
{
        long ret;
        struct seccomp_filter *filter;
        struct seccomp_metadata kmd = {};

        if (!capable(CAP_SYS_ADMIN) ||
            current->seccomp.mode != SECCOMP_MODE_DISABLED) {
                return -EACCES;
        }

        size = min_t(unsigned long, size, sizeof(kmd));

        if (size < sizeof(kmd.filter_off))
                return -EINVAL;

        if (copy_from_user(&kmd.filter_off, data, sizeof(kmd.filter_off)))
                return -EFAULT;

        filter = get_nth_filter(task, kmd.filter_off);
        if (IS_ERR(filter))
                return PTR_ERR(filter);

        if (filter->log)
                kmd.flags |= SECCOMP_FILTER_FLAG_LOG;

        ret = size;
        if (copy_to_user(data, &kmd, size))
                ret = -EFAULT;

        __put_seccomp_filter(filter);
        return ret;
}
#endif

#ifdef CONFIG_SYSCTL

/* Human readable action names for friendly sysctl interaction */
#define SECCOMP_RET_KILL_PROCESS_NAME	"kill_process"
#define SECCOMP_RET_KILL_THREAD_NAME	"kill_thread"
#define SECCOMP_RET_TRAP_NAME		"trap"
#define SECCOMP_RET_ERRNO_NAME		"errno"
#define SECCOMP_RET_USER_NOTIF_NAME	"user_notif"
#define SECCOMP_RET_TRACE_NAME		"trace"
#define SECCOMP_RET_LOG_NAME		"log"
#define SECCOMP_RET_ALLOW_NAME		"allow"

static const char seccomp_actions_avail[] =
                                SECCOMP_RET_KILL_PROCESS_NAME	" "
                                SECCOMP_RET_KILL_THREAD_NAME	" "
                                SECCOMP_RET_TRAP_NAME		" "
                                SECCOMP_RET_ERRNO_NAME		" "
                                SECCOMP_RET_USER_NOTIF_NAME	" "
                                SECCOMP_RET_TRACE_NAME		" "
                                SECCOMP_RET_LOG_NAME		" "
                                SECCOMP_RET_ALLOW_NAME;

struct seccomp_log_name {
        u32		log;
        const char	*name;
};

static const struct seccomp_log_name seccomp_log_names[] = {
        { SECCOMP_LOG_KILL_PROCESS, SECCOMP_RET_KILL_PROCESS_NAME },
        { SECCOMP_LOG_KILL_THREAD, SECCOMP_RET_KILL_THREAD_NAME },
        { SECCOMP_LOG_TRAP, SECCOMP_RET_TRAP_NAME },
        { SECCOMP_LOG_ERRNO, SECCOMP_RET_ERRNO_NAME },
        { SECCOMP_LOG_USER_NOTIF, SECCOMP_RET_USER_NOTIF_NAME },
        { SECCOMP_LOG_TRACE, SECCOMP_RET_TRACE_NAME },
        { SECCOMP_LOG_LOG, SECCOMP_RET_LOG_NAME },
        { SECCOMP_LOG_ALLOW, SECCOMP_RET_ALLOW_NAME },
        { }
};

static bool seccomp_names_from_actions_logged(char *names, size_t size,
                                              u32 actions_logged,
                                              const char *sep)
{
        const struct seccomp_log_name *cur;
        bool append_sep = false;

        for (cur = seccomp_log_names; cur->name && size; cur++) {
                ssize_t ret;

                if (!(actions_logged & cur->log))
                        continue;

                if (append_sep) {
                        ret = strscpy(names, sep, size);
                        if (ret < 0)
                                return false;

                        names += ret;
                        size -= ret;
                } else
                        append_sep = true;

                ret = strscpy(names, cur->name, size);
                if (ret < 0)
                        return false;

                names += ret;
                size -= ret;
        }

        return true;
}

static bool seccomp_action_logged_from_name(u32 *action_logged,
                                            const char *name)
{
        const struct seccomp_log_name *cur;

        for (cur = seccomp_log_names; cur->name; cur++) {
                if (!strcmp(cur->name, name)) {
                        *action_logged = cur->log;
                        return true;
                }
        }

        return false;
}

static bool seccomp_actions_logged_from_names(u32 *actions_logged, char *names)
{
        char *name;

        *actions_logged = 0;
        while ((name = strsep(&names, " ")) && *name) {
                u32 action_logged = 0;

                if (!seccomp_action_logged_from_name(&action_logged, name))
                        return false;

                *actions_logged |= action_logged;
        }

        return true;
}

static int read_actions_logged(struct ctl_table *ro_table, void __user *buffer,
                               size_t *lenp, loff_t *ppos)
{
        char names[sizeof(seccomp_actions_avail)];
        struct ctl_table table;

        memset(names, 0, sizeof(names));

        if (!seccomp_names_from_actions_logged(names, sizeof(names),
                                               seccomp_actions_logged, " "))
                return -EINVAL;

        table = *ro_table;
        table.data = names;
        table.maxlen = sizeof(names);
        return proc_dostring(&table, 0, buffer, lenp, ppos);
}

static int write_actions_logged(struct ctl_table *ro_table, void __user *buffer,
                                size_t *lenp, loff_t *ppos, u32 *actions_logged)
{
        char names[sizeof(seccomp_actions_avail)];
        struct ctl_table table;
        int ret;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        memset(names, 0, sizeof(names));

        table = *ro_table;
        table.data = names;
        table.maxlen = sizeof(names);
        ret = proc_dostring(&table, 1, buffer, lenp, ppos);
        if (ret)
                return ret;

        if (!seccomp_actions_logged_from_names(actions_logged, table.data))
                return -EINVAL;

        if (*actions_logged & SECCOMP_LOG_ALLOW)
                return -EINVAL;

        seccomp_actions_logged = *actions_logged;
        return 0;
}

static void audit_actions_logged(u32 actions_logged, u32 old_actions_logged,
                                 int ret)
{
        char names[sizeof(seccomp_actions_avail)];
        char old_names[sizeof(seccomp_actions_avail)];
        const char *new = names;
        const char *old = old_names;

        if (!audit_enabled)
                return;

        memset(names, 0, sizeof(names));
        memset(old_names, 0, sizeof(old_names));

        if (ret)
                new = "?";
        else if (!actions_logged)
                new = "(none)";
        else if (!seccomp_names_from_actions_logged(names, sizeof(names),
                                                    actions_logged, ","))
                new = "?";

        if (!old_actions_logged)
                old = "(none)";
        else if (!seccomp_names_from_actions_logged(old_names,
                                                    sizeof(old_names),
                                                    old_actions_logged, ","))
                old = "?";

        return audit_seccomp_actions_logged(new, old, !ret);
}

static int seccomp_actions_logged_handler(struct ctl_table *ro_table, int write,
                                          void __user *buffer, size_t *lenp,
                                          loff_t *ppos)
{
        int ret;

        if (write) {
                u32 actions_logged = 0;
                u32 old_actions_logged = seccomp_actions_logged;

                ret = write_actions_logged(ro_table, buffer, lenp, ppos,
                                           &actions_logged);
                audit_actions_logged(actions_logged, old_actions_logged, ret);
        } else
                ret = read_actions_logged(ro_table, buffer, lenp, ppos);

        return ret;
}

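/*
 * For illustration: the handlers above are wired up below as
 * /proc/sys/kernel/seccomp/actions_avail (read-only list of every action
 * name) and /proc/sys/kernel/seccomp/actions_logged (read-write subset to
 * audit). For example:
 *
 *	$ cat /proc/sys/kernel/seccomp/actions_avail
 *	kill_process kill_thread trap errno user_notif trace log allow
 */
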
static struct ctl_path seccomp_sysctl_path[] = {
        { .procname = "kernel", },
        { .procname = "seccomp", },
        { }
};

static struct ctl_table seccomp_sysctl_table[] = {
        {
                .procname	= "actions_avail",
                .data		= (void *) &seccomp_actions_avail,
                .maxlen		= sizeof(seccomp_actions_avail),
                .mode		= 0444,
                .proc_handler	= proc_dostring,
        },
        {
                .procname	= "actions_logged",
                .mode		= 0644,
                .proc_handler	= seccomp_actions_logged_handler,
        },
        { }
};

static int __init seccomp_sysctl_init(void)
{
        struct ctl_table_header *hdr;

        hdr = register_sysctl_paths(seccomp_sysctl_path, seccomp_sysctl_table);
        if (!hdr)
                pr_warn("seccomp: sysctl registration failed\n");
        else
                kmemleak_not_leak(hdr);

        return 0;
}

device_initcall(seccomp_sysctl_init)

#endif /* CONFIG_SYSCTL */