// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/exec.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * #!-checking implemented by tytso.
 */
/*
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
 * current->executable is only used by the procfs. This allows a dispatch
 * table to check for several different types of binary formats. We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */

#include <linux/kernel_read_file.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/signal.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/vmalloc.h>
#include <linux/io_uring.h>
#include <linux/syscall_user_dispatch.h>
#include <linux/coredump.h>
#include <linux/time_namespace.h>
#include <linux/user_events.h>
#include <linux/rseq.h>
#include <linux/ksm.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

#include <trace/events/task.h>
#include "internal.h"

#include <trace/events/sched.h>

static int bprm_creds_from_file(struct linux_binprm *bprm);

int suid_dumpable = 0;

static LIST_HEAD(formats);
static DEFINE_RWLOCK(binfmt_lock);

void __register_binfmt(struct linux_binfmt * fmt, int insert)
{
	write_lock(&binfmt_lock);
	insert ? list_add(&fmt->lh, &formats) :
		 list_add_tail(&fmt->lh, &formats);
	write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(__register_binfmt);

void unregister_binfmt(struct linux_binfmt * fmt)
{
	write_lock(&binfmt_lock);
	list_del(&fmt->lh);
	write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(unregister_binfmt);

static inline void put_binfmt(struct linux_binfmt * fmt)
{
	module_put(fmt->module);
}

bool path_noexec(const struct path *path)
{
	return (path->mnt->mnt_flags & MNT_NOEXEC) ||
	       (path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC);
}

#ifdef CONFIG_USELIB
/*
 * Note that a shared library must be both readable and executable due to
 * security reasons.
 *
 * Also note that we take the address to load from the file itself.
 */
SYSCALL_DEFINE1(uselib, const char __user *, library)
{
	struct linux_binfmt *fmt;
	struct file *file;
	struct filename *tmp = getname(library);
	int error = PTR_ERR(tmp);
	static const struct open_flags uselib_flags = {
		.open_flag = O_LARGEFILE | O_RDONLY,
		.acc_mode = MAY_READ | MAY_EXEC,
		.intent = LOOKUP_OPEN,
		.lookup_flags = LOOKUP_FOLLOW,
	};

	if (IS_ERR(tmp))
		goto out;

	file = do_filp_open(AT_FDCWD, tmp, &uselib_flags);
	putname(tmp);
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto out;

	/*
	 * may_open() has already checked for this, so it should be
	 * impossible to trip now. But we need to be extra cautious
	 * and check again at the very end too.
	 */
	error = -EACCES;
	if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
			 path_noexec(&file->f_path)))
		goto exit;

	error = -ENOEXEC;

	read_lock(&binfmt_lock);
	list_for_each_entry(fmt, &formats, lh) {
		if (!fmt->load_shlib)
			continue;
		if (!try_module_get(fmt->module))
			continue;
		read_unlock(&binfmt_lock);
		error = fmt->load_shlib(file);
		read_lock(&binfmt_lock);
		put_binfmt(fmt);
		if (error != -ENOEXEC)
			break;
	}
	read_unlock(&binfmt_lock);
exit:
	fput(file);
out:
	return error;
}
#endif /* #ifdef CONFIG_USELIB */

#ifdef CONFIG_MMU
/*
 * The nascent bprm->mm is not visible until exec_mmap() but it can
 * use a lot of memory, account these pages in current->mm temporarily
 * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we
 * change the counter back via acct_arg_size(0).
 */
static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
	struct mm_struct *mm = current->mm;
	long diff = (long)(pages - bprm->vma_pages);

	if (!mm || !diff)
		return;

	bprm->vma_pages = pages;
	add_mm_counter(mm, MM_ANONPAGES, diff);
}
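/*
 * Pin a single page of the nascent stack at @pos so argument data can be
 * copied into it (write != 0) or read back from it. For writes, the stack
 * VMA is expanded downwards by hand first, since GUP will not do that for
 * us here, and the page accounting is updated via acct_arg_size().
 */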
static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;
	struct vm_area_struct *vma = bprm->vma;
	struct mm_struct *mm = bprm->mm;
	int ret;

	/*
	 * Avoid relying on expanding the stack down in GUP (which
	 * does not work for STACK_GROWSUP anyway), and just do it
	 * by hand ahead of time.
	 */
	if (write && pos < vma->vm_start) {
		mmap_write_lock(mm);
		ret = expand_downwards(vma, pos);
		if (unlikely(ret < 0)) {
			mmap_write_unlock(mm);
			return NULL;
		}
		mmap_write_downgrade(mm);
	} else
		mmap_read_lock(mm);

	/*
	 * We are doing an exec(). 'current' is the process
	 * doing the exec and 'mm' is the new process's mm.
	 */
	ret = get_user_pages_remote(mm, pos, 1,
			write ? FOLL_WRITE : 0,
			&page, NULL);
	mmap_read_unlock(mm);
	if (ret <= 0)
		return NULL;

	if (write)
		acct_arg_size(bprm, vma_pages(vma));

	return page;
}

static void put_arg_page(struct page *page)
{
	put_page(page);
}

static void free_arg_pages(struct linux_binprm *bprm)
{
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
	flush_cache_page(bprm->vma, pos, page_to_pfn(page));
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = bprm->mm;

	bprm->vma = vma = vm_area_alloc(mm);
	if (!vma)
		return -ENOMEM;
	vma_set_anonymous(vma);

	if (mmap_write_lock_killable(mm)) {
		err = -EINTR;
		goto err_free;
	}

	/*
	 * Need to be called with mmap write lock
	 * held, to avoid race with ksmd.
	 */
	err = ksm_execve(mm);
	if (err)
		goto err_ksm;

	/*
	 * Place the stack at the largest stack address the architecture
	 * supports. Later, we'll move this to an appropriate place. We don't
	 * use STACK_TOP because that can depend on attributes which aren't
	 * configured yet.
	 */
	BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
	vma->vm_end = STACK_TOP_MAX;
	vma->vm_start = vma->vm_end - PAGE_SIZE;
	vm_flags_init(vma, VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	err = insert_vm_struct(mm, vma);
	if (err)
		goto err;

	mm->stack_vm = mm->total_vm = 1;
	mmap_write_unlock(mm);
	bprm->p = vma->vm_end - sizeof(void *);
	return 0;
err:
	ksm_exit(mm);
err_ksm:
	mmap_write_unlock(mm);
err_free:
	bprm->vma = NULL;
	vm_area_free(vma);
	return err;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= MAX_ARG_STRLEN;
}

#else

static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
}

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;

	page = bprm->page[pos / PAGE_SIZE];
	if (!page && write) {
		page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
		if (!page)
			return NULL;
		bprm->page[pos / PAGE_SIZE] = page;
	}

	return page;
}

static void put_arg_page(struct page *page)
{
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
	if (bprm->page[i]) {
		__free_page(bprm->page[i]);
		bprm->page[i] = NULL;
	}
}

static void free_arg_pages(struct linux_binprm *bprm)
{
	int i;

	for (i = 0; i < MAX_ARG_PAGES; i++)
		free_arg_page(bprm, i);
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
	bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
	return 0;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= bprm->p;
}

#endif /* CONFIG_MMU */

/*
 * Create a new mm_struct and populate it with a temporary stack
 * vm_area_struct. We don't have enough context at this point to set the stack
 * flags, permissions, and offset, so we use temporary values. We'll update
 * them later in setup_arg_pages().
 */
static int bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct mm_struct *mm = NULL;

	bprm->mm = mm = mm_alloc();
	err = -ENOMEM;
	if (!mm)
		goto err;

	/* Save current stack limit for all calculations made during exec. */
	task_lock(current->group_leader);
	bprm->rlim_stack = current->signal->rlim[RLIMIT_STACK];
	task_unlock(current->group_leader);

	err = __bprm_mm_init(bprm);
	if (err)
		goto err;

	return 0;

err:
	if (mm) {
		bprm->mm = NULL;
		mmdrop(mm);
	}

	return err;
}

struct user_arg_ptr {
#ifdef CONFIG_COMPAT
	bool is_compat;
#endif
	union {
		const char __user *const __user *native;
#ifdef CONFIG_COMPAT
		const compat_uptr_t __user *compat;
#endif
	} ptr;
};

static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
{
	const char __user *native;

#ifdef CONFIG_COMPAT
	if (unlikely(argv.is_compat)) {
		compat_uptr_t compat;

		if (get_user(compat, argv.ptr.compat + nr))
			return ERR_PTR(-EFAULT);

		return compat_ptr(compat);
	}
#endif

	if (get_user(native, argv.ptr.native + nr))
		return ERR_PTR(-EFAULT);

	return native;
}

/*
 * count() counts the number of strings in array ARGV.
 */
static int count(struct user_arg_ptr argv, int max)
{
	int i = 0;

	if (argv.ptr.native != NULL) {
		for (;;) {
			const char __user *p = get_user_arg_ptr(argv, i);

			if (!p)
				break;

			if (IS_ERR(p))
				return -EFAULT;

			if (i >= max)
				return -E2BIG;
			++i;

			if (fatal_signal_pending(current))
				return -ERESTARTNOHAND;
			cond_resched();
		}
	}
	return i;
}

static int count_strings_kernel(const char *const *argv)
{
	int i;

	if (!argv)
		return 0;

	for (i = 0; argv[i]; ++i) {
		if (i >= MAX_ARG_STRINGS)
			return -E2BIG;
		if (fatal_signal_pending(current))
			return -ERESTARTNOHAND;
		cond_resched();
	}
	return i;
}
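/*
 * Calculate the lowest address (bprm->argmin) the argument and environment
 * strings may grow down to, based on the stack rlimit saved in
 * bprm->rlim_stack, so copy_strings() can fail with -E2BIG instead of
 * silently overflowing the new stack.
 */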
static int bprm_stack_limits(struct linux_binprm *bprm)
{
	unsigned long limit, ptr_size;

	/*
	 * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
	 * (whichever is smaller) for the argv+env strings.
	 * This ensures that:
	 *  - the remaining binfmt code will not run out of stack space,
	 *  - the program will have a reasonable amount of stack left
	 *    to work from.
	 */
	limit = _STK_LIM / 4 * 3;
	limit = min(limit, bprm->rlim_stack.rlim_cur / 4);
	/*
	 * We've historically supported up to 32 pages (ARG_MAX)
	 * of argument strings even with small stacks
	 */
	limit = max_t(unsigned long, limit, ARG_MAX);
	/*
	 * We must account for the size of all the argv and envp pointers to
	 * the argv and envp strings, since they will also take up space in
	 * the stack. They aren't stored until much later when we can't
	 * signal to the parent that the child has run out of stack space.
	 * Instead, calculate it here so it's possible to fail gracefully.
	 *
	 * In the case of argc = 0, make sure there is space for adding an
	 * empty string (which will bump argc to 1), to ensure confused
	 * userspace programs don't start processing from argv[1], thinking
	 * argc can never be 0, to keep them from walking envp by accident.
	 * See do_execveat_common().
	 */
	ptr_size = (max(bprm->argc, 1) + bprm->envc) * sizeof(void *);
	if (limit <= ptr_size)
		return -E2BIG;
	limit -= ptr_size;

	bprm->argmin = bprm->p - limit;
	return 0;
}

/*
 * 'copy_strings()' copies argument/environment strings from the old
 * process's memory to the new process's stack. The call to get_user_pages()
 * ensures the destination page is created and not swapped out.
 */
static int copy_strings(int argc, struct user_arg_ptr argv,
			struct linux_binprm *bprm)
{
	struct page *kmapped_page = NULL;
	char *kaddr = NULL;
	unsigned long kpos = 0;
	int ret;

	while (argc-- > 0) {
		const char __user *str;
		int len;
		unsigned long pos;

		ret = -EFAULT;
		str = get_user_arg_ptr(argv, argc);
		if (IS_ERR(str))
			goto out;

		len = strnlen_user(str, MAX_ARG_STRLEN);
		if (!len)
			goto out;

		ret = -E2BIG;
		if (!valid_arg_len(bprm, len))
			goto out;

		/* We're going to work our way backwards. */
		pos = bprm->p;
		str += len;
		bprm->p -= len;
#ifdef CONFIG_MMU
		if (bprm->p < bprm->argmin)
			goto out;
#endif

		while (len > 0) {
			int offset, bytes_to_copy;

			if (fatal_signal_pending(current)) {
				ret = -ERESTARTNOHAND;
				goto out;
			}
			cond_resched();

			offset = pos % PAGE_SIZE;
			if (offset == 0)
				offset = PAGE_SIZE;

			bytes_to_copy = offset;
			if (bytes_to_copy > len)
				bytes_to_copy = len;

			offset -= bytes_to_copy;
			pos -= bytes_to_copy;
			str -= bytes_to_copy;
			len -= bytes_to_copy;

			if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
				struct page *page;

				page = get_arg_page(bprm, pos, 1);
				if (!page) {
					ret = -E2BIG;
					goto out;
				}

				if (kmapped_page) {
					flush_dcache_page(kmapped_page);
					kunmap_local(kaddr);
					put_arg_page(kmapped_page);
				}
				kmapped_page = page;
				kaddr = kmap_local_page(kmapped_page);
				kpos = pos & PAGE_MASK;
				flush_arg_page(bprm, kpos, kmapped_page);
			}
			if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
				ret = -EFAULT;
				goto out;
			}
		}
	}
	ret = 0;
out:
	if (kmapped_page) {
		flush_dcache_page(kmapped_page);
		kunmap_local(kaddr);
		put_arg_page(kmapped_page);
	}
	return ret;
}

/*
 * Copy an argument/environment string from the kernel to the new process's
 * stack.
 */
int copy_string_kernel(const char *arg, struct linux_binprm *bprm)
{
	int len = strnlen(arg, MAX_ARG_STRLEN) + 1 /* terminating NUL */;
	unsigned long pos = bprm->p;

	if (len == 0)
		return -EFAULT;
	if (!valid_arg_len(bprm, len))
		return -E2BIG;

	/* We're going to work our way backwards. */
	arg += len;
	bprm->p -= len;
	if (IS_ENABLED(CONFIG_MMU) && bprm->p < bprm->argmin)
		return -E2BIG;

	while (len > 0) {
		unsigned int bytes_to_copy = min_t(unsigned int, len,
				min_not_zero(offset_in_page(pos), PAGE_SIZE));
		struct page *page;

		pos -= bytes_to_copy;
		arg -= bytes_to_copy;
		len -= bytes_to_copy;

		page = get_arg_page(bprm, pos, 1);
		if (!page)
			return -E2BIG;
		flush_arg_page(bprm, pos & PAGE_MASK, page);
		memcpy_to_page(page, offset_in_page(pos), arg, bytes_to_copy);
		put_arg_page(page);
	}

	return 0;
}
EXPORT_SYMBOL(copy_string_kernel);
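/*
 * Copy an array of kernel strings onto the new process's stack, from last
 * to first. Used for the argv and envp arrays of kernel_execve().
 */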
static int copy_strings_kernel(int argc, const char *const *argv,
			       struct linux_binprm *bprm)
{
	while (argc-- > 0) {
		int ret = copy_string_kernel(argv[argc], bprm);
		if (ret < 0)
			return ret;
		if (fatal_signal_pending(current))
			return -ERESTARTNOHAND;
		cond_resched();
	}
	return 0;
}

#ifdef CONFIG_MMU

/*
 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX. Once
 * the binfmt code determines where the new stack should reside, we shift it to
 * its final location. The process proceeds as follows:
 *
 * 1) Use shift to calculate the new vma endpoints.
 * 2) Extend vma to cover both the old and new ranges. This ensures the
 *    arguments passed to subsequent functions are consistent.
 * 3) Move vma's page tables to the new range.
 * 4) Free up any cleared pgd range.
 * 5) Shrink the vma to cover only the new range.
 */
static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long old_start = vma->vm_start;
	unsigned long old_end = vma->vm_end;
	unsigned long length = old_end - old_start;
	unsigned long new_start = old_start - shift;
	unsigned long new_end = old_end - shift;
	VMA_ITERATOR(vmi, mm, new_start);
	struct vm_area_struct *next;
	struct mmu_gather tlb;

	BUG_ON(new_start > new_end);

	/*
	 * ensure there are no vmas between where we want to go
	 * and where we are
	 */
	if (vma != vma_next(&vmi))
		return -EFAULT;

	vma_iter_prev_range(&vmi);
	/*
	 * cover the whole range: [new_start, old_end)
	 */
	if (vma_expand(&vmi, vma, new_start, old_end, vma->vm_pgoff, NULL))
		return -ENOMEM;

	/*
	 * move the page tables downwards, on failure we rely on
	 * process cleanup to remove whatever mess we made.
	 */
	if (length != move_page_tables(vma, old_start,
				       vma, new_start, length, false, true))
		return -ENOMEM;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	next = vma_next(&vmi);
	if (new_end > old_start) {
		/*
		 * when the old and new regions overlap clear from new_end.
		 */
		free_pgd_range(&tlb, new_end, old_end, new_end,
			next ? next->vm_start : USER_PGTABLES_CEILING);
	} else {
		/*
		 * otherwise, clean from old_start; this is done to not touch
		 * the address space in [new_end, old_start) some architectures
		 * have constraints on va-space that make this illegal (IA64) -
		 * for the others it's just a little faster.
		 */
		free_pgd_range(&tlb, old_start, old_end, new_end,
			next ? next->vm_start : USER_PGTABLES_CEILING);
	}
	tlb_finish_mmu(&tlb);

	vma_prev(&vmi);
	/* Shrink the vma to just the new range */
	return vma_shrink(&vmi, vma, new_start, new_end, vma->vm_pgoff);
}

/*
 * Finalizes the stack vm_area_struct. The flags and permissions are updated,
 * the stack is optionally relocated, and some extra space is added.
 */
int setup_arg_pages(struct linux_binprm *bprm,
		    unsigned long stack_top,
		    int executable_stack)
{
	unsigned long ret;
	unsigned long stack_shift;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = bprm->vma;
	struct vm_area_struct *prev = NULL;
	unsigned long vm_flags;
	unsigned long stack_base;
	unsigned long stack_size;
	unsigned long stack_expand;
	unsigned long rlim_stack;
	struct mmu_gather tlb;
	struct vma_iterator vmi;

#ifdef CONFIG_STACK_GROWSUP
	/* Limit stack size */
	stack_base = bprm->rlim_stack.rlim_max;

	stack_base = calc_max_stack_size(stack_base);

	/* Add space for stack randomization. */
	stack_base += (STACK_RND_MASK << PAGE_SHIFT);

	/* Make sure we didn't let the argument array grow too large. */
	if (vma->vm_end - vma->vm_start > stack_base)
		return -ENOMEM;

	stack_base = PAGE_ALIGN(stack_top - stack_base);

	stack_shift = vma->vm_start - stack_base;
	mm->arg_start = bprm->p - stack_shift;
	bprm->p = vma->vm_end - stack_shift;
#else
	stack_top = arch_align_stack(stack_top);
	stack_top = PAGE_ALIGN(stack_top);

	if (unlikely(stack_top < mmap_min_addr) ||
	    unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
		return -ENOMEM;

	stack_shift = vma->vm_end - stack_top;

	bprm->p -= stack_shift;
	mm->arg_start = bprm->p;
#endif

	if (bprm->loader)
		bprm->loader -= stack_shift;
	bprm->exec -= stack_shift;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	vm_flags = VM_STACK_FLAGS;

	/*
	 * Adjust stack execute permissions; explicitly enable for
	 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
	 * (arch default) otherwise.
	 */
	if (unlikely(executable_stack == EXSTACK_ENABLE_X))
		vm_flags |= VM_EXEC;
	else if (executable_stack == EXSTACK_DISABLE_X)
		vm_flags &= ~VM_EXEC;
	vm_flags |= mm->def_flags;
	vm_flags |= VM_STACK_INCOMPLETE_SETUP;

	vma_iter_init(&vmi, mm, vma->vm_start);

	tlb_gather_mmu(&tlb, mm);
	ret = mprotect_fixup(&vmi, &tlb, vma, &prev, vma->vm_start, vma->vm_end,
			vm_flags);
	tlb_finish_mmu(&tlb);

	if (ret)
		goto out_unlock;
	BUG_ON(prev != vma);

	if (unlikely(vm_flags & VM_EXEC)) {
		pr_warn_once("process '%pD4' started with executable stack\n",
			     bprm->file);
	}

	/* Move stack pages down in memory. */
	if (stack_shift) {
		ret = shift_arg_pages(vma, stack_shift);
		if (ret)
			goto out_unlock;
	}

	/* mprotect_fixup is overkill to remove the temporary stack flags */
	vm_flags_clear(vma, VM_STACK_INCOMPLETE_SETUP);

	stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
	stack_size = vma->vm_end - vma->vm_start;
	/*
	 * Align this down to a page boundary as expand_stack
	 * will align it up.
	 */
	rlim_stack = bprm->rlim_stack.rlim_cur & PAGE_MASK;

	stack_expand = min(rlim_stack, stack_size + stack_expand);

#ifdef CONFIG_STACK_GROWSUP
	stack_base = vma->vm_start + stack_expand;
#else
	stack_base = vma->vm_end - stack_expand;
#endif
	current->mm->start_stack = bprm->p;
	ret = expand_stack_locked(vma, stack_base);
	if (ret)
		ret = -EFAULT;

out_unlock:
	mmap_write_unlock(mm);
	return ret;
}
EXPORT_SYMBOL(setup_arg_pages);

#else

/*
 * Transfer the program arguments and environment from the holding pages
 * onto the stack. The provided stack pointer is adjusted accordingly.
 */
int transfer_args_to_stack(struct linux_binprm *bprm,
			   unsigned long *sp_location)
{
	unsigned long index, stop, sp;
	int ret = 0;

	stop = bprm->p >> PAGE_SHIFT;
	sp = *sp_location;

	for (index = MAX_ARG_PAGES - 1; index >= stop; index--) {
		unsigned int offset = index == stop ? bprm->p & ~PAGE_MASK : 0;
		char *src = kmap_local_page(bprm->page[index]) + offset;
		sp -= PAGE_SIZE - offset;
		if (copy_to_user((void *) sp, src, PAGE_SIZE - offset) != 0)
			ret = -EFAULT;
		kunmap_local(src);
		if (ret)
			goto out;
	}

	bprm->exec += *sp_location - MAX_ARG_PAGES * PAGE_SIZE;
	*sp_location = sp;

out:
	return ret;
}
EXPORT_SYMBOL(transfer_args_to_stack);

#endif /* CONFIG_MMU */

/*
 * On success, caller must call do_close_execat() on the returned
 * struct file to close it.
 */
static struct file *do_open_execat(int fd, struct filename *name, int flags)
{
	struct file *file;
	int err;
	struct open_flags open_exec_flags = {
		.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
		.acc_mode = MAY_EXEC,
		.intent = LOOKUP_OPEN,
		.lookup_flags = LOOKUP_FOLLOW,
	};

	if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
		return ERR_PTR(-EINVAL);
	if (flags & AT_SYMLINK_NOFOLLOW)
		open_exec_flags.lookup_flags &= ~LOOKUP_FOLLOW;
	if (flags & AT_EMPTY_PATH)
		open_exec_flags.lookup_flags |= LOOKUP_EMPTY;

	file = do_filp_open(fd, name, &open_exec_flags);
	if (IS_ERR(file))
		goto out;

	/*
	 * may_open() has already checked for this, so it should be
	 * impossible to trip now. But we need to be extra cautious
	 * and check again at the very end too.
	 */
	err = -EACCES;
	if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
			 path_noexec(&file->f_path)))
		goto exit;

	err = deny_write_access(file);
	if (err)
		goto exit;

out:
	return file;

exit:
	fput(file);
	return ERR_PTR(err);
}

/**
 * open_exec - Open a path name for execution
 *
 * @name: path name to open with the intent of executing it.
 *
 * Returns ERR_PTR on failure or allocated struct file on success.
 *
 * As this is a wrapper for the internal do_open_execat(), callers
 * must call allow_write_access() before fput() on release. Also see
 * do_close_execat().
 */
struct file *open_exec(const char *name)
{
	struct filename *filename = getname_kernel(name);
	struct file *f = ERR_CAST(filename);

	if (!IS_ERR(filename)) {
		f = do_open_execat(AT_FDCWD, filename, 0);
		putname(filename);
	}
	return f;
}
EXPORT_SYMBOL(open_exec);

#if defined(CONFIG_BINFMT_FLAT) || defined(CONFIG_BINFMT_ELF_FDPIC)
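/*
 * Read a region of the executable into memory at @addr and keep the
 * instruction cache coherent with the copied code. Only used by the flat
 * and FDPIC ELF loaders.
 */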
ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
{
	ssize_t res = vfs_read(file, (void __user *)addr, len, &pos);
	if (res > 0)
		flush_icache_user_range(addr, addr + len);
	return res;
}
EXPORT_SYMBOL(read_code);
#endif

/*
 * Maps the mm_struct mm into the current task struct.
 * On success, this function returns with exec_update_lock
 * held for writing.
 */
static int exec_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk;
	struct mm_struct *old_mm, *active_mm;
	int ret;

	/* Notify parent that we're no longer interested in the old VM */
	tsk = current;
	old_mm = current->mm;
	exec_mm_release(tsk, old_mm);

	ret = down_write_killable(&tsk->signal->exec_update_lock);
	if (ret)
		return ret;

	if (old_mm) {
		/*
		 * If there is a pending fatal signal, perhaps a signal
		 * whose default action is to create a coredump, get
		 * out and die instead of going through with the exec.
		 */
		ret = mmap_read_lock_killable(old_mm);
		if (ret) {
			up_write(&tsk->signal->exec_update_lock);
			return ret;
		}
	}

	task_lock(tsk);
	membarrier_exec_mmap(mm);

	local_irq_disable();
	active_mm = tsk->active_mm;
	tsk->active_mm = mm;
	tsk->mm = mm;
	mm_init_cid(mm);
	/*
	 * This prevents preemption while active_mm is being loaded and
	 * it and mm are being updated, which could cause problems for
	 * lazy tlb mm refcounting when these are updated by context
	 * switches. Not all architectures can handle irqs off over
	 * activate_mm yet.
	 */
	if (!IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
		local_irq_enable();
	activate_mm(active_mm, mm);
	if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
		local_irq_enable();
	lru_gen_add_mm(mm);
	task_unlock(tsk);
	lru_gen_use_mm(mm);
	if (old_mm) {
		mmap_read_unlock(old_mm);
		BUG_ON(active_mm != old_mm);
		setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
		mm_update_next_owner(old_mm);
		mmput(old_mm);
		return 0;
	}
	mmdrop_lazy_tlb(active_mm);
	return 0;
}
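/*
 * Kill all the other threads in the thread group and wait for them to exit.
 * If the exec'ing thread is not the group leader, it takes over the old
 * leader's identity (PID, start time) and releases the old leader. Returns
 * -EAGAIN if another group exit or exec is already in progress.
 */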
static int de_thread(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *oldsighand = tsk->sighand;
	spinlock_t *lock = &oldsighand->siglock;

	if (thread_group_empty(tsk))
		goto no_thread_group;

	/*
	 * Kill all other threads in the thread group.
	 */
	spin_lock_irq(lock);
	if ((sig->flags & SIGNAL_GROUP_EXIT) || sig->group_exec_task) {
		/*
		 * Another group action in progress, just
		 * return so that the signal is processed.
		 */
		spin_unlock_irq(lock);
		return -EAGAIN;
	}

	sig->group_exec_task = tsk;
	sig->notify_count = zap_other_threads(tsk);
	if (!thread_group_leader(tsk))
		sig->notify_count--;

	while (sig->notify_count) {
		__set_current_state(TASK_KILLABLE);
		spin_unlock_irq(lock);
		schedule();
		if (__fatal_signal_pending(tsk))
			goto killed;
		spin_lock_irq(lock);
	}
	spin_unlock_irq(lock);

	/*
	 * At this point all other threads have exited, all we have to
	 * do is to wait for the thread group leader to become inactive,
	 * and to assume its PID:
	 */
	if (!thread_group_leader(tsk)) {
		struct task_struct *leader = tsk->group_leader;

		for (;;) {
			cgroup_threadgroup_change_begin(tsk);
			write_lock_irq(&tasklist_lock);
			/*
			 * Do this under tasklist_lock to ensure that
			 * exit_notify() can't miss ->group_exec_task
			 */
			sig->notify_count = -1;
			if (likely(leader->exit_state))
				break;
			__set_current_state(TASK_KILLABLE);
			write_unlock_irq(&tasklist_lock);
			cgroup_threadgroup_change_end(tsk);
			schedule();
			if (__fatal_signal_pending(tsk))
				goto killed;
		}

		/*
		 * The only record we have of the real-time age of a
		 * process, regardless of execs it's done, is start_time.
		 * All the past CPU time is accumulated in signal_struct
		 * from sister threads now dead. But in this non-leader
		 * exec, nothing survives from the original leader thread,
		 * whose birth marks the true age of this process now.
		 * When we take on its identity by switching to its PID, we
		 * also take its birthdate (always earlier than our own).
		 */
		tsk->start_time = leader->start_time;
		tsk->start_boottime = leader->start_boottime;

		BUG_ON(!same_thread_group(leader, tsk));
		/*
		 * An exec() starts a new thread group with the
		 * TGID of the previous thread group. Rehash the
		 * two threads with a switched PID, and release
		 * the former thread group leader:
		 */

		/* Become a process group leader with the old leader's pid.
		 * The old leader becomes a thread of this thread group.
		 */
		exchange_tids(tsk, leader);
		transfer_pid(leader, tsk, PIDTYPE_TGID);
		transfer_pid(leader, tsk, PIDTYPE_PGID);
		transfer_pid(leader, tsk, PIDTYPE_SID);

		list_replace_rcu(&leader->tasks, &tsk->tasks);
		list_replace_init(&leader->sibling, &tsk->sibling);

		tsk->group_leader = tsk;
		leader->group_leader = tsk;

		tsk->exit_signal = SIGCHLD;
		leader->exit_signal = -1;

		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
		leader->exit_state = EXIT_DEAD;
		/*
		 * We are going to release_task()->ptrace_unlink() silently,
		 * the tracer can sleep in do_wait(). EXIT_DEAD guarantees
		 * the tracer won't block again waiting for this thread.
		 */
		if (unlikely(leader->ptrace))
			__wake_up_parent(leader, leader->parent);
		write_unlock_irq(&tasklist_lock);
		cgroup_threadgroup_change_end(tsk);

		release_task(leader);
	}

	sig->group_exec_task = NULL;
	sig->notify_count = 0;

no_thread_group:
	/* we have changed execution domain */
	tsk->exit_signal = SIGCHLD;

	BUG_ON(!thread_group_leader(tsk));
	return 0;

killed:
	/* protects against exit_notify() and __exit_signal() */
	read_lock(&tasklist_lock);
	sig->group_exec_task = NULL;
	sig->notify_count = 0;
	read_unlock(&tasklist_lock);
	return -EAGAIN;
}


/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes. (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static int unshare_sighand(struct task_struct *me)
{
	struct sighand_struct *oldsighand = me->sighand;

	if (refcount_read(&oldsighand->count) != 1) {
		struct sighand_struct *newsighand;
		/*
		 * This ->sighand is shared with the CLONE_SIGHAND
		 * but not CLONE_THREAD task, switch to the new one.
		 */
		newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
		if (!newsighand)
			return -ENOMEM;

		refcount_set(&newsighand->count, 1);

		write_lock_irq(&tasklist_lock);
		spin_lock(&oldsighand->siglock);
		memcpy(newsighand->action, oldsighand->action,
		       sizeof(newsighand->action));
		rcu_assign_pointer(me->sighand, newsighand);
		spin_unlock(&oldsighand->siglock);
		write_unlock_irq(&tasklist_lock);

		__cleanup_sighand(oldsighand);
	}
	return 0;
}
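/*
 * Copy tsk->comm into @buf under task_lock() so a concurrent
 * __set_task_comm() cannot be observed half-way through.
 */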
char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk)
{
	task_lock(tsk);
	/* Always NUL terminated and zero-padded */
	strscpy_pad(buf, tsk->comm, buf_size);
	task_unlock(tsk);
	return buf;
}
EXPORT_SYMBOL_GPL(__get_task_comm);

/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started
 */

void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
{
	task_lock(tsk);
	trace_task_rename(tsk, buf);
	strscpy_pad(tsk->comm, buf, sizeof(tsk->comm));
	task_unlock(tsk);
	perf_event_comm(tsk, exec);
}

/*
 * Calling this is the point of no return. None of the failures will be
 * seen by userspace since either the process is already taking a fatal
 * signal (via de_thread() or coredump), or will have SEGV raised
 * (after exec_mmap()) by search_binary_handler (see below).
 */
int begin_new_exec(struct linux_binprm * bprm)
{
	struct task_struct *me = current;
	int retval;

	/* Once we are committed compute the creds */
	retval = bprm_creds_from_file(bprm);
	if (retval)
		return retval;

	/*
	 * Ensure all future errors are fatal.
	 */
	bprm->point_of_no_return = true;

	/*
	 * Make this the only thread in the thread group.
	 */
	retval = de_thread(me);
	if (retval)
		goto out;

	/*
	 * Cancel any io_uring activity across execve
	 */
	io_uring_task_cancel();

	/* Ensure the files table is not shared. */
	retval = unshare_files();
	if (retval)
		goto out;

	/*
	 * Must be called _before_ exec_mmap() as bprm->mm is
	 * not visible until then. Doing it here also ensures
	 * we don't race against replace_mm_exe_file().
	 */
	retval = set_mm_exe_file(bprm->mm, bprm->file);
	if (retval)
		goto out;

	/* If the binary is not readable then enforce mm->dumpable=0 */
	would_dump(bprm, bprm->file);
	if (bprm->have_execfd)
		would_dump(bprm, bprm->executable);

	/*
	 * Release all of the old mmap stuff
	 */
	acct_arg_size(bprm, 0);
	retval = exec_mmap(bprm->mm);
	if (retval)
		goto out;

	bprm->mm = NULL;

	retval = exec_task_namespaces();
	if (retval)
		goto out_unlock;

#ifdef CONFIG_POSIX_TIMERS
	spin_lock_irq(&me->sighand->siglock);
	posix_cpu_timers_exit(me);
	spin_unlock_irq(&me->sighand->siglock);
	exit_itimers(me);
	flush_itimer_signals();
#endif

	/*
	 * Make the signal table private.
	 */
	retval = unshare_sighand(me);
	if (retval)
		goto out_unlock;

	me->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC |
			PF_NOFREEZE | PF_NO_SETAFFINITY);
	flush_thread();
	me->personality &= ~bprm->per_clear;

	clear_syscall_work_syscall_user_dispatch(me);

	/*
	 * We have to apply CLOEXEC before we change whether the process is
	 * dumpable (in setup_new_exec) to avoid a race with a process in userspace
	 * trying to access the should-be-closed file descriptors of a process
	 * undergoing exec(2).
	 */
	do_close_on_exec(me->files);

	if (bprm->secureexec) {
		/* Make sure parent cannot signal privileged process. */
		me->pdeath_signal = 0;

		/*
		 * For secureexec, reset the stack limit to sane default to
		 * avoid bad behavior from the prior rlimits. This has to
		 * happen before arch_pick_mmap_layout(), which examines
		 * RLIMIT_STACK, but after the point of no return to avoid
		 * needing to clean up the change on failure.
		 */
		if (bprm->rlim_stack.rlim_cur > _STK_LIM)
			bprm->rlim_stack.rlim_cur = _STK_LIM;
	}

	me->sas_ss_sp = me->sas_ss_size = 0;

	/*
	 * Figure out dumpability. Note that this checking only of current
	 * is wrong, but userspace depends on it. This should be testing
	 * bprm->secureexec instead.
	 */
	if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP ||
	    !(uid_eq(current_euid(), current_uid()) &&
	      gid_eq(current_egid(), current_gid())))
		set_dumpable(current->mm, suid_dumpable);
	else
		set_dumpable(current->mm, SUID_DUMP_USER);

	perf_event_exec();
	__set_task_comm(me, kbasename(bprm->filename), true);

	/* An exec changes our domain. We are no longer part of the thread
	   group */
	WRITE_ONCE(me->self_exec_id, me->self_exec_id + 1);
	flush_signal_handlers(me, 0);

	retval = set_cred_ucounts(bprm->cred);
	if (retval < 0)
		goto out_unlock;

	/*
	 * install the new credentials for this executable
	 */
	security_bprm_committing_creds(bprm);

	commit_creds(bprm->cred);
	bprm->cred = NULL;

	/*
	 * Disable monitoring for regular users
	 * when executing setuid binaries. Must
	 * wait until new credentials are committed
	 * by commit_creds() above
	 */
	if (get_dumpable(me->mm) != SUID_DUMP_USER)
		perf_event_exit_task(me);
	/*
	 * cred_guard_mutex must be held at least to this point to prevent
	 * ptrace_attach() from altering our determination of the task's
	 * credentials; any time after this it may be unlocked.
	 */
	security_bprm_committed_creds(bprm);

	/* Pass the opened binary to the interpreter. */
	if (bprm->have_execfd) {
		retval = get_unused_fd_flags(0);
		if (retval < 0)
			goto out_unlock;
		fd_install(retval, bprm->executable);
		bprm->executable = NULL;
		bprm->execfd = retval;
	}
	return 0;

out_unlock:
	up_write(&me->signal->exec_update_lock);
	if (!bprm->cred)
		mutex_unlock(&me->signal->cred_guard_mutex);

out:
	return retval;
}
EXPORT_SYMBOL(begin_new_exec);

void would_dump(struct linux_binprm *bprm, struct file *file)
{
	struct inode *inode = file_inode(file);
	struct mnt_idmap *idmap = file_mnt_idmap(file);
	if (inode_permission(idmap, inode, MAY_READ) < 0) {
		struct user_namespace *old, *user_ns;
		bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;

		/* Ensure mm->user_ns contains the executable */
		user_ns = old = bprm->mm->user_ns;
		while ((user_ns != &init_user_ns) &&
		       !privileged_wrt_inode_uidgid(user_ns, idmap, inode))
			user_ns = user_ns->parent;

		if (old != user_ns) {
			bprm->mm->user_ns = get_user_ns(user_ns);
			put_user_ns(old);
		}
	}
}
EXPORT_SYMBOL(would_dump);

void setup_new_exec(struct linux_binprm * bprm)
{
	/* Setup things that can depend upon the personality */
	struct task_struct *me = current;

	arch_pick_mmap_layout(me->mm, &bprm->rlim_stack);

	arch_setup_new_exec();

	/* Set the new mm task size. We have to do that late because it may
	 * depend on TIF_32BIT which is only updated in flush_thread() on
	 * some architectures like powerpc
	 */
	me->mm->task_size = TASK_SIZE;
	up_write(&me->signal->exec_update_lock);
	mutex_unlock(&me->signal->cred_guard_mutex);
}
EXPORT_SYMBOL(setup_new_exec);

/* Runs immediately before start_thread() takes over. */
void finalize_exec(struct linux_binprm *bprm)
{
	/* Store any stack rlimit changes before starting thread. */
	task_lock(current->group_leader);
	current->signal->rlim[RLIMIT_STACK] = bprm->rlim_stack;
	task_unlock(current->group_leader);
}
EXPORT_SYMBOL(finalize_exec);

/*
 * Prepare credentials and lock ->cred_guard_mutex.
 * setup_new_exec() commits the new creds and drops the lock.
 * Or, if exec fails before, free_bprm() should release ->cred
 * and unlock.
 */
static int prepare_bprm_creds(struct linux_binprm *bprm)
{
	if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
		return -ERESTARTNOINTR;

	bprm->cred = prepare_exec_creds();
	if (likely(bprm->cred))
		return 0;

	mutex_unlock(&current->signal->cred_guard_mutex);
	return -ENOMEM;
}

/* Matches do_open_execat() */
static void do_close_execat(struct file *file)
{
	if (!file)
		return;
	allow_write_access(file);
	fput(file);
}
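/*
 * Undo everything alloc_bprm() and the binfmt handlers set up: drop the
 * half-built mm, release the creds (unlocking cred_guard_mutex), close the
 * executable and free any interpreter name.
 */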
static void free_bprm(struct linux_binprm *bprm)
{
	if (bprm->mm) {
		acct_arg_size(bprm, 0);
		mmput(bprm->mm);
	}
	free_arg_pages(bprm);
	if (bprm->cred) {
		mutex_unlock(&current->signal->cred_guard_mutex);
		abort_creds(bprm->cred);
	}
	do_close_execat(bprm->file);
	if (bprm->executable)
		fput(bprm->executable);
	/* If a binfmt changed the interp, free it. */
	if (bprm->interp != bprm->filename)
		kfree(bprm->interp);
	kfree(bprm->fdpath);
	kfree(bprm);
}

static struct linux_binprm *alloc_bprm(int fd, struct filename *filename, int flags)
{
	struct linux_binprm *bprm;
	struct file *file;
	int retval = -ENOMEM;

	file = do_open_execat(fd, filename, flags);
	if (IS_ERR(file))
		return ERR_CAST(file);

	bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
	if (!bprm) {
		do_close_execat(file);
		return ERR_PTR(-ENOMEM);
	}

	bprm->file = file;

	if (fd == AT_FDCWD || filename->name[0] == '/') {
		bprm->filename = filename->name;
	} else {
		if (filename->name[0] == '\0')
			bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d", fd);
		else
			bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d/%s",
						 fd, filename->name);
		if (!bprm->fdpath)
			goto out_free;

		/*
		 * Record that a name derived from an O_CLOEXEC fd will be
		 * inaccessible after exec. This allows the code in exec to
		 * choose to fail when the executable is not mmaped into the
		 * interpreter and an open file descriptor is not passed to
		 * the interpreter. This makes for a better user experience
		 * than having the interpreter start and then immediately fail
		 * when it finds the executable is inaccessible.
		 */
		if (get_close_on_exec(fd))
			bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE;

		bprm->filename = bprm->fdpath;
	}
	bprm->interp = bprm->filename;

	retval = bprm_mm_init(bprm);
	if (!retval)
		return bprm;

out_free:
	free_bprm(bprm);
	return ERR_PTR(retval);
}
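/*
 * Record a new interpreter name in bprm->interp, freeing any name a
 * previous binfmt handler (e.g. binfmt_script) installed before it.
 */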
int bprm_change_interp(const char *interp, struct linux_binprm *bprm)
{
	/* If a binfmt changed the interp, free it first. */
	if (bprm->interp != bprm->filename)
		kfree(bprm->interp);
	bprm->interp = kstrdup(interp, GFP_KERNEL);
	if (!bprm->interp)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(bprm_change_interp);

/*
 * determine how safe it is to execute the proposed program
 * - the caller must hold ->cred_guard_mutex to protect against
 *   PTRACE_ATTACH or seccomp thread-sync
 */
static void check_unsafe_exec(struct linux_binprm *bprm)
{
	struct task_struct *p = current, *t;
	unsigned n_fs;

	if (p->ptrace)
		bprm->unsafe |= LSM_UNSAFE_PTRACE;

	/*
	 * This isn't strictly necessary, but it makes it harder for LSMs to
	 * mess up.
	 */
	if (task_no_new_privs(current))
		bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;

	/*
	 * If another task is sharing our fs, we cannot safely
	 * suid exec because the differently privileged task
	 * will be able to manipulate the current directory, etc.
	 * It would be nice to force an unshare instead...
	 */
	n_fs = 1;
	spin_lock(&p->fs->lock);
	rcu_read_lock();
	for_other_threads(p, t) {
		if (t->fs == p->fs)
			n_fs++;
	}
	rcu_read_unlock();

	/* "users" and "in_exec" locked for copy_fs() */
	if (p->fs->users > n_fs)
		bprm->unsafe |= LSM_UNSAFE_SHARE;
	else
		p->fs->in_exec = 1;
	spin_unlock(&p->fs->lock);
}

static void bprm_fill_uid(struct linux_binprm *bprm, struct file *file)
{
	/* Handle suid and sgid on files */
	struct mnt_idmap *idmap;
	struct inode *inode = file_inode(file);
	unsigned int mode;
	vfsuid_t vfsuid;
	vfsgid_t vfsgid;

	if (!mnt_may_suid(file->f_path.mnt))
		return;

	if (task_no_new_privs(current))
		return;

	mode = READ_ONCE(inode->i_mode);
	if (!(mode & (S_ISUID|S_ISGID)))
		return;

	idmap = file_mnt_idmap(file);

	/* Be careful if suid/sgid is set */
	inode_lock(inode);

	/* reload atomically mode/uid/gid now that lock held */
	mode = inode->i_mode;
	vfsuid = i_uid_into_vfsuid(idmap, inode);
	vfsgid = i_gid_into_vfsgid(idmap, inode);
	inode_unlock(inode);

	/* We ignore suid/sgid if there are no mappings for them in the ns */
	if (!vfsuid_has_mapping(bprm->cred->user_ns, vfsuid) ||
	    !vfsgid_has_mapping(bprm->cred->user_ns, vfsgid))
		return;

	if (mode & S_ISUID) {
		bprm->per_clear |= PER_CLEAR_ON_SETID;
		bprm->cred->euid = vfsuid_into_kuid(vfsuid);
	}

	if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
		bprm->per_clear |= PER_CLEAR_ON_SETID;
		bprm->cred->egid = vfsgid_into_kgid(vfsgid);
	}
}

/*
 * Compute bprm->cred based upon the final binary.
 */
static int bprm_creds_from_file(struct linux_binprm *bprm)
{
	/* Compute creds based on which file? */
	struct file *file = bprm->execfd_creds ? bprm->executable : bprm->file;

	bprm_fill_uid(bprm, file);
	return security_bprm_creds_from_file(bprm, file);
}

/*
 * Fill the binprm structure from the inode.
 * Read the first BINPRM_BUF_SIZE bytes
 *
 * This may be called multiple times for binary chains (scripts for example).
 */
static int prepare_binprm(struct linux_binprm *bprm)
{
	loff_t pos = 0;

	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
	return kernel_read(bprm->file, bprm->buf, BINPRM_BUF_SIZE, &pos);
}

/*
 * Arguments are '\0' separated strings found at the location bprm->p
 * points to; chop off the first by relocating bprm->p to right after
 * the first '\0' encountered.
 */
int remove_arg_zero(struct linux_binprm *bprm)
{
	unsigned long offset;
	char *kaddr;
	struct page *page;

	if (!bprm->argc)
		return 0;

	do {
		offset = bprm->p & ~PAGE_MASK;
		page = get_arg_page(bprm, bprm->p, 0);
		if (!page)
			return -EFAULT;
		kaddr = kmap_local_page(page);

		for (; offset < PAGE_SIZE && kaddr[offset];
				offset++, bprm->p++)
			;

		kunmap_local(kaddr);
		put_arg_page(page);
	} while (offset == PAGE_SIZE);

	bprm->p++;
	bprm->argc--;

	return 0;
}
EXPORT_SYMBOL(remove_arg_zero);

#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
/*
 * cycle through the list of binary format handlers until one recognizes
 * the image
 */
static int search_binary_handler(struct linux_binprm *bprm)
{
	bool need_retry = IS_ENABLED(CONFIG_MODULES);
	struct linux_binfmt *fmt;
	int retval;

	retval = prepare_binprm(bprm);
	if (retval < 0)
		return retval;

	retval = security_bprm_check(bprm);
	if (retval)
		return retval;

	retval = -ENOENT;
retry:
	read_lock(&binfmt_lock);
	list_for_each_entry(fmt, &formats, lh) {
		if (!try_module_get(fmt->module))
			continue;
		read_unlock(&binfmt_lock);

		retval = fmt->load_binary(bprm);

		read_lock(&binfmt_lock);
		put_binfmt(fmt);
		if (bprm->point_of_no_return || (retval != -ENOEXEC)) {
			read_unlock(&binfmt_lock);
			return retval;
		}
	}
	read_unlock(&binfmt_lock);

	if (need_retry) {
		if (printable(bprm->buf[0]) && printable(bprm->buf[1]) &&
		    printable(bprm->buf[2]) && printable(bprm->buf[3]))
			return retval;
		if (request_module("binfmt-%04x", *(ushort *)(bprm->buf + 2)) < 0)
			return retval;
		need_retry = false;
		goto retry;
	}

	return retval;
}

/* binfmt handlers will call back into begin_new_exec() on success. */
static int exec_binprm(struct linux_binprm *bprm)
{
	pid_t old_pid, old_vpid;
	int ret, depth;

	/* Need to fetch pid before load_binary changes it */
	old_pid = current->pid;
	rcu_read_lock();
	old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
	rcu_read_unlock();

	/* This allows 4 levels of binfmt rewrites before failing hard. */
	for (depth = 0;; depth++) {
		struct file *exec;
		if (depth > 5)
			return -ELOOP;

		ret = search_binary_handler(bprm);
		if (ret < 0)
			return ret;
		if (!bprm->interpreter)
			break;

		exec = bprm->file;
		bprm->file = bprm->interpreter;
		bprm->interpreter = NULL;

		allow_write_access(exec);
		if (unlikely(bprm->have_execfd)) {
			if (bprm->executable) {
				fput(exec);
				return -ENOEXEC;
			}
			bprm->executable = exec;
		} else
			fput(exec);
	}

	audit_bprm(bprm);
	trace_sched_process_exec(current, old_pid, bprm);
	ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
	proc_exec_connector(current);
	return 0;
}
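/*
 * Do the final safety checks, commit to the exec by running the binfmt
 * handlers, and clean up the in_exec/in_execve state on both the success
 * and failure paths.
 */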
static int bprm_execve(struct linux_binprm *bprm)
{
	int retval;

	retval = prepare_bprm_creds(bprm);
	if (retval)
		return retval;

	/*
	 * Check for unsafe execution states before exec_binprm(), which
	 * will call back into begin_new_exec(), into bprm_creds_from_file(),
	 * where setuid-ness is evaluated.
	 */
	check_unsafe_exec(bprm);
	current->in_execve = 1;
	sched_mm_cid_before_execve(current);

	sched_exec();

	/* Set the unchanging part of bprm->cred */
	retval = security_bprm_creds_for_exec(bprm);
	if (retval)
		goto out;

	retval = exec_binprm(bprm);
	if (retval < 0)
		goto out;

	sched_mm_cid_after_execve(current);
	/* execve succeeded */
	current->fs->in_exec = 0;
	current->in_execve = 0;
	rseq_execve(current);
	user_events_execve(current);
	acct_update_integrals(current);
	task_numa_free(current, false);
	return retval;

out:
	/*
	 * If past the point of no return ensure the code never
	 * returns to the userspace process. Use an existing fatal
	 * signal if present otherwise terminate the process with
	 * SIGSEGV.
	 */
	if (bprm->point_of_no_return && !fatal_signal_pending(current))
		force_fatal_sig(SIGSEGV);

	sched_mm_cid_after_execve(current);
	current->fs->in_exec = 0;
	current->in_execve = 0;

	return retval;
}

static int do_execveat_common(int fd, struct filename *filename,
			      struct user_arg_ptr argv,
			      struct user_arg_ptr envp,
			      int flags)
{
	struct linux_binprm *bprm;
	int retval;

	if (IS_ERR(filename))
		return PTR_ERR(filename);

	/*
	 * We move the actual failure in case of RLIMIT_NPROC excess from
	 * set*uid() to execve() because too many poorly written programs
	 * don't check setuid() return code. Here we additionally recheck
	 * whether NPROC limit is still exceeded.
	 */
	if ((current->flags & PF_NPROC_EXCEEDED) &&
	    is_rlimit_overlimit(current_ucounts(), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) {
		retval = -EAGAIN;
		goto out_ret;
	}

	/* We're below the limit (still or again), so we don't want to make
	 * further execve() calls fail. */
	current->flags &= ~PF_NPROC_EXCEEDED;

	bprm = alloc_bprm(fd, filename, flags);
	if (IS_ERR(bprm)) {
		retval = PTR_ERR(bprm);
		goto out_ret;
	}

	retval = count(argv, MAX_ARG_STRINGS);
	if (retval == 0)
		pr_warn_once("process '%s' launched '%s' with NULL argv: empty string added\n",
			     current->comm, bprm->filename);
	if (retval < 0)
		goto out_free;
	bprm->argc = retval;

	retval = count(envp, MAX_ARG_STRINGS);
	if (retval < 0)
		goto out_free;
	bprm->envc = retval;

	retval = bprm_stack_limits(bprm);
	if (retval < 0)
		goto out_free;

	retval = copy_string_kernel(bprm->filename, bprm);
	if (retval < 0)
		goto out_free;
	bprm->exec = bprm->p;

	retval = copy_strings(bprm->envc, envp, bprm);
	if (retval < 0)
		goto out_free;

	retval = copy_strings(bprm->argc, argv, bprm);
	if (retval < 0)
		goto out_free;

	/*
	 * When argv is empty, add an empty string ("") as argv[0] to
	 * ensure confused userspace programs that start processing
	 * from argv[1] won't end up walking envp. See also
	 * bprm_stack_limits().
	 */
	if (bprm->argc == 0) {
		retval = copy_string_kernel("", bprm);
		if (retval < 0)
			goto out_free;
		bprm->argc = 1;
	}

	retval = bprm_execve(bprm);
out_free:
	free_bprm(bprm);

out_ret:
	putname(filename);
	return retval;
}
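/*
 * Execute a new program on behalf of the kernel, e.g. from a usermode
 * helper. The caller must be a normal task that can return to userspace;
 * kernel threads are rejected with -EINVAL.
 */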
int kernel_execve(const char *kernel_filename,
		  const char *const *argv, const char *const *envp)
{
	struct filename *filename;
	struct linux_binprm *bprm;
	int fd = AT_FDCWD;
	int retval;

	/* It makes no sense for kernel threads to call execve */
	if (WARN_ON_ONCE(current->flags & PF_KTHREAD))
		return -EINVAL;

	filename = getname_kernel(kernel_filename);
	if (IS_ERR(filename))
		return PTR_ERR(filename);

	bprm = alloc_bprm(fd, filename, 0);
	if (IS_ERR(bprm)) {
		retval = PTR_ERR(bprm);
		goto out_ret;
	}

	retval = count_strings_kernel(argv);
	if (WARN_ON_ONCE(retval == 0))
		retval = -EINVAL;
	if (retval < 0)
		goto out_free;
	bprm->argc = retval;

	retval = count_strings_kernel(envp);
	if (retval < 0)
		goto out_free;
	bprm->envc = retval;

	retval = bprm_stack_limits(bprm);
	if (retval < 0)
		goto out_free;

	retval = copy_string_kernel(bprm->filename, bprm);
	if (retval < 0)
		goto out_free;
	bprm->exec = bprm->p;

	retval = copy_strings_kernel(bprm->envc, envp, bprm);
	if (retval < 0)
		goto out_free;

	retval = copy_strings_kernel(bprm->argc, argv, bprm);
	if (retval < 0)
		goto out_free;

	retval = bprm_execve(bprm);
out_free:
	free_bprm(bprm);
out_ret:
	putname(filename);
	return retval;
}

static int do_execve(struct filename *filename,
	const char __user *const __user *__argv,
	const char __user *const __user *__envp)
{
	struct user_arg_ptr argv = { .ptr.native = __argv };
	struct user_arg_ptr envp = { .ptr.native = __envp };
	return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
}

static int do_execveat(int fd, struct filename *filename,
		const char __user *const __user *__argv,
		const char __user *const __user *__envp,
		int flags)
{
	struct user_arg_ptr argv = { .ptr.native = __argv };
	struct user_arg_ptr envp = { .ptr.native = __envp };

	return do_execveat_common(fd, filename, argv, envp, flags);
}

#ifdef CONFIG_COMPAT
static int compat_do_execve(struct filename *filename,
	const compat_uptr_t __user *__argv,
	const compat_uptr_t __user *__envp)
{
	struct user_arg_ptr argv = {
		.is_compat = true,
		.ptr.compat = __argv,
	};
	struct user_arg_ptr envp = {
		.is_compat = true,
		.ptr.compat = __envp,
	};
	return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
}

static int compat_do_execveat(int fd, struct filename *filename,
			      const compat_uptr_t __user *__argv,
			      const compat_uptr_t __user *__envp,
			      int flags)
{
	struct user_arg_ptr argv = {
		.is_compat = true,
		.ptr.compat = __argv,
	};
	struct user_arg_ptr envp = {
		.is_compat = true,
		.ptr.compat = __envp,
	};
	return do_execveat_common(fd, filename, argv, envp, flags);
}
#endif
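/*
 * Install @new as the binary format backing current->mm, taking a module
 * reference on it and dropping the reference on the old one.
 */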
void set_binfmt(struct linux_binfmt *new)
{
	struct mm_struct *mm = current->mm;

	if (mm->binfmt)
		module_put(mm->binfmt->module);

	mm->binfmt = new;
	if (new)
		__module_get(new->module);
}
EXPORT_SYMBOL(set_binfmt);

/*
 * set_dumpable stores three-value SUID_DUMP_* into mm->flags.
 */
void set_dumpable(struct mm_struct *mm, int value)
{
	if (WARN_ON((unsigned)value > SUID_DUMP_ROOT))
		return;

	set_mask_bits(&mm->flags, MMF_DUMPABLE_MASK, value);
}

SYSCALL_DEFINE3(execve,
		const char __user *, filename,
		const char __user *const __user *, argv,
		const char __user *const __user *, envp)
{
	return do_execve(getname(filename), argv, envp);
}

SYSCALL_DEFINE5(execveat,
		int, fd, const char __user *, filename,
		const char __user *const __user *, argv,
		const char __user *const __user *, envp,
		int, flags)
{
	return do_execveat(fd,
			   getname_uflags(filename, flags),
			   argv, envp, flags);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
	const compat_uptr_t __user *, argv,
	const compat_uptr_t __user *, envp)
{
	return compat_do_execve(getname(filename), argv, envp);
}

COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
		       const char __user *, filename,
		       const compat_uptr_t __user *, argv,
		       const compat_uptr_t __user *, envp,
		       int, flags)
{
	return compat_do_execveat(fd,
				  getname_uflags(filename, flags),
				  argv, envp, flags);
}
#endif

#ifdef CONFIG_SYSCTL

static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos)
{
	int error = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (!error)
		validate_coredump_safety();
	return error;
}

static struct ctl_table fs_exec_sysctls[] = {
	{
		.procname	= "suid_dumpable",
		.data		= &suid_dumpable,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax_coredump,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_TWO,
	},
};

static int __init init_fs_exec_sysctls(void)
{
	register_sysctl_init("fs", fs_exec_sysctls);
	return 0;
}

fs_initcall(init_fs_exec_sysctls);
#endif /* CONFIG_SYSCTL */