1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/kernel/fork.c 4 * 5 * Copyright (C) 1991, 1992 Linus Torvalds 6 */ 7 8 /* 9 * 'fork.c' contains the help-routines for the 'fork' system call 10 * (see also entry.S and others). 11 * Fork is rather simple, once you get the hang of it, but the memory 12 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()' 13 */ 14 15 #include <linux/anon_inodes.h> 16 #include <linux/slab.h> 17 #include <linux/sched/autogroup.h> 18 #include <linux/sched/mm.h> 19 #include <linux/sched/coredump.h> 20 #include <linux/sched/user.h> 21 #include <linux/sched/numa_balancing.h> 22 #include <linux/sched/stat.h> 23 #include <linux/sched/task.h> 24 #include <linux/sched/task_stack.h> 25 #include <linux/sched/cputime.h> 26 #include <linux/seq_file.h> 27 #include <linux/rtmutex.h> 28 #include <linux/init.h> 29 #include <linux/unistd.h> 30 #include <linux/module.h> 31 #include <linux/vmalloc.h> 32 #include <linux/completion.h> 33 #include <linux/personality.h> 34 #include <linux/mempolicy.h> 35 #include <linux/sem.h> 36 #include <linux/file.h> 37 #include <linux/fdtable.h> 38 #include <linux/iocontext.h> 39 #include <linux/key.h> 40 #include <linux/kmsan.h> 41 #include <linux/binfmts.h> 42 #include <linux/mman.h> 43 #include <linux/mmu_notifier.h> 44 #include <linux/fs.h> 45 #include <linux/mm.h> 46 #include <linux/mm_inline.h> 47 #include <linux/nsproxy.h> 48 #include <linux/capability.h> 49 #include <linux/cpu.h> 50 #include <linux/cgroup.h> 51 #include <linux/security.h> 52 #include <linux/hugetlb.h> 53 #include <linux/seccomp.h> 54 #include <linux/swap.h> 55 #include <linux/syscalls.h> 56 #include <linux/syscall_user_dispatch.h> 57 #include <linux/jiffies.h> 58 #include <linux/futex.h> 59 #include <linux/compat.h> 60 #include <linux/kthread.h> 61 #include <linux/task_io_accounting_ops.h> 62 #include <linux/rcupdate.h> 63 #include <linux/ptrace.h> 64 #include <linux/mount.h> 65 #include <linux/audit.h> 66 #include <linux/memcontrol.h> 67 #include <linux/ftrace.h> 68 #include <linux/proc_fs.h> 69 #include <linux/profile.h> 70 #include <linux/rmap.h> 71 #include <linux/ksm.h> 72 #include <linux/acct.h> 73 #include <linux/userfaultfd_k.h> 74 #include <linux/tsacct_kern.h> 75 #include <linux/cn_proc.h> 76 #include <linux/freezer.h> 77 #include <linux/delayacct.h> 78 #include <linux/taskstats_kern.h> 79 #include <linux/tty.h> 80 #include <linux/fs_struct.h> 81 #include <linux/magic.h> 82 #include <linux/perf_event.h> 83 #include <linux/posix-timers.h> 84 #include <linux/user-return-notifier.h> 85 #include <linux/oom.h> 86 #include <linux/khugepaged.h> 87 #include <linux/signalfd.h> 88 #include <linux/uprobes.h> 89 #include <linux/aio.h> 90 #include <linux/compiler.h> 91 #include <linux/sysctl.h> 92 #include <linux/kcov.h> 93 #include <linux/livepatch.h> 94 #include <linux/thread_info.h> 95 #include <linux/stackleak.h> 96 #include <linux/kasan.h> 97 #include <linux/scs.h> 98 #include <linux/io_uring.h> 99 #include <linux/bpf.h> 100 #include <linux/stackprotector.h> 101 #include <linux/user_events.h> 102 #include <linux/iommu.h> 103 #include <linux/rseq.h> 104 #include <uapi/linux/pidfd.h> 105 #include <linux/pidfs.h> 106 107 #include <asm/pgalloc.h> 108 #include <linux/uaccess.h> 109 #include <asm/mmu_context.h> 110 #include <asm/cacheflush.h> 111 #include <asm/tlbflush.h> 112 113 #include <trace/events/sched.h> 114 115 #define CREATE_TRACE_POINTS 116 #include <trace/events/task.h> 117 118 #include <kunit/visibility.h> 119 120 /* 121 * Minimum number of 
threads to boot the kernel 122 */ 123 #define MIN_THREADS 20 124 125 /* 126 * Maximum number of threads 127 */ 128 #define MAX_THREADS FUTEX_TID_MASK 129 130 /* 131 * Protected counters by write_lock_irq(&tasklist_lock) 132 */ 133 unsigned long total_forks; /* Handle normal Linux uptimes. */ 134 int nr_threads; /* The idle threads do not count.. */ 135 136 static int max_threads; /* tunable limit on nr_threads */ 137 138 #define NAMED_ARRAY_INDEX(x) [x] = __stringify(x) 139 140 static const char * const resident_page_types[] = { 141 NAMED_ARRAY_INDEX(MM_FILEPAGES), 142 NAMED_ARRAY_INDEX(MM_ANONPAGES), 143 NAMED_ARRAY_INDEX(MM_SWAPENTS), 144 NAMED_ARRAY_INDEX(MM_SHMEMPAGES), 145 }; 146 147 DEFINE_PER_CPU(unsigned long, process_counts) = 0; 148 149 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */ 150 151 #ifdef CONFIG_PROVE_RCU 152 int lockdep_tasklist_lock_is_held(void) 153 { 154 return lockdep_is_held(&tasklist_lock); 155 } 156 EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held); 157 #endif /* #ifdef CONFIG_PROVE_RCU */ 158 159 int nr_processes(void) 160 { 161 int cpu; 162 int total = 0; 163 164 for_each_possible_cpu(cpu) 165 total += per_cpu(process_counts, cpu); 166 167 return total; 168 } 169 170 void __weak arch_release_task_struct(struct task_struct *tsk) 171 { 172 } 173 174 static struct kmem_cache *task_struct_cachep; 175 176 static inline struct task_struct *alloc_task_struct_node(int node) 177 { 178 return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node); 179 } 180 181 static inline void free_task_struct(struct task_struct *tsk) 182 { 183 kmem_cache_free(task_struct_cachep, tsk); 184 } 185 186 /* 187 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a 188 * kmemcache based allocator. 189 */ 190 # if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) 191 192 # ifdef CONFIG_VMAP_STACK 193 /* 194 * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB 195 * flush. Try to minimize the number of calls by caching stacks. 
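 *
 * (A sketch of the lifecycle implemented below: a dying task's stack is
 * parked in a free cached_stacks[] slot via this_cpu_cmpxchg() in
 * try_release_thread_stack_to_cache(); alloc_thread_stack_node() later
 * reclaims it with this_cpu_xchg(); and free_vm_stack_cache() drains the
 * per-CPU slots from the CPU hotplug callback registered in fork_init().)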
196 */ 197 #define NR_CACHED_STACKS 2 198 static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]); 199 200 struct vm_stack { 201 struct rcu_head rcu; 202 struct vm_struct *stack_vm_area; 203 }; 204 205 static bool try_release_thread_stack_to_cache(struct vm_struct *vm) 206 { 207 unsigned int i; 208 209 for (i = 0; i < NR_CACHED_STACKS; i++) { 210 if (this_cpu_cmpxchg(cached_stacks[i], NULL, vm) != NULL) 211 continue; 212 return true; 213 } 214 return false; 215 } 216 217 static void thread_stack_free_rcu(struct rcu_head *rh) 218 { 219 struct vm_stack *vm_stack = container_of(rh, struct vm_stack, rcu); 220 221 if (try_release_thread_stack_to_cache(vm_stack->stack_vm_area)) 222 return; 223 224 vfree(vm_stack); 225 } 226 227 static void thread_stack_delayed_free(struct task_struct *tsk) 228 { 229 struct vm_stack *vm_stack = tsk->stack; 230 231 vm_stack->stack_vm_area = tsk->stack_vm_area; 232 call_rcu(&vm_stack->rcu, thread_stack_free_rcu); 233 } 234 235 static int free_vm_stack_cache(unsigned int cpu) 236 { 237 struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu); 238 int i; 239 240 for (i = 0; i < NR_CACHED_STACKS; i++) { 241 struct vm_struct *vm_stack = cached_vm_stacks[i]; 242 243 if (!vm_stack) 244 continue; 245 246 vfree(vm_stack->addr); 247 cached_vm_stacks[i] = NULL; 248 } 249 250 return 0; 251 } 252 253 static int memcg_charge_kernel_stack(struct vm_struct *vm) 254 { 255 int i; 256 int ret; 257 int nr_charged = 0; 258 259 BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE); 260 261 for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) { 262 ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL, 0); 263 if (ret) 264 goto err; 265 nr_charged++; 266 } 267 return 0; 268 err: 269 for (i = 0; i < nr_charged; i++) 270 memcg_kmem_uncharge_page(vm->pages[i], 0); 271 return ret; 272 } 273 274 static int alloc_thread_stack_node(struct task_struct *tsk, int node) 275 { 276 struct vm_struct *vm; 277 void *stack; 278 int i; 279 280 for (i = 0; i < NR_CACHED_STACKS; i++) { 281 struct vm_struct *s; 282 283 s = this_cpu_xchg(cached_stacks[i], NULL); 284 285 if (!s) 286 continue; 287 288 /* Reset stack metadata. */ 289 kasan_unpoison_range(s->addr, THREAD_SIZE); 290 291 stack = kasan_reset_tag(s->addr); 292 293 /* Clear stale pointers from reused stack. */ 294 memset(stack, 0, THREAD_SIZE); 295 296 if (memcg_charge_kernel_stack(s)) { 297 vfree(s->addr); 298 return -ENOMEM; 299 } 300 301 tsk->stack_vm_area = s; 302 tsk->stack = stack; 303 return 0; 304 } 305 306 /* 307 * Allocated stacks are cached and later reused by new threads, 308 * so memcg accounting is performed manually on assigning/releasing 309 * stacks to tasks. Drop __GFP_ACCOUNT. 310 */ 311 stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN, 312 VMALLOC_START, VMALLOC_END, 313 THREADINFO_GFP & ~__GFP_ACCOUNT, 314 PAGE_KERNEL, 315 0, node, __builtin_return_address(0)); 316 if (!stack) 317 return -ENOMEM; 318 319 vm = find_vm_area(stack); 320 if (memcg_charge_kernel_stack(vm)) { 321 vfree(stack); 322 return -ENOMEM; 323 } 324 /* 325 * We can't call find_vm_area() in interrupt context, and 326 * free_thread_stack() can be called in interrupt context, 327 * so cache the vm_struct. 
328 */ 329 tsk->stack_vm_area = vm; 330 stack = kasan_reset_tag(stack); 331 tsk->stack = stack; 332 return 0; 333 } 334 335 static void free_thread_stack(struct task_struct *tsk) 336 { 337 if (!try_release_thread_stack_to_cache(tsk->stack_vm_area)) 338 thread_stack_delayed_free(tsk); 339 340 tsk->stack = NULL; 341 tsk->stack_vm_area = NULL; 342 } 343 344 # else /* !CONFIG_VMAP_STACK */ 345 346 static void thread_stack_free_rcu(struct rcu_head *rh) 347 { 348 __free_pages(virt_to_page(rh), THREAD_SIZE_ORDER); 349 } 350 351 static void thread_stack_delayed_free(struct task_struct *tsk) 352 { 353 struct rcu_head *rh = tsk->stack; 354 355 call_rcu(rh, thread_stack_free_rcu); 356 } 357 358 static int alloc_thread_stack_node(struct task_struct *tsk, int node) 359 { 360 struct page *page = alloc_pages_node(node, THREADINFO_GFP, 361 THREAD_SIZE_ORDER); 362 363 if (likely(page)) { 364 tsk->stack = kasan_reset_tag(page_address(page)); 365 return 0; 366 } 367 return -ENOMEM; 368 } 369 370 static void free_thread_stack(struct task_struct *tsk) 371 { 372 thread_stack_delayed_free(tsk); 373 tsk->stack = NULL; 374 } 375 376 # endif /* CONFIG_VMAP_STACK */ 377 # else /* !(THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)) */ 378 379 static struct kmem_cache *thread_stack_cache; 380 381 static void thread_stack_free_rcu(struct rcu_head *rh) 382 { 383 kmem_cache_free(thread_stack_cache, rh); 384 } 385 386 static void thread_stack_delayed_free(struct task_struct *tsk) 387 { 388 struct rcu_head *rh = tsk->stack; 389 390 call_rcu(rh, thread_stack_free_rcu); 391 } 392 393 static int alloc_thread_stack_node(struct task_struct *tsk, int node) 394 { 395 unsigned long *stack; 396 stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node); 397 stack = kasan_reset_tag(stack); 398 tsk->stack = stack; 399 return stack ? 
0 : -ENOMEM; 400 } 401 402 static void free_thread_stack(struct task_struct *tsk) 403 { 404 thread_stack_delayed_free(tsk); 405 tsk->stack = NULL; 406 } 407 408 void thread_stack_cache_init(void) 409 { 410 thread_stack_cache = kmem_cache_create_usercopy("thread_stack", 411 THREAD_SIZE, THREAD_SIZE, 0, 0, 412 THREAD_SIZE, NULL); 413 BUG_ON(thread_stack_cache == NULL); 414 } 415 416 # endif /* THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) */ 417 418 /* SLAB cache for signal_struct structures (tsk->signal) */ 419 static struct kmem_cache *signal_cachep; 420 421 /* SLAB cache for sighand_struct structures (tsk->sighand) */ 422 struct kmem_cache *sighand_cachep; 423 424 /* SLAB cache for files_struct structures (tsk->files) */ 425 struct kmem_cache *files_cachep; 426 427 /* SLAB cache for fs_struct structures (tsk->fs) */ 428 struct kmem_cache *fs_cachep; 429 430 /* SLAB cache for vm_area_struct structures */ 431 static struct kmem_cache *vm_area_cachep; 432 433 /* SLAB cache for mm_struct structures (tsk->mm) */ 434 static struct kmem_cache *mm_cachep; 435 436 #ifdef CONFIG_PER_VMA_LOCK 437 438 /* SLAB cache for vm_area_struct.lock */ 439 static struct kmem_cache *vma_lock_cachep; 440 441 static bool vma_lock_alloc(struct vm_area_struct *vma) 442 { 443 vma->vm_lock = kmem_cache_alloc(vma_lock_cachep, GFP_KERNEL); 444 if (!vma->vm_lock) 445 return false; 446 447 init_rwsem(&vma->vm_lock->lock); 448 vma->vm_lock_seq = -1; 449 450 return true; 451 } 452 453 static inline void vma_lock_free(struct vm_area_struct *vma) 454 { 455 kmem_cache_free(vma_lock_cachep, vma->vm_lock); 456 } 457 458 #else /* CONFIG_PER_VMA_LOCK */ 459 460 static inline bool vma_lock_alloc(struct vm_area_struct *vma) { return true; } 461 static inline void vma_lock_free(struct vm_area_struct *vma) {} 462 463 #endif /* CONFIG_PER_VMA_LOCK */ 464 465 struct vm_area_struct *vm_area_alloc(struct mm_struct *mm) 466 { 467 struct vm_area_struct *vma; 468 469 vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 470 if (!vma) 471 return NULL; 472 473 vma_init(vma, mm); 474 if (!vma_lock_alloc(vma)) { 475 kmem_cache_free(vm_area_cachep, vma); 476 return NULL; 477 } 478 479 return vma; 480 } 481 482 struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) 483 { 484 struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 485 486 if (!new) 487 return NULL; 488 489 ASSERT_EXCLUSIVE_WRITER(orig->vm_flags); 490 ASSERT_EXCLUSIVE_WRITER(orig->vm_file); 491 /* 492 * orig->shared.rb may be modified concurrently, but the clone 493 * will be reinitialized. 494 */ 495 data_race(memcpy(new, orig, sizeof(*new))); 496 if (!vma_lock_alloc(new)) { 497 kmem_cache_free(vm_area_cachep, new); 498 return NULL; 499 } 500 INIT_LIST_HEAD(&new->anon_vma_chain); 501 vma_numab_state_init(new); 502 dup_anon_vma_name(orig, new); 503 504 return new; 505 } 506 507 void __vm_area_free(struct vm_area_struct *vma) 508 { 509 vma_numab_state_free(vma); 510 free_anon_vma_name(vma); 511 vma_lock_free(vma); 512 kmem_cache_free(vm_area_cachep, vma); 513 } 514 515 #ifdef CONFIG_PER_VMA_LOCK 516 static void vm_area_free_rcu_cb(struct rcu_head *head) 517 { 518 struct vm_area_struct *vma = container_of(head, struct vm_area_struct, 519 vm_rcu); 520 521 /* The vma should not be locked while being destroyed. 
*/ 522 VM_BUG_ON_VMA(rwsem_is_locked(&vma->vm_lock->lock), vma); 523 __vm_area_free(vma); 524 } 525 #endif 526 527 void vm_area_free(struct vm_area_struct *vma) 528 { 529 #ifdef CONFIG_PER_VMA_LOCK 530 call_rcu(&vma->vm_rcu, vm_area_free_rcu_cb); 531 #else 532 __vm_area_free(vma); 533 #endif 534 } 535 536 static void account_kernel_stack(struct task_struct *tsk, int account) 537 { 538 if (IS_ENABLED(CONFIG_VMAP_STACK)) { 539 struct vm_struct *vm = task_stack_vm_area(tsk); 540 int i; 541 542 for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) 543 mod_lruvec_page_state(vm->pages[i], NR_KERNEL_STACK_KB, 544 account * (PAGE_SIZE / 1024)); 545 } else { 546 void *stack = task_stack_page(tsk); 547 548 /* All stack pages are in the same node. */ 549 mod_lruvec_kmem_state(stack, NR_KERNEL_STACK_KB, 550 account * (THREAD_SIZE / 1024)); 551 } 552 } 553 554 void exit_task_stack_account(struct task_struct *tsk) 555 { 556 account_kernel_stack(tsk, -1); 557 558 if (IS_ENABLED(CONFIG_VMAP_STACK)) { 559 struct vm_struct *vm; 560 int i; 561 562 vm = task_stack_vm_area(tsk); 563 for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) 564 memcg_kmem_uncharge_page(vm->pages[i], 0); 565 } 566 } 567 568 static void release_task_stack(struct task_struct *tsk) 569 { 570 if (WARN_ON(READ_ONCE(tsk->__state) != TASK_DEAD)) 571 return; /* Better to leak the stack than to free prematurely */ 572 573 free_thread_stack(tsk); 574 } 575 576 #ifdef CONFIG_THREAD_INFO_IN_TASK 577 void put_task_stack(struct task_struct *tsk) 578 { 579 if (refcount_dec_and_test(&tsk->stack_refcount)) 580 release_task_stack(tsk); 581 } 582 #endif 583 584 void free_task(struct task_struct *tsk) 585 { 586 #ifdef CONFIG_SECCOMP 587 WARN_ON_ONCE(tsk->seccomp.filter); 588 #endif 589 release_user_cpus_ptr(tsk); 590 scs_release(tsk); 591 592 #ifndef CONFIG_THREAD_INFO_IN_TASK 593 /* 594 * The task is finally done with both the stack and thread_info, 595 * so free both. 596 */ 597 release_task_stack(tsk); 598 #else 599 /* 600 * If the task had a separate stack allocation, it should be gone 601 * by now. 602 */ 603 WARN_ON_ONCE(refcount_read(&tsk->stack_refcount) != 0); 604 #endif 605 rt_mutex_debug_task_free(tsk); 606 ftrace_graph_exit_task(tsk); 607 arch_release_task_struct(tsk); 608 if (tsk->flags & PF_KTHREAD) 609 free_kthread_struct(tsk); 610 bpf_task_storage_free(tsk); 611 free_task_struct(tsk); 612 } 613 EXPORT_SYMBOL(free_task); 614 615 static void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm) 616 { 617 struct file *exe_file; 618 619 exe_file = get_mm_exe_file(oldmm); 620 RCU_INIT_POINTER(mm->exe_file, exe_file); 621 } 622 623 #ifdef CONFIG_MMU 624 static __latent_entropy int dup_mmap(struct mm_struct *mm, 625 struct mm_struct *oldmm) 626 { 627 struct vm_area_struct *mpnt, *tmp; 628 int retval; 629 unsigned long charge = 0; 630 LIST_HEAD(uf); 631 VMA_ITERATOR(vmi, mm, 0); 632 633 uprobe_start_dup_mmap(); 634 if (mmap_write_lock_killable(oldmm)) { 635 retval = -EINTR; 636 goto fail_uprobe_end; 637 } 638 flush_cache_dup_mm(oldmm); 639 uprobe_dup_mmap(oldmm, mm); 640 /* 641 * Not linked in yet - no deadlock potential: 642 */ 643 mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING); 644 645 /* No ordering required: file already has been exposed. 
*/ 646 dup_mm_exe_file(mm, oldmm); 647 648 mm->total_vm = oldmm->total_vm; 649 mm->data_vm = oldmm->data_vm; 650 mm->exec_vm = oldmm->exec_vm; 651 mm->stack_vm = oldmm->stack_vm; 652 653 retval = ksm_fork(mm, oldmm); 654 if (retval) 655 goto out; 656 khugepaged_fork(mm, oldmm); 657 658 /* Use __mt_dup() to efficiently build an identical maple tree. */ 659 retval = __mt_dup(&oldmm->mm_mt, &mm->mm_mt, GFP_KERNEL); 660 if (unlikely(retval)) 661 goto out; 662 663 mt_clear_in_rcu(vmi.mas.tree); 664 for_each_vma(vmi, mpnt) { 665 struct file *file; 666 667 vma_start_write(mpnt); 668 if (mpnt->vm_flags & VM_DONTCOPY) { 669 retval = vma_iter_clear_gfp(&vmi, mpnt->vm_start, 670 mpnt->vm_end, GFP_KERNEL); 671 if (retval) 672 goto loop_out; 673 674 vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt)); 675 continue; 676 } 677 charge = 0; 678 /* 679 * Don't duplicate many vmas if we've been oom-killed (for 680 * example) 681 */ 682 if (fatal_signal_pending(current)) { 683 retval = -EINTR; 684 goto loop_out; 685 } 686 if (mpnt->vm_flags & VM_ACCOUNT) { 687 unsigned long len = vma_pages(mpnt); 688 689 if (security_vm_enough_memory_mm(oldmm, len)) /* sic */ 690 goto fail_nomem; 691 charge = len; 692 } 693 tmp = vm_area_dup(mpnt); 694 if (!tmp) 695 goto fail_nomem; 696 retval = vma_dup_policy(mpnt, tmp); 697 if (retval) 698 goto fail_nomem_policy; 699 tmp->vm_mm = mm; 700 retval = dup_userfaultfd(tmp, &uf); 701 if (retval) 702 goto fail_nomem_anon_vma_fork; 703 if (tmp->vm_flags & VM_WIPEONFORK) { 704 /* 705 * VM_WIPEONFORK gets a clean slate in the child. 706 * Don't prepare anon_vma until fault since we don't 707 * copy page for current vma. 708 */ 709 tmp->anon_vma = NULL; 710 } else if (anon_vma_fork(tmp, mpnt)) 711 goto fail_nomem_anon_vma_fork; 712 vm_flags_clear(tmp, VM_LOCKED_MASK); 713 /* 714 * Copy/update hugetlb private vma information. 715 */ 716 if (is_vm_hugetlb_page(tmp)) 717 hugetlb_dup_vma_private(tmp); 718 719 /* 720 * Link the vma into the MT. After using __mt_dup(), memory 721 * allocation is not necessary here, so it cannot fail. 722 */ 723 vma_iter_bulk_store(&vmi, tmp); 724 725 mm->map_count++; 726 727 if (tmp->vm_ops && tmp->vm_ops->open) 728 tmp->vm_ops->open(tmp); 729 730 file = tmp->vm_file; 731 if (file) { 732 struct address_space *mapping = file->f_mapping; 733 734 get_file(file); 735 i_mmap_lock_write(mapping); 736 if (vma_is_shared_maywrite(tmp)) 737 mapping_allow_writable(mapping); 738 flush_dcache_mmap_lock(mapping); 739 /* insert tmp into the share list, just after mpnt */ 740 vma_interval_tree_insert_after(tmp, mpnt, 741 &mapping->i_mmap); 742 flush_dcache_mmap_unlock(mapping); 743 i_mmap_unlock_write(mapping); 744 } 745 746 if (!(tmp->vm_flags & VM_WIPEONFORK)) 747 retval = copy_page_range(tmp, mpnt); 748 749 if (retval) { 750 mpnt = vma_next(&vmi); 751 goto loop_out; 752 } 753 } 754 /* a new mm has just been created */ 755 retval = arch_dup_mmap(oldmm, mm); 756 loop_out: 757 vma_iter_free(&vmi); 758 if (!retval) { 759 mt_set_in_rcu(vmi.mas.tree); 760 } else if (mpnt) { 761 /* 762 * The entire maple tree has already been duplicated. If the 763 * mmap duplication fails, mark the failure point with 764 * XA_ZERO_ENTRY. In exit_mmap(), if this marker is encountered, 765 * stop releasing VMAs that have not been duplicated after this 766 * point. 
767 */ 768 mas_set_range(&vmi.mas, mpnt->vm_start, mpnt->vm_end - 1); 769 mas_store(&vmi.mas, XA_ZERO_ENTRY); 770 } 771 out: 772 mmap_write_unlock(mm); 773 flush_tlb_mm(oldmm); 774 mmap_write_unlock(oldmm); 775 dup_userfaultfd_complete(&uf); 776 fail_uprobe_end: 777 uprobe_end_dup_mmap(); 778 return retval; 779 780 fail_nomem_anon_vma_fork: 781 mpol_put(vma_policy(tmp)); 782 fail_nomem_policy: 783 vm_area_free(tmp); 784 fail_nomem: 785 retval = -ENOMEM; 786 vm_unacct_memory(charge); 787 goto loop_out; 788 } 789 790 static inline int mm_alloc_pgd(struct mm_struct *mm) 791 { 792 mm->pgd = pgd_alloc(mm); 793 if (unlikely(!mm->pgd)) 794 return -ENOMEM; 795 return 0; 796 } 797 798 static inline void mm_free_pgd(struct mm_struct *mm) 799 { 800 pgd_free(mm, mm->pgd); 801 } 802 #else 803 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) 804 { 805 mmap_write_lock(oldmm); 806 dup_mm_exe_file(mm, oldmm); 807 mmap_write_unlock(oldmm); 808 return 0; 809 } 810 #define mm_alloc_pgd(mm) (0) 811 #define mm_free_pgd(mm) 812 #endif /* CONFIG_MMU */ 813 814 static void check_mm(struct mm_struct *mm) 815 { 816 int i; 817 818 BUILD_BUG_ON_MSG(ARRAY_SIZE(resident_page_types) != NR_MM_COUNTERS, 819 "Please make sure 'struct resident_page_types[]' is updated as well"); 820 821 for (i = 0; i < NR_MM_COUNTERS; i++) { 822 long x = percpu_counter_sum(&mm->rss_stat[i]); 823 824 if (unlikely(x)) 825 pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n", 826 mm, resident_page_types[i], x); 827 } 828 829 if (mm_pgtables_bytes(mm)) 830 pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n", 831 mm_pgtables_bytes(mm)); 832 833 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS 834 VM_BUG_ON_MM(mm->pmd_huge_pte, mm); 835 #endif 836 } 837 838 #define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL)) 839 #define free_mm(mm) (kmem_cache_free(mm_cachep, (mm))) 840 841 static void do_check_lazy_tlb(void *arg) 842 { 843 struct mm_struct *mm = arg; 844 845 WARN_ON_ONCE(current->active_mm == mm); 846 } 847 848 static void do_shoot_lazy_tlb(void *arg) 849 { 850 struct mm_struct *mm = arg; 851 852 if (current->active_mm == mm) { 853 WARN_ON_ONCE(current->mm); 854 current->active_mm = &init_mm; 855 switch_mm(mm, &init_mm, current); 856 } 857 } 858 859 static void cleanup_lazy_tlbs(struct mm_struct *mm) 860 { 861 if (!IS_ENABLED(CONFIG_MMU_LAZY_TLB_SHOOTDOWN)) { 862 /* 863 * In this case, lazy tlb mms are refounted and would not reach 864 * __mmdrop until all CPUs have switched away and mmdrop()ed. 865 */ 866 return; 867 } 868 869 /* 870 * Lazy mm shootdown does not refcount "lazy tlb mm" usage, rather it 871 * requires lazy mm users to switch to another mm when the refcount 872 * drops to zero, before the mm is freed. This requires IPIs here to 873 * switch kernel threads to init_mm. 874 * 875 * archs that use IPIs to flush TLBs can piggy-back that lazy tlb mm 876 * switch with the final userspace teardown TLB flush which leaves the 877 * mm lazy on this CPU but no others, reducing the need for additional 878 * IPIs here. There are cases where a final IPI is still required here, 879 * such as the final mmdrop being performed on a different CPU than the 880 * one exiting, or kernel threads using the mm when userspace exits. 
881 * 882 * IPI overheads have not found to be expensive, but they could be 883 * reduced in a number of possible ways, for example (roughly 884 * increasing order of complexity): 885 * - The last lazy reference created by exit_mm() could instead switch 886 * to init_mm, however it's probable this will run on the same CPU 887 * immediately afterwards, so this may not reduce IPIs much. 888 * - A batch of mms requiring IPIs could be gathered and freed at once. 889 * - CPUs store active_mm where it can be remotely checked without a 890 * lock, to filter out false-positives in the cpumask. 891 * - After mm_users or mm_count reaches zero, switching away from the 892 * mm could clear mm_cpumask to reduce some IPIs, perhaps together 893 * with some batching or delaying of the final IPIs. 894 * - A delayed freeing and RCU-like quiescing sequence based on mm 895 * switching to avoid IPIs completely. 896 */ 897 on_each_cpu_mask(mm_cpumask(mm), do_shoot_lazy_tlb, (void *)mm, 1); 898 if (IS_ENABLED(CONFIG_DEBUG_VM_SHOOT_LAZIES)) 899 on_each_cpu(do_check_lazy_tlb, (void *)mm, 1); 900 } 901 902 /* 903 * Called when the last reference to the mm 904 * is dropped: either by a lazy thread or by 905 * mmput. Free the page directory and the mm. 906 */ 907 void __mmdrop(struct mm_struct *mm) 908 { 909 BUG_ON(mm == &init_mm); 910 WARN_ON_ONCE(mm == current->mm); 911 912 /* Ensure no CPUs are using this as their lazy tlb mm */ 913 cleanup_lazy_tlbs(mm); 914 915 WARN_ON_ONCE(mm == current->active_mm); 916 mm_free_pgd(mm); 917 destroy_context(mm); 918 mmu_notifier_subscriptions_destroy(mm); 919 check_mm(mm); 920 put_user_ns(mm->user_ns); 921 mm_pasid_drop(mm); 922 mm_destroy_cid(mm); 923 percpu_counter_destroy_many(mm->rss_stat, NR_MM_COUNTERS); 924 925 free_mm(mm); 926 } 927 EXPORT_SYMBOL_GPL(__mmdrop); 928 929 static void mmdrop_async_fn(struct work_struct *work) 930 { 931 struct mm_struct *mm; 932 933 mm = container_of(work, struct mm_struct, async_put_work); 934 __mmdrop(mm); 935 } 936 937 static void mmdrop_async(struct mm_struct *mm) 938 { 939 if (unlikely(atomic_dec_and_test(&mm->mm_count))) { 940 INIT_WORK(&mm->async_put_work, mmdrop_async_fn); 941 schedule_work(&mm->async_put_work); 942 } 943 } 944 945 static inline void free_signal_struct(struct signal_struct *sig) 946 { 947 taskstats_tgid_free(sig); 948 sched_autogroup_exit(sig); 949 /* 950 * __mmdrop is not safe to call from softirq context on x86 due to 951 * pgd_dtor so postpone it to the async context 952 */ 953 if (sig->oom_mm) 954 mmdrop_async(sig->oom_mm); 955 kmem_cache_free(signal_cachep, sig); 956 } 957 958 static inline void put_signal_struct(struct signal_struct *sig) 959 { 960 if (refcount_dec_and_test(&sig->sigcnt)) 961 free_signal_struct(sig); 962 } 963 964 void __put_task_struct(struct task_struct *tsk) 965 { 966 WARN_ON(!tsk->exit_state); 967 WARN_ON(refcount_read(&tsk->usage)); 968 WARN_ON(tsk == current); 969 970 io_uring_free(tsk); 971 cgroup_free(tsk); 972 task_numa_free(tsk, true); 973 security_task_free(tsk); 974 exit_creds(tsk); 975 delayacct_tsk_free(tsk); 976 put_signal_struct(tsk->signal); 977 sched_core_free(tsk); 978 free_task(tsk); 979 } 980 EXPORT_SYMBOL_GPL(__put_task_struct); 981 982 void __put_task_struct_rcu_cb(struct rcu_head *rhp) 983 { 984 struct task_struct *task = container_of(rhp, struct task_struct, rcu); 985 986 __put_task_struct(task); 987 } 988 EXPORT_SYMBOL_GPL(__put_task_struct_rcu_cb); 989 990 void __init __weak arch_task_cache_init(void) { } 991 992 /* 993 * set_max_threads 994 */ 995 static void 
set_max_threads(unsigned int max_threads_suggested) 996 { 997 u64 threads; 998 unsigned long nr_pages = totalram_pages(); 999 1000 /* 1001 * The number of threads shall be limited such that the thread 1002 * structures may only consume a small part of the available memory. 1003 */ 1004 if (fls64(nr_pages) + fls64(PAGE_SIZE) > 64) 1005 threads = MAX_THREADS; 1006 else 1007 threads = div64_u64((u64) nr_pages * (u64) PAGE_SIZE, 1008 (u64) THREAD_SIZE * 8UL); 1009 1010 if (threads > max_threads_suggested) 1011 threads = max_threads_suggested; 1012 1013 max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS); 1014 } 1015 1016 #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT 1017 /* Initialized by the architecture: */ 1018 int arch_task_struct_size __read_mostly; 1019 #endif 1020 1021 static void task_struct_whitelist(unsigned long *offset, unsigned long *size) 1022 { 1023 /* Fetch thread_struct whitelist for the architecture. */ 1024 arch_thread_struct_whitelist(offset, size); 1025 1026 /* 1027 * Handle zero-sized whitelist or empty thread_struct, otherwise 1028 * adjust offset to position of thread_struct in task_struct. 1029 */ 1030 if (unlikely(*size == 0)) 1031 *offset = 0; 1032 else 1033 *offset += offsetof(struct task_struct, thread); 1034 } 1035 1036 void __init fork_init(void) 1037 { 1038 int i; 1039 #ifndef ARCH_MIN_TASKALIGN 1040 #define ARCH_MIN_TASKALIGN 0 1041 #endif 1042 int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN); 1043 unsigned long useroffset, usersize; 1044 1045 /* create a slab on which task_structs can be allocated */ 1046 task_struct_whitelist(&useroffset, &usersize); 1047 task_struct_cachep = kmem_cache_create_usercopy("task_struct", 1048 arch_task_struct_size, align, 1049 SLAB_PANIC|SLAB_ACCOUNT, 1050 useroffset, usersize, NULL); 1051 1052 /* do the arch specific task caches init */ 1053 arch_task_cache_init(); 1054 1055 set_max_threads(MAX_THREADS); 1056 1057 init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2; 1058 init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2; 1059 init_task.signal->rlim[RLIMIT_SIGPENDING] = 1060 init_task.signal->rlim[RLIMIT_NPROC]; 1061 1062 for (i = 0; i < UCOUNT_COUNTS; i++) 1063 init_user_ns.ucount_max[i] = max_threads/2; 1064 1065 set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_NPROC, RLIM_INFINITY); 1066 set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE, RLIM_INFINITY); 1067 set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, RLIM_INFINITY); 1068 set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK, RLIM_INFINITY); 1069 1070 #ifdef CONFIG_VMAP_STACK 1071 cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache", 1072 NULL, free_vm_stack_cache); 1073 #endif 1074 1075 scs_init(); 1076 1077 lockdep_init_task(&init_task); 1078 uprobes_init(); 1079 } 1080 1081 int __weak arch_dup_task_struct(struct task_struct *dst, 1082 struct task_struct *src) 1083 { 1084 *dst = *src; 1085 return 0; 1086 } 1087 1088 void set_task_stack_end_magic(struct task_struct *tsk) 1089 { 1090 unsigned long *stackend; 1091 1092 stackend = end_of_stack(tsk); 1093 *stackend = STACK_END_MAGIC; /* for overflow detection */ 1094 } 1095 1096 static struct task_struct *dup_task_struct(struct task_struct *orig, int node) 1097 { 1098 struct task_struct *tsk; 1099 int err; 1100 1101 if (node == NUMA_NO_NODE) 1102 node = tsk_fork_get_node(orig); 1103 tsk = alloc_task_struct_node(node); 1104 if (!tsk) 1105 return NULL; 1106 1107 err = arch_dup_task_struct(tsk, orig); 1108 if (err) 1109 goto free_tsk; 1110 1111 err 
= alloc_thread_stack_node(tsk, node); 1112 if (err) 1113 goto free_tsk; 1114 1115 #ifdef CONFIG_THREAD_INFO_IN_TASK 1116 refcount_set(&tsk->stack_refcount, 1); 1117 #endif 1118 account_kernel_stack(tsk, 1); 1119 1120 err = scs_prepare(tsk, node); 1121 if (err) 1122 goto free_stack; 1123 1124 #ifdef CONFIG_SECCOMP 1125 /* 1126 * We must handle setting up seccomp filters once we're under 1127 * the sighand lock in case orig has changed between now and 1128 * then. Until then, filter must be NULL to avoid messing up 1129 * the usage counts on the error path calling free_task. 1130 */ 1131 tsk->seccomp.filter = NULL; 1132 #endif 1133 1134 setup_thread_stack(tsk, orig); 1135 clear_user_return_notifier(tsk); 1136 clear_tsk_need_resched(tsk); 1137 set_task_stack_end_magic(tsk); 1138 clear_syscall_work_syscall_user_dispatch(tsk); 1139 1140 #ifdef CONFIG_STACKPROTECTOR 1141 tsk->stack_canary = get_random_canary(); 1142 #endif 1143 if (orig->cpus_ptr == &orig->cpus_mask) 1144 tsk->cpus_ptr = &tsk->cpus_mask; 1145 dup_user_cpus_ptr(tsk, orig, node); 1146 1147 /* 1148 * One for the user space visible state that goes away when reaped. 1149 * One for the scheduler. 1150 */ 1151 refcount_set(&tsk->rcu_users, 2); 1152 /* One for the rcu users */ 1153 refcount_set(&tsk->usage, 1); 1154 #ifdef CONFIG_BLK_DEV_IO_TRACE 1155 tsk->btrace_seq = 0; 1156 #endif 1157 tsk->splice_pipe = NULL; 1158 tsk->task_frag.page = NULL; 1159 tsk->wake_q.next = NULL; 1160 tsk->worker_private = NULL; 1161 1162 kcov_task_init(tsk); 1163 kmsan_task_create(tsk); 1164 kmap_local_fork(tsk); 1165 1166 #ifdef CONFIG_FAULT_INJECTION 1167 tsk->fail_nth = 0; 1168 #endif 1169 1170 #ifdef CONFIG_BLK_CGROUP 1171 tsk->throttle_disk = NULL; 1172 tsk->use_memdelay = 0; 1173 #endif 1174 1175 #ifdef CONFIG_ARCH_HAS_CPU_PASID 1176 tsk->pasid_activated = 0; 1177 #endif 1178 1179 #ifdef CONFIG_MEMCG 1180 tsk->active_memcg = NULL; 1181 #endif 1182 1183 #ifdef CONFIG_CPU_SUP_INTEL 1184 tsk->reported_split_lock = 0; 1185 #endif 1186 1187 #ifdef CONFIG_SCHED_MM_CID 1188 tsk->mm_cid = -1; 1189 tsk->last_mm_cid = -1; 1190 tsk->mm_cid_active = 0; 1191 tsk->migrate_from_cpu = -1; 1192 #endif 1193 return tsk; 1194 1195 free_stack: 1196 exit_task_stack_account(tsk); 1197 free_thread_stack(tsk); 1198 free_tsk: 1199 free_task_struct(tsk); 1200 return NULL; 1201 } 1202 1203 __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock); 1204 1205 static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT; 1206 1207 static int __init coredump_filter_setup(char *s) 1208 { 1209 default_dump_filter = 1210 (simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) & 1211 MMF_DUMP_FILTER_MASK; 1212 return 1; 1213 } 1214 1215 __setup("coredump_filter=", coredump_filter_setup); 1216 1217 #include <linux/init_task.h> 1218 1219 static void mm_init_aio(struct mm_struct *mm) 1220 { 1221 #ifdef CONFIG_AIO 1222 spin_lock_init(&mm->ioctx_lock); 1223 mm->ioctx_table = NULL; 1224 #endif 1225 } 1226 1227 static __always_inline void mm_clear_owner(struct mm_struct *mm, 1228 struct task_struct *p) 1229 { 1230 #ifdef CONFIG_MEMCG 1231 if (mm->owner == p) 1232 WRITE_ONCE(mm->owner, NULL); 1233 #endif 1234 } 1235 1236 static void mm_init_owner(struct mm_struct *mm, struct task_struct *p) 1237 { 1238 #ifdef CONFIG_MEMCG 1239 mm->owner = p; 1240 #endif 1241 } 1242 1243 static void mm_init_uprobes_state(struct mm_struct *mm) 1244 { 1245 #ifdef CONFIG_UPROBES 1246 mm->uprobes_state.xol_area = NULL; 1247 #endif 1248 } 1249 1250 static struct mm_struct *mm_init(struct mm_struct *mm, struct 
task_struct *p, 1251 struct user_namespace *user_ns) 1252 { 1253 mt_init_flags(&mm->mm_mt, MM_MT_FLAGS); 1254 mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock); 1255 atomic_set(&mm->mm_users, 1); 1256 atomic_set(&mm->mm_count, 1); 1257 seqcount_init(&mm->write_protect_seq); 1258 mmap_init_lock(mm); 1259 INIT_LIST_HEAD(&mm->mmlist); 1260 #ifdef CONFIG_PER_VMA_LOCK 1261 mm->mm_lock_seq = 0; 1262 #endif 1263 mm_pgtables_bytes_init(mm); 1264 mm->map_count = 0; 1265 mm->locked_vm = 0; 1266 atomic64_set(&mm->pinned_vm, 0); 1267 memset(&mm->rss_stat, 0, sizeof(mm->rss_stat)); 1268 spin_lock_init(&mm->page_table_lock); 1269 spin_lock_init(&mm->arg_lock); 1270 mm_init_cpumask(mm); 1271 mm_init_aio(mm); 1272 mm_init_owner(mm, p); 1273 mm_pasid_init(mm); 1274 RCU_INIT_POINTER(mm->exe_file, NULL); 1275 mmu_notifier_subscriptions_init(mm); 1276 init_tlb_flush_pending(mm); 1277 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS 1278 mm->pmd_huge_pte = NULL; 1279 #endif 1280 mm_init_uprobes_state(mm); 1281 hugetlb_count_init(mm); 1282 1283 if (current->mm) { 1284 mm->flags = mmf_init_flags(current->mm->flags); 1285 mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK; 1286 } else { 1287 mm->flags = default_dump_filter; 1288 mm->def_flags = 0; 1289 } 1290 1291 if (mm_alloc_pgd(mm)) 1292 goto fail_nopgd; 1293 1294 if (init_new_context(p, mm)) 1295 goto fail_nocontext; 1296 1297 if (mm_alloc_cid(mm)) 1298 goto fail_cid; 1299 1300 if (percpu_counter_init_many(mm->rss_stat, 0, GFP_KERNEL_ACCOUNT, 1301 NR_MM_COUNTERS)) 1302 goto fail_pcpu; 1303 1304 mm->user_ns = get_user_ns(user_ns); 1305 lru_gen_init_mm(mm); 1306 return mm; 1307 1308 fail_pcpu: 1309 mm_destroy_cid(mm); 1310 fail_cid: 1311 destroy_context(mm); 1312 fail_nocontext: 1313 mm_free_pgd(mm); 1314 fail_nopgd: 1315 free_mm(mm); 1316 return NULL; 1317 } 1318 1319 /* 1320 * Allocate and initialize an mm_struct. 1321 */ 1322 struct mm_struct *mm_alloc(void) 1323 { 1324 struct mm_struct *mm; 1325 1326 mm = allocate_mm(); 1327 if (!mm) 1328 return NULL; 1329 1330 memset(mm, 0, sizeof(*mm)); 1331 return mm_init(mm, current, current_user_ns()); 1332 } 1333 EXPORT_SYMBOL_IF_KUNIT(mm_alloc); 1334 1335 static inline void __mmput(struct mm_struct *mm) 1336 { 1337 VM_BUG_ON(atomic_read(&mm->mm_users)); 1338 1339 uprobe_clear_state(mm); 1340 exit_aio(mm); 1341 ksm_exit(mm); 1342 khugepaged_exit(mm); /* must run before exit_mmap */ 1343 exit_mmap(mm); 1344 mm_put_huge_zero_folio(mm); 1345 set_mm_exe_file(mm, NULL); 1346 if (!list_empty(&mm->mmlist)) { 1347 spin_lock(&mmlist_lock); 1348 list_del(&mm->mmlist); 1349 spin_unlock(&mmlist_lock); 1350 } 1351 if (mm->binfmt) 1352 module_put(mm->binfmt->module); 1353 lru_gen_del_mm(mm); 1354 mmdrop(mm); 1355 } 1356 1357 /* 1358 * Decrement the use count and release all resources for an mm. 
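 *
 * (Note: mmput() may sleep -- see the might_sleep() below -- so callers
 * that cannot sleep should use mmput_async(), where available, which
 * defers the final __mmput() to a workqueue.)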
1359 */ 1360 void mmput(struct mm_struct *mm) 1361 { 1362 might_sleep(); 1363 1364 if (atomic_dec_and_test(&mm->mm_users)) 1365 __mmput(mm); 1366 } 1367 EXPORT_SYMBOL_GPL(mmput); 1368 1369 #ifdef CONFIG_MMU 1370 static void mmput_async_fn(struct work_struct *work) 1371 { 1372 struct mm_struct *mm = container_of(work, struct mm_struct, 1373 async_put_work); 1374 1375 __mmput(mm); 1376 } 1377 1378 void mmput_async(struct mm_struct *mm) 1379 { 1380 if (atomic_dec_and_test(&mm->mm_users)) { 1381 INIT_WORK(&mm->async_put_work, mmput_async_fn); 1382 schedule_work(&mm->async_put_work); 1383 } 1384 } 1385 EXPORT_SYMBOL_GPL(mmput_async); 1386 #endif 1387 1388 /** 1389 * set_mm_exe_file - change a reference to the mm's executable file 1390 * @mm: The mm to change. 1391 * @new_exe_file: The new file to use. 1392 * 1393 * This changes mm's executable file (shown as symlink /proc/[pid]/exe). 1394 * 1395 * Main users are mmput() and sys_execve(). Callers prevent concurrent 1396 * invocations: in mmput() nobody alive left, in execve it happens before 1397 * the new mm is made visible to anyone. 1398 * 1399 * Can only fail if new_exe_file != NULL. 1400 */ 1401 int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) 1402 { 1403 struct file *old_exe_file; 1404 1405 /* 1406 * It is safe to dereference the exe_file without RCU as 1407 * this function is only called if nobody else can access 1408 * this mm -- see comment above for justification. 1409 */ 1410 old_exe_file = rcu_dereference_raw(mm->exe_file); 1411 1412 if (new_exe_file) 1413 get_file(new_exe_file); 1414 rcu_assign_pointer(mm->exe_file, new_exe_file); 1415 if (old_exe_file) 1416 fput(old_exe_file); 1417 return 0; 1418 } 1419 1420 /** 1421 * replace_mm_exe_file - replace a reference to the mm's executable file 1422 * @mm: The mm to change. 1423 * @new_exe_file: The new file to use. 1424 * 1425 * This changes mm's executable file (shown as symlink /proc/[pid]/exe). 1426 * 1427 * Main user is sys_prctl(PR_SET_MM_MAP/EXE_FILE). 1428 */ 1429 int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) 1430 { 1431 struct vm_area_struct *vma; 1432 struct file *old_exe_file; 1433 int ret = 0; 1434 1435 /* Forbid mm->exe_file change if old file still mapped. */ 1436 old_exe_file = get_mm_exe_file(mm); 1437 if (old_exe_file) { 1438 VMA_ITERATOR(vmi, mm, 0); 1439 mmap_read_lock(mm); 1440 for_each_vma(vmi, vma) { 1441 if (!vma->vm_file) 1442 continue; 1443 if (path_equal(&vma->vm_file->f_path, 1444 &old_exe_file->f_path)) { 1445 ret = -EBUSY; 1446 break; 1447 } 1448 } 1449 mmap_read_unlock(mm); 1450 fput(old_exe_file); 1451 if (ret) 1452 return ret; 1453 } 1454 1455 get_file(new_exe_file); 1456 1457 /* set the new file */ 1458 mmap_write_lock(mm); 1459 old_exe_file = rcu_dereference_raw(mm->exe_file); 1460 rcu_assign_pointer(mm->exe_file, new_exe_file); 1461 mmap_write_unlock(mm); 1462 1463 if (old_exe_file) 1464 fput(old_exe_file); 1465 return 0; 1466 } 1467 1468 /** 1469 * get_mm_exe_file - acquire a reference to the mm's executable file 1470 * @mm: The mm of interest. 1471 * 1472 * Returns %NULL if mm has no associated executable file. 1473 * User must release file via fput(). 1474 */ 1475 struct file *get_mm_exe_file(struct mm_struct *mm) 1476 { 1477 struct file *exe_file; 1478 1479 rcu_read_lock(); 1480 exe_file = get_file_rcu(&mm->exe_file); 1481 rcu_read_unlock(); 1482 return exe_file; 1483 } 1484 1485 /** 1486 * get_task_exe_file - acquire a reference to the task's executable file 1487 * @task: The task. 
1488 * 1489 * Returns %NULL if task's mm (if any) has no associated executable file or 1490 * this is a kernel thread with borrowed mm (see the comment above get_task_mm). 1491 * User must release file via fput(). 1492 */ 1493 struct file *get_task_exe_file(struct task_struct *task) 1494 { 1495 struct file *exe_file = NULL; 1496 struct mm_struct *mm; 1497 1498 task_lock(task); 1499 mm = task->mm; 1500 if (mm) { 1501 if (!(task->flags & PF_KTHREAD)) 1502 exe_file = get_mm_exe_file(mm); 1503 } 1504 task_unlock(task); 1505 return exe_file; 1506 } 1507 1508 /** 1509 * get_task_mm - acquire a reference to the task's mm 1510 * @task: The task. 1511 * 1512 * Returns %NULL if the task has no mm. Checks PF_KTHREAD (meaning 1513 * this kernel workthread has transiently adopted a user mm with use_mm, 1514 * to do its AIO) is not set and if so returns a reference to it, after 1515 * bumping up the use count. User must release the mm via mmput() 1516 * after use. Typically used by /proc and ptrace. 1517 */ 1518 struct mm_struct *get_task_mm(struct task_struct *task) 1519 { 1520 struct mm_struct *mm; 1521 1522 task_lock(task); 1523 mm = task->mm; 1524 if (mm) { 1525 if (task->flags & PF_KTHREAD) 1526 mm = NULL; 1527 else 1528 mmget(mm); 1529 } 1530 task_unlock(task); 1531 return mm; 1532 } 1533 EXPORT_SYMBOL_GPL(get_task_mm); 1534 1535 struct mm_struct *mm_access(struct task_struct *task, unsigned int mode) 1536 { 1537 struct mm_struct *mm; 1538 int err; 1539 1540 err = down_read_killable(&task->signal->exec_update_lock); 1541 if (err) 1542 return ERR_PTR(err); 1543 1544 mm = get_task_mm(task); 1545 if (mm && mm != current->mm && 1546 !ptrace_may_access(task, mode)) { 1547 mmput(mm); 1548 mm = ERR_PTR(-EACCES); 1549 } 1550 up_read(&task->signal->exec_update_lock); 1551 1552 return mm; 1553 } 1554 1555 static void complete_vfork_done(struct task_struct *tsk) 1556 { 1557 struct completion *vfork; 1558 1559 task_lock(tsk); 1560 vfork = tsk->vfork_done; 1561 if (likely(vfork)) { 1562 tsk->vfork_done = NULL; 1563 complete(vfork); 1564 } 1565 task_unlock(tsk); 1566 } 1567 1568 static int wait_for_vfork_done(struct task_struct *child, 1569 struct completion *vfork) 1570 { 1571 unsigned int state = TASK_KILLABLE|TASK_FREEZABLE; 1572 int killed; 1573 1574 cgroup_enter_frozen(); 1575 killed = wait_for_completion_state(vfork, state); 1576 cgroup_leave_frozen(false); 1577 1578 if (killed) { 1579 task_lock(child); 1580 child->vfork_done = NULL; 1581 task_unlock(child); 1582 } 1583 1584 put_task_struct(child); 1585 return killed; 1586 } 1587 1588 /* Please note the differences between mmput and mm_release. 1589 * mmput is called whenever we stop holding onto a mm_struct, 1590 * error success whatever. 1591 * 1592 * mm_release is called after a mm_struct has been removed 1593 * from the current process. 1594 * 1595 * This difference is important for error handling, when we 1596 * only half set up a mm_struct for a new process and need to restore 1597 * the old one. Because we mmput the new mm_struct before 1598 * restoring the old one. . . 1599 * Eric Biederman 10 January 1998 1600 */ 1601 static void mm_release(struct task_struct *tsk, struct mm_struct *mm) 1602 { 1603 uprobe_free_utask(tsk); 1604 1605 /* Get rid of any cached register state */ 1606 deactivate_mm(tsk, mm); 1607 1608 /* 1609 * Signal userspace if we're not exiting with a core dump 1610 * because we want to leave the value intact for debugging 1611 * purposes. 
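	 *
	 * (This is the CLONE_CHILD_CLEARTID side of the clone() contract:
	 * zero the registered TID address and FUTEX_WAKE one waiter, which
	 * is typically what a pthread_join()-style waiter blocks on.)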
1612 */ 1613 if (tsk->clear_child_tid) { 1614 if (atomic_read(&mm->mm_users) > 1) { 1615 /* 1616 * We don't check the error code - if userspace has 1617 * not set up a proper pointer then tough luck. 1618 */ 1619 put_user(0, tsk->clear_child_tid); 1620 do_futex(tsk->clear_child_tid, FUTEX_WAKE, 1621 1, NULL, NULL, 0, 0); 1622 } 1623 tsk->clear_child_tid = NULL; 1624 } 1625 1626 /* 1627 * All done, finally we can wake up parent and return this mm to him. 1628 * Also kthread_stop() uses this completion for synchronization. 1629 */ 1630 if (tsk->vfork_done) 1631 complete_vfork_done(tsk); 1632 } 1633 1634 void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm) 1635 { 1636 futex_exit_release(tsk); 1637 mm_release(tsk, mm); 1638 } 1639 1640 void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm) 1641 { 1642 futex_exec_release(tsk); 1643 mm_release(tsk, mm); 1644 } 1645 1646 /** 1647 * dup_mm() - duplicates an existing mm structure 1648 * @tsk: the task_struct with which the new mm will be associated. 1649 * @oldmm: the mm to duplicate. 1650 * 1651 * Allocates a new mm structure and duplicates the provided @oldmm structure 1652 * content into it. 1653 * 1654 * Return: the duplicated mm or NULL on failure. 1655 */ 1656 static struct mm_struct *dup_mm(struct task_struct *tsk, 1657 struct mm_struct *oldmm) 1658 { 1659 struct mm_struct *mm; 1660 int err; 1661 1662 mm = allocate_mm(); 1663 if (!mm) 1664 goto fail_nomem; 1665 1666 memcpy(mm, oldmm, sizeof(*mm)); 1667 1668 if (!mm_init(mm, tsk, mm->user_ns)) 1669 goto fail_nomem; 1670 1671 err = dup_mmap(mm, oldmm); 1672 if (err) 1673 goto free_pt; 1674 1675 mm->hiwater_rss = get_mm_rss(mm); 1676 mm->hiwater_vm = mm->total_vm; 1677 1678 if (mm->binfmt && !try_module_get(mm->binfmt->module)) 1679 goto free_pt; 1680 1681 return mm; 1682 1683 free_pt: 1684 /* don't put binfmt in mmput, we haven't got module yet */ 1685 mm->binfmt = NULL; 1686 mm_init_owner(mm, NULL); 1687 mmput(mm); 1688 1689 fail_nomem: 1690 return NULL; 1691 } 1692 1693 static int copy_mm(unsigned long clone_flags, struct task_struct *tsk) 1694 { 1695 struct mm_struct *mm, *oldmm; 1696 1697 tsk->min_flt = tsk->maj_flt = 0; 1698 tsk->nvcsw = tsk->nivcsw = 0; 1699 #ifdef CONFIG_DETECT_HUNG_TASK 1700 tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw; 1701 tsk->last_switch_time = 0; 1702 #endif 1703 1704 tsk->mm = NULL; 1705 tsk->active_mm = NULL; 1706 1707 /* 1708 * Are we cloning a kernel thread? 1709 * 1710 * We need to steal a active VM for that.. 
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	if (clone_flags & CLONE_VM) {
		mmget(oldmm);
		mm = oldmm;
	} else {
		mm = dup_mm(tsk, current->mm);
		if (!mm)
			return -ENOMEM;
	}

	tsk->mm = mm;
	tsk->active_mm = mm;
	sched_mm_cid_fork(tsk);
	return 0;
}

static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
	struct fs_struct *fs = current->fs;
	if (clone_flags & CLONE_FS) {
		/* tsk->fs is already what we want */
		spin_lock(&fs->lock);
		/* "users" and "in_exec" locked for check_unsafe_exec() */
		if (fs->in_exec) {
			spin_unlock(&fs->lock);
			return -EAGAIN;
		}
		fs->users++;
		spin_unlock(&fs->lock);
		return 0;
	}
	tsk->fs = copy_fs_struct(fs);
	if (!tsk->fs)
		return -ENOMEM;
	return 0;
}

static int copy_files(unsigned long clone_flags, struct task_struct *tsk,
		      int no_files)
{
	struct files_struct *oldf, *newf;
	int error = 0;

	/*
	 * A background process may not have any files ...
	 */
	oldf = current->files;
	if (!oldf)
		goto out;

	if (no_files) {
		tsk->files = NULL;
		goto out;
	}

	if (clone_flags & CLONE_FILES) {
		atomic_inc(&oldf->count);
		goto out;
	}

	newf = dup_fd(oldf, NR_OPEN_MAX, &error);
	if (!newf)
		goto out;

	tsk->files = newf;
	error = 0;
out:
	return error;
}

static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sighand_struct *sig;

	if (clone_flags & CLONE_SIGHAND) {
		refcount_inc(&current->sighand->count);
		return 0;
	}
	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	RCU_INIT_POINTER(tsk->sighand, sig);
	if (!sig)
		return -ENOMEM;

	refcount_set(&sig->count, 1);
	spin_lock_irq(&current->sighand->siglock);
	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
	spin_unlock_irq(&current->sighand->siglock);

	/* Reset all signal handlers not set to SIG_IGN to SIG_DFL. */
	if (clone_flags & CLONE_CLEAR_SIGHAND)
		flush_signal_handlers(tsk, 0);

	return 0;
}

void __cleanup_sighand(struct sighand_struct *sighand)
{
	if (refcount_dec_and_test(&sighand->count)) {
		signalfd_cleanup(sighand);
		/*
		 * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it
		 * without an RCU grace period, see __lock_task_sighand().
		 */
		kmem_cache_free(sighand_cachep, sighand);
	}
}

/*
 * Initialize POSIX timer handling for a thread group.
 */
static void posix_cpu_timers_init_group(struct signal_struct *sig)
{
	struct posix_cputimers *pct = &sig->posix_cputimers;
	unsigned long cpu_limit;

	cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
	posix_cputimers_group_init(pct, cpu_limit);
}

static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (clone_flags & CLONE_THREAD)
		return 0;

	sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	sig->nr_threads = 1;
	sig->quick_threads = 1;
	atomic_set(&sig->live, 1);
	refcount_set(&sig->sigcnt, 1);

	/* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
	sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
	tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);

	init_waitqueue_head(&sig->wait_chldexit);
	sig->curr_target = tsk;
	init_sigpending(&sig->shared_pending);
	INIT_HLIST_HEAD(&sig->multiprocess);
	seqlock_init(&sig->stats_lock);
	prev_cputime_init(&sig->prev_cputime);

#ifdef CONFIG_POSIX_TIMERS
	INIT_LIST_HEAD(&sig->posix_timers);
	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sig->real_timer.function = it_real_fn;
#endif

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	posix_cpu_timers_init_group(sig);

	tty_audit_fork(sig);
	sched_autogroup_fork(sig);

	sig->oom_score_adj = current->signal->oom_score_adj;
	sig->oom_score_adj_min = current->signal->oom_score_adj_min;

	mutex_init(&sig->cred_guard_mutex);
	init_rwsem(&sig->exec_update_lock);

	return 0;
}

static void copy_seccomp(struct task_struct *p)
{
#ifdef CONFIG_SECCOMP
	/*
	 * Must be called with sighand->lock held, which is common to
	 * all threads in the group. Holding cred_guard_mutex is not
	 * needed because this new task is not yet running and cannot
	 * be racing exec.
	 */
	assert_spin_locked(&current->sighand->siglock);

	/* Ref-count the new filter user, and assign it. */
	get_seccomp_filter(current);
	p->seccomp = current->seccomp;

	/*
	 * Explicitly enable no_new_privs here in case it got set
	 * between the task_struct being duplicated and holding the
	 * sighand lock. The seccomp state and nnp must be in sync.
	 */
	if (task_no_new_privs(current))
		task_set_no_new_privs(p);

	/*
	 * If the parent gained a seccomp mode after copying thread
	 * flags and before we took the sighand lock, we have
	 * to manually enable the seccomp thread flag here.
1913 */ 1914 if (p->seccomp.mode != SECCOMP_MODE_DISABLED) 1915 set_task_syscall_work(p, SECCOMP); 1916 #endif 1917 } 1918 1919 SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr) 1920 { 1921 current->clear_child_tid = tidptr; 1922 1923 return task_pid_vnr(current); 1924 } 1925 1926 static void rt_mutex_init_task(struct task_struct *p) 1927 { 1928 raw_spin_lock_init(&p->pi_lock); 1929 #ifdef CONFIG_RT_MUTEXES 1930 p->pi_waiters = RB_ROOT_CACHED; 1931 p->pi_top_task = NULL; 1932 p->pi_blocked_on = NULL; 1933 #endif 1934 } 1935 1936 static inline void init_task_pid_links(struct task_struct *task) 1937 { 1938 enum pid_type type; 1939 1940 for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) 1941 INIT_HLIST_NODE(&task->pid_links[type]); 1942 } 1943 1944 static inline void 1945 init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid) 1946 { 1947 if (type == PIDTYPE_PID) 1948 task->thread_pid = pid; 1949 else 1950 task->signal->pids[type] = pid; 1951 } 1952 1953 static inline void rcu_copy_process(struct task_struct *p) 1954 { 1955 #ifdef CONFIG_PREEMPT_RCU 1956 p->rcu_read_lock_nesting = 0; 1957 p->rcu_read_unlock_special.s = 0; 1958 p->rcu_blocked_node = NULL; 1959 INIT_LIST_HEAD(&p->rcu_node_entry); 1960 #endif /* #ifdef CONFIG_PREEMPT_RCU */ 1961 #ifdef CONFIG_TASKS_RCU 1962 p->rcu_tasks_holdout = false; 1963 INIT_LIST_HEAD(&p->rcu_tasks_holdout_list); 1964 p->rcu_tasks_idle_cpu = -1; 1965 INIT_LIST_HEAD(&p->rcu_tasks_exit_list); 1966 #endif /* #ifdef CONFIG_TASKS_RCU */ 1967 #ifdef CONFIG_TASKS_TRACE_RCU 1968 p->trc_reader_nesting = 0; 1969 p->trc_reader_special.s = 0; 1970 INIT_LIST_HEAD(&p->trc_holdout_list); 1971 INIT_LIST_HEAD(&p->trc_blkd_node); 1972 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ 1973 } 1974 1975 /** 1976 * __pidfd_prepare - allocate a new pidfd_file and reserve a pidfd 1977 * @pid: the struct pid for which to create a pidfd 1978 * @flags: flags of the new @pidfd 1979 * @ret: Where to return the file for the pidfd. 1980 * 1981 * Allocate a new file that stashes @pid and reserve a new pidfd number in the 1982 * caller's file descriptor table. The pidfd is reserved but not installed yet. 1983 * 1984 * The helper doesn't perform checks on @pid which makes it useful for pidfds 1985 * created via CLONE_PIDFD where @pid has no task attached when the pidfd and 1986 * pidfd file are prepared. 1987 * 1988 * If this function returns successfully the caller is responsible to either 1989 * call fd_install() passing the returned pidfd and pidfd file as arguments in 1990 * order to install the pidfd into its file descriptor table or they must use 1991 * put_unused_fd() and fput() on the returned pidfd and pidfd file 1992 * respectively. 1993 * 1994 * This function is useful when a pidfd must already be reserved but there 1995 * might still be points of failure afterwards and the caller wants to ensure 1996 * that no pidfd is leaked into its file descriptor table. 1997 * 1998 * Return: On success, a reserved pidfd is returned from the function and a new 1999 * pidfd file is returned in the last argument to the function. On 2000 * error, a negative error code is returned from the function and the 2001 * last argument remains unchanged. 
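 *
 * A minimal caller sketch of the contract described above (illustrative
 * only; later_setup() is a stand-in for whatever work might still fail
 * before the pidfd is installed):
 *
 *	struct file *pidfd_file;
 *	int pidfd, err;
 *
 *	pidfd = __pidfd_prepare(pid, flags, &pidfd_file);
 *	if (pidfd < 0)
 *		return pidfd;
 *	err = later_setup();
 *	if (err) {
 *		put_unused_fd(pidfd);
 *		fput(pidfd_file);
 *		return err;
 *	}
 *	fd_install(pidfd, pidfd_file);
 *	return pidfd;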
2002 */ 2003 static int __pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret) 2004 { 2005 int pidfd; 2006 struct file *pidfd_file; 2007 2008 pidfd = get_unused_fd_flags(O_CLOEXEC); 2009 if (pidfd < 0) 2010 return pidfd; 2011 2012 pidfd_file = pidfs_alloc_file(pid, flags | O_RDWR); 2013 if (IS_ERR(pidfd_file)) { 2014 put_unused_fd(pidfd); 2015 return PTR_ERR(pidfd_file); 2016 } 2017 /* 2018 * anon_inode_getfile() ignores everything outside of the 2019 * O_ACCMODE | O_NONBLOCK mask, set PIDFD_THREAD manually. 2020 */ 2021 pidfd_file->f_flags |= (flags & PIDFD_THREAD); 2022 *ret = pidfd_file; 2023 return pidfd; 2024 } 2025 2026 /** 2027 * pidfd_prepare - allocate a new pidfd_file and reserve a pidfd 2028 * @pid: the struct pid for which to create a pidfd 2029 * @flags: flags of the new @pidfd 2030 * @ret: Where to return the pidfd. 2031 * 2032 * Allocate a new file that stashes @pid and reserve a new pidfd number in the 2033 * caller's file descriptor table. The pidfd is reserved but not installed yet. 2034 * 2035 * The helper verifies that @pid is still in use, without PIDFD_THREAD the 2036 * task identified by @pid must be a thread-group leader. 2037 * 2038 * If this function returns successfully the caller is responsible to either 2039 * call fd_install() passing the returned pidfd and pidfd file as arguments in 2040 * order to install the pidfd into its file descriptor table or they must use 2041 * put_unused_fd() and fput() on the returned pidfd and pidfd file 2042 * respectively. 2043 * 2044 * This function is useful when a pidfd must already be reserved but there 2045 * might still be points of failure afterwards and the caller wants to ensure 2046 * that no pidfd is leaked into its file descriptor table. 2047 * 2048 * Return: On success, a reserved pidfd is returned from the function and a new 2049 * pidfd file is returned in the last argument to the function. On 2050 * error, a negative error code is returned from the function and the 2051 * last argument remains unchanged. 2052 */ 2053 int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret) 2054 { 2055 bool thread = flags & PIDFD_THREAD; 2056 2057 if (!pid || !pid_has_task(pid, thread ? 
PIDTYPE_PID : PIDTYPE_TGID)) 2058 return -EINVAL; 2059 2060 return __pidfd_prepare(pid, flags, ret); 2061 } 2062 2063 static void __delayed_free_task(struct rcu_head *rhp) 2064 { 2065 struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); 2066 2067 free_task(tsk); 2068 } 2069 2070 static __always_inline void delayed_free_task(struct task_struct *tsk) 2071 { 2072 if (IS_ENABLED(CONFIG_MEMCG)) 2073 call_rcu(&tsk->rcu, __delayed_free_task); 2074 else 2075 free_task(tsk); 2076 } 2077 2078 static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk) 2079 { 2080 /* Skip if kernel thread */ 2081 if (!tsk->mm) 2082 return; 2083 2084 /* Skip if spawning a thread or using vfork */ 2085 if ((clone_flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK)) != CLONE_VM) 2086 return; 2087 2088 /* We need to synchronize with __set_oom_adj */ 2089 mutex_lock(&oom_adj_mutex); 2090 set_bit(MMF_MULTIPROCESS, &tsk->mm->flags); 2091 /* Update the values in case they were changed after copy_signal */ 2092 tsk->signal->oom_score_adj = current->signal->oom_score_adj; 2093 tsk->signal->oom_score_adj_min = current->signal->oom_score_adj_min; 2094 mutex_unlock(&oom_adj_mutex); 2095 } 2096 2097 #ifdef CONFIG_RV 2098 static void rv_task_fork(struct task_struct *p) 2099 { 2100 int i; 2101 2102 for (i = 0; i < RV_PER_TASK_MONITORS; i++) 2103 p->rv[i].da_mon.monitoring = false; 2104 } 2105 #else 2106 #define rv_task_fork(p) do {} while (0) 2107 #endif 2108 2109 /* 2110 * This creates a new process as a copy of the old one, 2111 * but does not actually start it yet. 2112 * 2113 * It copies the registers, and all the appropriate 2114 * parts of the process environment (as per the clone 2115 * flags). The actual kick-off is left to the caller. 2116 */ 2117 __latent_entropy struct task_struct *copy_process( 2118 struct pid *pid, 2119 int trace, 2120 int node, 2121 struct kernel_clone_args *args) 2122 { 2123 int pidfd = -1, retval; 2124 struct task_struct *p; 2125 struct multiprocess_signals delayed; 2126 struct file *pidfile = NULL; 2127 const u64 clone_flags = args->flags; 2128 struct nsproxy *nsp = current->nsproxy; 2129 2130 /* 2131 * Don't allow sharing the root directory with processes in a different 2132 * namespace 2133 */ 2134 if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) 2135 return ERR_PTR(-EINVAL); 2136 2137 if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) 2138 return ERR_PTR(-EINVAL); 2139 2140 /* 2141 * Thread groups must share signals as well, and detached threads 2142 * can only be started up within the thread group. 2143 */ 2144 if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND)) 2145 return ERR_PTR(-EINVAL); 2146 2147 /* 2148 * Shared signal handlers imply shared VM. By way of the above, 2149 * thread groups also imply shared VM. Blocking this case allows 2150 * for various simplifications in other code. 2151 */ 2152 if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM)) 2153 return ERR_PTR(-EINVAL); 2154 2155 /* 2156 * Siblings of global init remain as zombies on exit since they are 2157 * not reaped by their parent (swapper). To solve this and to avoid 2158 * multi-rooted process trees, prevent global and container-inits 2159 * from creating siblings. 
2160 	 */
2161 	if ((clone_flags & CLONE_PARENT) &&
2162 				current->signal->flags & SIGNAL_UNKILLABLE)
2163 		return ERR_PTR(-EINVAL);
2164 
2165 	/*
2166 	 * If the new process will be in a different pid or user namespace
2167 	 * do not allow it to share a thread group with the forking task.
2168 	 */
2169 	if (clone_flags & CLONE_THREAD) {
2170 		if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
2171 		    (task_active_pid_ns(current) != nsp->pid_ns_for_children))
2172 			return ERR_PTR(-EINVAL);
2173 	}
2174 
2175 	if (clone_flags & CLONE_PIDFD) {
2176 		/*
2177 		 * - CLONE_DETACHED is blocked so that we can potentially
2178 		 *   reuse it later for CLONE_PIDFD.
2179 		 */
2180 		if (clone_flags & CLONE_DETACHED)
2181 			return ERR_PTR(-EINVAL);
2182 	}
2183 
2184 	/*
2185 	 * Force any signals received before this point to be delivered
2186 	 * before the fork happens. Collect up signals sent to multiple
2187 	 * processes that happen during the fork and delay them so that
2188 	 * they appear to happen after the fork.
2189 	 */
2190 	sigemptyset(&delayed.signal);
2191 	INIT_HLIST_NODE(&delayed.node);
2192 
2193 	spin_lock_irq(&current->sighand->siglock);
2194 	if (!(clone_flags & CLONE_THREAD))
2195 		hlist_add_head(&delayed.node, &current->signal->multiprocess);
2196 	recalc_sigpending();
2197 	spin_unlock_irq(&current->sighand->siglock);
2198 	retval = -ERESTARTNOINTR;
2199 	if (task_sigpending(current))
2200 		goto fork_out;
2201 
2202 	retval = -ENOMEM;
2203 	p = dup_task_struct(current, node);
2204 	if (!p)
2205 		goto fork_out;
2206 	p->flags &= ~PF_KTHREAD;
2207 	if (args->kthread)
2208 		p->flags |= PF_KTHREAD;
2209 	if (args->user_worker) {
2210 		/*
2211 		 * Mark us a user worker, and block any signal that isn't
2212 		 * fatal or STOP
2213 		 */
2214 		p->flags |= PF_USER_WORKER;
2215 		siginitsetinv(&p->blocked, sigmask(SIGKILL)|sigmask(SIGSTOP));
2216 	}
2217 	if (args->io_thread)
2218 		p->flags |= PF_IO_WORKER;
2219 
2220 	if (args->name)
2221 		strscpy_pad(p->comm, args->name, sizeof(p->comm));
2222 
2223 	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? args->child_tid : NULL;
2224 	/*
2225 	 * Clear TID on mm_release()?
2226 	 */
2227 	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? args->child_tid : NULL;
2228 
2229 	ftrace_graph_init_task(p);
2230 
2231 	rt_mutex_init_task(p);
2232 
2233 	lockdep_assert_irqs_enabled();
2234 #ifdef CONFIG_PROVE_LOCKING
2235 	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
2236 #endif
2237 	retval = copy_creds(p, clone_flags);
2238 	if (retval < 0)
2239 		goto bad_fork_free;
2240 
2241 	retval = -EAGAIN;
2242 	if (is_rlimit_overlimit(task_ucounts(p), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) {
2243 		if (p->real_cred->user != INIT_USER &&
2244 		    !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
2245 			goto bad_fork_cleanup_count;
2246 	}
2247 	current->flags &= ~PF_NPROC_EXCEEDED;
2248 
2249 	/*
2250 	 * If multiple threads are within copy_process(), then this check
2251 	 * triggers too late. This doesn't hurt, the check is only there
2252 	 * to stop root fork bombs.
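	 *
	 * (Illustrative, as seen from userspace: when this limit or the
	 * RLIMIT_NPROC check above trips for an unprivileged caller, fork()
	 * and clone() fail with EAGAIN. A minimal sketch, assuming the
	 * caller's user already owns at least one process:
	 *
	 *	struct rlimit rl = { .rlim_cur = 1, .rlim_max = 1 };
	 *	setrlimit(RLIMIT_NPROC, &rl);
	 *	pid_t child = fork();
	 *
	 * leaves child == -1 with errno set to EAGAIN.)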
2253 */ 2254 retval = -EAGAIN; 2255 if (data_race(nr_threads >= max_threads)) 2256 goto bad_fork_cleanup_count; 2257 2258 delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ 2259 p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE | PF_NO_SETAFFINITY); 2260 p->flags |= PF_FORKNOEXEC; 2261 INIT_LIST_HEAD(&p->children); 2262 INIT_LIST_HEAD(&p->sibling); 2263 rcu_copy_process(p); 2264 p->vfork_done = NULL; 2265 spin_lock_init(&p->alloc_lock); 2266 2267 init_sigpending(&p->pending); 2268 2269 p->utime = p->stime = p->gtime = 0; 2270 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME 2271 p->utimescaled = p->stimescaled = 0; 2272 #endif 2273 prev_cputime_init(&p->prev_cputime); 2274 2275 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN 2276 seqcount_init(&p->vtime.seqcount); 2277 p->vtime.starttime = 0; 2278 p->vtime.state = VTIME_INACTIVE; 2279 #endif 2280 2281 #ifdef CONFIG_IO_URING 2282 p->io_uring = NULL; 2283 #endif 2284 2285 p->default_timer_slack_ns = current->timer_slack_ns; 2286 2287 #ifdef CONFIG_PSI 2288 p->psi_flags = 0; 2289 #endif 2290 2291 task_io_accounting_init(&p->ioac); 2292 acct_clear_integrals(p); 2293 2294 posix_cputimers_init(&p->posix_cputimers); 2295 2296 p->io_context = NULL; 2297 audit_set_context(p, NULL); 2298 cgroup_fork(p); 2299 if (args->kthread) { 2300 if (!set_kthread_struct(p)) 2301 goto bad_fork_cleanup_delayacct; 2302 } 2303 #ifdef CONFIG_NUMA 2304 p->mempolicy = mpol_dup(p->mempolicy); 2305 if (IS_ERR(p->mempolicy)) { 2306 retval = PTR_ERR(p->mempolicy); 2307 p->mempolicy = NULL; 2308 goto bad_fork_cleanup_delayacct; 2309 } 2310 #endif 2311 #ifdef CONFIG_CPUSETS 2312 p->cpuset_mem_spread_rotor = NUMA_NO_NODE; 2313 p->cpuset_slab_spread_rotor = NUMA_NO_NODE; 2314 seqcount_spinlock_init(&p->mems_allowed_seq, &p->alloc_lock); 2315 #endif 2316 #ifdef CONFIG_TRACE_IRQFLAGS 2317 memset(&p->irqtrace, 0, sizeof(p->irqtrace)); 2318 p->irqtrace.hardirq_disable_ip = _THIS_IP_; 2319 p->irqtrace.softirq_enable_ip = _THIS_IP_; 2320 p->softirqs_enabled = 1; 2321 p->softirq_context = 0; 2322 #endif 2323 2324 p->pagefault_disabled = 0; 2325 2326 #ifdef CONFIG_LOCKDEP 2327 lockdep_init_task(p); 2328 #endif 2329 2330 #ifdef CONFIG_DEBUG_MUTEXES 2331 p->blocked_on = NULL; /* not blocked yet */ 2332 #endif 2333 #ifdef CONFIG_BCACHE 2334 p->sequential_io = 0; 2335 p->sequential_io_avg = 0; 2336 #endif 2337 #ifdef CONFIG_BPF_SYSCALL 2338 RCU_INIT_POINTER(p->bpf_storage, NULL); 2339 p->bpf_ctx = NULL; 2340 #endif 2341 2342 /* Perform scheduler related setup. Assign this task to a CPU. 
*/ 2343 retval = sched_fork(clone_flags, p); 2344 if (retval) 2345 goto bad_fork_cleanup_policy; 2346 2347 retval = perf_event_init_task(p, clone_flags); 2348 if (retval) 2349 goto bad_fork_cleanup_policy; 2350 retval = audit_alloc(p); 2351 if (retval) 2352 goto bad_fork_cleanup_perf; 2353 /* copy all the process information */ 2354 shm_init_task(p); 2355 retval = security_task_alloc(p, clone_flags); 2356 if (retval) 2357 goto bad_fork_cleanup_audit; 2358 retval = copy_semundo(clone_flags, p); 2359 if (retval) 2360 goto bad_fork_cleanup_security; 2361 retval = copy_files(clone_flags, p, args->no_files); 2362 if (retval) 2363 goto bad_fork_cleanup_semundo; 2364 retval = copy_fs(clone_flags, p); 2365 if (retval) 2366 goto bad_fork_cleanup_files; 2367 retval = copy_sighand(clone_flags, p); 2368 if (retval) 2369 goto bad_fork_cleanup_fs; 2370 retval = copy_signal(clone_flags, p); 2371 if (retval) 2372 goto bad_fork_cleanup_sighand; 2373 retval = copy_mm(clone_flags, p); 2374 if (retval) 2375 goto bad_fork_cleanup_signal; 2376 retval = copy_namespaces(clone_flags, p); 2377 if (retval) 2378 goto bad_fork_cleanup_mm; 2379 retval = copy_io(clone_flags, p); 2380 if (retval) 2381 goto bad_fork_cleanup_namespaces; 2382 retval = copy_thread(p, args); 2383 if (retval) 2384 goto bad_fork_cleanup_io; 2385 2386 stackleak_task_init(p); 2387 2388 if (pid != &init_struct_pid) { 2389 pid = alloc_pid(p->nsproxy->pid_ns_for_children, args->set_tid, 2390 args->set_tid_size); 2391 if (IS_ERR(pid)) { 2392 retval = PTR_ERR(pid); 2393 goto bad_fork_cleanup_thread; 2394 } 2395 } 2396 2397 /* 2398 * This has to happen after we've potentially unshared the file 2399 * descriptor table (so that the pidfd doesn't leak into the child 2400 * if the fd table isn't shared). 2401 */ 2402 if (clone_flags & CLONE_PIDFD) { 2403 int flags = (clone_flags & CLONE_THREAD) ? PIDFD_THREAD : 0; 2404 2405 /* Note that no task has been attached to @pid yet. */ 2406 retval = __pidfd_prepare(pid, flags, &pidfile); 2407 if (retval < 0) 2408 goto bad_fork_free_pid; 2409 pidfd = retval; 2410 2411 retval = put_user(pidfd, args->pidfd); 2412 if (retval) 2413 goto bad_fork_put_pidfd; 2414 } 2415 2416 #ifdef CONFIG_BLOCK 2417 p->plug = NULL; 2418 #endif 2419 futex_init_task(p); 2420 2421 /* 2422 * sigaltstack should be cleared when sharing the same VM 2423 */ 2424 if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM) 2425 sas_ss_reset(p); 2426 2427 /* 2428 * Syscall tracing and stepping should be turned off in the 2429 * child regardless of CLONE_PTRACE. 2430 */ 2431 user_disable_single_step(p); 2432 clear_task_syscall_work(p, SYSCALL_TRACE); 2433 #if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU) 2434 clear_task_syscall_work(p, SYSCALL_EMU); 2435 #endif 2436 clear_tsk_latency_tracing(p); 2437 2438 /* ok, now we should be set up.. */ 2439 p->pid = pid_nr(pid); 2440 if (clone_flags & CLONE_THREAD) { 2441 p->group_leader = current->group_leader; 2442 p->tgid = current->tgid; 2443 } else { 2444 p->group_leader = p; 2445 p->tgid = p->pid; 2446 } 2447 2448 p->nr_dirtied = 0; 2449 p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10); 2450 p->dirty_paused_when = 0; 2451 2452 p->pdeath_signal = 0; 2453 p->task_works = NULL; 2454 clear_posix_cputimers_work(p); 2455 2456 #ifdef CONFIG_KRETPROBES 2457 p->kretprobe_instances.first = NULL; 2458 #endif 2459 #ifdef CONFIG_RETHOOK 2460 p->rethooks.first = NULL; 2461 #endif 2462 2463 /* 2464 * Ensure that the cgroup subsystem policies allow the new process to be 2465 * forked. 
It should be noted that the new process's css_set can be changed
2466 	 * between here and cgroup_post_fork() if an organisation operation is in
2467 	 * progress.
2468 	 */
2469 	retval = cgroup_can_fork(p, args);
2470 	if (retval)
2471 		goto bad_fork_put_pidfd;
2472 
2473 	/*
2474 	 * Now that the cgroups are pinned, re-clone the parent cgroup and put
2475 	 * the new task on the correct runqueue. All this *before* the task
2476 	 * becomes visible.
2477 	 *
2478 	 * This isn't part of ->can_fork() because while the re-cloning is
2479 	 * cgroup specific, it unconditionally needs to place the task on a
2480 	 * runqueue.
2481 	 */
2482 	sched_cgroup_fork(p, args);
2483 
2484 	/*
2485 	 * From this point on we must avoid any synchronous user-space
2486 	 * communication until we take the tasklist-lock. In particular, we do
2487 	 * not want user-space to be able to predict the process start-time by
2488 	 * stalling fork(2) after we recorded the start_time but before it is
2489 	 * visible to the system.
2490 	 */
2491 
2492 	p->start_time = ktime_get_ns();
2493 	p->start_boottime = ktime_get_boottime_ns();
2494 
2495 	/*
2496 	 * Make it visible to the rest of the system, but don't wake it up yet.
2497 	 * Need tasklist lock for parent etc handling!
2498 	 */
2499 	write_lock_irq(&tasklist_lock);
2500 
2501 	/* CLONE_PARENT re-uses the old parent */
2502 	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
2503 		p->real_parent = current->real_parent;
2504 		p->parent_exec_id = current->parent_exec_id;
2505 		if (clone_flags & CLONE_THREAD)
2506 			p->exit_signal = -1;
2507 		else
2508 			p->exit_signal = current->group_leader->exit_signal;
2509 	} else {
2510 		p->real_parent = current;
2511 		p->parent_exec_id = current->self_exec_id;
2512 		p->exit_signal = args->exit_signal;
2513 	}
2514 
2515 	klp_copy_process(p);
2516 
2517 	sched_core_fork(p);
2518 
2519 	spin_lock(&current->sighand->siglock);
2520 
2521 	rv_task_fork(p);
2522 
2523 	rseq_fork(p, clone_flags);
2524 
2525 	/* Don't start children in a dying pid namespace */
2526 	if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) {
2527 		retval = -ENOMEM;
2528 		goto bad_fork_cancel_cgroup;
2529 	}
2530 
2531 	/* Let kill terminate clone/fork in the middle */
2532 	if (fatal_signal_pending(current)) {
2533 		retval = -EINTR;
2534 		goto bad_fork_cancel_cgroup;
2535 	}
2536 
2537 	/* No more failure paths after this point. */
2538 
2539 	/*
2540 	 * Copy seccomp details explicitly here, in case they were changed
2541 	 * before holding sighand lock.
2542 	 */
2543 	copy_seccomp(p);
2544 
2545 	init_task_pid_links(p);
2546 	if (likely(p->pid)) {
2547 		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
2548 
2549 		init_task_pid(p, PIDTYPE_PID, pid);
2550 		if (thread_group_leader(p)) {
2551 			init_task_pid(p, PIDTYPE_TGID, pid);
2552 			init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
2553 			init_task_pid(p, PIDTYPE_SID, task_session(current));
2554 
2555 			if (is_child_reaper(pid)) {
2556 				ns_of_pid(pid)->child_reaper = p;
2557 				p->signal->flags |= SIGNAL_UNKILLABLE;
2558 			}
2559 			p->signal->shared_pending.signal = delayed.signal;
2560 			p->signal->tty = tty_kref_get(current->signal->tty);
2561 			/*
2562 			 * Inherit has_child_subreaper flag under the same
2563 			 * tasklist_lock with adding child to the process tree
2564 			 * for propagate_has_child_subreaper optimization.
2565 			 */
2566 			p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper ||
2567 							 p->real_parent->signal->is_child_subreaper;
2568 			list_add_tail(&p->sibling, &p->real_parent->children);
2569 			list_add_tail_rcu(&p->tasks, &init_task.tasks);
2570 			attach_pid(p, PIDTYPE_TGID);
2571 			attach_pid(p, PIDTYPE_PGID);
2572 			attach_pid(p, PIDTYPE_SID);
2573 			__this_cpu_inc(process_counts);
2574 		} else {
2575 			current->signal->nr_threads++;
2576 			current->signal->quick_threads++;
2577 			atomic_inc(&current->signal->live);
2578 			refcount_inc(&current->signal->sigcnt);
2579 			task_join_group_stop(p);
2580 			list_add_tail_rcu(&p->thread_node,
2581 					  &p->signal->thread_head);
2582 		}
2583 		attach_pid(p, PIDTYPE_PID);
2584 		nr_threads++;
2585 	}
2586 	total_forks++;
2587 	hlist_del_init(&delayed.node);
2588 	spin_unlock(&current->sighand->siglock);
2589 	syscall_tracepoint_update(p);
2590 	write_unlock_irq(&tasklist_lock);
2591 
2592 	if (pidfile)
2593 		fd_install(pidfd, pidfile);
2594 
2595 	proc_fork_connector(p);
2596 	sched_post_fork(p);
2597 	cgroup_post_fork(p, args);
2598 	perf_event_fork(p);
2599 
2600 	trace_task_newtask(p, clone_flags);
2601 	uprobe_copy_process(p, clone_flags);
2602 	user_events_fork(p, clone_flags);
2603 
2604 	copy_oom_score_adj(clone_flags, p);
2605 
2606 	return p;
2607 
2608 bad_fork_cancel_cgroup:
2609 	sched_core_free(p);
2610 	spin_unlock(&current->sighand->siglock);
2611 	write_unlock_irq(&tasklist_lock);
2612 	cgroup_cancel_fork(p, args);
2613 bad_fork_put_pidfd:
2614 	if (clone_flags & CLONE_PIDFD) {
2615 		fput(pidfile);
2616 		put_unused_fd(pidfd);
2617 	}
2618 bad_fork_free_pid:
2619 	if (pid != &init_struct_pid)
2620 		free_pid(pid);
2621 bad_fork_cleanup_thread:
2622 	exit_thread(p);
2623 bad_fork_cleanup_io:
2624 	if (p->io_context)
2625 		exit_io_context(p);
2626 bad_fork_cleanup_namespaces:
2627 	exit_task_namespaces(p);
2628 bad_fork_cleanup_mm:
2629 	if (p->mm) {
2630 		mm_clear_owner(p->mm, p);
2631 		mmput(p->mm);
2632 	}
2633 bad_fork_cleanup_signal:
2634 	if (!(clone_flags & CLONE_THREAD))
2635 		free_signal_struct(p->signal);
2636 bad_fork_cleanup_sighand:
2637 	__cleanup_sighand(p->sighand);
2638 bad_fork_cleanup_fs:
2639 	exit_fs(p); /* blocking */
2640 bad_fork_cleanup_files:
2641 	exit_files(p); /* blocking */
2642 bad_fork_cleanup_semundo:
2643 	exit_sem(p);
2644 bad_fork_cleanup_security:
2645 	security_task_free(p);
2646 bad_fork_cleanup_audit:
2647 	audit_free(p);
2648 bad_fork_cleanup_perf:
2649 	perf_event_free_task(p);
2650 bad_fork_cleanup_policy:
2651 	lockdep_free_task(p);
2652 #ifdef CONFIG_NUMA
2653 	mpol_put(p->mempolicy);
2654 #endif
2655 bad_fork_cleanup_delayacct:
2656 	delayacct_tsk_free(p);
2657 bad_fork_cleanup_count:
2658 	dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
2659 	exit_creds(p);
2660 bad_fork_free:
2661 	WRITE_ONCE(p->__state, TASK_DEAD);
2662 	exit_task_stack_account(p);
2663 	put_task_stack(p);
2664 	delayed_free_task(p);
2665 fork_out:
2666 	spin_lock_irq(&current->sighand->siglock);
2667 	hlist_del_init(&delayed.node);
2668 	spin_unlock_irq(&current->sighand->siglock);
2669 	return ERR_PTR(retval);
2670 }
2671 
2672 static inline void init_idle_pids(struct task_struct *idle)
2673 {
2674 	enum pid_type type;
2675 
2676 	for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
2677 		INIT_HLIST_NODE(&idle->pid_links[type]); /* not really needed */
2678 		init_task_pid(idle, type, &init_struct_pid);
2679 	}
2680 }
2681 
2682 static int idle_dummy(void *dummy)
2683 {
2684 	/* This function is never called */
2685 	return 0;
2686 }
2687 
2688 struct task_struct * __init fork_idle(int cpu)
2689 {
2690 	struct task_struct *task;
2691 struct kernel_clone_args args = { 2692 .flags = CLONE_VM, 2693 .fn = &idle_dummy, 2694 .fn_arg = NULL, 2695 .kthread = 1, 2696 .idle = 1, 2697 }; 2698 2699 task = copy_process(&init_struct_pid, 0, cpu_to_node(cpu), &args); 2700 if (!IS_ERR(task)) { 2701 init_idle_pids(task); 2702 init_idle(task, cpu); 2703 } 2704 2705 return task; 2706 } 2707 2708 /* 2709 * This is like kernel_clone(), but shaved down and tailored to just 2710 * creating io_uring workers. It returns a created task, or an error pointer. 2711 * The returned task is inactive, and the caller must fire it up through 2712 * wake_up_new_task(p). All signals are blocked in the created task. 2713 */ 2714 struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node) 2715 { 2716 unsigned long flags = CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD| 2717 CLONE_IO; 2718 struct kernel_clone_args args = { 2719 .flags = ((lower_32_bits(flags) | CLONE_VM | 2720 CLONE_UNTRACED) & ~CSIGNAL), 2721 .exit_signal = (lower_32_bits(flags) & CSIGNAL), 2722 .fn = fn, 2723 .fn_arg = arg, 2724 .io_thread = 1, 2725 .user_worker = 1, 2726 }; 2727 2728 return copy_process(NULL, 0, node, &args); 2729 } 2730 2731 /* 2732 * Ok, this is the main fork-routine. 2733 * 2734 * It copies the process, and if successful kick-starts 2735 * it and waits for it to finish using the VM if required. 2736 * 2737 * args->exit_signal is expected to be checked for sanity by the caller. 2738 */ 2739 pid_t kernel_clone(struct kernel_clone_args *args) 2740 { 2741 u64 clone_flags = args->flags; 2742 struct completion vfork; 2743 struct pid *pid; 2744 struct task_struct *p; 2745 int trace = 0; 2746 pid_t nr; 2747 2748 /* 2749 * For legacy clone() calls, CLONE_PIDFD uses the parent_tid argument 2750 * to return the pidfd. Hence, CLONE_PIDFD and CLONE_PARENT_SETTID are 2751 * mutually exclusive. With clone3() CLONE_PIDFD has grown a separate 2752 * field in struct clone_args and it still doesn't make sense to have 2753 * them both point at the same memory location. Performing this check 2754 * here has the advantage that we don't need to have a separate helper 2755 * to check for legacy clone(). 2756 */ 2757 if ((clone_flags & CLONE_PIDFD) && 2758 (clone_flags & CLONE_PARENT_SETTID) && 2759 (args->pidfd == args->parent_tid)) 2760 return -EINVAL; 2761 2762 /* 2763 * Determine whether and which event to report to ptracer. When 2764 * called from kernel_thread or CLONE_UNTRACED is explicitly 2765 * requested, no event is reported; otherwise, report if the event 2766 * for the type of forking is enabled. 2767 */ 2768 if (!(clone_flags & CLONE_UNTRACED)) { 2769 if (clone_flags & CLONE_VFORK) 2770 trace = PTRACE_EVENT_VFORK; 2771 else if (args->exit_signal != SIGCHLD) 2772 trace = PTRACE_EVENT_CLONE; 2773 else 2774 trace = PTRACE_EVENT_FORK; 2775 2776 if (likely(!ptrace_event_enabled(current, trace))) 2777 trace = 0; 2778 } 2779 2780 p = copy_process(NULL, trace, NUMA_NO_NODE, args); 2781 add_latent_entropy(); 2782 2783 if (IS_ERR(p)) 2784 return PTR_ERR(p); 2785 2786 /* 2787 * Do this prior waking up the new thread - the thread pointer 2788 * might get invalid after that point, if the thread exits quickly. 
2789 */ 2790 trace_sched_process_fork(current, p); 2791 2792 pid = get_task_pid(p, PIDTYPE_PID); 2793 nr = pid_vnr(pid); 2794 2795 if (clone_flags & CLONE_PARENT_SETTID) 2796 put_user(nr, args->parent_tid); 2797 2798 if (clone_flags & CLONE_VFORK) { 2799 p->vfork_done = &vfork; 2800 init_completion(&vfork); 2801 get_task_struct(p); 2802 } 2803 2804 if (IS_ENABLED(CONFIG_LRU_GEN_WALKS_MMU) && !(clone_flags & CLONE_VM)) { 2805 /* lock the task to synchronize with memcg migration */ 2806 task_lock(p); 2807 lru_gen_add_mm(p->mm); 2808 task_unlock(p); 2809 } 2810 2811 wake_up_new_task(p); 2812 2813 /* forking complete and child started to run, tell ptracer */ 2814 if (unlikely(trace)) 2815 ptrace_event_pid(trace, pid); 2816 2817 if (clone_flags & CLONE_VFORK) { 2818 if (!wait_for_vfork_done(p, &vfork)) 2819 ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid); 2820 } 2821 2822 put_pid(pid); 2823 return nr; 2824 } 2825 2826 /* 2827 * Create a kernel thread. 2828 */ 2829 pid_t kernel_thread(int (*fn)(void *), void *arg, const char *name, 2830 unsigned long flags) 2831 { 2832 struct kernel_clone_args args = { 2833 .flags = ((lower_32_bits(flags) | CLONE_VM | 2834 CLONE_UNTRACED) & ~CSIGNAL), 2835 .exit_signal = (lower_32_bits(flags) & CSIGNAL), 2836 .fn = fn, 2837 .fn_arg = arg, 2838 .name = name, 2839 .kthread = 1, 2840 }; 2841 2842 return kernel_clone(&args); 2843 } 2844 2845 /* 2846 * Create a user mode thread. 2847 */ 2848 pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags) 2849 { 2850 struct kernel_clone_args args = { 2851 .flags = ((lower_32_bits(flags) | CLONE_VM | 2852 CLONE_UNTRACED) & ~CSIGNAL), 2853 .exit_signal = (lower_32_bits(flags) & CSIGNAL), 2854 .fn = fn, 2855 .fn_arg = arg, 2856 }; 2857 2858 return kernel_clone(&args); 2859 } 2860 2861 #ifdef __ARCH_WANT_SYS_FORK 2862 SYSCALL_DEFINE0(fork) 2863 { 2864 #ifdef CONFIG_MMU 2865 struct kernel_clone_args args = { 2866 .exit_signal = SIGCHLD, 2867 }; 2868 2869 return kernel_clone(&args); 2870 #else 2871 /* can not support in nommu mode */ 2872 return -EINVAL; 2873 #endif 2874 } 2875 #endif 2876 2877 #ifdef __ARCH_WANT_SYS_VFORK 2878 SYSCALL_DEFINE0(vfork) 2879 { 2880 struct kernel_clone_args args = { 2881 .flags = CLONE_VFORK | CLONE_VM, 2882 .exit_signal = SIGCHLD, 2883 }; 2884 2885 return kernel_clone(&args); 2886 } 2887 #endif 2888 2889 #ifdef __ARCH_WANT_SYS_CLONE 2890 #ifdef CONFIG_CLONE_BACKWARDS 2891 SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, 2892 int __user *, parent_tidptr, 2893 unsigned long, tls, 2894 int __user *, child_tidptr) 2895 #elif defined(CONFIG_CLONE_BACKWARDS2) 2896 SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags, 2897 int __user *, parent_tidptr, 2898 int __user *, child_tidptr, 2899 unsigned long, tls) 2900 #elif defined(CONFIG_CLONE_BACKWARDS3) 2901 SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp, 2902 int, stack_size, 2903 int __user *, parent_tidptr, 2904 int __user *, child_tidptr, 2905 unsigned long, tls) 2906 #else 2907 SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, 2908 int __user *, parent_tidptr, 2909 int __user *, child_tidptr, 2910 unsigned long, tls) 2911 #endif 2912 { 2913 struct kernel_clone_args args = { 2914 .flags = (lower_32_bits(clone_flags) & ~CSIGNAL), 2915 .pidfd = parent_tidptr, 2916 .child_tid = child_tidptr, 2917 .parent_tid = parent_tidptr, 2918 .exit_signal = (lower_32_bits(clone_flags) & CSIGNAL), 2919 .stack = newsp, 2920 .tls = tls, 2921 }; 2922 2923 return 
kernel_clone(&args); 2924 } 2925 #endif 2926 2927 noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs, 2928 struct clone_args __user *uargs, 2929 size_t usize) 2930 { 2931 int err; 2932 struct clone_args args; 2933 pid_t *kset_tid = kargs->set_tid; 2934 2935 BUILD_BUG_ON(offsetofend(struct clone_args, tls) != 2936 CLONE_ARGS_SIZE_VER0); 2937 BUILD_BUG_ON(offsetofend(struct clone_args, set_tid_size) != 2938 CLONE_ARGS_SIZE_VER1); 2939 BUILD_BUG_ON(offsetofend(struct clone_args, cgroup) != 2940 CLONE_ARGS_SIZE_VER2); 2941 BUILD_BUG_ON(sizeof(struct clone_args) != CLONE_ARGS_SIZE_VER2); 2942 2943 if (unlikely(usize > PAGE_SIZE)) 2944 return -E2BIG; 2945 if (unlikely(usize < CLONE_ARGS_SIZE_VER0)) 2946 return -EINVAL; 2947 2948 err = copy_struct_from_user(&args, sizeof(args), uargs, usize); 2949 if (err) 2950 return err; 2951 2952 if (unlikely(args.set_tid_size > MAX_PID_NS_LEVEL)) 2953 return -EINVAL; 2954 2955 if (unlikely(!args.set_tid && args.set_tid_size > 0)) 2956 return -EINVAL; 2957 2958 if (unlikely(args.set_tid && args.set_tid_size == 0)) 2959 return -EINVAL; 2960 2961 /* 2962 * Verify that higher 32bits of exit_signal are unset and that 2963 * it is a valid signal 2964 */ 2965 if (unlikely((args.exit_signal & ~((u64)CSIGNAL)) || 2966 !valid_signal(args.exit_signal))) 2967 return -EINVAL; 2968 2969 if ((args.flags & CLONE_INTO_CGROUP) && 2970 (args.cgroup > INT_MAX || usize < CLONE_ARGS_SIZE_VER2)) 2971 return -EINVAL; 2972 2973 *kargs = (struct kernel_clone_args){ 2974 .flags = args.flags, 2975 .pidfd = u64_to_user_ptr(args.pidfd), 2976 .child_tid = u64_to_user_ptr(args.child_tid), 2977 .parent_tid = u64_to_user_ptr(args.parent_tid), 2978 .exit_signal = args.exit_signal, 2979 .stack = args.stack, 2980 .stack_size = args.stack_size, 2981 .tls = args.tls, 2982 .set_tid_size = args.set_tid_size, 2983 .cgroup = args.cgroup, 2984 }; 2985 2986 if (args.set_tid && 2987 copy_from_user(kset_tid, u64_to_user_ptr(args.set_tid), 2988 (kargs->set_tid_size * sizeof(pid_t)))) 2989 return -EFAULT; 2990 2991 kargs->set_tid = kset_tid; 2992 2993 return 0; 2994 } 2995 2996 /** 2997 * clone3_stack_valid - check and prepare stack 2998 * @kargs: kernel clone args 2999 * 3000 * Verify that the stack arguments userspace gave us are sane. 3001 * In addition, set the stack direction for userspace since it's easy for us to 3002 * determine. 3003 */ 3004 static inline bool clone3_stack_valid(struct kernel_clone_args *kargs) 3005 { 3006 if (kargs->stack == 0) { 3007 if (kargs->stack_size > 0) 3008 return false; 3009 } else { 3010 if (kargs->stack_size == 0) 3011 return false; 3012 3013 if (!access_ok((void __user *)kargs->stack, kargs->stack_size)) 3014 return false; 3015 3016 #if !defined(CONFIG_STACK_GROWSUP) 3017 kargs->stack += kargs->stack_size; 3018 #endif 3019 } 3020 3021 return true; 3022 } 3023 3024 static bool clone3_args_valid(struct kernel_clone_args *kargs) 3025 { 3026 /* Verify that no unknown flags are passed along. 
*/ 3027 if (kargs->flags & 3028 ~(CLONE_LEGACY_FLAGS | CLONE_CLEAR_SIGHAND | CLONE_INTO_CGROUP)) 3029 return false; 3030 3031 /* 3032 * - make the CLONE_DETACHED bit reusable for clone3 3033 * - make the CSIGNAL bits reusable for clone3 3034 */ 3035 if (kargs->flags & (CLONE_DETACHED | (CSIGNAL & (~CLONE_NEWTIME)))) 3036 return false; 3037 3038 if ((kargs->flags & (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) == 3039 (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) 3040 return false; 3041 3042 if ((kargs->flags & (CLONE_THREAD | CLONE_PARENT)) && 3043 kargs->exit_signal) 3044 return false; 3045 3046 if (!clone3_stack_valid(kargs)) 3047 return false; 3048 3049 return true; 3050 } 3051 3052 /** 3053 * sys_clone3 - create a new process with specific properties 3054 * @uargs: argument structure 3055 * @size: size of @uargs 3056 * 3057 * clone3() is the extensible successor to clone()/clone2(). 3058 * It takes a struct as argument that is versioned by its size. 3059 * 3060 * Return: On success, a positive PID for the child process. 3061 * On error, a negative errno number. 3062 */ 3063 SYSCALL_DEFINE2(clone3, struct clone_args __user *, uargs, size_t, size) 3064 { 3065 int err; 3066 3067 struct kernel_clone_args kargs; 3068 pid_t set_tid[MAX_PID_NS_LEVEL]; 3069 3070 #ifdef __ARCH_BROKEN_SYS_CLONE3 3071 #warning clone3() entry point is missing, please fix 3072 return -ENOSYS; 3073 #endif 3074 3075 kargs.set_tid = set_tid; 3076 3077 err = copy_clone_args_from_user(&kargs, uargs, size); 3078 if (err) 3079 return err; 3080 3081 if (!clone3_args_valid(&kargs)) 3082 return -EINVAL; 3083 3084 return kernel_clone(&kargs); 3085 } 3086 3087 void walk_process_tree(struct task_struct *top, proc_visitor visitor, void *data) 3088 { 3089 struct task_struct *leader, *parent, *child; 3090 int res; 3091 3092 read_lock(&tasklist_lock); 3093 leader = top = top->group_leader; 3094 down: 3095 for_each_thread(leader, parent) { 3096 list_for_each_entry(child, &parent->children, sibling) { 3097 res = visitor(child, data); 3098 if (res) { 3099 if (res < 0) 3100 goto out; 3101 leader = child; 3102 goto down; 3103 } 3104 up: 3105 ; 3106 } 3107 } 3108 3109 if (leader != top) { 3110 child = leader; 3111 parent = child->real_parent; 3112 leader = parent->group_leader; 3113 goto up; 3114 } 3115 out: 3116 read_unlock(&tasklist_lock); 3117 } 3118 3119 #ifndef ARCH_MIN_MMSTRUCT_ALIGN 3120 #define ARCH_MIN_MMSTRUCT_ALIGN 0 3121 #endif 3122 3123 static void sighand_ctor(void *data) 3124 { 3125 struct sighand_struct *sighand = data; 3126 3127 spin_lock_init(&sighand->siglock); 3128 init_waitqueue_head(&sighand->signalfd_wqh); 3129 } 3130 3131 void __init mm_cache_init(void) 3132 { 3133 unsigned int mm_size; 3134 3135 /* 3136 * The mm_cpumask is located at the end of mm_struct, and is 3137 * dynamically sized based on the maximum CPU number this system 3138 * can have, taking hotplug into account (nr_cpu_ids). 
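	 *
	 * Schematically (illustrative only), each object in this cache is
	 * laid out as
	 *
	 *	[ struct mm_struct | cpumask bits | mm_cid area ]
	 *
	 * which is why the size computed below is
	 * sizeof(struct mm_struct) + cpumask_size() + mm_cid_size().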
3139 	 */
3140 	mm_size = sizeof(struct mm_struct) + cpumask_size() + mm_cid_size();
3141 
3142 	mm_cachep = kmem_cache_create_usercopy("mm_struct",
3143 			mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
3144 			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
3145 			offsetof(struct mm_struct, saved_auxv),
3146 			sizeof_field(struct mm_struct, saved_auxv),
3147 			NULL);
3148 }
3149 
3150 void __init proc_caches_init(void)
3151 {
3152 	sighand_cachep = kmem_cache_create("sighand_cache",
3153 			sizeof(struct sighand_struct), 0,
3154 			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
3155 			SLAB_ACCOUNT, sighand_ctor);
3156 	signal_cachep = kmem_cache_create("signal_cache",
3157 			sizeof(struct signal_struct), 0,
3158 			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
3159 			NULL);
3160 	files_cachep = kmem_cache_create("files_cache",
3161 			sizeof(struct files_struct), 0,
3162 			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
3163 			NULL);
3164 	fs_cachep = kmem_cache_create("fs_cache",
3165 			sizeof(struct fs_struct), 0,
3166 			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
3167 			NULL);
3168 
3169 	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
3170 #ifdef CONFIG_PER_VMA_LOCK
3171 	vma_lock_cachep = KMEM_CACHE(vma_lock, SLAB_PANIC|SLAB_ACCOUNT);
3172 #endif
3173 	mmap_init();
3174 	nsproxy_cache_init();
3175 }
3176 
3177 /*
3178  * Check constraints on flags passed to the unshare system call.
3179  */
3180 static int check_unshare_flags(unsigned long unshare_flags)
3181 {
3182 	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
3183 				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
3184 				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
3185 				CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP|
3186 				CLONE_NEWTIME))
3187 		return -EINVAL;
3188 	/*
3189 	 * Not implemented, but pretend it works if there is nothing
3190 	 * to unshare. Note that unsharing the address space or the
3191 	 * signal handlers also needs to unshare the signal queues (aka
3192 	 * CLONE_THREAD).
3193 	 */
3194 	if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
3195 		if (!thread_group_empty(current))
3196 			return -EINVAL;
3197 	}
3198 	if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
3199 		if (refcount_read(&current->sighand->count) > 1)
3200 			return -EINVAL;
3201 	}
3202 	if (unshare_flags & CLONE_VM) {
3203 		if (!current_is_single_threaded())
3204 			return -EINVAL;
3205 	}
3206 
3207 	return 0;
3208 }
3209 
3210 /*
3211  * Unshare the filesystem structure if it is being shared
3212  */
3213 static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
3214 {
3215 	struct fs_struct *fs = current->fs;
3216 
3217 	if (!(unshare_flags & CLONE_FS) || !fs)
3218 		return 0;
3219 
3220 	/* don't need lock here; in the worst case we'll do useless copy */
3221 	if (fs->users == 1)
3222 		return 0;
3223 
3224 	*new_fsp = copy_fs_struct(fs);
3225 	if (!*new_fsp)
3226 		return -ENOMEM;
3227 
3228 	return 0;
3229 }
3230 
3231 /*
3232  * Unshare file descriptor table if it is being shared
3233  */
3234 int unshare_fd(unsigned long unshare_flags, unsigned int max_fds,
3235 	       struct files_struct **new_fdp)
3236 {
3237 	struct files_struct *fd = current->files;
3238 	int error = 0;
3239 
3240 	if ((unshare_flags & CLONE_FILES) &&
3241 	    (fd && atomic_read(&fd->count) > 1)) {
3242 		*new_fdp = dup_fd(fd, max_fds, &error);
3243 		if (!*new_fdp)
3244 			return error;
3245 	}
3246 
3247 	return 0;
3248 }
3249 
3250 /*
3251  * unshare allows a process to 'unshare' part of the process
3252  * context which was originally shared using clone.  copy_*
3253  * functions used by kernel_clone() cannot be used here directly
3254  * because they modify an inactive task_struct that is being
3255  * constructed. Here we are modifying the current, active,
3256  * task_struct.
3257  */
3258 int ksys_unshare(unsigned long unshare_flags)
3259 {
3260 	struct fs_struct *fs, *new_fs = NULL;
3261 	struct files_struct *new_fd = NULL;
3262 	struct cred *new_cred = NULL;
3263 	struct nsproxy *new_nsproxy = NULL;
3264 	int do_sysvsem = 0;
3265 	int err;
3266 
3267 	/*
3268 	 * If unsharing a user namespace, must also unshare the thread group
3269 	 * and unshare the filesystem root and working directories.
3270 	 */
3271 	if (unshare_flags & CLONE_NEWUSER)
3272 		unshare_flags |= CLONE_THREAD | CLONE_FS;
3273 	/*
3274 	 * If unsharing vm, must also unshare signal handlers.
3275 	 */
3276 	if (unshare_flags & CLONE_VM)
3277 		unshare_flags |= CLONE_SIGHAND;
3278 	/*
3279 	 * If unsharing signal handlers, must also unshare the signal queues.
3280 	 */
3281 	if (unshare_flags & CLONE_SIGHAND)
3282 		unshare_flags |= CLONE_THREAD;
3283 	/*
3284 	 * If unsharing namespace, must also unshare filesystem information.
3285 	 */
3286 	if (unshare_flags & CLONE_NEWNS)
3287 		unshare_flags |= CLONE_FS;
3288 
3289 	err = check_unshare_flags(unshare_flags);
3290 	if (err)
3291 		goto bad_unshare_out;
3292 	/*
3293 	 * CLONE_NEWIPC must also detach from the undolist: after switching
3294 	 * to a new ipc namespace, the semaphore arrays from the old
3295 	 * namespace are unreachable.
3296 	 */
3297 	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
3298 		do_sysvsem = 1;
3299 	err = unshare_fs(unshare_flags, &new_fs);
3300 	if (err)
3301 		goto bad_unshare_out;
3302 	err = unshare_fd(unshare_flags, NR_OPEN_MAX, &new_fd);
3303 	if (err)
3304 		goto bad_unshare_cleanup_fs;
3305 	err = unshare_userns(unshare_flags, &new_cred);
3306 	if (err)
3307 		goto bad_unshare_cleanup_fd;
3308 	err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
3309 					 new_cred, new_fs);
3310 	if (err)
3311 		goto bad_unshare_cleanup_cred;
3312 
3313 	if (new_cred) {
3314 		err = set_cred_ucounts(new_cred);
3315 		if (err)
3316 			goto bad_unshare_cleanup_cred;
3317 	}
3318 
3319 	if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
3320 		if (do_sysvsem) {
3321 			/*
3322 			 * CLONE_SYSVSEM is equivalent to sys_exit().
3323 			 */
3324 			exit_sem(current);
3325 		}
3326 		if (unshare_flags & CLONE_NEWIPC) {
3327 			/* Orphan segments in old ns (see sem above). */
3328 			exit_shm(current);
3329 			shm_init_task(current);
3330 		}
3331 
3332 		if (new_nsproxy)
3333 			switch_task_namespaces(current, new_nsproxy);
3334 
3335 		task_lock(current);
3336 
3337 		if (new_fs) {
3338 			fs = current->fs;
3339 			spin_lock(&fs->lock);
3340 			current->fs = new_fs;
3341 			if (--fs->users)
3342 				new_fs = NULL;
3343 			else
3344 				new_fs = fs;
3345 			spin_unlock(&fs->lock);
3346 		}
3347 
3348 		if (new_fd)
3349 			swap(current->files, new_fd);
3350 
3351 		task_unlock(current);
3352 
3353 		if (new_cred) {
3354 			/* Install the new user namespace */
3355 			commit_creds(new_cred);
3356 			new_cred = NULL;
3357 		}
3358 	}
3359 
3360 	perf_event_namespaces(current);
3361 
3362 bad_unshare_cleanup_cred:
3363 	if (new_cred)
3364 		put_cred(new_cred);
3365 bad_unshare_cleanup_fd:
3366 	if (new_fd)
3367 		put_files_struct(new_fd);
3368 
3369 bad_unshare_cleanup_fs:
3370 	if (new_fs)
3371 		free_fs_struct(new_fs);
3372 
3373 bad_unshare_out:
3374 	return err;
3375 }
3376 
3377 SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
3378 {
3379 	return ksys_unshare(unshare_flags);
3380 }
3381 
3382 /*
3383  * Helper to unshare the files of the current task.
3384  * We don't want to expose copy_files internals to
3385  * the exec layer of the kernel.
3386  */
3387 
3388 int unshare_files(void)
3389 {
3390 	struct task_struct *task = current;
3391 	struct files_struct *old, *copy = NULL;
3392 	int error;
3393 
3394 	error = unshare_fd(CLONE_FILES, NR_OPEN_MAX, &copy);
3395 	if (error || !copy)
3396 		return error;
3397 
3398 	old = task->files;
3399 	task_lock(task);
3400 	task->files = copy;
3401 	task_unlock(task);
3402 	put_files_struct(old);
3403 	return 0;
3404 }
3405 
3406 int sysctl_max_threads(struct ctl_table *table, int write,
3407 		       void *buffer, size_t *lenp, loff_t *ppos)
3408 {
3409 	struct ctl_table t;
3410 	int ret;
3411 	int threads = max_threads;
3412 	int min = 1;
3413 	int max = MAX_THREADS;
3414 
3415 	t = *table;
3416 	t.data = &threads;
3417 	t.extra1 = &min;
3418 	t.extra2 = &max;
3419 
3420 	ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
3421 	if (ret || !write)
3422 		return ret;
3423 
3424 	max_threads = threads;
3425 
3426 	return 0;
3427 }
3428 
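/*
 * Illustrative only: the handler above is what backs the kernel.threads-max
 * sysctl. A userspace sketch of poking it (error handling elided, procfs
 * assumed to be mounted at /proc):
 *
 *	int fd = open("/proc/sys/kernel/threads-max", O_RDWR);
 *	dprintf(fd, "%d\n", 4096);
 *
 * A write inside [1, MAX_THREADS] updates max_threads; anything outside
 * that range is rejected with EINVAL by proc_dointvec_minmax().
 */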