/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/seccomp.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/blkdev.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
#include <linux/oom.h>
#include <linux/khugepaged.h>
#include <linux/signalfd.h>
#include <linux/uprobes.h>
#include <linux/aio.h>
#include <linux/compiler.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <trace/events/sched.h>

#define CREATE_TRACE_POINTS
#include <trace/events/task.h>

/*
 * Counters protected by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count.. */
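/*
 * Note: max_threads below is also runtime-tunable; it backs the
 * kernel.threads-max sysctl (/proc/sys/kernel/threads-max), see
 * kernel/sysctl.c.
 */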
int max_threads;		/* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
	return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */

int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_possible_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}

void __weak arch_release_task_struct(struct task_struct *tsk)
{
}

#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
static struct kmem_cache *task_struct_cachep;

static inline struct task_struct *alloc_task_struct_node(int node)
{
	return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
}

static inline void free_task_struct(struct task_struct *tsk)
{
	kmem_cache_free(task_struct_cachep, tsk);
}
#endif

void __weak arch_release_thread_info(struct thread_info *ti)
{
}

#ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR

/*
 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
 * kmemcache based allocator.
 */
# if THREAD_SIZE >= PAGE_SIZE
static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
						  int node)
{
	struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP,
						  THREAD_SIZE_ORDER);

	return page ? page_address(page) : NULL;
}

static inline void free_thread_info(struct thread_info *ti)
{
	free_kmem_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
# else
static struct kmem_cache *thread_info_cache;

static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
						  int node)
{
	return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node);
}

static void free_thread_info(struct thread_info *ti)
{
	kmem_cache_free(thread_info_cache, ti);
}

void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, 0, NULL);
	BUG_ON(thread_info_cache == NULL);
}
# endif
#endif
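/*
 * For a sense of scale (assuming x86-64 defaults of this era): with
 * 4 KiB pages and THREAD_SIZE = 16 KiB (THREAD_SIZE_ORDER == 2), the
 * page-allocator branch above is taken.  The kmem_cache branch only
 * matters where THREAD_SIZE is smaller than a page, e.g. on
 * configurations with 64 KiB pages.
 */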
/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;

static void account_kernel_stack(struct thread_info *ti, int account)
{
	struct zone *zone = page_zone(virt_to_page(ti));

	mod_zone_page_state(zone, NR_KERNEL_STACK, account);
}

void free_task(struct task_struct *tsk)
{
	account_kernel_stack(tsk->stack, -1);
	arch_release_thread_info(tsk->stack);
	free_thread_info(tsk->stack);
	rt_mutex_debug_task_free(tsk);
	ftrace_graph_exit_task(tsk);
	put_seccomp_filter(tsk);
	arch_release_task_struct(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

static inline void free_signal_struct(struct signal_struct *sig)
{
	taskstats_tgid_free(sig);
	sched_autogroup_exit(sig);
	kmem_cache_free(signal_cachep, sig);
}

static inline void put_signal_struct(struct signal_struct *sig)
{
	if (atomic_dec_and_test(&sig->sigcnt))
		free_signal_struct(sig);
}

void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!tsk->exit_state);
	WARN_ON(atomic_read(&tsk->usage));
	WARN_ON(tsk == current);

	task_numa_free(tsk);
	security_task_free(tsk);
	exit_creds(tsk);
	delayacct_tsk_free(tsk);
	put_signal_struct(tsk->signal);

	if (!profile_handoff_task(tsk))
		free_task(tsk);
}
EXPORT_SYMBOL_GPL(__put_task_struct);

void __init __weak arch_task_cache_init(void) { }

void __init fork_init(unsigned long mempages)
{
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	L1_CACHE_BYTES
#endif
	/* create a slab on which task_structs can be allocated */
	task_struct_cachep =
		kmem_cache_create("task_struct", sizeof(struct task_struct),
			ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
#endif

	/* do the arch specific task caches init */
	arch_task_cache_init();

	/*
	 * The default maximum number of threads is set to a safe
	 * value: the thread structures can take up at most half
	 * of memory.
	 */
	max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);

	/*
	 * we need to allow at least 20 threads to boot a system
	 */
	if (max_threads < 20)
		max_threads = 20;

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];
}
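/*
 * A worked example of the max_threads formula above, assuming 4 KiB
 * pages and 16 KiB kernel stacks: with 4 GiB of RAM, mempages = 2^20
 * and 8 * THREAD_SIZE / PAGE_SIZE = 32, so max_threads = 2^20 / 32 =
 * 32768, capping the memory consumed by kernel stacks at
 * 32768 * 16 KiB = 512 MiB.
 */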
int __weak arch_dup_task_struct(struct task_struct *dst,
				struct task_struct *src)
{
	*dst = *src;
	return 0;
}

static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;
	unsigned long *stackend;
	int node = tsk_fork_get_node(orig);
	int err;

	tsk = alloc_task_struct_node(node);
	if (!tsk)
		return NULL;

	ti = alloc_thread_info_node(tsk, node);
	if (!ti)
		goto free_tsk;

	err = arch_dup_task_struct(tsk, orig);
	if (err)
		goto free_ti;

	tsk->stack = ti;
#ifdef CONFIG_SECCOMP
	/*
	 * We must handle setting up seccomp filters once we're under
	 * the sighand lock in case orig has changed between now and
	 * then. Until then, filter must be NULL to avoid messing up
	 * the usage counts on the error path calling free_task.
	 */
	tsk->seccomp.filter = NULL;
#endif

	setup_thread_stack(tsk, orig);
	clear_user_return_notifier(tsk);
	clear_tsk_need_resched(tsk);
	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for overflow detection */

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_int();
#endif

	/*
	 * One for us, one for whoever does the "release_task()" (usually
	 * parent)
	 */
	atomic_set(&tsk->usage, 2);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;
	tsk->task_frag.page = NULL;

	account_kernel_stack(ti, 1);

	return tsk;

free_ti:
	free_thread_info(ti);
free_tsk:
	free_task_struct(tsk);
	return NULL;
}
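/*
 * The STACK_END_MAGIC canary written by dup_task_struct() above is a
 * detection aid rather than protection: debug code (for example a
 * scheduler stack-end check) can compare the word at end_of_stack()
 * against it to spot a kernel stack overrun after the fact.
 */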
#ifdef CONFIG_MMU
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;

	uprobe_start_dup_mmap();
	down_write(&oldmm->mmap_sem);
	flush_cache_dup_mm(oldmm);
	uprobe_dup_mmap(oldmm, mm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

	mm->total_vm = oldmm->total_vm;
	mm->shared_vm = oldmm->shared_vm;
	mm->exec_vm = oldmm->exec_vm;
	mm->stack_vm = oldmm->stack_vm;

	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;
	retval = ksm_fork(mm, oldmm);
	if (retval)
		goto out;
	retval = khugepaged_fork(mm, oldmm);
	if (retval)
		goto out;

	prev = NULL;
	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
							-vma_pages(mpnt));
			continue;
		}
		charge = 0;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned long len = vma_pages(mpnt);

			if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		INIT_LIST_HEAD(&tmp->anon_vma_chain);
		retval = vma_dup_policy(mpnt, tmp);
		if (retval)
			goto fail_nomem_policy;
		tmp->vm_mm = mm;
		if (anon_vma_fork(tmp, mpnt))
			goto fail_nomem_anon_vma_fork;
		tmp->vm_flags &= ~VM_LOCKED;
		tmp->vm_next = tmp->vm_prev = NULL;
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file_inode(file);
			struct address_space *mapping = file->f_mapping;

			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);
			mutex_lock(&mapping->i_mmap_mutex);
			if (tmp->vm_flags & VM_SHARED)
				atomic_inc(&mapping->i_mmap_writable);
			flush_dcache_mmap_lock(mapping);
			/* insert tmp into the share list, just after mpnt */
			if (unlikely(tmp->vm_flags & VM_NONLINEAR))
				vma_nonlinear_insert(tmp,
						&mapping->i_mmap_nonlinear);
			else
				vma_interval_tree_insert_after(tmp, mpnt,
							&mapping->i_mmap);
			flush_dcache_mmap_unlock(mapping);
			mutex_unlock(&mapping->i_mmap_mutex);
		}

		/*
		 * Clear hugetlb-related page reserves for children. This only
		 * affects MAP_PRIVATE mappings. Faults generated by the child
		 * are not guaranteed to succeed, even if read-only
		 */
		if (is_vm_hugetlb_page(tmp))
			reset_vma_resv_huge_pages(tmp);

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;
		tmp->vm_prev = prev;
		prev = tmp;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		retval = copy_page_range(mm, oldmm, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
	/* a new mm has just been created */
	arch_dup_mmap(oldmm, mm);
	retval = 0;
out:
	up_write(&mm->mmap_sem);
	flush_tlb_mm(oldmm);
	up_write(&oldmm->mmap_sem);
	uprobe_end_dup_mmap();
	return retval;
fail_nomem_anon_vma_fork:
	mpol_put(vma_policy(tmp));
fail_nomem_policy:
	kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto out;
}
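/*
 * Note on the VM_DONTCOPY case in dup_mmap() above: this is how
 * madvise(addr, len, MADV_DONTFORK) takes effect.  Such VMAs are
 * skipped entirely, so the child is simply left with a hole where the
 * parent's mapping was.
 */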
static inline int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm, mm->pgd);
}
#else
#define dup_mmap(mm, oldmm)	(0)
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;

static int __init coredump_filter_setup(char *s)
{
	default_dump_filter =
		(simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
		MMF_DUMP_FILTER_MASK;
	return 1;
}

__setup("coredump_filter=", coredump_filter_setup);
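/*
 * Example: booting with "coredump_filter=0x7" seeds the filter with
 * bits 0-2 (anonymous private, anonymous shared and file-backed
 * private mappings), which newly created mms inherit as their initial
 * /proc/<pid>/coredump_filter value; see
 * Documentation/filesystems/proc.txt for the bit assignments.
 */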
#include <linux/init_task.h>

static void mm_init_aio(struct mm_struct *mm)
{
#ifdef CONFIG_AIO
	spin_lock_init(&mm->ioctx_lock);
	mm->ioctx_table = NULL;
#endif
}

static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
#ifdef CONFIG_MEMCG
	mm->owner = p;
#endif
}

static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
{
	mm->mmap = NULL;
	mm->mm_rb = RB_ROOT;
	mm->vmacache_seqnum = 0;
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->core_state = NULL;
	atomic_long_set(&mm->nr_ptes, 0);
	mm->map_count = 0;
	mm->locked_vm = 0;
	mm->pinned_vm = 0;
	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
	spin_lock_init(&mm->page_table_lock);
	mm_init_cpumask(mm);
	mm_init_aio(mm);
	mm_init_owner(mm, p);
	mmu_notifier_mm_init(mm);
	clear_tlb_flush_pending(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	mm->pmd_huge_pte = NULL;
#endif

	if (current->mm) {
		mm->flags = current->mm->flags & MMF_INIT_MASK;
		mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK;
	} else {
		mm->flags = default_dump_filter;
		mm->def_flags = 0;
	}

	if (mm_alloc_pgd(mm))
		goto fail_nopgd;

	if (init_new_context(p, mm))
		goto fail_nocontext;

	return mm;

fail_nocontext:
	mm_free_pgd(mm);
fail_nopgd:
	free_mm(mm);
	return NULL;
}

static void check_mm(struct mm_struct *mm)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		long x = atomic_long_read(&mm->rss_stat.count[i]);

		if (unlikely(x))
			printk(KERN_ALERT "BUG: Bad rss-counter state "
					  "mm:%p idx:%d val:%ld\n", mm, i, x);
	}
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
#endif
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct *mm_alloc(void)
{
	struct mm_struct *mm;

	mm = allocate_mm();
	if (!mm)
		return NULL;

	memset(mm, 0, sizeof(*mm));
	return mm_init(mm, current);
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	mmu_notifier_mm_destroy(mm);
	check_mm(mm);
	free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	might_sleep();

	if (atomic_dec_and_test(&mm->mm_users)) {
		uprobe_clear_state(mm);
		exit_aio(mm);
		ksm_exit(mm);
		khugepaged_exit(mm); /* must run before exit_mmap */
		exit_mmap(mm);
		set_mm_exe_file(mm, NULL);
		if (!list_empty(&mm->mmlist)) {
			spin_lock(&mmlist_lock);
			list_del(&mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		if (mm->binfmt)
			module_put(mm->binfmt->module);
		mmdrop(mm);
	}
}
EXPORT_SYMBOL_GPL(mmput);
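/*
 * A note on the two counters set up in mm_init() above: mm_users
 * counts users of the address space (threads, get_task_mm() callers)
 * and is dropped via mmput(); mm_count counts references to the
 * mm_struct itself (e.g. a kernel thread's lazy ->active_mm) and is
 * dropped via mmdrop().  mmput() tears down the address space when
 * mm_users reaches zero and then drops the mm_count reference the
 * users collectively held, so __mmdrop() frees the structure only
 * once the last lazy reference is gone.
 */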
void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
{
	if (new_exe_file)
		get_file(new_exe_file);
	if (mm->exe_file)
		fput(mm->exe_file);
	mm->exe_file = new_exe_file;
}

struct file *get_mm_exe_file(struct mm_struct *mm)
{
	struct file *exe_file;

	/* We need mmap_sem to protect against races with removal of exe_file */
	down_read(&mm->mmap_sem);
	exe_file = mm->exe_file;
	if (exe_file)
		get_file(exe_file);
	up_read(&mm->mmap_sem);
	return exe_file;
}

static void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm)
{
	/* It's safe to write the exe_file pointer without exe_file_lock because
	 * this is called during fork when the task is not yet in /proc */
	newmm->exe_file = get_mm_exe_file(oldmm);
}

/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm.  Also checks PF_KTHREAD (meaning
 * this kernel workthread has transiently adopted a user mm with use_mm,
 * to do its AIO) and returns %NULL in that case too.  Otherwise returns
 * the mm after bumping up its use count.  The caller must release the
 * mm via mmput() after use.  Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (task->flags & PF_KTHREAD)
			mm = NULL;
		else
			atomic_inc(&mm->mm_users);
	}
	task_unlock(task);
	return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);

struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
{
	struct mm_struct *mm;
	int err;

	err = mutex_lock_killable(&task->signal->cred_guard_mutex);
	if (err)
		return ERR_PTR(err);

	mm = get_task_mm(task);
	if (mm && mm != current->mm &&
			!ptrace_may_access(task, mode)) {
		mmput(mm);
		mm = ERR_PTR(-EACCES);
	}
	mutex_unlock(&task->signal->cred_guard_mutex);

	return mm;
}

static void complete_vfork_done(struct task_struct *tsk)
{
	struct completion *vfork;

	task_lock(tsk);
	vfork = tsk->vfork_done;
	if (likely(vfork)) {
		tsk->vfork_done = NULL;
		complete(vfork);
	}
	task_unlock(tsk);
}

static int wait_for_vfork_done(struct task_struct *child,
				struct completion *vfork)
{
	int killed;

	freezer_do_not_count();
	killed = wait_for_completion_killable(vfork);
	freezer_count();

	if (killed) {
		task_lock(child);
		child->vfork_done = NULL;
		task_unlock(child);
	}

	put_task_struct(child);
	return killed;
}

/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * on error and success alike.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one.  Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	/* Get rid of any futexes when releasing the mm */
#ifdef CONFIG_FUTEX
	if (unlikely(tsk->robust_list)) {
		exit_robust_list(tsk);
		tsk->robust_list = NULL;
	}
#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list)) {
		compat_exit_robust_list(tsk);
		tsk->compat_robust_list = NULL;
	}
#endif
	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
#endif

	uprobe_free_utask(tsk);

	/* Get rid of any cached register state */
	deactivate_mm(tsk, mm);

	/*
	 * If we're exiting normally, clear a user-space tid field if
	 * requested.  We leave this alone when dying by signal, to leave
	 * the value intact in a core dump, and to spare needless trouble:
	 * a killed vfork parent, for instance, shouldn't touch this mm.
	 * Userland only wants this done for a sys_exit.
	 */
	if (tsk->clear_child_tid) {
		if (!(tsk->flags & PF_SIGNALED) &&
		    atomic_read(&mm->mm_users) > 1) {
			/*
			 * We don't check the error code - if userspace has
			 * not set up a proper pointer then tough luck.
			 */
			put_user(0, tsk->clear_child_tid);
			sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
					1, NULL, NULL, 0);
		}
		tsk->clear_child_tid = NULL;
	}

	/*
	 * All done, finally we can wake up parent and return this mm to him.
	 * Also kthread_stop() uses this completion for synchronization.
	 */
	if (tsk->vfork_done)
		complete_vfork_done(tsk);
}
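/*
 * The clear_child_tid protocol serviced by mm_release() above is what
 * NPTL relies on for pthread_join().  A rough userspace sketch,
 * illustrative only (placeholder names, glibc clone() wrapper assumed):
 *
 *	static pid_t tid;	// set before clone() returns, zeroed by the kernel
 *
 *	clone(thread_fn, stack_top,
 *	      CLONE_VM | CLONE_THREAD | CLONE_SIGHAND |
 *	      CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID,
 *	      arg, &tid, NULL, &tid);
 *	while (tid != 0)	// mm_release() zeroes tid, then FUTEX_WAKEs
 *		syscall(SYS_futex, &tid, FUTEX_WAIT, tid, NULL, NULL, 0);
 */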
/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
static struct mm_struct *dup_mm(struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm = current->mm;
	int err;

	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));

	if (!mm_init(mm, tsk))
		goto fail_nomem;

	dup_mm_exe_file(oldmm, mm);

	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

	if (mm->binfmt && !try_module_get(mm->binfmt->module))
		goto free_pt;

	return mm;

free_pt:
	/* don't put binfmt in mmput, we haven't got module yet */
	mm->binfmt = NULL;
	mmput(mm);

fail_nomem:
	return NULL;
}

static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm;
	int retval;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->nvcsw = tsk->nivcsw = 0;
#ifdef CONFIG_DETECT_HUNG_TASK
	tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
#endif

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal an active VM for that..
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	/* initialize the new vmacache entries */
	vmacache_flush(tsk);

	if (clone_flags & CLONE_VM) {
		atomic_inc(&oldmm->mm_users);
		mm = oldmm;
		goto good_mm;
	}

	retval = -ENOMEM;
	mm = dup_mm(tsk);
	if (!mm)
		goto fail_nomem;

good_mm:
	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;

fail_nomem:
	return retval;
}

static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
	struct fs_struct *fs = current->fs;
	if (clone_flags & CLONE_FS) {
		/* tsk->fs is already what we want */
		spin_lock(&fs->lock);
		if (fs->in_exec) {
			spin_unlock(&fs->lock);
			return -EAGAIN;
		}
		fs->users++;
		spin_unlock(&fs->lock);
		return 0;
	}
	tsk->fs = copy_fs_struct(fs);
	if (!tsk->fs)
		return -ENOMEM;
	return 0;
}
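/*
 * CLONE_FS, handled above, shares a single fs_struct: chroot(),
 * chdir() and umask() in one task are then immediately visible to the
 * others.  This is the sharing that threads get, where plain fork()
 * takes a copy.
 */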
static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
{
	struct files_struct *oldf, *newf;
	int error = 0;

	/*
	 * A background process may not have any files ...
	 */
	oldf = current->files;
	if (!oldf)
		goto out;

	if (clone_flags & CLONE_FILES) {
		atomic_inc(&oldf->count);
		goto out;
	}

	newf = dup_fd(oldf, &error);
	if (!newf)
		goto out;

	tsk->files = newf;
	error = 0;
out:
	return error;
}

static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
#ifdef CONFIG_BLOCK
	struct io_context *ioc = current->io_context;
	struct io_context *new_ioc;

	if (!ioc)
		return 0;
	/*
	 * Share io context with parent, if CLONE_IO is set
	 */
	if (clone_flags & CLONE_IO) {
		ioc_task_link(ioc);
		tsk->io_context = ioc;
	} else if (ioprio_valid(ioc->ioprio)) {
		new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
		if (unlikely(!new_ioc))
			return -ENOMEM;

		new_ioc->ioprio = ioc->ioprio;
		put_io_context(new_ioc);
	}
#endif
	return 0;
}

static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sighand_struct *sig;

	if (clone_flags & CLONE_SIGHAND) {
		atomic_inc(&current->sighand->count);
		return 0;
	}
	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	rcu_assign_pointer(tsk->sighand, sig);
	if (!sig)
		return -ENOMEM;
	atomic_set(&sig->count, 1);
	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
	return 0;
}

void __cleanup_sighand(struct sighand_struct *sighand)
{
	if (atomic_dec_and_test(&sighand->count)) {
		signalfd_cleanup(sighand);
		kmem_cache_free(sighand_cachep, sighand);
	}
}

/*
 * Initialize POSIX timer handling for a thread group.
 */
static void posix_cpu_timers_init_group(struct signal_struct *sig)
{
	unsigned long cpu_limit;

	/* Thread group counters. */
	thread_group_cputime_init(sig);

	cpu_limit = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
	if (cpu_limit != RLIM_INFINITY) {
		sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
		sig->cputimer.running = 1;
	}

	/* The timer lists. */
	INIT_LIST_HEAD(&sig->cpu_timers[0]);
	INIT_LIST_HEAD(&sig->cpu_timers[1]);
	INIT_LIST_HEAD(&sig->cpu_timers[2]);
}
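/*
 * A worked example of the RLIMIT_CPU wiring above: if the parent did
 * setrlimit(RLIMIT_CPU, &(struct rlimit){ .rlim_cur = 10,
 * .rlim_max = RLIM_INFINITY }), a forked child's thread group starts
 * with cputime_expires.prof_exp armed at 10 seconds of group CPU time,
 * after which SIGXCPU is raised.
 */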
static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (clone_flags & CLONE_THREAD)
		return 0;

	sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	sig->nr_threads = 1;
	atomic_set(&sig->live, 1);
	atomic_set(&sig->sigcnt, 1);

	/* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
	sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
	tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);

	init_waitqueue_head(&sig->wait_chldexit);
	sig->curr_target = tsk;
	init_sigpending(&sig->shared_pending);
	INIT_LIST_HEAD(&sig->posix_timers);

	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sig->real_timer.function = it_real_fn;

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	posix_cpu_timers_init_group(sig);

	tty_audit_fork(sig);
	sched_autogroup_fork(sig);

#ifdef CONFIG_CGROUPS
	init_rwsem(&sig->group_rwsem);
#endif

	sig->oom_score_adj = current->signal->oom_score_adj;
	sig->oom_score_adj_min = current->signal->oom_score_adj_min;

	sig->has_child_subreaper = current->signal->has_child_subreaper ||
				   current->signal->is_child_subreaper;

	mutex_init(&sig->cred_guard_mutex);

	return 0;
}

static void copy_seccomp(struct task_struct *p)
{
#ifdef CONFIG_SECCOMP
	/*
	 * Must be called with sighand->lock held, which is common to
	 * all threads in the group. Holding cred_guard_mutex is not
	 * needed because this new task is not yet running and cannot
	 * be racing exec.
	 */
	assert_spin_locked(&current->sighand->siglock);

	/* Ref-count the new filter user, and assign it. */
	get_seccomp_filter(current);
	p->seccomp = current->seccomp;

	/*
	 * Explicitly enable no_new_privs here in case it got set
	 * between the task_struct being duplicated and holding the
	 * sighand lock. The seccomp state and nnp must be in sync.
	 */
	if (task_no_new_privs(current))
		task_set_no_new_privs(p);

	/*
	 * If the parent gained a seccomp mode after copying the thread
	 * flags and before we took the sighand lock, we have to
	 * manually enable the seccomp thread flag here.
	 */
	if (p->seccomp.mode != SECCOMP_MODE_DISABLED)
		set_tsk_thread_flag(p, TIF_SECCOMP);
#endif
}

SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
{
	current->clear_child_tid = tidptr;

	return task_pid_vnr(current);
}

static void rt_mutex_init_task(struct task_struct *p)
{
	raw_spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
	p->pi_waiters = RB_ROOT;
	p->pi_waiters_leftmost = NULL;
	p->pi_blocked_on = NULL;
#endif
}
/*
 * Initialize POSIX timer handling for a single task.
 */
static void posix_cpu_timers_init(struct task_struct *tsk)
{
	tsk->cputime_expires.prof_exp = 0;
	tsk->cputime_expires.virt_exp = 0;
	tsk->cputime_expires.sched_exp = 0;
	INIT_LIST_HEAD(&tsk->cpu_timers[0]);
	INIT_LIST_HEAD(&tsk->cpu_timers[1]);
	INIT_LIST_HEAD(&tsk->cpu_timers[2]);
}

static inline void
init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
{
	task->pids[type].pid = pid;
}

/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					unsigned long stack_size,
					int __user *child_tidptr,
					struct pid *pid,
					int trace)
{
	int retval;
	struct task_struct *p;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	/*
	 * Siblings of global init remain as zombies on exit since they are
	 * not reaped by their parent (swapper). To solve this and to avoid
	 * multi-rooted process trees, prevent global and container-inits
	 * from creating siblings.
	 */
	if ((clone_flags & CLONE_PARENT) &&
				current->signal->flags & SIGNAL_UNKILLABLE)
		return ERR_PTR(-EINVAL);

	/*
	 * If the new process will be in a different pid or user namespace
	 * do not allow it to share a thread group or signal handlers or
	 * parent with the forking task.
	 */
	if (clone_flags & CLONE_SIGHAND) {
		if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
		    (task_active_pid_ns(current) !=
				current->nsproxy->pid_ns_for_children))
			return ERR_PTR(-EINVAL);
	}

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	ftrace_graph_init_task(p);

	rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->real_cred->user->processes) >=
			task_rlimit(p, RLIMIT_NPROC)) {
		if (p->real_cred->user != INIT_USER &&
		    !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
			goto bad_fork_free;
	}
	current->flags &= ~PF_NPROC_EXCEEDED;

	retval = copy_creds(p, clone_flags);
	if (retval < 0)
		goto bad_fork_free;

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	retval = -EAGAIN;
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
	p->flags |= PF_FORKNOEXEC;
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	rcu_copy_process(p);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	init_sigpending(&p->pending);

	p->utime = p->stime = p->gtime = 0;
	p->utimescaled = p->stimescaled = 0;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	p->prev_cputime.utime = p->prev_cputime.stime = 0;
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqlock_init(&p->vtime_seqlock);
	p->vtime_snap = 0;
	p->vtime_snap_whence = VTIME_SLEEPING;
#endif

#if defined(SPLIT_RSS_COUNTING)
	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
#endif

	p->default_timer_slack_ns = current->timer_slack_ns;

	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);

	posix_cpu_timers_init(p);

	p->start_time = ktime_get_ns();
	p->real_start_time = ktime_get_boot_ns();
	p->io_context = NULL;
	p->audit_context = NULL;
	if (clone_flags & CLONE_THREAD)
		threadgroup_change_begin(current);
	cgroup_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_threadgroup_lock;
	}
#endif
#ifdef CONFIG_CPUSETS
	p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
	p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
	seqcount_init(&p->mems_allowed_seq);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
	p->hardirqs_enabled = 0;
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif
#ifdef CONFIG_BCACHE
	p->sequential_io	= 0;
	p->sequential_io_avg	= 0;
#endif

	/* Perform scheduler related setup. Assign this task to a CPU. */
	retval = sched_fork(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_policy;

	retval = perf_event_init_task(p);
	if (retval)
		goto bad_fork_cleanup_policy;
	retval = audit_alloc(p);
	if (retval)
		goto bad_fork_cleanup_perf;
	/* copy all the process information */
	shm_init_task(p);
	retval = copy_semundo(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_audit;
	retval = copy_files(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_semundo;
	retval = copy_fs(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_files;
	retval = copy_sighand(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_fs;
	retval = copy_signal(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_sighand;
	retval = copy_mm(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_signal;
	retval = copy_namespaces(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_mm;
	retval = copy_io(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread(clone_flags, stack_start, stack_size, p);
	if (retval)
		goto bad_fork_cleanup_io;

	if (pid != &init_struct_pid) {
		retval = -ENOMEM;
		pid = alloc_pid(p->nsproxy->pid_ns_for_children);
		if (!pid)
			goto bad_fork_cleanup_io;
	}

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
#ifdef CONFIG_BLOCK
	p->plug = NULL;
#endif
#ifdef CONFIG_FUTEX
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;
#endif
	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing and stepping should be turned off in the
	 * child regardless of CLONE_PTRACE.
	 */
	user_disable_single_step(p);
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_all_latency_tracing(p);

	/* ok, now we should be set up.. */
	p->pid = pid_nr(pid);
	if (clone_flags & CLONE_THREAD) {
		p->exit_signal = -1;
		p->group_leader = current->group_leader;
		p->tgid = current->tgid;
	} else {
		if (clone_flags & CLONE_PARENT)
			p->exit_signal = current->group_leader->exit_signal;
		else
			p->exit_signal = (clone_flags & CSIGNAL);
		p->group_leader = p;
		p->tgid = p->pid;
	}

	p->nr_dirtied = 0;
	p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
	p->dirty_paused_when = 0;

	p->pdeath_signal = 0;
	INIT_LIST_HEAD(&p->thread_group);
	p->task_works = NULL;
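	/*
	 * On the exit_signal logic above: a plain fork() passes SIGCHLD
	 * in the CSIGNAL bits, so the child notifies the parent with
	 * SIGCHLD when it dies; CLONE_THREAD children get
	 * exit_signal = -1 and never notify the parent individually; a
	 * CLONE_PARENT sibling inherits the group leader's exit_signal,
	 * so the shared parent keeps seeing one consistent signal.
	 */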
	/*
	 * Make it visible to the rest of the system, but don't wake it up yet.
	 * Need tasklist lock for parent etc handling!
	 */
	write_lock_irq(&tasklist_lock);

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
	}

	spin_lock(&current->sighand->siglock);

	/*
	 * Copy seccomp details explicitly here, in case they were changed
	 * before holding sighand lock.
	 */
	copy_seccomp(p);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_free_pid;
	}

	if (likely(p->pid)) {
		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);

		init_task_pid(p, PIDTYPE_PID, pid);
		if (thread_group_leader(p)) {
			init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
			init_task_pid(p, PIDTYPE_SID, task_session(current));

			if (is_child_reaper(pid)) {
				ns_of_pid(pid)->child_reaper = p;
				p->signal->flags |= SIGNAL_UNKILLABLE;
			}

			p->signal->leader_pid = pid;
			p->signal->tty = tty_kref_get(current->signal->tty);
			list_add_tail(&p->sibling, &p->real_parent->children);
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			attach_pid(p, PIDTYPE_PGID);
			attach_pid(p, PIDTYPE_SID);
			__this_cpu_inc(process_counts);
		} else {
			current->signal->nr_threads++;
			atomic_inc(&current->signal->live);
			atomic_inc(&current->signal->sigcnt);
			list_add_tail_rcu(&p->thread_group,
					  &p->group_leader->thread_group);
			list_add_tail_rcu(&p->thread_node,
					  &p->signal->thread_head);
		}
		attach_pid(p, PIDTYPE_PID);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	syscall_tracepoint_update(p);
	write_unlock_irq(&tasklist_lock);

	proc_fork_connector(p);
	cgroup_post_fork(p);
	if (clone_flags & CLONE_THREAD)
		threadgroup_change_end(current);
	perf_event_fork(p);

	trace_task_newtask(p, clone_flags);
	uprobe_copy_process(p, clone_flags);

	return p;

bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_io:
	if (p->io_context)
		exit_io_context(p);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_perf:
	perf_event_free_task(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_threadgroup_lock:
#endif
	if (clone_flags & CLONE_THREAD)
		threadgroup_change_end(current);
	delayacct_tsk_free(p);
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	atomic_dec(&p->cred->user->processes);
	exit_creds(p);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}

static inline void init_idle_pids(struct pid_link *links)
{
	enum pid_type type;

	for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
		INIT_HLIST_NODE(&links[type].node); /* not really needed */
		links[type].pid = &init_struct_pid;
	}
}

struct task_struct *fork_idle(int cpu)
{
	struct task_struct *task;
	task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0);
	if (!IS_ERR(task)) {
		init_idle_pids(task->pids);
		init_idle(task, cpu);
	}

	return task;
}

/*
 * Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	long nr;

	/*
	 * Determine whether and which event to report to ptracer. When
	 * called from kernel_thread or CLONE_UNTRACED is explicitly
	 * requested, no event is reported; otherwise, report if the event
	 * for the type of forking is enabled.
	 */
	if (!(clone_flags & CLONE_UNTRACED)) {
		if (clone_flags & CLONE_VFORK)
			trace = PTRACE_EVENT_VFORK;
		else if ((clone_flags & CSIGNAL) != SIGCHLD)
			trace = PTRACE_EVENT_CLONE;
		else
			trace = PTRACE_EVENT_FORK;

		if (likely(!ptrace_event_enabled(current, trace)))
			trace = 0;
	}

	p = copy_process(clone_flags, stack_start, stack_size,
			 child_tidptr, NULL, trace);
	/*
	 * Do this prior to waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;
		struct pid *pid;

		trace_sched_process_fork(current, p);

		pid = get_task_pid(p, PIDTYPE_PID);
		nr = pid_vnr(pid);

		if (clone_flags & CLONE_PARENT_SETTID)
			put_user(nr, parent_tidptr);

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
			get_task_struct(p);
		}

		wake_up_new_task(p);

		/* forking complete and child started to run, tell ptracer */
		if (unlikely(trace))
			ptrace_event_pid(trace, pid);

		if (clone_flags & CLONE_VFORK) {
			if (!wait_for_vfork_done(p, &vfork))
				ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
		}

		put_pid(pid);
	} else {
		nr = PTR_ERR(p);
	}
	return nr;
}
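/*
 * The CLONE_VFORK handshake in do_fork() above pairs with
 * complete_vfork_done(): the parent blocks in wait_for_vfork_done()
 * until the child execs or exits, at which point mm_release() fires
 * the completion.  That is the userspace-visible blocking behaviour
 * of vfork(2).
 */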
/*
 * Create a kernel thread.
 */
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	return do_fork(flags|CLONE_VM|CLONE_UNTRACED, (unsigned long)fn,
		(unsigned long)arg, NULL, NULL);
}

#ifdef __ARCH_WANT_SYS_FORK
SYSCALL_DEFINE0(fork)
{
#ifdef CONFIG_MMU
	return do_fork(SIGCHLD, 0, 0, NULL, NULL);
#else
	/* cannot be supported in nommu mode */
	return -EINVAL;
#endif
}
#endif

#ifdef __ARCH_WANT_SYS_VFORK
SYSCALL_DEFINE0(vfork)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0,
			0, NULL, NULL);
}
#endif

#ifdef __ARCH_WANT_SYS_CLONE
#ifdef CONFIG_CLONE_BACKWARDS
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
		 int __user *, parent_tidptr,
		 int, tls_val,
		 int __user *, child_tidptr)
#elif defined(CONFIG_CLONE_BACKWARDS2)
SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
		 int __user *, parent_tidptr,
		 int __user *, child_tidptr,
		 int, tls_val)
#elif defined(CONFIG_CLONE_BACKWARDS3)
SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
		int, stack_size,
		int __user *, parent_tidptr,
		int __user *, child_tidptr,
		int, tls_val)
#else
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
		 int __user *, parent_tidptr,
		 int __user *, child_tidptr,
		 int, tls_val)
#endif
{
	return do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr);
}
#endif

#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

static void sighand_ctor(void *data)
{
	struct sighand_struct *sighand = data;

	spin_lock_init(&sighand->siglock);
	init_waitqueue_head(&sighand->signalfd_wqh);
}

void __init proc_caches_init(void)
{
	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
			SLAB_NOTRACK, sighand_ctor);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	/*
	 * FIXME! The "sizeof(struct mm_struct)" currently includes the
	 * whole struct cpumask for the OFFSTACK case. We could change
	 * this to *only* allocate as much of it as required by the
	 * maximum number of CPUs we can ever have. The cpumask_allocation
	 * is at the end of the structure, exactly for that reason.
	 */
	mm_cachep = kmem_cache_create("mm_struct",
			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
	mmap_init();
	nsproxy_cache_init();
}
/*
 * Check constraints on flags passed to the unshare system call.
 */
static int check_unshare_flags(unsigned long unshare_flags)
{
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
				CLONE_NEWUSER|CLONE_NEWPID))
		return -EINVAL;
	/*
	 * Not implemented, but pretend it works if there is nothing to
	 * unshare. Note that unsharing CLONE_THREAD or CLONE_SIGHAND
	 * needs to unshare vm.
	 */
	if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
		/* FIXME: get_task_mm() increments ->mm_users */
		if (atomic_read(&current->mm->mm_users) > 1)
			return -EINVAL;
	}

	return 0;
}

/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *fs = current->fs;

	if (!(unshare_flags & CLONE_FS) || !fs)
		return 0;

	/* don't need lock here; in the worst case we'll do useless copy */
	if (fs->users == 1)
		return 0;

	*new_fsp = copy_fs_struct(fs);
	if (!*new_fsp)
		return -ENOMEM;

	return 0;
}

/*
 * Unshare file descriptor table if it is being shared
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
	struct files_struct *fd = current->files;
	int error = 0;

	if ((unshare_flags & CLONE_FILES) &&
	    (fd && atomic_read(&fd->count) > 1)) {
		*new_fdp = dup_fd(fd, &error);
		if (!*new_fdp)
			return error;
	}

	return 0;
}

/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
	struct fs_struct *fs, *new_fs = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct cred *new_cred = NULL;
	struct nsproxy *new_nsproxy = NULL;
	int do_sysvsem = 0;
	int err;

	/*
	 * If unsharing a user namespace, we must also unshare the thread
	 * group and the fs structure.
	 */
	if (unshare_flags & CLONE_NEWUSER)
		unshare_flags |= CLONE_THREAD | CLONE_FS;
	/*
	 * If unsharing a thread from a thread group, we must also unshare
	 * the vm.
	 */
	if (unshare_flags & CLONE_THREAD)
		unshare_flags |= CLONE_VM;
	/*
	 * If unsharing vm, we must also unshare signal handlers.
	 */
	if (unshare_flags & CLONE_VM)
		unshare_flags |= CLONE_SIGHAND;
	/*
	 * If unsharing a mount namespace, we must also unshare filesystem
	 * information.
	 */
	if (unshare_flags & CLONE_NEWNS)
		unshare_flags |= CLONE_FS;

	err = check_unshare_flags(unshare_flags);
	if (err)
		goto bad_unshare_out;
	/*
	 * CLONE_NEWIPC must also detach from the undolist: after switching
	 * to a new ipc namespace, the semaphore arrays from the old
	 * namespace are unreachable.
	 */
	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
		do_sysvsem = 1;
	err = unshare_fs(unshare_flags, &new_fs);
	if (err)
		goto bad_unshare_out;
	err = unshare_fd(unshare_flags, &new_fd);
	if (err)
		goto bad_unshare_cleanup_fs;
	err = unshare_userns(unshare_flags, &new_cred);
	if (err)
		goto bad_unshare_cleanup_fd;
	err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
					 new_cred, new_fs);
	if (err)
		goto bad_unshare_cleanup_cred;

	if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit().
			 */
			exit_sem(current);
		}
		if (unshare_flags & CLONE_NEWIPC) {
			/* Orphan segments in old ns (see sem above). */
			exit_shm(current);
			shm_init_task(current);
		}

		if (new_nsproxy)
			switch_task_namespaces(current, new_nsproxy);

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			spin_lock(&fs->lock);
			current->fs = new_fs;
			if (--fs->users)
				new_fs = NULL;
			else
				new_fs = fs;
			spin_unlock(&fs->lock);
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);

		if (new_cred) {
			/* Install the new user namespace */
			commit_creds(new_cred);
			new_cred = NULL;
		}
	}

bad_unshare_cleanup_cred:
	if (new_cred)
		put_cred(new_cred);
bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_fs:
	if (new_fs)
		free_fs_struct(new_fs);

bad_unshare_out:
	return err;
}

/*
 * Helper to unshare the files of the current task.
 * We don't want to expose copy_files internals to
 * the exec layer of the kernel.
 */
int unshare_files(struct files_struct **displaced)
{
	struct task_struct *task = current;
	struct files_struct *copy = NULL;
	int error;

	error = unshare_fd(CLONE_FILES, &copy);
	if (error || !copy) {
		*displaced = NULL;
		return error;
	}
	*displaced = task->files;
	task_lock(task);
	task->files = copy;
	task_unlock(task);
	return 0;
}
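/*
 * A worked example of the flag escalation in sys_unshare() above:
 * unshare(CLONE_NEWUSER) is widened to CLONE_NEWUSER | CLONE_THREAD |
 * CLONE_FS | CLONE_VM | CLONE_SIGHAND, so check_unshare_flags() returns
 * -EINVAL unless the caller is single threaded (mm_users == 1).  A
 * rough userspace sketch, illustrative only:
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *
 *	if (unshare(CLONE_NEWNS) == 0) {
 *		// mounts made from here on live in a private mount
 *		// namespace (subject to mount propagation settings)
 *	}
 */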