/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/tracehook.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/perf_event.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <trace/events/sched.h>

/*
 * Protected counters by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count.. */
int max_threads;		/* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_online_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}

#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct()	kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
# define free_task_struct(tsk)	kmem_cache_free(task_struct_cachep, (tsk))
static struct kmem_cache *task_struct_cachep;
#endif

#ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR
static inline struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
	gfp_t mask = GFP_KERNEL;
#endif
	return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER);
}

static inline void free_thread_info(struct thread_info *ti)
{
	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
#endif

/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;

static void account_kernel_stack(struct thread_info *ti, int account)
{
	struct zone *zone = page_zone(virt_to_page(ti));

	mod_zone_page_state(zone, NR_KERNEL_STACK, account);
}

void free_task(struct task_struct *tsk)
{
	prop_local_destroy_single(&tsk->dirties);
	account_kernel_stack(tsk->stack, -1);
	free_thread_info(tsk->stack);
	rt_mutex_debug_task_free(tsk);
	ftrace_graph_exit_task(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!tsk->exit_state);
	WARN_ON(atomic_read(&tsk->usage));
	WARN_ON(tsk == current);

	exit_creds(tsk);
	delayacct_tsk_free(tsk);

	if (!profile_handoff_task(tsk))
		free_task(tsk);
}

/*
 * macro override instead of weak attribute alias, to workaround
 * gcc 4.1.0 and 4.1.1 bugs with weak attribute and empty functions.
 */
#ifndef arch_task_cache_init
#define arch_task_cache_init()
#endif

void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	L1_CACHE_BYTES
#endif
	/* create a slab on which task_structs can be allocated */
	task_struct_cachep =
		kmem_cache_create("task_struct", sizeof(struct task_struct),
			ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
#endif

	/* do the arch specific task caches init */
	arch_task_cache_init();

	/*
	 * The default maximum number of threads is set to a safe
	 * value: the thread structures can take up at most half
	 * of memory.
	 */
	max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);
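	/*
	 * Illustrative arithmetic (not from this file): assuming 4 KiB
	 * pages and an 8 KiB THREAD_SIZE, THREAD_SIZE / PAGE_SIZE == 2,
	 * so the line above yields max_threads == mempages / 16, i.e.
	 * the kernel stacks alone are capped at roughly 1/8 of all pages.
	 */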
	/*
	 * we need to allow at least 20 threads to boot a system
	 */
	if (max_threads < 20)
		max_threads = 20;

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];
}

int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst,
					       struct task_struct *src)
{
	*dst = *src;
	return 0;
}

static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;
	unsigned long *stackend;

	int err;

	prepare_to_copy(orig);

	tsk = alloc_task_struct();
	if (!tsk)
		return NULL;

	ti = alloc_thread_info(tsk);
	if (!ti) {
		free_task_struct(tsk);
		return NULL;
	}

	err = arch_dup_task_struct(tsk, orig);
	if (err)
		goto out;

	tsk->stack = ti;

	err = prop_local_init_single(&tsk->dirties);
	if (err)
		goto out;

	setup_thread_stack(tsk, orig);
	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for overflow detection */

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_int();
#endif

	/* One for us, one for whoever does the "release_task()" (usually parent) */
	atomic_set(&tsk->usage, 2);
	atomic_set(&tsk->fs_excl, 0);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;

	account_kernel_stack(ti, 1);

	return tsk;

out:
	free_thread_info(ti);
	free_task_struct(tsk);
	return NULL;
}

#ifdef CONFIG_MMU
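/*
 * dup_mmap() below walks the parent's VMA list, duplicating each VMA into
 * the child and copying the corresponding page table entries through
 * copy_page_range(); mappings marked VM_DONTCOPY are skipped entirely.
 */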
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	struct mempolicy *pol;

	down_write(&oldmm->mmap_sem);
	flush_cache_dup_mm(oldmm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

	mm->locked_vm = 0;
	mm->mmap = NULL;
	mm->mmap_cache = NULL;
	mm->free_area_cache = oldmm->mmap_base;
	mm->cached_hole_size = ~0UL;
	mm->map_count = 0;
	cpumask_clear(mm_cpumask(mm));
	mm->mm_rb = RB_ROOT;
	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;
	retval = ksm_fork(mm, oldmm);
	if (retval)
		goto out;

	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			long pages = vma_pages(mpnt);
			mm->total_vm -= pages;
			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
								-pages);
			continue;
		}
		charge = 0;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
			if (security_vm_enough_memory(len))
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		pol = mpol_dup(vma_policy(mpnt));
		retval = PTR_ERR(pol);
		if (IS_ERR(pol))
			goto fail_nomem_policy;
		vma_set_policy(tmp, pol);
		tmp->vm_flags &= ~VM_LOCKED;
		tmp->vm_mm = mm;
		tmp->vm_next = NULL;
		anon_vma_link(tmp);
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file->f_path.dentry->d_inode;
			struct address_space *mapping = file->f_mapping;

			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);
			spin_lock(&mapping->i_mmap_lock);
			if (tmp->vm_flags & VM_SHARED)
				mapping->i_mmap_writable++;
			tmp->vm_truncate_count = mpnt->vm_truncate_count;
			flush_dcache_mmap_lock(mapping);
			/* insert tmp into the share list, just after mpnt */
			vma_prio_tree_add(tmp, mpnt);
			flush_dcache_mmap_unlock(mapping);
			spin_unlock(&mapping->i_mmap_lock);
		}

		/*
		 * Clear hugetlb-related page reserves for children. This only
		 * affects MAP_PRIVATE mappings. Faults generated by the child
		 * are not guaranteed to succeed, even if read-only
		 */
		if (is_vm_hugetlb_page(tmp))
			reset_vma_resv_huge_pages(tmp);

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		retval = copy_page_range(mm, oldmm, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
	/* a new mm has just been created */
	arch_dup_mmap(oldmm, mm);
	retval = 0;
out:
	up_write(&mm->mmap_sem);
	flush_tlb_mm(oldmm);
	up_write(&oldmm->mmap_sem);
	return retval;
fail_nomem_policy:
	kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto out;
}

static inline int mm_alloc_pgd(struct mm_struct * mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct * mm)
{
	pgd_free(mm, mm->pgd);
}
#else
#define dup_mmap(mm, oldmm)	(0)
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;

static int __init coredump_filter_setup(char *s)
{
	default_dump_filter =
		(simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
		MMF_DUMP_FILTER_MASK;
	return 1;
}

__setup("coredump_filter=", coredump_filter_setup);

#include <linux/init_task.h>

static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
{
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->flags = (current->mm) ?
		(current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
	mm->core_state = NULL;
	mm->nr_ptes = 0;
	set_mm_counter(mm, file_rss, 0);
	set_mm_counter(mm, anon_rss, 0);
	spin_lock_init(&mm->page_table_lock);
	spin_lock_init(&mm->ioctx_lock);
	INIT_HLIST_HEAD(&mm->ioctx_list);
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	mm_init_owner(mm, p);

	if (likely(!mm_alloc_pgd(mm))) {
		mm->def_flags = 0;
		mmu_notifier_mm_init(mm);
		return mm;
	}

	free_mm(mm);
	return NULL;
}
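/*
 * Note on the two reference counts set up in mm_init(): mm_users counts
 * users of the user address space and is dropped with mmput(), while
 * mm_count counts references to the mm_struct itself (lazy-TLB kernel
 * threads, for instance) and is dropped with mmdrop()/__mmdrop() below.
 */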
/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct * mm_alloc(void)
{
	struct mm_struct * mm;

	mm = allocate_mm();
	if (mm) {
		memset(mm, 0, sizeof(*mm));
		mm = mm_init(mm, current);
	}
	return mm;
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	mmu_notifier_mm_destroy(mm);
	free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	might_sleep();

	if (atomic_dec_and_test(&mm->mm_users)) {
		exit_aio(mm);
		ksm_exit(mm);
		exit_mmap(mm);
		set_mm_exe_file(mm, NULL);
		if (!list_empty(&mm->mmlist)) {
			spin_lock(&mmlist_lock);
			list_del(&mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		put_swap_token(mm);
		mmdrop(mm);
	}
}
EXPORT_SYMBOL_GPL(mmput);

/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm, or if the task is a kernel thread
 * (PF_KTHREAD set) that has only transiently adopted a user mm with
 * use_mm(), e.g. to do its AIO. Otherwise returns a reference to the mm
 * after bumping up its use count. The user must release the mm via
 * mmput() after use. Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (task->flags & PF_KTHREAD)
			mm = NULL;
		else
			atomic_inc(&mm->mm_users);
	}
	task_unlock(task);
	return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);
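/*
 * Typical caller pattern (illustrative only, not part of this file):
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *	if (mm) {
 *		... inspect the mm ...
 *		mmput(mm);
 *	}
 */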
/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * error success whatever.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one. Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	struct completion *vfork_done = tsk->vfork_done;

	/* Get rid of any futexes when releasing the mm */
#ifdef CONFIG_FUTEX
	if (unlikely(tsk->robust_list))
		exit_robust_list(tsk);
#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list))
		compat_exit_robust_list(tsk);
#endif
#endif

	/* Get rid of any cached register state */
	deactivate_mm(tsk, mm);

	/* notify parent sleeping on vfork() */
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}

	/*
	 * If we're exiting normally, clear a user-space tid field if
	 * requested.  We leave this alone when dying by signal, to leave
	 * the value intact in a core dump, and to save the unnecessary
	 * trouble otherwise.  Userland only wants this done for a sys_exit.
	 */
	if (tsk->clear_child_tid) {
		if (!(tsk->flags & PF_SIGNALED) &&
		    atomic_read(&mm->mm_users) > 1) {
			/*
			 * We don't check the error code - if userspace has
			 * not set up a proper pointer then tough luck.
			 */
			put_user(0, tsk->clear_child_tid);
			sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
					1, NULL, NULL, 0);
		}
		tsk->clear_child_tid = NULL;
	}
}

/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
struct mm_struct *dup_mm(struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm = current->mm;
	int err;

	if (!oldmm)
		return NULL;

	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));

	/* Initializing for Swap token stuff */
	mm->token_priority = 0;
	mm->last_interval = 0;

	if (!mm_init(mm, tsk))
		goto fail_nomem;

	if (init_new_context(tsk, mm))
		goto fail_nocontext;

	dup_mm_exe_file(oldmm, mm);

	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

	return mm;

free_pt:
	mmput(mm);

fail_nomem:
	return NULL;

fail_nocontext:
	/*
	 * If init_new_context() failed, we cannot use mmput() to free the mm
	 * because it calls destroy_context()
	 */
	mm_free_pgd(mm);
	free_mm(mm);
	return NULL;
}

static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
{
	struct mm_struct * mm, *oldmm;
	int retval;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->nvcsw = tsk->nivcsw = 0;
#ifdef CONFIG_DETECT_HUNG_TASK
	tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
#endif

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal an active VM for that..
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	if (clone_flags & CLONE_VM) {
		atomic_inc(&oldmm->mm_users);
		mm = oldmm;
		goto good_mm;
	}

	retval = -ENOMEM;
	mm = dup_mm(tsk);
	if (!mm)
		goto fail_nomem;

good_mm:
	/* Initializing for Swap token stuff */
	mm->token_priority = 0;
	mm->last_interval = 0;

	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;

fail_nomem:
	return retval;
}

static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
	struct fs_struct *fs = current->fs;
	if (clone_flags & CLONE_FS) {
		/* tsk->fs is already what we want */
		write_lock(&fs->lock);
		if (fs->in_exec) {
			write_unlock(&fs->lock);
			return -EAGAIN;
		}
		fs->users++;
		write_unlock(&fs->lock);
		return 0;
	}
	tsk->fs = copy_fs_struct(fs);
	if (!tsk->fs)
		return -ENOMEM;
	return 0;
}

static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
{
	struct files_struct *oldf, *newf;
	int error = 0;

	/*
	 * A background process may not have any files ...
	 */
	oldf = current->files;
	if (!oldf)
		goto out;

	if (clone_flags & CLONE_FILES) {
		atomic_inc(&oldf->count);
		goto out;
	}

	newf = dup_fd(oldf, &error);
	if (!newf)
		goto out;

	tsk->files = newf;
	error = 0;
out:
	return error;
}

static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
#ifdef CONFIG_BLOCK
	struct io_context *ioc = current->io_context;

	if (!ioc)
		return 0;
	/*
	 * Share io context with parent, if CLONE_IO is set
	 */
	if (clone_flags & CLONE_IO) {
		tsk->io_context = ioc_task_link(ioc);
		if (unlikely(!tsk->io_context))
			return -ENOMEM;
	} else if (ioprio_valid(ioc->ioprio)) {
		tsk->io_context = alloc_io_context(GFP_KERNEL, -1);
		if (unlikely(!tsk->io_context))
			return -ENOMEM;

		tsk->io_context->ioprio = ioc->ioprio;
	}
#endif
	return 0;
}
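/*
 * sighand_struct is freed under RCU (sighand_cachep is created with
 * SLAB_DESTROY_BY_RCU in proc_caches_init() below), which is why
 * copy_sighand() publishes tsk->sighand with rcu_assign_pointer().
 */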
static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sighand_struct *sig;

	if (clone_flags & CLONE_SIGHAND) {
		atomic_inc(&current->sighand->count);
		return 0;
	}
	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	rcu_assign_pointer(tsk->sighand, sig);
	if (!sig)
		return -ENOMEM;
	atomic_set(&sig->count, 1);
	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
	return 0;
}

void __cleanup_sighand(struct sighand_struct *sighand)
{
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}
/*
 * Initialize POSIX timer handling for a thread group.
 */
static void posix_cpu_timers_init_group(struct signal_struct *sig)
{
	/* Thread group counters. */
	thread_group_cputime_init(sig);

	/* Expiration times and increments. */
	sig->it_virt_expires = cputime_zero;
	sig->it_virt_incr = cputime_zero;
	sig->it_prof_expires = cputime_zero;
	sig->it_prof_incr = cputime_zero;

	/* Cached expiration times. */
	sig->cputime_expires.prof_exp = cputime_zero;
	sig->cputime_expires.virt_exp = cputime_zero;
	sig->cputime_expires.sched_exp = 0;

	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
		sig->cputime_expires.prof_exp =
			secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
		sig->cputimer.running = 1;
	}

	/* The timer lists. */
	INIT_LIST_HEAD(&sig->cpu_timers[0]);
	INIT_LIST_HEAD(&sig->cpu_timers[1]);
	INIT_LIST_HEAD(&sig->cpu_timers[2]);
}

static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (clone_flags & CLONE_THREAD)
		return 0;

	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	atomic_set(&sig->count, 1);
	atomic_set(&sig->live, 1);
	init_waitqueue_head(&sig->wait_chldexit);
	sig->flags = 0;
	if (clone_flags & CLONE_NEWPID)
		sig->flags |= SIGNAL_UNKILLABLE;
	sig->group_exit_code = 0;
	sig->group_exit_task = NULL;
	sig->group_stop_count = 0;
	sig->curr_target = tsk;
	init_sigpending(&sig->shared_pending);
	INIT_LIST_HEAD(&sig->posix_timers);

	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sig->it_real_incr.tv64 = 0;
	sig->real_timer.function = it_real_fn;

	sig->leader = 0;	/* session leadership doesn't inherit */
	sig->tty_old_pgrp = NULL;
	sig->tty = NULL;

	sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
	sig->gtime = cputime_zero;
	sig->cgtime = cputime_zero;
	sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
	sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
	sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
	task_io_accounting_init(&sig->ioac);
	sig->sum_sched_runtime = 0;
	taskstats_tgid_init(sig);

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	posix_cpu_timers_init_group(sig);

	acct_init_pacct(&sig->pacct);

	tty_audit_fork(sig);

	sig->oom_adj = current->signal->oom_adj;

	return 0;
}

void __cleanup_signal(struct signal_struct *sig)
{
	thread_group_cputime_free(sig);
	tty_kref_put(sig->tty);
	kmem_cache_free(signal_cachep, sig);
}

static void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
	unsigned long new_flags = p->flags;

	new_flags &= ~PF_SUPERPRIV;
	new_flags |= PF_FORKNOEXEC;
	new_flags |= PF_STARTING;
	p->flags = new_flags;
	clear_freeze_flag(p);
}

SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
{
	current->clear_child_tid = tidptr;

	return task_pid_vnr(current);
}

static void rt_mutex_init_task(struct task_struct *p)
{
	spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
	plist_head_init(&p->pi_waiters, &p->pi_lock);
	p->pi_blocked_on = NULL;
#endif
}

#ifdef CONFIG_MM_OWNER
void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
	mm->owner = p;
}
#endif /* CONFIG_MM_OWNER */

/*
 * Initialize POSIX timer handling for a single task.
 */
static void posix_cpu_timers_init(struct task_struct *tsk)
{
	tsk->cputime_expires.prof_exp = cputime_zero;
	tsk->cputime_expires.virt_exp = cputime_zero;
	tsk->cputime_expires.sched_exp = 0;
	INIT_LIST_HEAD(&tsk->cpu_timers[0]);
	INIT_LIST_HEAD(&tsk->cpu_timers[1]);
	INIT_LIST_HEAD(&tsk->cpu_timers[2]);
}
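/*
 * copy_process() below is the common workhorse shared by do_fork() and
 * fork_idle() further down in this file.
 */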
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					struct pt_regs *regs,
					unsigned long stack_size,
					int __user *child_tidptr,
					struct pid *pid,
					int trace)
{
	int retval;
	struct task_struct *p;
	int cgroup_callbacks_done = 0;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);
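	/*
	 * In short: CLONE_THREAD requires CLONE_SIGHAND, CLONE_SIGHAND
	 * requires CLONE_VM, and CLONE_NEWNS cannot be combined with
	 * CLONE_FS. For illustration, a thread-library clone typically
	 * passes something like CLONE_VM | CLONE_FS | CLONE_FILES |
	 * CLONE_SIGHAND | CLONE_THREAD (plus TID-related flags), which
	 * satisfies all three checks above.
	 */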
	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	ftrace_graph_init_task(p);

	rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->real_cred->user->processes) >=
			p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
		    p->real_cred->user != INIT_USER)
			goto bad_fork_free;
	}

	retval = copy_creds(p, clone_flags);
	if (retval < 0)
		goto bad_fork_free;

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	retval = -EAGAIN;
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	if (p->binfmt && !try_module_get(p->binfmt->module))
		goto bad_fork_cleanup_put_domain;

	p->did_exec = 0;
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	copy_flags(clone_flags, p);
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	rcu_copy_process(p);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	init_sigpending(&p->pending);

	p->utime = cputime_zero;
	p->stime = cputime_zero;
	p->gtime = cputime_zero;
	p->utimescaled = cputime_zero;
	p->stimescaled = cputime_zero;
	p->prev_utime = cputime_zero;
	p->prev_stime = cputime_zero;

	p->default_timer_slack_ns = current->timer_slack_ns;

	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);

	posix_cpu_timers_init(p);

	p->lock_depth = -1;		/* -1 = no lock */
	do_posix_clock_monotonic_gettime(&p->start_time);
	p->real_start_time = p->start_time;
	monotonic_to_bootbased(&p->real_start_time);
	p->io_context = NULL;
	p->audit_context = NULL;
	cgroup_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_cgroup;
	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	p->hardirqs_enabled = 1;
#else
	p->hardirqs_enabled = 0;
#endif
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif

	p->bts = NULL;
	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p, clone_flags);

	retval = perf_event_init_task(p);
	if (retval)
		goto bad_fork_cleanup_policy;

	if ((retval = audit_alloc(p)))
		goto bad_fork_cleanup_policy;
	/* copy all the process information */
	if ((retval = copy_semundo(clone_flags, p)))
		goto bad_fork_cleanup_audit;
	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_sighand(clone_flags, p)))
		goto bad_fork_cleanup_fs;
	if ((retval = copy_signal(clone_flags, p)))
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	if ((retval = copy_namespaces(clone_flags, p)))
		goto bad_fork_cleanup_mm;
	if ((retval = copy_io(clone_flags, p)))
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_io;

	if (pid != &init_struct_pid) {
		retval = -ENOMEM;
		pid = alloc_pid(p->nsproxy->pid_ns);
		if (!pid)
			goto bad_fork_cleanup_io;

		if (clone_flags & CLONE_NEWPID) {
			retval = pid_ns_prepare_proc(p->nsproxy->pid_ns);
			if (retval < 0)
				goto bad_fork_free_pid;
		}
	}

	p->pid = pid_nr(pid);
	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	if (current->nsproxy != p->nsproxy) {
		retval = ns_cgroup_clone(p, pid);
		if (retval)
			goto bad_fork_free_pid;
	}

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
#ifdef CONFIG_FUTEX
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;
#endif
	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing should be turned off in the child regardless
	 * of CLONE_PTRACE.
	 */
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_all_latency_tracing(p);

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;
	p->exit_state = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We don't wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);

	/* Now that the task is set up, run cgroup callbacks if
	 * necessary. We need to run them before the task is visible
	 * on the tasklist. */
	cgroup_fork_callbacks(p);
	cgroup_callbacks_done = 1;

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);
	/*
	 * The task hasn't been attached yet, so its cpus_allowed mask will
	 * not be changed, nor will its assigned CPU.
	 *
	 * The cpus_allowed mask of the parent may have changed after it was
	 * copied first time - so re-copy it here, then check the child's CPU
	 * to ensure it is on a valid CPU (and if not, just force it back to
	 * parent's CPU). This avoids a lot of nasty races.
	 */
	p->cpus_allowed = current->cpus_allowed;
	p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
			!cpu_online(task_cpu(p))))
		set_task_cpu(p, smp_processor_id());

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
	}

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_free_pid;
	}

	if (clone_flags & CLONE_THREAD) {
		atomic_inc(&current->signal->count);
		atomic_inc(&current->signal->live);
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
	}

	if (likely(p->pid)) {
		list_add_tail(&p->sibling, &p->real_parent->children);
		tracehook_finish_clone(p, clone_flags, trace);

		if (thread_group_leader(p)) {
			if (clone_flags & CLONE_NEWPID)
				p->nsproxy->pid_ns->child_reaper = p;

			p->signal->leader_pid = pid;
			tty_kref_put(p->signal->tty);
			p->signal->tty = tty_kref_get(current->signal->tty);
			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
			attach_pid(p, PIDTYPE_SID, task_session(current));
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__get_cpu_var(process_counts)++;
		}
		attach_pid(p, PIDTYPE_PID, pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	proc_fork_connector(p);
	cgroup_post_fork(p);
	perf_event_fork(p);
	return p;
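	/*
	 * Error unwinding: the labels below tear down, in reverse order,
	 * whatever the corresponding setup steps above had already
	 * completed, each one falling through to the next.
	 */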
bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_io:
	put_io_context(p->io_context);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		__cleanup_signal(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_policy:
	perf_event_free_task(p);
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
	cgroup_exit(p, cgroup_callbacks_done);
	delayacct_tsk_free(p);
	if (p->binfmt)
		module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	atomic_dec(&p->cred->user->processes);
	exit_creds(p);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}

noinline struct pt_regs * __cpuinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(struct pt_regs));
	return regs;
}

struct task_struct * __cpuinit fork_idle(int cpu)
{
	struct task_struct *task;
	struct pt_regs regs;

	task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
			    &init_struct_pid, 0);
	if (!IS_ERR(task))
		init_idle(task, cpu);

	return task;
}

/*
 * Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      struct pt_regs *regs,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	long nr;

	/*
	 * Do some preliminary argument and permissions checking before we
	 * actually start allocating stuff
	 */
	if (clone_flags & CLONE_NEWUSER) {
		if (clone_flags & CLONE_THREAD)
			return -EINVAL;
		/* hopefully this check will go away when userns support is
		 * complete
		 */
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
				!capable(CAP_SETGID))
			return -EPERM;
	}

	/*
	 * We hope to recycle these flags after 2.6.26
	 */
	if (unlikely(clone_flags & CLONE_STOPPED)) {
		static int __read_mostly count = 100;

		if (count > 0 && printk_ratelimit()) {
			char comm[TASK_COMM_LEN];

			count--;
			printk(KERN_INFO "fork(): process `%s' used deprecated "
					"clone flags 0x%lx\n",
				get_task_comm(comm, current),
				clone_flags & CLONE_STOPPED);
		}
	}

	/*
	 * When called from kernel_thread, don't do user tracing stuff.
	 */
	if (likely(user_mode(regs)))
		trace = tracehook_prepare_clone(clone_flags);

	p = copy_process(clone_flags, stack_start, regs, stack_size,
			 child_tidptr, NULL, trace);
	/*
	 * Do this prior to waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;

		trace_sched_process_fork(current, p);

		nr = task_pid_vnr(p);

		if (clone_flags & CLONE_PARENT_SETTID)
			put_user(nr, parent_tidptr);

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		audit_finish_fork(p);
		tracehook_report_clone(regs, clone_flags, nr, p);

		/*
		 * We set PF_STARTING at creation in case tracing wants to
		 * use this to distinguish a fully live task from one that
		 * hasn't gotten to tracehook_report_clone() yet.  Now we
		 * clear it and set the child going.
		 */
		p->flags &= ~PF_STARTING;
		if (unlikely(clone_flags & CLONE_STOPPED)) {
			/*
			 * We'll start up with an immediate SIGSTOP.
			 */
			sigaddset(&p->pending.signal, SIGSTOP);
			set_tsk_thread_flag(p, TIF_SIGPENDING);
			__set_task_state(p, TASK_STOPPED);
		} else {
			wake_up_new_task(p, clone_flags);
		}

		tracehook_report_clone_complete(trace, regs,
						clone_flags, nr, p);

		if (clone_flags & CLONE_VFORK) {
			freezer_do_not_count();
			wait_for_completion(&vfork);
			freezer_count();
			tracehook_report_vfork_done(p, nr);
		}
	} else {
		nr = PTR_ERR(p);
	}
	return nr;
}
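/*
 * For orientation (illustrative only, architecture code differs): on x86
 * the fork/vfork syscall stubs end up here roughly as
 *
 *	sys_fork:	do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
 *	sys_vfork:	do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
 *				regs->sp, regs, 0, NULL, NULL);
 *
 * and kernel_thread() passes CLONE_VM | CLONE_UNTRACED plus its flags.
 */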
#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

static void sighand_ctor(void *data)
{
	struct sighand_struct *sighand = data;

	spin_lock_init(&sighand->siglock);
	init_waitqueue_head(&sighand->signalfd_wqh);
}

void __init proc_caches_init(void)
{
	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
			SLAB_NOTRACK, sighand_ctor);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	mm_cachep = kmem_cache_create("mm_struct",
			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
	mmap_init();
}

/*
 * Check constraints on flags passed to the unshare system call and
 * force unsharing of additional process context as appropriate.
 */
static void check_unshare_flags(unsigned long *flags_ptr)
{
	/*
	 * If unsharing a thread from a thread group, must also
	 * unshare vm.
	 */
	if (*flags_ptr & CLONE_THREAD)
		*flags_ptr |= CLONE_VM;

	/*
	 * If unsharing vm, must also unshare signal handlers.
	 */
	if (*flags_ptr & CLONE_VM)
		*flags_ptr |= CLONE_SIGHAND;

	/*
	 * If unsharing signal handlers and the task was created
	 * using CLONE_THREAD, then must unshare the thread
	 */
	if ((*flags_ptr & CLONE_SIGHAND) &&
	    (atomic_read(&current->signal->count) > 1))
		*flags_ptr |= CLONE_THREAD;

	/*
	 * If unsharing namespace, must also unshare filesystem information.
	 */
	if (*flags_ptr & CLONE_NEWNS)
		*flags_ptr |= CLONE_FS;
}

/*
 * Unsharing of tasks created with CLONE_THREAD is not supported yet
 */
static int unshare_thread(unsigned long unshare_flags)
{
	if (unshare_flags & CLONE_THREAD)
		return -EINVAL;

	return 0;
}

/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *fs = current->fs;

	if (!(unshare_flags & CLONE_FS) || !fs)
		return 0;

	/* don't need lock here; in the worst case we'll do useless copy */
	if (fs->users == 1)
		return 0;

	*new_fsp = copy_fs_struct(fs);
	if (!*new_fsp)
		return -ENOMEM;

	return 0;
}

/*
 * Unsharing of sighand is not supported yet
 */
static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
{
	struct sighand_struct *sigh = current->sighand;

	if ((unshare_flags & CLONE_SIGHAND) && atomic_read(&sigh->count) > 1)
		return -EINVAL;
	else
		return 0;
}

/*
 * Unshare vm if it is being shared
 */
static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
{
	struct mm_struct *mm = current->mm;

	if ((unshare_flags & CLONE_VM) &&
	    (mm && atomic_read(&mm->mm_users) > 1)) {
		return -EINVAL;
	}

	return 0;
}

/*
 * Unshare file descriptor table if it is being shared
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
	struct files_struct *fd = current->files;
	int error = 0;

	if ((unshare_flags & CLONE_FILES) &&
	    (fd && atomic_read(&fd->count) > 1)) {
		*new_fdp = dup_fd(fd, &error);
		if (!*new_fdp)
			return error;
	}

	return 0;
}
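/*
 * Illustrative userspace usage (not part of this file): a call such as
 * unshare(CLONE_NEWNS) gives the caller a private mount namespace, and
 * because of check_unshare_flags() above it implicitly unshares the
 * fs_struct (CLONE_FS) as well.
 */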
/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
	int err = 0;
	struct fs_struct *fs, *new_fs = NULL;
	struct sighand_struct *new_sigh = NULL;
	struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct nsproxy *new_nsproxy = NULL;
	int do_sysvsem = 0;

	check_unshare_flags(&unshare_flags);

	/* Return -EINVAL for all unsupported flags */
	err = -EINVAL;
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET))
		goto bad_unshare_out;

	/*
	 * CLONE_NEWIPC must also detach from the undolist: after switching
	 * to a new ipc namespace, the semaphore arrays from the old
	 * namespace are unreachable.
	 */
	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
		do_sysvsem = 1;
	if ((err = unshare_thread(unshare_flags)))
		goto bad_unshare_out;
	if ((err = unshare_fs(unshare_flags, &new_fs)))
		goto bad_unshare_cleanup_thread;
	if ((err = unshare_sighand(unshare_flags, &new_sigh)))
		goto bad_unshare_cleanup_fs;
	if ((err = unshare_vm(unshare_flags, &new_mm)))
		goto bad_unshare_cleanup_sigh;
	if ((err = unshare_fd(unshare_flags, &new_fd)))
		goto bad_unshare_cleanup_vm;
	if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
			new_fs)))
		goto bad_unshare_cleanup_fd;

	if (new_fs || new_mm || new_fd || do_sysvsem || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit().
			 */
			exit_sem(current);
		}

		if (new_nsproxy) {
			switch_task_namespaces(current, new_nsproxy);
			new_nsproxy = NULL;
		}

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			write_lock(&fs->lock);
			current->fs = new_fs;
			if (--fs->users)
				new_fs = NULL;
			else
				new_fs = fs;
			write_unlock(&fs->lock);
		}

		if (new_mm) {
			mm = current->mm;
			active_mm = current->active_mm;
			current->mm = new_mm;
			current->active_mm = new_mm;
			activate_mm(active_mm, new_mm);
			new_mm = mm;
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);
	}

	if (new_nsproxy)
		put_nsproxy(new_nsproxy);

bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_vm:
	if (new_mm)
		mmput(new_mm);

bad_unshare_cleanup_sigh:
	if (new_sigh)
		if (atomic_dec_and_test(&new_sigh->count))
			kmem_cache_free(sighand_cachep, new_sigh);

bad_unshare_cleanup_fs:
	if (new_fs)
		free_fs_struct(new_fs);

bad_unshare_cleanup_thread:
bad_unshare_out:
	return err;
}

/*
 * Helper to unshare the files of the current task.
 * We don't want to expose copy_files internals to
 * the exec layer of the kernel.
 */

int unshare_files(struct files_struct **displaced)
{
	struct task_struct *task = current;
	struct files_struct *copy = NULL;
	int error;

	error = unshare_fd(CLONE_FILES, &copy);
	if (error || !copy) {
		*displaced = NULL;
		return error;
	}
	*displaced = task->files;
	task_lock(task);
	task->files = copy;
	task_unlock(task);
	return 0;
}