/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * 'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/mnt_namespace.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/proc_fs.h>
#include <linux/blkdev.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * Counters protected by write_lock_irq(&tasklist_lock):
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count.. */
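/*
 * (These counters are only written with tasklist_lock write-held;
 * the per-CPU process_counts below is summed locklessly by
 * nr_processes().)
 */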
int max_threads;		/* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_online_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}

#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct()	kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
# define free_task_struct(tsk)	kmem_cache_free(task_struct_cachep, (tsk))
static struct kmem_cache *task_struct_cachep;
#endif

/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;

void free_task(struct task_struct *tsk)
{
	prop_local_destroy_single(&tsk->dirties);
	free_thread_info(tsk->stack);
	rt_mutex_debug_task_free(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!tsk->exit_state);
	WARN_ON(atomic_read(&tsk->usage));
	WARN_ON(tsk == current);

	security_task_free(tsk);
	free_uid(tsk->user);
	put_group_info(tsk->group_info);
	delayacct_tsk_free(tsk);

	if (!profile_handoff_task(tsk))
		free_task(tsk);
}

void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	L1_CACHE_BYTES
#endif
	/* create a slab on which task_structs can be allocated */
	task_struct_cachep =
		kmem_cache_create("task_struct", sizeof(struct task_struct),
			ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL);
#endif

	/*
	 * The default maximum number of threads is set to a safe
	 * value: the kernel stacks for the threads can take up at
	 * most one eighth of memory.
	 */
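	/*
	 * (Worked example, illustrative numbers only: each thread costs
	 * a kernel stack of THREAD_SIZE bytes, i.e. THREAD_SIZE/PAGE_SIZE
	 * pages.  With 4 KiB pages, 8 KiB stacks and 128 MiB of memory
	 * (mempages = 32768), max_threads = 32768 / (8 * 2) = 2048, so
	 * the stacks are bounded to 2048 * 8 KiB = 16 MiB.)
	 */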
	max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);

	/*
	 * we need to allow at least 20 threads to boot a system
	 */
	if (max_threads < 20)
		max_threads = 20;

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];
}

static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;
	int err;

	prepare_to_copy(orig);

	tsk = alloc_task_struct();
	if (!tsk)
		return NULL;

	ti = alloc_thread_info(tsk);
	if (!ti) {
		free_task_struct(tsk);
		return NULL;
	}

	*tsk = *orig;
	tsk->stack = ti;

	err = prop_local_init_single(&tsk->dirties);
	if (err) {
		free_thread_info(ti);
		free_task_struct(tsk);
		return NULL;
	}

	setup_thread_stack(tsk, orig);

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_int();
#endif

	/* One for us, one for whoever does the "release_task()" (usually parent) */
	atomic_set(&tsk->usage, 2);
	atomic_set(&tsk->fs_excl, 0);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;
	return tsk;
}

#ifdef CONFIG_MMU
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	struct mempolicy *pol;

	down_write(&oldmm->mmap_sem);
	flush_cache_dup_mm(oldmm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

	mm->locked_vm = 0;
	mm->mmap = NULL;
	mm->mmap_cache = NULL;
	mm->free_area_cache = oldmm->mmap_base;
	mm->cached_hole_size = ~0UL;
	mm->map_count = 0;
	cpus_clear(mm->cpu_vm_mask);
	mm->mm_rb = RB_ROOT;
	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;

	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			long pages = vma_pages(mpnt);
			mm->total_vm -= pages;
			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
								-pages);
			continue;
		}
		charge = 0;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
			if (security_vm_enough_memory(len))
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		pol = mpol_copy(vma_policy(mpnt));
		retval = PTR_ERR(pol);
		if (IS_ERR(pol))
			goto fail_nomem_policy;
		vma_set_policy(tmp, pol);
		tmp->vm_flags &= ~VM_LOCKED;
		tmp->vm_mm = mm;
		tmp->vm_next = NULL;
		anon_vma_link(tmp);
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file->f_path.dentry->d_inode;
			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);

			/* insert tmp into the share list, just after mpnt */
			spin_lock(&file->f_mapping->i_mmap_lock);
			tmp->vm_truncate_count = mpnt->vm_truncate_count;
			flush_dcache_mmap_lock(file->f_mapping);
			vma_prio_tree_add(tmp, mpnt);
			flush_dcache_mmap_unlock(file->f_mapping);
			spin_unlock(&file->f_mapping->i_mmap_lock);
		}

		/*
		 * Link in the new vma and copy the page table entries.
		 */
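		/*
		 * (Because the parent's vma list is walked in address
		 * order, rb_link always points at the right-child slot
		 * of the vma linked previously, so the rbtree below is
		 * built by pure appends.)
		 */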
		*pprev = tmp;
		pprev = &tmp->vm_next;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		retval = copy_page_range(mm, oldmm, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
	/* a new mm has just been created */
	arch_dup_mmap(oldmm, mm);
	retval = 0;
out:
	up_write(&mm->mmap_sem);
	flush_tlb_mm(oldmm);
	up_write(&oldmm->mmap_sem);
	return retval;
fail_nomem_policy:
	kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto out;
}

static inline int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm->pgd);
}
#else
#define dup_mmap(mm, oldmm)	(0)
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

#include <linux/init_task.h>

static struct mm_struct *mm_init(struct mm_struct *mm)
{
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->flags = (current->mm) ? current->mm->flags
				  : MMF_DUMP_FILTER_DEFAULT;
	mm->core_waiters = 0;
	mm->nr_ptes = 0;
	set_mm_counter(mm, file_rss, 0);
	set_mm_counter(mm, anon_rss, 0);
	spin_lock_init(&mm->page_table_lock);
	rwlock_init(&mm->ioctx_list_lock);
	mm->ioctx_list = NULL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;

	if (likely(!mm_alloc_pgd(mm))) {
		mm->def_flags = 0;
		return mm;
	}
	free_mm(mm);
	return NULL;
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct *mm_alloc(void)
{
	struct mm_struct *mm;

	mm = allocate_mm();
	if (mm) {
		memset(mm, 0, sizeof(*mm));
		mm = mm_init(mm);
	}
	return mm;
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void fastcall __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	free_mm(mm);
}

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	might_sleep();

	if (atomic_dec_and_test(&mm->mm_users)) {
		exit_aio(mm);
		exit_mmap(mm);
		if (!list_empty(&mm->mmlist)) {
			spin_lock(&mmlist_lock);
			list_del(&mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		put_swap_token(mm);
		mmdrop(mm);
	}
}
EXPORT_SYMBOL_GPL(mmput);
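/*
 * Typical call pattern for get_task_mm() below (illustrative sketch,
 * not a call site in this file):
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *	if (mm) {
 *		down_read(&mm->mmap_sem);
 *		... walk mm->mmap ...
 *		up_read(&mm->mmap_sem);
 *		mmput(mm);
 *	}
 */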
/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm, or if PF_BORROWED_MM is set
 * (meaning this kernel work thread has only transiently adopted a user
 * mm with use_mm, e.g. to do its AIO).  Otherwise it bumps the use
 * count and returns a reference to the mm.  The user must release the
 * mm via mmput() after use.  Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (task->flags & PF_BORROWED_MM)
			mm = NULL;
		else
			atomic_inc(&mm->mm_users);
	}
	task_unlock(task);
	return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);

/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * error, success, whatever.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one.  Because we mmput the new mm_struct before
 * restoring the old one...
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	struct completion *vfork_done = tsk->vfork_done;

	/* Get rid of any cached register state */
	deactivate_mm(tsk, mm);

	/* notify parent sleeping on vfork() */
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}

	/*
	 * If we're exiting normally, clear a user-space tid field if
	 * requested.  We leave this alone when dying by signal, to leave
	 * the value intact in a core dump, and to save the unnecessary
	 * trouble otherwise.  Userland only wants this done for a sys_exit.
	 */
	if (tsk->clear_child_tid
	    && !(tsk->flags & PF_SIGNALED)
	    && atomic_read(&mm->mm_users) > 1) {
		u32 __user *tidptr = tsk->clear_child_tid;
		tsk->clear_child_tid = NULL;

		/*
		 * We don't check the error code - if userspace has
		 * not set up a proper pointer then tough luck.
		 */
		put_user(0, tidptr);
		sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
	}
}

/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
static struct mm_struct *dup_mm(struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm = current->mm;
	int err;

	if (!oldmm)
		return NULL;

	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));

	/* Initializing for Swap token stuff */
	mm->token_priority = 0;
	mm->last_interval = 0;

	if (!mm_init(mm))
		goto fail_nomem;

	if (init_new_context(tsk, mm))
		goto fail_nocontext;

	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

	return mm;

free_pt:
	mmput(mm);

fail_nomem:
	return NULL;

fail_nocontext:
	/*
	 * If init_new_context() failed, we cannot use mmput() to free the mm
	 * because it calls destroy_context()
	 */
	mm_free_pgd(mm);
	free_mm(mm);
	return NULL;
}

static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm;
	int retval;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->nvcsw = tsk->nivcsw = 0;

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal an active VM for that..
	 */
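	/*
	 * (current->mm == NULL means current is a kernel thread; the
	 * child then keeps tsk->mm == NULL and will borrow whatever
	 * active_mm the scheduler lends it at context-switch time.)
	 */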
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	if (clone_flags & CLONE_VM) {
		atomic_inc(&oldmm->mm_users);
		mm = oldmm;
		goto good_mm;
	}

	retval = -ENOMEM;
	mm = dup_mm(tsk);
	if (!mm)
		goto fail_nomem;

good_mm:
	/* Initializing for Swap token stuff */
	mm->token_priority = 0;
	mm->last_interval = 0;

	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;

fail_nomem:
	return retval;
}

static struct fs_struct *__copy_fs_struct(struct fs_struct *old)
{
	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
	/* We don't need to lock fs - think why ;-) */
	if (fs) {
		atomic_set(&fs->count, 1);
		rwlock_init(&fs->lock);
		fs->umask = old->umask;
		read_lock(&old->lock);
		fs->rootmnt = mntget(old->rootmnt);
		fs->root = dget(old->root);
		fs->pwdmnt = mntget(old->pwdmnt);
		fs->pwd = dget(old->pwd);
		if (old->altroot) {
			fs->altrootmnt = mntget(old->altrootmnt);
			fs->altroot = dget(old->altroot);
		} else {
			fs->altrootmnt = NULL;
			fs->altroot = NULL;
		}
		read_unlock(&old->lock);
	}
	return fs;
}

struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
	return __copy_fs_struct(old);
}

EXPORT_SYMBOL_GPL(copy_fs_struct);

static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
	if (clone_flags & CLONE_FS) {
		atomic_inc(&current->fs->count);
		return 0;
	}
	tsk->fs = __copy_fs_struct(current->fs);
	if (!tsk->fs)
		return -ENOMEM;
	return 0;
}

static int count_open_files(struct fdtable *fdt)
{
	int size = fdt->max_fds;
	int i;

	/* Find the last open fd */
	for (i = size / (8 * sizeof(long)); i > 0; ) {
		if (fdt->open_fds->fds_bits[--i])
			break;
	}
	i = (i + 1) * 8 * sizeof(long);
	return i;
}

static struct files_struct *alloc_files(void)
{
	struct files_struct *newf;
	struct fdtable *fdt;

	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->next_fd = 0;
	fdt = &newf->fdtab;
	fdt->max_fds = NR_OPEN_DEFAULT;
	fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
	fdt->open_fds = (fd_set *)&newf->open_fds_init;
	fdt->fd = &newf->fd_array[0];
	INIT_RCU_HEAD(&fdt->rcu);
	fdt->next = NULL;
	rcu_assign_pointer(newf->fdt, fdt);
out:
	return newf;
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	int open_files, size, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = alloc_files();
	if (!newf)
		goto out;

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	new_fdt = files_fdtable(newf);
	open_files = count_open_files(old_fdt);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 * Note: we're not a clone task, so the open count won't change.
	 */
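	/*
	 * (open_files is the size, in bits, of the slice of the open-fd
	 * bitmap that covers the highest open fd, rounded up to a whole
	 * long: e.g., with only fds 0-5 open on a 64-bit machine,
	 * count_open_files() returns 64.)
	 */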
	if (open_files > new_fdt->max_fds) {
		new_fdt->max_fds = 0;
		spin_unlock(&oldf->file_lock);
		spin_lock(&newf->file_lock);
		*errorp = expand_files(newf, open_files - 1);
		spin_unlock(&newf->file_lock);
		if (*errorp < 0)
			goto out_release;
		new_fdt = files_fdtable(newf);
		/*
		 * Reacquire the oldf lock and a pointer to its fd table;
		 * who knows, it may have a new, bigger fd table.  We need
		 * the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
	}

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	memcpy(new_fdt->open_fds->fds_bits,
		old_fdt->open_fds->fds_bits, open_files/8);
	memcpy(new_fdt->close_on_exec->fds_bits,
		old_fdt->close_on_exec->fds_bits, open_files/8);

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			FD_CLR(open_files - i, new_fdt->open_fds);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* compute the remainder to be cleared */
	size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

	/* This is long-word aligned, thus could use an optimized version */
	memset(new_fds, 0, size);

	if (new_fdt->max_fds > open_files) {
		int left = (new_fdt->max_fds - open_files) / 8;
		int start = open_files / (8 * sizeof(unsigned long));

		memset(&new_fdt->open_fds->fds_bits[start], 0, left);
		memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
	}

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}

static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
{
	struct files_struct *oldf, *newf;
	int error = 0;

	/*
	 * A background process may not have any files ...
	 */
	oldf = current->files;
	if (!oldf)
		goto out;

	if (clone_flags & CLONE_FILES) {
		atomic_inc(&oldf->count);
		goto out;
	}

	/*
	 * Note: we may be using current for both targets (See exec.c)
	 * This works because we cache current->files (old) as oldf. Don't
	 * break this.
	 */
	tsk->files = NULL;
	newf = dup_fd(oldf, &error);
	if (!newf)
		goto out;

	tsk->files = newf;
	error = 0;
out:
	return error;
}

static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
#ifdef CONFIG_BLOCK
	struct io_context *ioc = current->io_context;

	if (!ioc)
		return 0;
	/*
	 * Share io context with parent, if CLONE_IO is set
	 */
	if (clone_flags & CLONE_IO) {
		tsk->io_context = ioc_task_link(ioc);
		if (unlikely(!tsk->io_context))
			return -ENOMEM;
	} else if (ioprio_valid(ioc->ioprio)) {
		tsk->io_context = alloc_io_context(GFP_KERNEL, -1);
		if (unlikely(!tsk->io_context))
			return -ENOMEM;

		tsk->io_context->ioprio = ioc->ioprio;
	}
#endif
	return 0;
}

/*
 * Helper to unshare the files of the current task.
 * We don't want to expose copy_files internals to
 * the exec layer of the kernel.
 */
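/*
 * (Either way the caller ends up with a files_struct in current->files
 * it can treat as its own: on the fast path the table is already
 * private, so only an extra reference is taken; otherwise copy_files()
 * installs a fresh copy, and the old pointer is restored on failure.)
 */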
int unshare_files(void)
{
	struct files_struct *files = current->files;
	int rc;

	BUG_ON(!files);

	/* This can race but the race causes us to copy when we don't
	   need to and drop the copy */
	if (atomic_read(&files->count) == 1) {
		atomic_inc(&files->count);
		return 0;
	}
	rc = copy_files(0, current);
	if (rc)
		current->files = files;
	return rc;
}

EXPORT_SYMBOL(unshare_files);

static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sighand_struct *sig;

	if (clone_flags & (CLONE_SIGHAND | CLONE_THREAD)) {
		atomic_inc(&current->sighand->count);
		return 0;
	}
	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	rcu_assign_pointer(tsk->sighand, sig);
	if (!sig)
		return -ENOMEM;
	atomic_set(&sig->count, 1);
	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
	return 0;
}

void __cleanup_sighand(struct sighand_struct *sighand)
{
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}

static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
	struct signal_struct *sig;
	int ret;

	if (clone_flags & CLONE_THREAD) {
		atomic_inc(&current->signal->count);
		atomic_inc(&current->signal->live);
		return 0;
	}
	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	ret = copy_thread_group_keys(tsk);
	if (ret < 0) {
		kmem_cache_free(signal_cachep, sig);
		return ret;
	}

	atomic_set(&sig->count, 1);
	atomic_set(&sig->live, 1);
	init_waitqueue_head(&sig->wait_chldexit);
	sig->flags = 0;
	sig->group_exit_code = 0;
	sig->group_exit_task = NULL;
	sig->group_stop_count = 0;
	sig->curr_target = NULL;
	init_sigpending(&sig->shared_pending);
	INIT_LIST_HEAD(&sig->posix_timers);

	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sig->it_real_incr.tv64 = 0;
	sig->real_timer.function = it_real_fn;
	sig->tsk = tsk;

	sig->it_virt_expires = cputime_zero;
	sig->it_virt_incr = cputime_zero;
	sig->it_prof_expires = cputime_zero;
	sig->it_prof_incr = cputime_zero;

	sig->leader = 0;	/* session leadership doesn't inherit */
	sig->tty_old_pgrp = NULL;

	sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
	sig->gtime = cputime_zero;
	sig->cgtime = cputime_zero;
	sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
	sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
	sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
	sig->sum_sched_runtime = 0;
	INIT_LIST_HEAD(&sig->cpu_timers[0]);
	INIT_LIST_HEAD(&sig->cpu_timers[1]);
	INIT_LIST_HEAD(&sig->cpu_timers[2]);
	taskstats_tgid_init(sig);

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
		/*
		 * New sole thread in the process gets an expiry time
		 * of the whole CPU time limit.
		 */
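		/*
		 * (Illustrative: with rlim_cur == 60, the timer below
		 * is primed to 60 seconds of process CPU time;
		 * secs_to_cputime() only converts the units.)
		 */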
		tsk->it_prof_expires =
			secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
	}
	acct_init_pacct(&sig->pacct);

	tty_audit_fork(sig);

	return 0;
}

void __cleanup_signal(struct signal_struct *sig)
{
	exit_thread_group_keys(sig);
	kmem_cache_free(signal_cachep, sig);
}

static void cleanup_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;

	atomic_dec(&sig->live);

	if (atomic_dec_and_test(&sig->count))
		__cleanup_signal(sig);
}

static void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
	unsigned long new_flags = p->flags;

	new_flags &= ~PF_SUPERPRIV;
	new_flags |= PF_FORKNOEXEC;
	if (!(clone_flags & CLONE_PTRACE))
		p->ptrace = 0;
	p->flags = new_flags;
	clear_freeze_flag(p);
}

asmlinkage long sys_set_tid_address(int __user *tidptr)
{
	current->clear_child_tid = tidptr;

	return task_pid_vnr(current);
}

static void rt_mutex_init_task(struct task_struct *p)
{
	spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
	plist_head_init(&p->pi_waiters, &p->pi_lock);
	p->pi_blocked_on = NULL;
#endif
}

/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					struct pt_regs *regs,
					unsigned long stack_size,
					int __user *child_tidptr,
					struct pid *pid)
{
	int retval;
	struct task_struct *p;
	int cgroup_callbacks_done = 0;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	rt_mutex_init_task(p);

#ifdef CONFIG_TRACE_IRQFLAGS
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->user->processes) >=
			p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
		    p->user != current->nsproxy->user_ns->root_user)
			goto bad_fork_free;
	}

	atomic_inc(&p->user->__count);
	atomic_inc(&p->user->processes);
	get_group_info(p->group_info);

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
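	/*
	 * (max_threads is sized in fork_init() from available memory and
	 * can be raised at runtime through the kernel.threads-max sysctl.)
	 */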
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	if (p->binfmt && !try_module_get(p->binfmt->module))
		goto bad_fork_cleanup_put_domain;

	p->did_exec = 0;
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	copy_flags(clone_flags, p);
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
#ifdef CONFIG_PREEMPT_RCU
	p->rcu_read_lock_nesting = 0;
	p->rcu_flipctr_idx = 0;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	clear_tsk_thread_flag(p, TIF_SIGPENDING);
	init_sigpending(&p->pending);

	p->utime = cputime_zero;
	p->stime = cputime_zero;
	p->gtime = cputime_zero;
	p->utimescaled = cputime_zero;
	p->stimescaled = cputime_zero;
	p->prev_utime = cputime_zero;
	p->prev_stime = cputime_zero;

#ifdef CONFIG_DETECT_SOFTLOCKUP
	p->last_switch_count = 0;
	p->last_switch_timestamp = 0;
#endif

#ifdef CONFIG_TASK_XACCT
	p->rchar = 0;		/* I/O counter: bytes read */
	p->wchar = 0;		/* I/O counter: bytes written */
	p->syscr = 0;		/* I/O counter: read syscalls */
	p->syscw = 0;		/* I/O counter: write syscalls */
#endif
	task_io_accounting_init(p);
	acct_clear_integrals(p);

	p->it_virt_expires = cputime_zero;
	p->it_prof_expires = cputime_zero;
	p->it_sched_expires = 0;
	INIT_LIST_HEAD(&p->cpu_timers[0]);
	INIT_LIST_HEAD(&p->cpu_timers[1]);
	INIT_LIST_HEAD(&p->cpu_timers[2]);

	p->lock_depth = -1;		/* -1 = no lock */
	do_posix_clock_monotonic_gettime(&p->start_time);
	p->real_start_time = p->start_time;
	monotonic_to_bootbased(&p->real_start_time);
#ifdef CONFIG_SECURITY
	p->security = NULL;
#endif
	p->io_context = NULL;
	p->audit_context = NULL;
	cgroup_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_copy(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_cgroup;
	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	p->hardirqs_enabled = 1;
#else
	p->hardirqs_enabled = 0;
#endif
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0;	/* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL;	/* not blocked yet */
#endif

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p, clone_flags);

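	/*
	 * (From here on, each successful copy_*() must be undone on a
	 * later failure; the bad_fork_cleanup_* labels at the bottom of
	 * this function unwind in exactly the reverse order.)
	 */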
	if ((retval = security_task_alloc(p)))
		goto bad_fork_cleanup_policy;
	if ((retval = audit_alloc(p)))
		goto bad_fork_cleanup_security;
	/* copy all the process information */
	if ((retval = copy_semundo(clone_flags, p)))
		goto bad_fork_cleanup_audit;
	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_sighand(clone_flags, p)))
		goto bad_fork_cleanup_fs;
	if ((retval = copy_signal(clone_flags, p)))
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	if ((retval = copy_keys(clone_flags, p)))
		goto bad_fork_cleanup_mm;
	if ((retval = copy_namespaces(clone_flags, p)))
		goto bad_fork_cleanup_keys;
	if ((retval = copy_io(clone_flags, p)))
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_io;

	if (pid != &init_struct_pid) {
		retval = -ENOMEM;
		pid = alloc_pid(task_active_pid_ns(p));
		if (!pid)
			goto bad_fork_cleanup_io;

		if (clone_flags & CLONE_NEWPID) {
			retval = pid_ns_prepare_proc(task_active_pid_ns(p));
			if (retval < 0)
				goto bad_fork_free_pid;
		}
	}

	p->pid = pid_nr(pid);
	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
#ifdef CONFIG_FUTEX
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;
#endif
	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing should be turned off in the child regardless
	 * of CLONE_PTRACE.
	 */
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_all_latency_tracing(p);

	/* Our parent execution domain becomes current domain.
	   These must match for thread signalling to apply. */
	p->parent_exec_id = p->self_exec_id;

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;
	p->exit_state = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We don't wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);
	INIT_LIST_HEAD(&p->ptrace_children);
	INIT_LIST_HEAD(&p->ptrace_list);

	/* Now that the task is set up, run cgroup callbacks if
	 * necessary. We need to run them before the task is visible
	 * on the tasklist. */
	cgroup_fork_callbacks(p);
	cgroup_callbacks_done = 1;

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

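	/*
	 * (tasklist_lock write-protects the parent/child links, the
	 * global task list and the fork counters updated below.)
	 */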
	/*
	 * The task hasn't been attached yet, so its cpus_allowed mask will
	 * not be changed, nor will its assigned CPU.
	 *
	 * The cpus_allowed mask of the parent may have changed after it was
	 * copied first time - so re-copy it here, then check the child's CPU
	 * to ensure it is on a valid CPU (and if not, just force it back to
	 * parent's CPU). This avoids a lot of nasty races.
	 */
	p->cpus_allowed = current->cpus_allowed;
	p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
			!cpu_online(task_cpu(p))))
		set_task_cpu(p, smp_processor_id());

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
		p->real_parent = current->real_parent;
	else
		p->real_parent = current;
	p->parent = p->real_parent;

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_free_pid;
	}

	if (clone_flags & CLONE_THREAD) {
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);

		if (!cputime_eq(current->signal->it_virt_expires,
				cputime_zero) ||
		    !cputime_eq(current->signal->it_prof_expires,
				cputime_zero) ||
		    current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY ||
		    !list_empty(&current->signal->cpu_timers[0]) ||
		    !list_empty(&current->signal->cpu_timers[1]) ||
		    !list_empty(&current->signal->cpu_timers[2])) {
			/*
			 * Have child wake up on its first tick to check
			 * for process CPU timers.
			 */
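			/*
			 * (One tick is 1/HZ s, e.g. 1 ms at HZ=1000;
			 * illustrative value.)
			 */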
			p->it_prof_expires = jiffies_to_cputime(1);
		}
	}

	if (likely(p->pid)) {
		add_parent(p);
		if (unlikely(p->ptrace & PT_PTRACED))
			__ptrace_link(p, current->parent);

		if (thread_group_leader(p)) {
			if (clone_flags & CLONE_NEWPID)
				p->nsproxy->pid_ns->child_reaper = p;

			p->signal->tty = current->signal->tty;
			set_task_pgrp(p, task_pgrp_nr(current));
			set_task_session(p, task_session_nr(current));
			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
			attach_pid(p, PIDTYPE_SID, task_session(current));
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__get_cpu_var(process_counts)++;
		}
		attach_pid(p, PIDTYPE_PID, pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	proc_fork_connector(p);
	cgroup_post_fork(p);
	return p;

bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_io:
	put_io_context(p->io_context);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_keys:
	exit_keys(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	cleanup_signal(p);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_security:
	security_task_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
	mpol_free(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
	cgroup_exit(p, cgroup_callbacks_done);
	delayacct_tsk_free(p);
	if (p->binfmt)
		module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	put_group_info(p->group_info);
	atomic_dec(&p->user->processes);
	free_uid(p->user);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}

noinline struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(struct pt_regs));
	return regs;
}

struct task_struct * __cpuinit fork_idle(int cpu)
{
	struct task_struct *task;
	struct pt_regs regs;

	task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
				&init_struct_pid);
	if (!IS_ERR(task))
		init_idle(task, cpu);

	return task;
}

static int fork_traceflag(unsigned clone_flags)
{
	if (clone_flags & CLONE_UNTRACED)
		return 0;
	else if (clone_flags & CLONE_VFORK) {
		if (current->ptrace & PT_TRACE_VFORK)
			return PTRACE_EVENT_VFORK;
	} else if ((clone_flags & CSIGNAL) != SIGCHLD) {
		if (current->ptrace & PT_TRACE_CLONE)
			return PTRACE_EVENT_CLONE;
	} else if (current->ptrace & PT_TRACE_FORK)
		return PTRACE_EVENT_FORK;

	return 0;
}

/*
 * Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
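/*
 * (Illustrative, from architecture code rather than this file: i386's
 * sys_fork is essentially
 *
 *	do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
 *
 * while a CLONE_VM|CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD
 * flag set typically arrives here from pthread_create() via sys_clone.)
 */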
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      struct pt_regs *regs,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	long nr;

	if (unlikely(current->ptrace)) {
		trace = fork_traceflag(clone_flags);
		if (trace)
			clone_flags |= CLONE_PTRACE;
	}

	p = copy_process(clone_flags, stack_start, regs, stack_size,
			child_tidptr, NULL);
	/*
	 * Do this prior to waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;

		/*
		 * task_pid_nr_ns() would be correct in both branches, but
		 * this 'if' lets a regular fork() take the cheaper
		 * task_pid_vnr() path.
		 */
		nr = (clone_flags & CLONE_NEWPID) ?
			task_pid_nr_ns(p, current->nsproxy->pid_ns) :
			task_pid_vnr(p);

		if (clone_flags & CLONE_PARENT_SETTID)
			put_user(nr, parent_tidptr);

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
			/*
			 * We'll start up with an immediate SIGSTOP.
			 */
			sigaddset(&p->pending.signal, SIGSTOP);
			set_tsk_thread_flag(p, TIF_SIGPENDING);
		}

		if (!(clone_flags & CLONE_STOPPED))
			wake_up_new_task(p, clone_flags);
		else
			p->state = TASK_STOPPED;

		if (unlikely(trace)) {
			current->ptrace_message = nr;
			ptrace_notify((trace << 8) | SIGTRAP);
		}

		if (clone_flags & CLONE_VFORK) {
			freezer_do_not_count();
			wait_for_completion(&vfork);
			freezer_count();
			if (unlikely(current->ptrace & PT_TRACE_VFORK_DONE)) {
				current->ptrace_message = nr;
				ptrace_notify((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
			}
		}
	} else {
		nr = PTR_ERR(p);
	}
	return nr;
}

#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

static void sighand_ctor(struct kmem_cache *cachep, void *data)
{
	struct sighand_struct *sighand = data;

	spin_lock_init(&sighand->siglock);
	init_waitqueue_head(&sighand->signalfd_wqh);
}

void __init proc_caches_init(void)
{
	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
			sighand_ctor);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	vm_area_cachep = kmem_cache_create("vm_area_struct",
			sizeof(struct vm_area_struct), 0,
			SLAB_PANIC, NULL);
	mm_cachep = kmem_cache_create("mm_struct",
			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
}

/*
 * Check constraints on flags passed to the unshare system call and
 * force unsharing of additional process context as appropriate.
 */
static void check_unshare_flags(unsigned long *flags_ptr)
{
	/*
	 * If unsharing a thread from a thread group, must also
	 * unshare vm.
	 */
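	/*
	 * (The implications below cascade in one pass: CLONE_THREAD
	 * pulls in CLONE_VM, CLONE_VM pulls in CLONE_SIGHAND, and
	 * CLONE_NEWNS pulls in CLONE_FS.)
	 */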
	if (*flags_ptr & CLONE_THREAD)
		*flags_ptr |= CLONE_VM;

	/*
	 * If unsharing vm, must also unshare signal handlers.
	 */
	if (*flags_ptr & CLONE_VM)
		*flags_ptr |= CLONE_SIGHAND;

	/*
	 * If unsharing signal handlers and the task was created
	 * using CLONE_THREAD, then it must also unshare the thread.
	 */
	if ((*flags_ptr & CLONE_SIGHAND) &&
	    (atomic_read(&current->signal->count) > 1))
		*flags_ptr |= CLONE_THREAD;

	/*
	 * If unsharing namespace, must also unshare filesystem information.
	 */
	if (*flags_ptr & CLONE_NEWNS)
		*flags_ptr |= CLONE_FS;
}

/*
 * Unsharing of tasks created with CLONE_THREAD is not supported yet
 */
static int unshare_thread(unsigned long unshare_flags)
{
	if (unshare_flags & CLONE_THREAD)
		return -EINVAL;

	return 0;
}

/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *fs = current->fs;

	if ((unshare_flags & CLONE_FS) &&
	    (fs && atomic_read(&fs->count) > 1)) {
		*new_fsp = __copy_fs_struct(current->fs);
		if (!*new_fsp)
			return -ENOMEM;
	}

	return 0;
}

/*
 * Unsharing of sighand is not supported yet
 */
static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
{
	struct sighand_struct *sigh = current->sighand;

	if ((unshare_flags & CLONE_SIGHAND) && atomic_read(&sigh->count) > 1)
		return -EINVAL;
	else
		return 0;
}

/*
 * Unsharing a vm that is actually being shared is not supported yet
 */
static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
{
	struct mm_struct *mm = current->mm;

	if ((unshare_flags & CLONE_VM) &&
	    (mm && atomic_read(&mm->mm_users) > 1)) {
		return -EINVAL;
	}

	return 0;
}

/*
 * Unshare file descriptor table if it is being shared
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
	struct files_struct *fd = current->files;
	int error = 0;

	if ((unshare_flags & CLONE_FILES) &&
	    (fd && atomic_read(&fd->count) > 1)) {
		*new_fdp = dup_fd(fd, &error);
		if (!*new_fdp)
			return error;
	}

	return 0;
}

/*
 * Unsharing of semundo for tasks created with CLONE_SYSVSEM is not
 * supported yet
 */
static int unshare_semundo(unsigned long unshare_flags, struct sem_undo_list **new_ulistp)
{
	if (unshare_flags & CLONE_SYSVSEM)
		return -EINVAL;

	return 0;
}

/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
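/*
 * (Illustrative userspace example: unshare(CLONE_NEWNS) gives the
 * caller a private mount namespace; check_unshare_flags() above
 * silently adds CLONE_FS so the fs_struct is unshared along with it.)
 */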
asmlinkage long sys_unshare(unsigned long unshare_flags)
{
	int err = 0;
	struct fs_struct *fs, *new_fs = NULL;
	struct sighand_struct *new_sigh = NULL;
	struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct sem_undo_list *new_ulist = NULL;
	struct nsproxy *new_nsproxy = NULL;

	check_unshare_flags(&unshare_flags);

	/* Return -EINVAL for all unsupported flags */
	err = -EINVAL;
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWUSER|
				CLONE_NEWNET))
		goto bad_unshare_out;

	if ((err = unshare_thread(unshare_flags)))
		goto bad_unshare_out;
	if ((err = unshare_fs(unshare_flags, &new_fs)))
		goto bad_unshare_cleanup_thread;
	if ((err = unshare_sighand(unshare_flags, &new_sigh)))
		goto bad_unshare_cleanup_fs;
	if ((err = unshare_vm(unshare_flags, &new_mm)))
		goto bad_unshare_cleanup_sigh;
	if ((err = unshare_fd(unshare_flags, &new_fd)))
		goto bad_unshare_cleanup_vm;
	if ((err = unshare_semundo(unshare_flags, &new_ulist)))
		goto bad_unshare_cleanup_fd;
	if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
			new_fs)))
		goto bad_unshare_cleanup_semundo;

	if (new_fs || new_mm || new_fd || new_ulist || new_nsproxy) {

		if (new_nsproxy) {
			switch_task_namespaces(current, new_nsproxy);
			new_nsproxy = NULL;
		}

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			current->fs = new_fs;
			new_fs = fs;
		}

		if (new_mm) {
			mm = current->mm;
			active_mm = current->active_mm;
			current->mm = new_mm;
			current->active_mm = new_mm;
			activate_mm(active_mm, new_mm);
			new_mm = mm;
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);
	}

	if (new_nsproxy)
		put_nsproxy(new_nsproxy);

bad_unshare_cleanup_semundo:
bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_vm:
	if (new_mm)
		mmput(new_mm);

bad_unshare_cleanup_sigh:
	if (new_sigh)
		if (atomic_dec_and_test(&new_sigh->count))
			kmem_cache_free(sighand_cachep, new_sigh);

bad_unshare_cleanup_fs:
	if (new_fs)
		put_fs_struct(new_fs);

bad_unshare_cleanup_thread:
bad_unshare_out:
	return err;
}