1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/kernel/fork.c
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 */
7
8 /*
9 * 'fork.c' contains the help-routines for the 'fork' system call
10 * (see also entry.S and others).
11 * Fork is rather simple, once you get the hang of it, but the memory
12 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
13 */
14
15 #include <linux/anon_inodes.h>
16 #include <linux/slab.h>
17 #include <linux/sched/autogroup.h>
18 #include <linux/sched/mm.h>
19 #include <linux/sched/user.h>
20 #include <linux/sched/numa_balancing.h>
21 #include <linux/sched/stat.h>
22 #include <linux/sched/task.h>
23 #include <linux/sched/task_stack.h>
24 #include <linux/sched/cputime.h>
25 #include <linux/sched/ext.h>
26 #include <linux/seq_file.h>
27 #include <linux/rtmutex.h>
28 #include <linux/init.h>
29 #include <linux/unistd.h>
30 #include <linux/module.h>
31 #include <linux/vmalloc.h>
32 #include <linux/completion.h>
33 #include <linux/personality.h>
34 #include <linux/mempolicy.h>
35 #include <linux/sem.h>
36 #include <linux/file.h>
37 #include <linux/fdtable.h>
38 #include <linux/iocontext.h>
39 #include <linux/key.h>
40 #include <linux/kmsan.h>
41 #include <linux/binfmts.h>
42 #include <linux/mman.h>
43 #include <linux/mmu_notifier.h>
44 #include <linux/fs.h>
45 #include <linux/mm.h>
46 #include <linux/mm_inline.h>
47 #include <linux/memblock.h>
48 #include <linux/nsproxy.h>
49 #include <linux/capability.h>
50 #include <linux/cpu.h>
51 #include <linux/cgroup.h>
52 #include <linux/security.h>
53 #include <linux/hugetlb.h>
54 #include <linux/seccomp.h>
55 #include <linux/swap.h>
56 #include <linux/syscalls.h>
57 #include <linux/syscall_user_dispatch.h>
58 #include <linux/jiffies.h>
59 #include <linux/futex.h>
60 #include <linux/compat.h>
61 #include <linux/kthread.h>
62 #include <linux/task_io_accounting_ops.h>
63 #include <linux/rcupdate.h>
64 #include <linux/ptrace.h>
65 #include <linux/mount.h>
66 #include <linux/audit.h>
67 #include <linux/memcontrol.h>
68 #include <linux/ftrace.h>
69 #include <linux/proc_fs.h>
70 #include <linux/profile.h>
71 #include <linux/rmap.h>
72 #include <linux/ksm.h>
73 #include <linux/acct.h>
74 #include <linux/userfaultfd_k.h>
75 #include <linux/tsacct_kern.h>
76 #include <linux/cn_proc.h>
77 #include <linux/freezer.h>
78 #include <linux/delayacct.h>
79 #include <linux/taskstats_kern.h>
80 #include <linux/tty.h>
81 #include <linux/fs_struct.h>
82 #include <linux/magic.h>
83 #include <linux/perf_event.h>
84 #include <linux/posix-timers.h>
85 #include <linux/user-return-notifier.h>
86 #include <linux/oom.h>
87 #include <linux/khugepaged.h>
88 #include <linux/signalfd.h>
89 #include <linux/uprobes.h>
90 #include <linux/aio.h>
91 #include <linux/compiler.h>
92 #include <linux/sysctl.h>
93 #include <linux/kcov.h>
94 #include <linux/livepatch.h>
95 #include <linux/thread_info.h>
96 #include <linux/stackleak.h>
97 #include <linux/kasan.h>
98 #include <linux/scs.h>
99 #include <linux/io_uring.h>
100 #include <linux/bpf.h>
101 #include <linux/stackprotector.h>
102 #include <linux/user_events.h>
103 #include <linux/iommu.h>
104 #include <linux/rseq.h>
105 #include <uapi/linux/pidfd.h>
106 #include <linux/pidfs.h>
107 #include <linux/tick.h>
108
109 #include <asm/pgalloc.h>
110 #include <linux/uaccess.h>
111 #include <asm/mmu_context.h>
112 #include <asm/cacheflush.h>
113 #include <asm/tlbflush.h>
114
115 #include <trace/events/sched.h>
116
117 #define CREATE_TRACE_POINTS
118 #include <trace/events/task.h>
119
120 #include <kunit/visibility.h>
121
122 /*
123 * Minimum number of threads to boot the kernel
124 */
125 #define MIN_THREADS 20
126
127 /*
128 * Maximum number of threads
129 */
130 #define MAX_THREADS FUTEX_TID_MASK
131
132 /*
133 * Protected counters by write_lock_irq(&tasklist_lock)
134 */
135 unsigned long total_forks; /* Handle normal Linux uptimes. */
136 int nr_threads; /* The idle threads do not count.. */
137
138 static int max_threads; /* tunable limit on nr_threads */
139
140 #define NAMED_ARRAY_INDEX(x) [x] = __stringify(x)
141
142 static const char * const resident_page_types[] = {
143 NAMED_ARRAY_INDEX(MM_FILEPAGES),
144 NAMED_ARRAY_INDEX(MM_ANONPAGES),
145 NAMED_ARRAY_INDEX(MM_SWAPENTS),
146 NAMED_ARRAY_INDEX(MM_SHMEMPAGES),
147 };
148
149 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
150
151 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
152
153 #ifdef CONFIG_PROVE_RCU
154 int lockdep_tasklist_lock_is_held(void)
155 {
156 return lockdep_is_held(&tasklist_lock);
157 }
158 EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
159 #endif /* #ifdef CONFIG_PROVE_RCU */
160
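/*
 * Sum of the per-CPU fork counters. The total is only approximate since
 * no lock is taken; it is used for reporting, not for enforcing limits.
 */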
161 int nr_processes(void)
162 {
163 int cpu;
164 int total = 0;
165
166 for_each_possible_cpu(cpu)
167 total += per_cpu(process_counts, cpu);
168
169 return total;
170 }
171
172 void __weak arch_release_task_struct(struct task_struct *tsk)
173 {
174 }
175
176 static struct kmem_cache *task_struct_cachep;
177
178 static inline struct task_struct *alloc_task_struct_node(int node)
179 {
180 return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
181 }
182
183 static inline void free_task_struct(struct task_struct *tsk)
184 {
185 kmem_cache_free(task_struct_cachep, tsk);
186 }
187
188 /*
189 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
190 * kmemcache based allocator.
191 */
192 # if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)
193
194 # ifdef CONFIG_VMAP_STACK
195 /*
196 * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB
197 * flush. Try to minimize the number of calls by caching stacks.
198 */
199 #define NR_CACHED_STACKS 2
200 static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);
201
202 struct vm_stack {
203 struct rcu_head rcu;
204 struct vm_struct *stack_vm_area;
205 };
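/*
 * For delayed freeing the rcu_head is overlaid on the no-longer-used stack
 * memory itself (see thread_stack_delayed_free()), so the free path needs
 * no extra allocation.
 */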
206
207 static bool try_release_thread_stack_to_cache(struct vm_struct *vm)
208 {
209 unsigned int i;
210
211 for (i = 0; i < NR_CACHED_STACKS; i++) {
212 struct vm_struct *tmp = NULL;
213
214 if (this_cpu_try_cmpxchg(cached_stacks[i], &tmp, vm))
215 return true;
216 }
217 return false;
218 }
219
220 static void thread_stack_free_rcu(struct rcu_head *rh)
221 {
222 struct vm_stack *vm_stack = container_of(rh, struct vm_stack, rcu);
223
224 if (try_release_thread_stack_to_cache(vm_stack->stack_vm_area))
225 return;
226
227 vfree(vm_stack);
228 }
229
230 static void thread_stack_delayed_free(struct task_struct *tsk)
231 {
232 struct vm_stack *vm_stack = tsk->stack;
233
234 vm_stack->stack_vm_area = tsk->stack_vm_area;
235 call_rcu(&vm_stack->rcu, thread_stack_free_rcu);
236 }
237
238 static int free_vm_stack_cache(unsigned int cpu)
239 {
240 struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu);
241 int i;
242
243 for (i = 0; i < NR_CACHED_STACKS; i++) {
244 struct vm_struct *vm_stack = cached_vm_stacks[i];
245
246 if (!vm_stack)
247 continue;
248
249 vfree(vm_stack->addr);
250 cached_vm_stacks[i] = NULL;
251 }
252
253 return 0;
254 }
255
256 static int memcg_charge_kernel_stack(struct vm_struct *vm)
257 {
258 int i;
259 int ret;
260 int nr_charged = 0;
261
262 BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);
263
264 for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
265 ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL, 0);
266 if (ret)
267 goto err;
268 nr_charged++;
269 }
270 return 0;
271 err:
272 for (i = 0; i < nr_charged; i++)
273 memcg_kmem_uncharge_page(vm->pages[i], 0);
274 return ret;
275 }
276
277 static int alloc_thread_stack_node(struct task_struct *tsk, int node)
278 {
279 struct vm_struct *vm;
280 void *stack;
281 int i;
282
283 for (i = 0; i < NR_CACHED_STACKS; i++) {
284 struct vm_struct *s;
285
286 s = this_cpu_xchg(cached_stacks[i], NULL);
287
288 if (!s)
289 continue;
290
291 /* Reset stack metadata. */
292 kasan_unpoison_range(s->addr, THREAD_SIZE);
293
294 stack = kasan_reset_tag(s->addr);
295
296 /* Clear stale pointers from reused stack. */
297 memset(stack, 0, THREAD_SIZE);
298
299 if (memcg_charge_kernel_stack(s)) {
300 vfree(s->addr);
301 return -ENOMEM;
302 }
303
304 tsk->stack_vm_area = s;
305 tsk->stack = stack;
306 return 0;
307 }
308
309 /*
310 * Allocated stacks are cached and later reused by new threads,
311 * so memcg accounting is performed manually on assigning/releasing
312 * stacks to tasks. Drop __GFP_ACCOUNT.
313 */
314 stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
315 VMALLOC_START, VMALLOC_END,
316 THREADINFO_GFP & ~__GFP_ACCOUNT,
317 PAGE_KERNEL,
318 0, node, __builtin_return_address(0));
319 if (!stack)
320 return -ENOMEM;
321
322 vm = find_vm_area(stack);
323 if (memcg_charge_kernel_stack(vm)) {
324 vfree(stack);
325 return -ENOMEM;
326 }
327 /*
328 * We can't call find_vm_area() in interrupt context, and
329 * free_thread_stack() can be called in interrupt context,
330 * so cache the vm_struct.
331 */
332 tsk->stack_vm_area = vm;
333 stack = kasan_reset_tag(stack);
334 tsk->stack = stack;
335 return 0;
336 }
337
338 static void free_thread_stack(struct task_struct *tsk)
339 {
340 if (!try_release_thread_stack_to_cache(tsk->stack_vm_area))
341 thread_stack_delayed_free(tsk);
342
343 tsk->stack = NULL;
344 tsk->stack_vm_area = NULL;
345 }
346
347 # else /* !CONFIG_VMAP_STACK */
348
349 static void thread_stack_free_rcu(struct rcu_head *rh)
350 {
351 __free_pages(virt_to_page(rh), THREAD_SIZE_ORDER);
352 }
353
354 static void thread_stack_delayed_free(struct task_struct *tsk)
355 {
356 struct rcu_head *rh = tsk->stack;
357
358 call_rcu(rh, thread_stack_free_rcu);
359 }
360
361 static int alloc_thread_stack_node(struct task_struct *tsk, int node)
362 {
363 struct page *page = alloc_pages_node(node, THREADINFO_GFP,
364 THREAD_SIZE_ORDER);
365
366 if (likely(page)) {
367 tsk->stack = kasan_reset_tag(page_address(page));
368 return 0;
369 }
370 return -ENOMEM;
371 }
372
373 static void free_thread_stack(struct task_struct *tsk)
374 {
375 thread_stack_delayed_free(tsk);
376 tsk->stack = NULL;
377 }
378
379 # endif /* CONFIG_VMAP_STACK */
380 # else /* !(THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)) */
381
382 static struct kmem_cache *thread_stack_cache;
383
384 static void thread_stack_free_rcu(struct rcu_head *rh)
385 {
386 kmem_cache_free(thread_stack_cache, rh);
387 }
388
389 static void thread_stack_delayed_free(struct task_struct *tsk)
390 {
391 struct rcu_head *rh = tsk->stack;
392
393 call_rcu(rh, thread_stack_free_rcu);
394 }
395
396 static int alloc_thread_stack_node(struct task_struct *tsk, int node)
397 {
398 unsigned long *stack;
399 stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
400 stack = kasan_reset_tag(stack);
401 tsk->stack = stack;
402 return stack ? 0 : -ENOMEM;
403 }
404
405 static void free_thread_stack(struct task_struct *tsk)
406 {
407 thread_stack_delayed_free(tsk);
408 tsk->stack = NULL;
409 }
410
411 void thread_stack_cache_init(void)
412 {
413 thread_stack_cache = kmem_cache_create_usercopy("thread_stack",
414 THREAD_SIZE, THREAD_SIZE, 0, 0,
415 THREAD_SIZE, NULL);
416 BUG_ON(thread_stack_cache == NULL);
417 }
418
419 # endif /* THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) */
420
421 /* SLAB cache for signal_struct structures (tsk->signal) */
422 static struct kmem_cache *signal_cachep;
423
424 /* SLAB cache for sighand_struct structures (tsk->sighand) */
425 struct kmem_cache *sighand_cachep;
426
427 /* SLAB cache for files_struct structures (tsk->files) */
428 struct kmem_cache *files_cachep;
429
430 /* SLAB cache for fs_struct structures (tsk->fs) */
431 struct kmem_cache *fs_cachep;
432
433 /* SLAB cache for vm_area_struct structures */
434 static struct kmem_cache *vm_area_cachep;
435
436 /* SLAB cache for mm_struct structures (tsk->mm) */
437 static struct kmem_cache *mm_cachep;
438
439 #ifdef CONFIG_PER_VMA_LOCK
440
441 /* SLAB cache for vm_area_struct.lock */
442 static struct kmem_cache *vma_lock_cachep;
443
444 static bool vma_lock_alloc(struct vm_area_struct *vma)
445 {
446 vma->vm_lock = kmem_cache_alloc(vma_lock_cachep, GFP_KERNEL);
447 if (!vma->vm_lock)
448 return false;
449
450 init_rwsem(&vma->vm_lock->lock);
451 vma->vm_lock_seq = UINT_MAX;
452
453 return true;
454 }
455
456 static inline void vma_lock_free(struct vm_area_struct *vma)
457 {
458 kmem_cache_free(vma_lock_cachep, vma->vm_lock);
459 }
460
461 #else /* CONFIG_PER_VMA_LOCK */
462
463 static inline bool vma_lock_alloc(struct vm_area_struct *vma) { return true; }
464 static inline void vma_lock_free(struct vm_area_struct *vma) {}
465
466 #endif /* CONFIG_PER_VMA_LOCK */
467
468 struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
469 {
470 struct vm_area_struct *vma;
471
472 vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
473 if (!vma)
474 return NULL;
475
476 vma_init(vma, mm);
477 if (!vma_lock_alloc(vma)) {
478 kmem_cache_free(vm_area_cachep, vma);
479 return NULL;
480 }
481
482 return vma;
483 }
484
485 struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
486 {
487 struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
488
489 if (!new)
490 return NULL;
491
492 ASSERT_EXCLUSIVE_WRITER(orig->vm_flags);
493 ASSERT_EXCLUSIVE_WRITER(orig->vm_file);
494 /*
495 * orig->shared.rb may be modified concurrently, but the clone
496 * will be reinitialized.
497 */
498 data_race(memcpy(new, orig, sizeof(*new)));
499 if (!vma_lock_alloc(new)) {
500 kmem_cache_free(vm_area_cachep, new);
501 return NULL;
502 }
503 INIT_LIST_HEAD(&new->anon_vma_chain);
504 vma_numab_state_init(new);
505 dup_anon_vma_name(orig, new);
506
507 return new;
508 }
509
510 void __vm_area_free(struct vm_area_struct *vma)
511 {
512 vma_numab_state_free(vma);
513 free_anon_vma_name(vma);
514 vma_lock_free(vma);
515 kmem_cache_free(vm_area_cachep, vma);
516 }
517
518 #ifdef CONFIG_PER_VMA_LOCK
519 static void vm_area_free_rcu_cb(struct rcu_head *head)
520 {
521 struct vm_area_struct *vma = container_of(head, struct vm_area_struct,
522 vm_rcu);
523
524 /* The vma should not be locked while being destroyed. */
525 VM_BUG_ON_VMA(rwsem_is_locked(&vma->vm_lock->lock), vma);
526 __vm_area_free(vma);
527 }
528 #endif
529
530 void vm_area_free(struct vm_area_struct *vma)
531 {
532 #ifdef CONFIG_PER_VMA_LOCK
533 call_rcu(&vma->vm_rcu, vm_area_free_rcu_cb);
534 #else
535 __vm_area_free(vma);
536 #endif
537 }
538
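/*
 * Adjust NR_KERNEL_STACK_KB for the pages backing this task's stack;
 * @account is +1 when the stack is allocated and -1 when it is released.
 */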
539 static void account_kernel_stack(struct task_struct *tsk, int account)
540 {
541 if (IS_ENABLED(CONFIG_VMAP_STACK)) {
542 struct vm_struct *vm = task_stack_vm_area(tsk);
543 int i;
544
545 for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
546 mod_lruvec_page_state(vm->pages[i], NR_KERNEL_STACK_KB,
547 account * (PAGE_SIZE / 1024));
548 } else {
549 void *stack = task_stack_page(tsk);
550
551 /* All stack pages are in the same node. */
552 mod_lruvec_kmem_state(stack, NR_KERNEL_STACK_KB,
553 account * (THREAD_SIZE / 1024));
554 }
555 }
556
557 void exit_task_stack_account(struct task_struct *tsk)
558 {
559 account_kernel_stack(tsk, -1);
560
561 if (IS_ENABLED(CONFIG_VMAP_STACK)) {
562 struct vm_struct *vm;
563 int i;
564
565 vm = task_stack_vm_area(tsk);
566 for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
567 memcg_kmem_uncharge_page(vm->pages[i], 0);
568 }
569 }
570
571 static void release_task_stack(struct task_struct *tsk)
572 {
573 if (WARN_ON(READ_ONCE(tsk->__state) != TASK_DEAD))
574 return; /* Better to leak the stack than to free prematurely */
575
576 free_thread_stack(tsk);
577 }
578
579 #ifdef CONFIG_THREAD_INFO_IN_TASK
580 void put_task_stack(struct task_struct *tsk)
581 {
582 if (refcount_dec_and_test(&tsk->stack_refcount))
583 release_task_stack(tsk);
584 }
585 #endif
586
587 void free_task(struct task_struct *tsk)
588 {
589 #ifdef CONFIG_SECCOMP
590 WARN_ON_ONCE(tsk->seccomp.filter);
591 #endif
592 release_user_cpus_ptr(tsk);
593 scs_release(tsk);
594
595 #ifndef CONFIG_THREAD_INFO_IN_TASK
596 /*
597 * The task is finally done with both the stack and thread_info,
598 * so free both.
599 */
600 release_task_stack(tsk);
601 #else
602 /*
603 * If the task had a separate stack allocation, it should be gone
604 * by now.
605 */
606 WARN_ON_ONCE(refcount_read(&tsk->stack_refcount) != 0);
607 #endif
608 rt_mutex_debug_task_free(tsk);
609 ftrace_graph_exit_task(tsk);
610 arch_release_task_struct(tsk);
611 if (tsk->flags & PF_KTHREAD)
612 free_kthread_struct(tsk);
613 bpf_task_storage_free(tsk);
614 free_task_struct(tsk);
615 }
616 EXPORT_SYMBOL(free_task);
617
618 static void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm)
619 {
620 struct file *exe_file;
621
622 exe_file = get_mm_exe_file(oldmm);
623 RCU_INIT_POINTER(mm->exe_file, exe_file);
624 /*
625 * We depend on the oldmm having properly denied write access to the
626 * exe_file already.
627 */
628 if (exe_file && exe_file_deny_write_access(exe_file))
629 pr_warn_once("exe_file_deny_write_access() failed in %s\n", __func__);
630 }
631
632 #ifdef CONFIG_MMU
633 static __latent_entropy int dup_mmap(struct mm_struct *mm,
634 struct mm_struct *oldmm)
635 {
636 struct vm_area_struct *mpnt, *tmp;
637 int retval;
638 unsigned long charge = 0;
639 LIST_HEAD(uf);
640 VMA_ITERATOR(vmi, mm, 0);
641
642 if (mmap_write_lock_killable(oldmm))
643 return -EINTR;
644 flush_cache_dup_mm(oldmm);
645 uprobe_dup_mmap(oldmm, mm);
646 /*
647 * Not linked in yet - no deadlock potential:
648 */
649 mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING);
650
651 /* No ordering required: file already has been exposed. */
652 dup_mm_exe_file(mm, oldmm);
653
654 mm->total_vm = oldmm->total_vm;
655 mm->data_vm = oldmm->data_vm;
656 mm->exec_vm = oldmm->exec_vm;
657 mm->stack_vm = oldmm->stack_vm;
658
659 /* Use __mt_dup() to efficiently build an identical maple tree. */
660 retval = __mt_dup(&oldmm->mm_mt, &mm->mm_mt, GFP_KERNEL);
661 if (unlikely(retval))
662 goto out;
663
664 mt_clear_in_rcu(vmi.mas.tree);
665 for_each_vma(vmi, mpnt) {
666 struct file *file;
667
668 vma_start_write(mpnt);
669 if (mpnt->vm_flags & VM_DONTCOPY) {
670 retval = vma_iter_clear_gfp(&vmi, mpnt->vm_start,
671 mpnt->vm_end, GFP_KERNEL);
672 if (retval)
673 goto loop_out;
674
675 vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
676 continue;
677 }
678 charge = 0;
679 /*
680 * Don't duplicate many vmas if we've been oom-killed (for
681 * example)
682 */
683 if (fatal_signal_pending(current)) {
684 retval = -EINTR;
685 goto loop_out;
686 }
687 if (mpnt->vm_flags & VM_ACCOUNT) {
688 unsigned long len = vma_pages(mpnt);
689
690 if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
691 goto fail_nomem;
692 charge = len;
693 }
694 tmp = vm_area_dup(mpnt);
695 if (!tmp)
696 goto fail_nomem;
697 retval = vma_dup_policy(mpnt, tmp);
698 if (retval)
699 goto fail_nomem_policy;
700 tmp->vm_mm = mm;
701 retval = dup_userfaultfd(tmp, &uf);
702 if (retval)
703 goto fail_nomem_anon_vma_fork;
704 if (tmp->vm_flags & VM_WIPEONFORK) {
705 /*
706 * VM_WIPEONFORK gets a clean slate in the child.
707 * Don't prepare anon_vma until fault since we don't
708 * copy page for current vma.
709 */
710 tmp->anon_vma = NULL;
711 } else if (anon_vma_fork(tmp, mpnt))
712 goto fail_nomem_anon_vma_fork;
713 vm_flags_clear(tmp, VM_LOCKED_MASK);
714 /*
715 * Copy/update hugetlb private vma information.
716 */
717 if (is_vm_hugetlb_page(tmp))
718 hugetlb_dup_vma_private(tmp);
719
720 /*
721 * Link the vma into the MT. After using __mt_dup(), memory
722 * allocation is not necessary here, so it cannot fail.
723 */
724 vma_iter_bulk_store(&vmi, tmp);
725
726 mm->map_count++;
727
728 if (tmp->vm_ops && tmp->vm_ops->open)
729 tmp->vm_ops->open(tmp);
730
731 file = tmp->vm_file;
732 if (file) {
733 struct address_space *mapping = file->f_mapping;
734
735 get_file(file);
736 i_mmap_lock_write(mapping);
737 if (vma_is_shared_maywrite(tmp))
738 mapping_allow_writable(mapping);
739 flush_dcache_mmap_lock(mapping);
740 /* insert tmp into the share list, just after mpnt */
741 vma_interval_tree_insert_after(tmp, mpnt,
742 &mapping->i_mmap);
743 flush_dcache_mmap_unlock(mapping);
744 i_mmap_unlock_write(mapping);
745 }
746
747 if (!(tmp->vm_flags & VM_WIPEONFORK))
748 retval = copy_page_range(tmp, mpnt);
749
750 if (retval) {
751 mpnt = vma_next(&vmi);
752 goto loop_out;
753 }
754 }
755 /* a new mm has just been created */
756 retval = arch_dup_mmap(oldmm, mm);
757 loop_out:
758 vma_iter_free(&vmi);
759 if (!retval) {
760 mt_set_in_rcu(vmi.mas.tree);
761 ksm_fork(mm, oldmm);
762 khugepaged_fork(mm, oldmm);
763 } else {
764
765 /*
766 * The entire maple tree has already been duplicated. If the
767 * mmap duplication fails, mark the failure point with
768 * XA_ZERO_ENTRY. In exit_mmap(), if this marker is encountered,
769 * stop releasing VMAs that have not been duplicated after this
770 * point.
771 */
772 if (mpnt) {
773 mas_set_range(&vmi.mas, mpnt->vm_start, mpnt->vm_end - 1);
774 mas_store(&vmi.mas, XA_ZERO_ENTRY);
775 /* Avoid OOM iterating a broken tree */
776 set_bit(MMF_OOM_SKIP, &mm->flags);
777 }
778 /*
779 * The mm_struct is going to exit, but the locks will be dropped
780 * first. Setting the mm_struct as unstable is advisable as it is
781 * not fully initialised.
782 */
783 set_bit(MMF_UNSTABLE, &mm->flags);
784 }
785 out:
786 mmap_write_unlock(mm);
787 flush_tlb_mm(oldmm);
788 mmap_write_unlock(oldmm);
789 if (!retval)
790 dup_userfaultfd_complete(&uf);
791 else
792 dup_userfaultfd_fail(&uf);
793 return retval;
794
795 fail_nomem_anon_vma_fork:
796 mpol_put(vma_policy(tmp));
797 fail_nomem_policy:
798 vm_area_free(tmp);
799 fail_nomem:
800 retval = -ENOMEM;
801 vm_unacct_memory(charge);
802 goto loop_out;
803 }
804
805 static inline int mm_alloc_pgd(struct mm_struct *mm)
806 {
807 mm->pgd = pgd_alloc(mm);
808 if (unlikely(!mm->pgd))
809 return -ENOMEM;
810 return 0;
811 }
812
813 static inline void mm_free_pgd(struct mm_struct *mm)
814 {
815 pgd_free(mm, mm->pgd);
816 }
817 #else
818 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
819 {
820 mmap_write_lock(oldmm);
821 dup_mm_exe_file(mm, oldmm);
822 mmap_write_unlock(oldmm);
823 return 0;
824 }
825 #define mm_alloc_pgd(mm) (0)
826 #define mm_free_pgd(mm)
827 #endif /* CONFIG_MMU */
828
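/*
 * Sanity-check an mm that is about to be freed: every RSS counter should
 * have drained to zero and no page-table memory should remain accounted.
 */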
829 static void check_mm(struct mm_struct *mm)
830 {
831 int i;
832
833 BUILD_BUG_ON_MSG(ARRAY_SIZE(resident_page_types) != NR_MM_COUNTERS,
834 "Please make sure 'struct resident_page_types[]' is updated as well");
835
836 for (i = 0; i < NR_MM_COUNTERS; i++) {
837 long x = percpu_counter_sum(&mm->rss_stat[i]);
838
839 if (unlikely(x))
840 pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n",
841 mm, resident_page_types[i], x);
842 }
843
844 if (mm_pgtables_bytes(mm))
845 pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n",
846 mm_pgtables_bytes(mm));
847
848 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !defined(CONFIG_SPLIT_PMD_PTLOCKS)
849 VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
850 #endif
851 }
852
853 #define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
854 #define free_mm(mm) (kmem_cache_free(mm_cachep, (mm)))
855
856 static void do_check_lazy_tlb(void *arg)
857 {
858 struct mm_struct *mm = arg;
859
860 WARN_ON_ONCE(current->active_mm == mm);
861 }
862
863 static void do_shoot_lazy_tlb(void *arg)
864 {
865 struct mm_struct *mm = arg;
866
867 if (current->active_mm == mm) {
868 WARN_ON_ONCE(current->mm);
869 current->active_mm = &init_mm;
870 switch_mm(mm, &init_mm, current);
871 }
872 }
873
874 static void cleanup_lazy_tlbs(struct mm_struct *mm)
875 {
876 if (!IS_ENABLED(CONFIG_MMU_LAZY_TLB_SHOOTDOWN)) {
877 /*
878 * In this case, lazy tlb mms are refcounted and would not reach
879 * __mmdrop until all CPUs have switched away and mmdrop()ed.
880 */
881 return;
882 }
883
884 /*
885 * Lazy mm shootdown does not refcount "lazy tlb mm" usage, rather it
886 * requires lazy mm users to switch to another mm when the refcount
887 * drops to zero, before the mm is freed. This requires IPIs here to
888 * switch kernel threads to init_mm.
889 *
890 * archs that use IPIs to flush TLBs can piggy-back that lazy tlb mm
891 * switch with the final userspace teardown TLB flush which leaves the
892 * mm lazy on this CPU but no others, reducing the need for additional
893 * IPIs here. There are cases where a final IPI is still required here,
894 * such as the final mmdrop being performed on a different CPU than the
895 * one exiting, or kernel threads using the mm when userspace exits.
896 *
897 * IPI overheads have not been found to be expensive, but they could be
898 * reduced in a number of possible ways, for example (roughly
899 * increasing order of complexity):
900 * - The last lazy reference created by exit_mm() could instead switch
901 * to init_mm, however it's probable this will run on the same CPU
902 * immediately afterwards, so this may not reduce IPIs much.
903 * - A batch of mms requiring IPIs could be gathered and freed at once.
904 * - CPUs store active_mm where it can be remotely checked without a
905 * lock, to filter out false-positives in the cpumask.
906 * - After mm_users or mm_count reaches zero, switching away from the
907 * mm could clear mm_cpumask to reduce some IPIs, perhaps together
908 * with some batching or delaying of the final IPIs.
909 * - A delayed freeing and RCU-like quiescing sequence based on mm
910 * switching to avoid IPIs completely.
911 */
912 on_each_cpu_mask(mm_cpumask(mm), do_shoot_lazy_tlb, (void *)mm, 1);
913 if (IS_ENABLED(CONFIG_DEBUG_VM_SHOOT_LAZIES))
914 on_each_cpu(do_check_lazy_tlb, (void *)mm, 1);
915 }
916
917 /*
918 * Called when the last reference to the mm
919 * is dropped: either by a lazy thread or by
920 * mmput. Free the page directory and the mm.
921 */
922 void __mmdrop(struct mm_struct *mm)
923 {
924 BUG_ON(mm == &init_mm);
925 WARN_ON_ONCE(mm == current->mm);
926
927 /* Ensure no CPUs are using this as their lazy tlb mm */
928 cleanup_lazy_tlbs(mm);
929
930 WARN_ON_ONCE(mm == current->active_mm);
931 mm_free_pgd(mm);
932 destroy_context(mm);
933 mmu_notifier_subscriptions_destroy(mm);
934 check_mm(mm);
935 put_user_ns(mm->user_ns);
936 mm_pasid_drop(mm);
937 mm_destroy_cid(mm);
938 percpu_counter_destroy_many(mm->rss_stat, NR_MM_COUNTERS);
939
940 free_mm(mm);
941 }
942 EXPORT_SYMBOL_GPL(__mmdrop);
943
944 static void mmdrop_async_fn(struct work_struct *work)
945 {
946 struct mm_struct *mm;
947
948 mm = container_of(work, struct mm_struct, async_put_work);
949 __mmdrop(mm);
950 }
951
952 static void mmdrop_async(struct mm_struct *mm)
953 {
954 if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
955 INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
956 schedule_work(&mm->async_put_work);
957 }
958 }
959
960 static inline void free_signal_struct(struct signal_struct *sig)
961 {
962 taskstats_tgid_free(sig);
963 sched_autogroup_exit(sig);
964 /*
965 * __mmdrop is not safe to call from softirq context on x86 due to
966 * pgd_dtor so postpone it to the async context
967 */
968 if (sig->oom_mm)
969 mmdrop_async(sig->oom_mm);
970 kmem_cache_free(signal_cachep, sig);
971 }
972
973 static inline void put_signal_struct(struct signal_struct *sig)
974 {
975 if (refcount_dec_and_test(&sig->sigcnt))
976 free_signal_struct(sig);
977 }
978
979 void __put_task_struct(struct task_struct *tsk)
980 {
981 WARN_ON(!tsk->exit_state);
982 WARN_ON(refcount_read(&tsk->usage));
983 WARN_ON(tsk == current);
984
985 sched_ext_free(tsk);
986 io_uring_free(tsk);
987 cgroup_free(tsk);
988 task_numa_free(tsk, true);
989 security_task_free(tsk);
990 exit_creds(tsk);
991 delayacct_tsk_free(tsk);
992 put_signal_struct(tsk->signal);
993 sched_core_free(tsk);
994 free_task(tsk);
995 }
996 EXPORT_SYMBOL_GPL(__put_task_struct);
997
998 void __put_task_struct_rcu_cb(struct rcu_head *rhp)
999 {
1000 struct task_struct *task = container_of(rhp, struct task_struct, rcu);
1001
1002 __put_task_struct(task);
1003 }
1004 EXPORT_SYMBOL_GPL(__put_task_struct_rcu_cb);
1005
1006 void __init __weak arch_task_cache_init(void) { }
1007
1008 /*
1009 * Compute the default limit on the number of threads from available memory.
1010 */
1011 static void __init set_max_threads(unsigned int max_threads_suggested)
1012 {
1013 u64 threads;
1014 unsigned long nr_pages = memblock_estimated_nr_free_pages();
1015
1016 /*
1017 * The number of threads shall be limited such that the thread
1018 * structures may only consume a small part of the available memory.
1019 */
1020 if (fls64(nr_pages) + fls64(PAGE_SIZE) > 64)
1021 threads = MAX_THREADS;
1022 else
1023 threads = div64_u64((u64) nr_pages * (u64) PAGE_SIZE,
1024 (u64) THREAD_SIZE * 8UL);
1025
1026 if (threads > max_threads_suggested)
1027 threads = max_threads_suggested;
1028
1029 max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
1030 }
1031
1032 #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
1033 /* Initialized by the architecture: */
1034 int arch_task_struct_size __read_mostly;
1035 #endif
1036
1037 static void __init task_struct_whitelist(unsigned long *offset, unsigned long *size)
1038 {
1039 /* Fetch thread_struct whitelist for the architecture. */
1040 arch_thread_struct_whitelist(offset, size);
1041
1042 /*
1043 * Handle zero-sized whitelist or empty thread_struct, otherwise
1044 * adjust offset to position of thread_struct in task_struct.
1045 */
1046 if (unlikely(*size == 0))
1047 *offset = 0;
1048 else
1049 *offset += offsetof(struct task_struct, thread);
1050 }
1051
1052 void __init fork_init(void)
1053 {
1054 int i;
1055 #ifndef ARCH_MIN_TASKALIGN
1056 #define ARCH_MIN_TASKALIGN 0
1057 #endif
1058 int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN);
1059 unsigned long useroffset, usersize;
1060
1061 /* create a slab on which task_structs can be allocated */
1062 task_struct_whitelist(&useroffset, &usersize);
1063 task_struct_cachep = kmem_cache_create_usercopy("task_struct",
1064 arch_task_struct_size, align,
1065 SLAB_PANIC|SLAB_ACCOUNT,
1066 useroffset, usersize, NULL);
1067
1068 /* do the arch specific task caches init */
1069 arch_task_cache_init();
1070
1071 set_max_threads(MAX_THREADS);
1072
1073 init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
1074 init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
1075 init_task.signal->rlim[RLIMIT_SIGPENDING] =
1076 init_task.signal->rlim[RLIMIT_NPROC];
1077
1078 for (i = 0; i < UCOUNT_COUNTS; i++)
1079 init_user_ns.ucount_max[i] = max_threads/2;
1080
1081 set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_NPROC, RLIM_INFINITY);
1082 set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE, RLIM_INFINITY);
1083 set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, RLIM_INFINITY);
1084 set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK, RLIM_INFINITY);
1085
1086 #ifdef CONFIG_VMAP_STACK
1087 cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
1088 NULL, free_vm_stack_cache);
1089 #endif
1090
1091 scs_init();
1092
1093 lockdep_init_task(&init_task);
1094 uprobes_init();
1095 }
1096
1097 int __weak arch_dup_task_struct(struct task_struct *dst,
1098 struct task_struct *src)
1099 {
1100 *dst = *src;
1101 return 0;
1102 }
1103
1104 void set_task_stack_end_magic(struct task_struct *tsk)
1105 {
1106 unsigned long *stackend;
1107
1108 stackend = end_of_stack(tsk);
1109 *stackend = STACK_END_MAGIC; /* for overflow detection */
1110 }
1111
1112 static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
1113 {
1114 struct task_struct *tsk;
1115 int err;
1116
1117 if (node == NUMA_NO_NODE)
1118 node = tsk_fork_get_node(orig);
1119 tsk = alloc_task_struct_node(node);
1120 if (!tsk)
1121 return NULL;
1122
1123 err = arch_dup_task_struct(tsk, orig);
1124 if (err)
1125 goto free_tsk;
1126
1127 err = alloc_thread_stack_node(tsk, node);
1128 if (err)
1129 goto free_tsk;
1130
1131 #ifdef CONFIG_THREAD_INFO_IN_TASK
1132 refcount_set(&tsk->stack_refcount, 1);
1133 #endif
1134 account_kernel_stack(tsk, 1);
1135
1136 err = scs_prepare(tsk, node);
1137 if (err)
1138 goto free_stack;
1139
1140 #ifdef CONFIG_SECCOMP
1141 /*
1142 * We must handle setting up seccomp filters once we're under
1143 * the sighand lock in case orig has changed between now and
1144 * then. Until then, filter must be NULL to avoid messing up
1145 * the usage counts on the error path calling free_task.
1146 */
1147 tsk->seccomp.filter = NULL;
1148 #endif
1149
1150 setup_thread_stack(tsk, orig);
1151 clear_user_return_notifier(tsk);
1152 clear_tsk_need_resched(tsk);
1153 set_task_stack_end_magic(tsk);
1154 clear_syscall_work_syscall_user_dispatch(tsk);
1155
1156 #ifdef CONFIG_STACKPROTECTOR
1157 tsk->stack_canary = get_random_canary();
1158 #endif
1159 if (orig->cpus_ptr == &orig->cpus_mask)
1160 tsk->cpus_ptr = &tsk->cpus_mask;
1161 dup_user_cpus_ptr(tsk, orig, node);
1162
1163 /*
1164 * One for the user space visible state that goes away when reaped.
1165 * One for the scheduler.
1166 */
1167 refcount_set(&tsk->rcu_users, 2);
1168 /* One for the rcu users */
1169 refcount_set(&tsk->usage, 1);
1170 #ifdef CONFIG_BLK_DEV_IO_TRACE
1171 tsk->btrace_seq = 0;
1172 #endif
1173 tsk->splice_pipe = NULL;
1174 tsk->task_frag.page = NULL;
1175 tsk->wake_q.next = NULL;
1176 tsk->worker_private = NULL;
1177
1178 kcov_task_init(tsk);
1179 kmsan_task_create(tsk);
1180 kmap_local_fork(tsk);
1181
1182 #ifdef CONFIG_FAULT_INJECTION
1183 tsk->fail_nth = 0;
1184 #endif
1185
1186 #ifdef CONFIG_BLK_CGROUP
1187 tsk->throttle_disk = NULL;
1188 tsk->use_memdelay = 0;
1189 #endif
1190
1191 #ifdef CONFIG_ARCH_HAS_CPU_PASID
1192 tsk->pasid_activated = 0;
1193 #endif
1194
1195 #ifdef CONFIG_MEMCG
1196 tsk->active_memcg = NULL;
1197 #endif
1198
1199 #ifdef CONFIG_X86_BUS_LOCK_DETECT
1200 tsk->reported_split_lock = 0;
1201 #endif
1202
1203 #ifdef CONFIG_SCHED_MM_CID
1204 tsk->mm_cid = -1;
1205 tsk->last_mm_cid = -1;
1206 tsk->mm_cid_active = 0;
1207 tsk->migrate_from_cpu = -1;
1208 #endif
1209 return tsk;
1210
1211 free_stack:
1212 exit_task_stack_account(tsk);
1213 free_thread_stack(tsk);
1214 free_tsk:
1215 free_task_struct(tsk);
1216 return NULL;
1217 }
1218
1219 __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
1220
1221 static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;
1222
1223 static int __init coredump_filter_setup(char *s)
1224 {
1225 default_dump_filter =
1226 (simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
1227 MMF_DUMP_FILTER_MASK;
1228 return 1;
1229 }
1230
1231 __setup("coredump_filter=", coredump_filter_setup);
1232
1233 #include <linux/init_task.h>
1234
1235 static void mm_init_aio(struct mm_struct *mm)
1236 {
1237 #ifdef CONFIG_AIO
1238 spin_lock_init(&mm->ioctx_lock);
1239 mm->ioctx_table = NULL;
1240 #endif
1241 }
1242
1243 static __always_inline void mm_clear_owner(struct mm_struct *mm,
1244 struct task_struct *p)
1245 {
1246 #ifdef CONFIG_MEMCG
1247 if (mm->owner == p)
1248 WRITE_ONCE(mm->owner, NULL);
1249 #endif
1250 }
1251
1252 static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
1253 {
1254 #ifdef CONFIG_MEMCG
1255 mm->owner = p;
1256 #endif
1257 }
1258
1259 static void mm_init_uprobes_state(struct mm_struct *mm)
1260 {
1261 #ifdef CONFIG_UPROBES
1262 mm->uprobes_state.xol_area = NULL;
1263 #endif
1264 }
1265
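/*
 * Initialise a freshly allocated (or copied) mm_struct. On failure the
 * partially initialised mm is freed and NULL is returned.
 */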
1266 static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
1267 struct user_namespace *user_ns)
1268 {
1269 mt_init_flags(&mm->mm_mt, MM_MT_FLAGS);
1270 mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock);
1271 atomic_set(&mm->mm_users, 1);
1272 atomic_set(&mm->mm_count, 1);
1273 seqcount_init(&mm->write_protect_seq);
1274 mmap_init_lock(mm);
1275 INIT_LIST_HEAD(&mm->mmlist);
1276 mm_pgtables_bytes_init(mm);
1277 mm->map_count = 0;
1278 mm->locked_vm = 0;
1279 atomic64_set(&mm->pinned_vm, 0);
1280 memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
1281 spin_lock_init(&mm->page_table_lock);
1282 spin_lock_init(&mm->arg_lock);
1283 mm_init_cpumask(mm);
1284 mm_init_aio(mm);
1285 mm_init_owner(mm, p);
1286 mm_pasid_init(mm);
1287 RCU_INIT_POINTER(mm->exe_file, NULL);
1288 mmu_notifier_subscriptions_init(mm);
1289 init_tlb_flush_pending(mm);
1290 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !defined(CONFIG_SPLIT_PMD_PTLOCKS)
1291 mm->pmd_huge_pte = NULL;
1292 #endif
1293 mm_init_uprobes_state(mm);
1294 hugetlb_count_init(mm);
1295
1296 if (current->mm) {
1297 mm->flags = mmf_init_flags(current->mm->flags);
1298 mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK;
1299 } else {
1300 mm->flags = default_dump_filter;
1301 mm->def_flags = 0;
1302 }
1303
1304 if (mm_alloc_pgd(mm))
1305 goto fail_nopgd;
1306
1307 if (init_new_context(p, mm))
1308 goto fail_nocontext;
1309
1310 if (mm_alloc_cid(mm, p))
1311 goto fail_cid;
1312
1313 if (percpu_counter_init_many(mm->rss_stat, 0, GFP_KERNEL_ACCOUNT,
1314 NR_MM_COUNTERS))
1315 goto fail_pcpu;
1316
1317 mm->user_ns = get_user_ns(user_ns);
1318 lru_gen_init_mm(mm);
1319 return mm;
1320
1321 fail_pcpu:
1322 mm_destroy_cid(mm);
1323 fail_cid:
1324 destroy_context(mm);
1325 fail_nocontext:
1326 mm_free_pgd(mm);
1327 fail_nopgd:
1328 free_mm(mm);
1329 return NULL;
1330 }
1331
1332 /*
1333 * Allocate and initialize an mm_struct.
1334 */
1335 struct mm_struct *mm_alloc(void)
1336 {
1337 struct mm_struct *mm;
1338
1339 mm = allocate_mm();
1340 if (!mm)
1341 return NULL;
1342
1343 memset(mm, 0, sizeof(*mm));
1344 return mm_init(mm, current, current_user_ns());
1345 }
1346 EXPORT_SYMBOL_IF_KUNIT(mm_alloc);
1347
1348 static inline void __mmput(struct mm_struct *mm)
1349 {
1350 VM_BUG_ON(atomic_read(&mm->mm_users));
1351
1352 uprobe_clear_state(mm);
1353 exit_aio(mm);
1354 ksm_exit(mm);
1355 khugepaged_exit(mm); /* must run before exit_mmap */
1356 exit_mmap(mm);
1357 mm_put_huge_zero_folio(mm);
1358 set_mm_exe_file(mm, NULL);
1359 if (!list_empty(&mm->mmlist)) {
1360 spin_lock(&mmlist_lock);
1361 list_del(&mm->mmlist);
1362 spin_unlock(&mmlist_lock);
1363 }
1364 if (mm->binfmt)
1365 module_put(mm->binfmt->module);
1366 lru_gen_del_mm(mm);
1367 mmdrop(mm);
1368 }
1369
1370 /*
1371 * Decrement the use count and release all resources for an mm.
1372 */
1373 void mmput(struct mm_struct *mm)
1374 {
1375 might_sleep();
1376
1377 if (atomic_dec_and_test(&mm->mm_users))
1378 __mmput(mm);
1379 }
1380 EXPORT_SYMBOL_GPL(mmput);
1381
1382 #ifdef CONFIG_MMU
1383 static void mmput_async_fn(struct work_struct *work)
1384 {
1385 struct mm_struct *mm = container_of(work, struct mm_struct,
1386 async_put_work);
1387
1388 __mmput(mm);
1389 }
1390
1391 void mmput_async(struct mm_struct *mm)
1392 {
1393 if (atomic_dec_and_test(&mm->mm_users)) {
1394 INIT_WORK(&mm->async_put_work, mmput_async_fn);
1395 schedule_work(&mm->async_put_work);
1396 }
1397 }
1398 EXPORT_SYMBOL_GPL(mmput_async);
1399 #endif
1400
1401 /**
1402 * set_mm_exe_file - change a reference to the mm's executable file
1403 * @mm: The mm to change.
1404 * @new_exe_file: The new file to use.
1405 *
1406 * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
1407 *
1408 * Main users are mmput() and sys_execve(). Callers prevent concurrent
1409 * invocations: in mmput() nobody alive left, in execve it happens before
1410 * the new mm is made visible to anyone.
1411 *
1412 * Can only fail if new_exe_file != NULL.
1413 */
1414 int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
1415 {
1416 struct file *old_exe_file;
1417
1418 /*
1419 * It is safe to dereference the exe_file without RCU as
1420 * this function is only called if nobody else can access
1421 * this mm -- see comment above for justification.
1422 */
1423 old_exe_file = rcu_dereference_raw(mm->exe_file);
1424
1425 if (new_exe_file) {
1426 /*
1427 * We expect the caller (i.e., sys_execve) to have already denied
1428 * write access, so this is unlikely to fail.
1429 */
1430 if (unlikely(exe_file_deny_write_access(new_exe_file)))
1431 return -EACCES;
1432 get_file(new_exe_file);
1433 }
1434 rcu_assign_pointer(mm->exe_file, new_exe_file);
1435 if (old_exe_file) {
1436 exe_file_allow_write_access(old_exe_file);
1437 fput(old_exe_file);
1438 }
1439 return 0;
1440 }
1441
1442 /**
1443 * replace_mm_exe_file - replace a reference to the mm's executable file
1444 * @mm: The mm to change.
1445 * @new_exe_file: The new file to use.
1446 *
1447 * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
1448 *
1449 * Main user is sys_prctl(PR_SET_MM_MAP/EXE_FILE).
1450 */
1451 int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
1452 {
1453 struct vm_area_struct *vma;
1454 struct file *old_exe_file;
1455 int ret = 0;
1456
1457 /* Forbid mm->exe_file change if old file still mapped. */
1458 old_exe_file = get_mm_exe_file(mm);
1459 if (old_exe_file) {
1460 VMA_ITERATOR(vmi, mm, 0);
1461 mmap_read_lock(mm);
1462 for_each_vma(vmi, vma) {
1463 if (!vma->vm_file)
1464 continue;
1465 if (path_equal(&vma->vm_file->f_path,
1466 &old_exe_file->f_path)) {
1467 ret = -EBUSY;
1468 break;
1469 }
1470 }
1471 mmap_read_unlock(mm);
1472 fput(old_exe_file);
1473 if (ret)
1474 return ret;
1475 }
1476
1477 ret = exe_file_deny_write_access(new_exe_file);
1478 if (ret)
1479 return -EACCES;
1480 get_file(new_exe_file);
1481
1482 /* set the new file */
1483 mmap_write_lock(mm);
1484 old_exe_file = rcu_dereference_raw(mm->exe_file);
1485 rcu_assign_pointer(mm->exe_file, new_exe_file);
1486 mmap_write_unlock(mm);
1487
1488 if (old_exe_file) {
1489 exe_file_allow_write_access(old_exe_file);
1490 fput(old_exe_file);
1491 }
1492 return 0;
1493 }
1494
1495 /**
1496 * get_mm_exe_file - acquire a reference to the mm's executable file
1497 * @mm: The mm of interest.
1498 *
1499 * Returns %NULL if mm has no associated executable file.
1500 * User must release file via fput().
1501 */
1502 struct file *get_mm_exe_file(struct mm_struct *mm)
1503 {
1504 struct file *exe_file;
1505
1506 rcu_read_lock();
1507 exe_file = get_file_rcu(&mm->exe_file);
1508 rcu_read_unlock();
1509 return exe_file;
1510 }
1511
1512 /**
1513 * get_task_exe_file - acquire a reference to the task's executable file
1514 * @task: The task.
1515 *
1516 * Returns %NULL if task's mm (if any) has no associated executable file or
1517 * this is a kernel thread with borrowed mm (see the comment above get_task_mm).
1518 * User must release file via fput().
1519 */
1520 struct file *get_task_exe_file(struct task_struct *task)
1521 {
1522 struct file *exe_file = NULL;
1523 struct mm_struct *mm;
1524
1525 if (task->flags & PF_KTHREAD)
1526 return NULL;
1527
1528 task_lock(task);
1529 mm = task->mm;
1530 if (mm)
1531 exe_file = get_mm_exe_file(mm);
1532 task_unlock(task);
1533 return exe_file;
1534 }
1535
1536 /**
1537 * get_task_mm - acquire a reference to the task's mm
1538 * @task: The task.
1539 *
1540 * Returns %NULL if the task has no mm. Checks PF_KTHREAD (meaning
1541 * this kernel workthread has transiently adopted a user mm with use_mm,
1542 * to do its AIO) is not set and if so returns a reference to it, after
1543 * bumping up the use count. User must release the mm via mmput()
1544 * after use. Typically used by /proc and ptrace.
1545 */
1546 struct mm_struct *get_task_mm(struct task_struct *task)
1547 {
1548 struct mm_struct *mm;
1549
1550 if (task->flags & PF_KTHREAD)
1551 return NULL;
1552
1553 task_lock(task);
1554 mm = task->mm;
1555 if (mm)
1556 mmget(mm);
1557 task_unlock(task);
1558 return mm;
1559 }
1560 EXPORT_SYMBOL_GPL(get_task_mm);
1561
1562 struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
1563 {
1564 struct mm_struct *mm;
1565 int err;
1566
1567 err = down_read_killable(&task->signal->exec_update_lock);
1568 if (err)
1569 return ERR_PTR(err);
1570
1571 mm = get_task_mm(task);
1572 if (!mm) {
1573 mm = ERR_PTR(-ESRCH);
1574 } else if (mm != current->mm && !ptrace_may_access(task, mode)) {
1575 mmput(mm);
1576 mm = ERR_PTR(-EACCES);
1577 }
1578 up_read(&task->signal->exec_update_lock);
1579
1580 return mm;
1581 }
1582
1583 static void complete_vfork_done(struct task_struct *tsk)
1584 {
1585 struct completion *vfork;
1586
1587 task_lock(tsk);
1588 vfork = tsk->vfork_done;
1589 if (likely(vfork)) {
1590 tsk->vfork_done = NULL;
1591 complete(vfork);
1592 }
1593 task_unlock(tsk);
1594 }
1595
1596 static int wait_for_vfork_done(struct task_struct *child,
1597 struct completion *vfork)
1598 {
1599 unsigned int state = TASK_KILLABLE|TASK_FREEZABLE;
1600 int killed;
1601
1602 cgroup_enter_frozen();
1603 killed = wait_for_completion_state(vfork, state);
1604 cgroup_leave_frozen(false);
1605
1606 if (killed) {
1607 task_lock(child);
1608 child->vfork_done = NULL;
1609 task_unlock(child);
1610 }
1611
1612 put_task_struct(child);
1613 return killed;
1614 }
1615
1616 /* Please note the differences between mmput and mm_release.
1617 * mmput is called whenever we stop holding onto a mm_struct,
1618 * error success whatever.
1619 *
1620 * mm_release is called after a mm_struct has been removed
1621 * from the current process.
1622 *
1623 * This difference is important for error handling, when we
1624 * only half set up a mm_struct for a new process and need to restore
1625 * the old one. Because we mmput the new mm_struct before
1626 * restoring the old one. . .
1627 * Eric Biederman 10 January 1998
1628 */
1629 static void mm_release(struct task_struct *tsk, struct mm_struct *mm)
1630 {
1631 uprobe_free_utask(tsk);
1632
1633 /* Get rid of any cached register state */
1634 deactivate_mm(tsk, mm);
1635
1636 /*
1637 * Signal userspace if we're not exiting with a core dump
1638 * because we want to leave the value intact for debugging
1639 * purposes.
1640 */
1641 if (tsk->clear_child_tid) {
1642 if (atomic_read(&mm->mm_users) > 1) {
1643 /*
1644 * We don't check the error code - if userspace has
1645 * not set up a proper pointer then tough luck.
1646 */
1647 put_user(0, tsk->clear_child_tid);
1648 do_futex(tsk->clear_child_tid, FUTEX_WAKE,
1649 1, NULL, NULL, 0, 0);
1650 }
1651 tsk->clear_child_tid = NULL;
1652 }
1653
1654 /*
1655 * All done, finally we can wake up parent and return this mm to him.
1656 * Also kthread_stop() uses this completion for synchronization.
1657 */
1658 if (tsk->vfork_done)
1659 complete_vfork_done(tsk);
1660 }
1661
1662 void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm)
1663 {
1664 futex_exit_release(tsk);
1665 mm_release(tsk, mm);
1666 }
1667
1668 void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm)
1669 {
1670 futex_exec_release(tsk);
1671 mm_release(tsk, mm);
1672 }
1673
1674 /**
1675 * dup_mm() - duplicates an existing mm structure
1676 * @tsk: the task_struct with which the new mm will be associated.
1677 * @oldmm: the mm to duplicate.
1678 *
1679 * Allocates a new mm structure and duplicates the provided @oldmm structure
1680 * content into it.
1681 *
1682 * Return: the duplicated mm or NULL on failure.
1683 */
1684 static struct mm_struct *dup_mm(struct task_struct *tsk,
1685 struct mm_struct *oldmm)
1686 {
1687 struct mm_struct *mm;
1688 int err;
1689
1690 mm = allocate_mm();
1691 if (!mm)
1692 goto fail_nomem;
1693
1694 memcpy(mm, oldmm, sizeof(*mm));
1695
1696 if (!mm_init(mm, tsk, mm->user_ns))
1697 goto fail_nomem;
1698
1699 uprobe_start_dup_mmap();
1700 err = dup_mmap(mm, oldmm);
1701 if (err)
1702 goto free_pt;
1703 uprobe_end_dup_mmap();
1704
1705 mm->hiwater_rss = get_mm_rss(mm);
1706 mm->hiwater_vm = mm->total_vm;
1707
1708 if (mm->binfmt && !try_module_get(mm->binfmt->module))
1709 goto free_pt;
1710
1711 return mm;
1712
1713 free_pt:
1714 /* don't put binfmt in mmput, we haven't got module yet */
1715 mm->binfmt = NULL;
1716 mm_init_owner(mm, NULL);
1717 mmput(mm);
1718 if (err)
1719 uprobe_end_dup_mmap();
1720
1721 fail_nomem:
1722 return NULL;
1723 }
1724
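/*
 * Set up the child's memory descriptor: kernel threads keep none, CLONE_VM
 * shares the parent's mm, otherwise the parent's mm is duplicated.
 */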
1725 static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
1726 {
1727 struct mm_struct *mm, *oldmm;
1728
1729 tsk->min_flt = tsk->maj_flt = 0;
1730 tsk->nvcsw = tsk->nivcsw = 0;
1731 #ifdef CONFIG_DETECT_HUNG_TASK
1732 tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
1733 tsk->last_switch_time = 0;
1734 #endif
1735
1736 tsk->mm = NULL;
1737 tsk->active_mm = NULL;
1738
1739 /*
1740 * Are we cloning a kernel thread?
1741 *
1742 * We need to steal an active VM for that.
1743 */
1744 oldmm = current->mm;
1745 if (!oldmm)
1746 return 0;
1747
1748 if (clone_flags & CLONE_VM) {
1749 mmget(oldmm);
1750 mm = oldmm;
1751 } else {
1752 mm = dup_mm(tsk, current->mm);
1753 if (!mm)
1754 return -ENOMEM;
1755 }
1756
1757 tsk->mm = mm;
1758 tsk->active_mm = mm;
1759 sched_mm_cid_fork(tsk);
1760 return 0;
1761 }
1762
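/* Share (CLONE_FS) or copy the fs_struct holding root, cwd and umask. */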
1763 static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
1764 {
1765 struct fs_struct *fs = current->fs;
1766 if (clone_flags & CLONE_FS) {
1767 /* tsk->fs is already what we want */
1768 spin_lock(&fs->lock);
1769 /* "users" and "in_exec" locked for check_unsafe_exec() */
1770 if (fs->in_exec) {
1771 spin_unlock(&fs->lock);
1772 return -EAGAIN;
1773 }
1774 fs->users++;
1775 spin_unlock(&fs->lock);
1776 return 0;
1777 }
1778 tsk->fs = copy_fs_struct(fs);
1779 if (!tsk->fs)
1780 return -ENOMEM;
1781 return 0;
1782 }
1783
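/*
 * Share (CLONE_FILES) or duplicate the file descriptor table, or leave it
 * NULL when the caller asked for no files at all.
 */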
1784 static int copy_files(unsigned long clone_flags, struct task_struct *tsk,
1785 int no_files)
1786 {
1787 struct files_struct *oldf, *newf;
1788
1789 /*
1790 * A background process may not have any files ...
1791 */
1792 oldf = current->files;
1793 if (!oldf)
1794 return 0;
1795
1796 if (no_files) {
1797 tsk->files = NULL;
1798 return 0;
1799 }
1800
1801 if (clone_flags & CLONE_FILES) {
1802 atomic_inc(&oldf->count);
1803 return 0;
1804 }
1805
1806 newf = dup_fd(oldf, NULL);
1807 if (IS_ERR(newf))
1808 return PTR_ERR(newf);
1809
1810 tsk->files = newf;
1811 return 0;
1812 }
1813
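/* Share (CLONE_SIGHAND) or duplicate the table of signal handlers. */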
1814 static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
1815 {
1816 struct sighand_struct *sig;
1817
1818 if (clone_flags & CLONE_SIGHAND) {
1819 refcount_inc(&current->sighand->count);
1820 return 0;
1821 }
1822 sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
1823 RCU_INIT_POINTER(tsk->sighand, sig);
1824 if (!sig)
1825 return -ENOMEM;
1826
1827 refcount_set(&sig->count, 1);
1828 spin_lock_irq(&current->sighand->siglock);
1829 memcpy(sig->action, current->sighand->action, sizeof(sig->action));
1830 spin_unlock_irq(&current->sighand->siglock);
1831
1832 /* Reset all signal handlers not set to SIG_IGN to SIG_DFL. */
1833 if (clone_flags & CLONE_CLEAR_SIGHAND)
1834 flush_signal_handlers(tsk, 0);
1835
1836 return 0;
1837 }
1838
1839 void __cleanup_sighand(struct sighand_struct *sighand)
1840 {
1841 if (refcount_dec_and_test(&sighand->count)) {
1842 signalfd_cleanup(sighand);
1843 /*
1844 * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it
1845 * without an RCU grace period, see __lock_task_sighand().
1846 */
1847 kmem_cache_free(sighand_cachep, sighand);
1848 }
1849 }
1850
1851 /*
1852 * Initialize POSIX timer handling for a thread group.
1853 */
1854 static void posix_cpu_timers_init_group(struct signal_struct *sig)
1855 {
1856 struct posix_cputimers *pct = &sig->posix_cputimers;
1857 unsigned long cpu_limit;
1858
1859 cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
1860 posix_cputimers_group_init(pct, cpu_limit);
1861 }
1862
1863 static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
1864 {
1865 struct signal_struct *sig;
1866
1867 if (clone_flags & CLONE_THREAD)
1868 return 0;
1869
1870 sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
1871 tsk->signal = sig;
1872 if (!sig)
1873 return -ENOMEM;
1874
1875 sig->nr_threads = 1;
1876 sig->quick_threads = 1;
1877 atomic_set(&sig->live, 1);
1878 refcount_set(&sig->sigcnt, 1);
1879
1880 /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
1881 sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
1882 tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);
1883
1884 init_waitqueue_head(&sig->wait_chldexit);
1885 sig->curr_target = tsk;
1886 init_sigpending(&sig->shared_pending);
1887 INIT_HLIST_HEAD(&sig->multiprocess);
1888 seqlock_init(&sig->stats_lock);
1889 prev_cputime_init(&sig->prev_cputime);
1890
1891 #ifdef CONFIG_POSIX_TIMERS
1892 INIT_HLIST_HEAD(&sig->posix_timers);
1893 INIT_HLIST_HEAD(&sig->ignored_posix_timers);
1894 hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1895 sig->real_timer.function = it_real_fn;
1896 #endif
1897
1898 task_lock(current->group_leader);
1899 memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
1900 task_unlock(current->group_leader);
1901
1902 posix_cpu_timers_init_group(sig);
1903
1904 tty_audit_fork(sig);
1905 sched_autogroup_fork(sig);
1906
1907 sig->oom_score_adj = current->signal->oom_score_adj;
1908 sig->oom_score_adj_min = current->signal->oom_score_adj_min;
1909
1910 mutex_init(&sig->cred_guard_mutex);
1911 init_rwsem(&sig->exec_update_lock);
1912
1913 return 0;
1914 }
1915
1916 static void copy_seccomp(struct task_struct *p)
1917 {
1918 #ifdef CONFIG_SECCOMP
1919 /*
1920 * Must be called with sighand->lock held, which is common to
1921 * all threads in the group. Holding cred_guard_mutex is not
1922 * needed because this new task is not yet running and cannot
1923 * be racing exec.
1924 */
	assert_spin_locked(&current->sighand->siglock);
1926
1927 /* Ref-count the new filter user, and assign it. */
1928 get_seccomp_filter(current);
1929 p->seccomp = current->seccomp;
1930
1931 /*
1932 * Explicitly enable no_new_privs here in case it got set
1933 * between the task_struct being duplicated and holding the
1934 * sighand lock. The seccomp state and nnp must be in sync.
1935 */
1936 if (task_no_new_privs(current))
1937 task_set_no_new_privs(p);
1938
1939 /*
1940 * If the parent gained a seccomp mode after copying thread
	 * flags but before we held the sighand lock, we have
1942 * to manually enable the seccomp thread flag here.
1943 */
1944 if (p->seccomp.mode != SECCOMP_MODE_DISABLED)
1945 set_task_syscall_work(p, SECCOMP);
1946 #endif
1947 }
1948
SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
1950 {
1951 current->clear_child_tid = tidptr;
1952
1953 return task_pid_vnr(current);
1954 }
1955
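/*
 * Illustrative sketch (not part of this file): how userspace typically
 * registers a "clear child tid" location. glibc does this internally for
 * every thread it creates; the helper name and the raw syscall() wrapper
 * below are assumptions made for the example only.
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static pid_t register_tid_address(int *tid_slot)
 *	{
 *		// Returns the caller's TID; on thread exit the kernel clears
 *		// *tid_slot and performs a futex wake on it (see mm_release()).
 *		return (pid_t)syscall(SYS_set_tid_address, tid_slot);
 *	}
 */
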
static void rt_mutex_init_task(struct task_struct *p)
1957 {
1958 raw_spin_lock_init(&p->pi_lock);
1959 #ifdef CONFIG_RT_MUTEXES
1960 p->pi_waiters = RB_ROOT_CACHED;
1961 p->pi_top_task = NULL;
1962 p->pi_blocked_on = NULL;
1963 #endif
1964 }
1965
static inline void init_task_pid_links(struct task_struct *task)
1967 {
1968 enum pid_type type;
1969
1970 for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type)
1971 INIT_HLIST_NODE(&task->pid_links[type]);
1972 }
1973
static inline void
init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
1976 {
1977 if (type == PIDTYPE_PID)
1978 task->thread_pid = pid;
1979 else
1980 task->signal->pids[type] = pid;
1981 }
1982
static inline void rcu_copy_process(struct task_struct *p)
1984 {
1985 #ifdef CONFIG_PREEMPT_RCU
1986 p->rcu_read_lock_nesting = 0;
1987 p->rcu_read_unlock_special.s = 0;
1988 p->rcu_blocked_node = NULL;
1989 INIT_LIST_HEAD(&p->rcu_node_entry);
1990 #endif /* #ifdef CONFIG_PREEMPT_RCU */
1991 #ifdef CONFIG_TASKS_RCU
1992 p->rcu_tasks_holdout = false;
1993 INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
1994 p->rcu_tasks_idle_cpu = -1;
1995 INIT_LIST_HEAD(&p->rcu_tasks_exit_list);
1996 #endif /* #ifdef CONFIG_TASKS_RCU */
1997 #ifdef CONFIG_TASKS_TRACE_RCU
1998 p->trc_reader_nesting = 0;
1999 p->trc_reader_special.s = 0;
2000 INIT_LIST_HEAD(&p->trc_holdout_list);
2001 INIT_LIST_HEAD(&p->trc_blkd_node);
2002 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
2003 }
2004
2005 /**
2006 * __pidfd_prepare - allocate a new pidfd_file and reserve a pidfd
2007 * @pid: the struct pid for which to create a pidfd
2008 * @flags: flags of the new @pidfd
2009 * @ret: Where to return the file for the pidfd.
2010 *
2011 * Allocate a new file that stashes @pid and reserve a new pidfd number in the
2012 * caller's file descriptor table. The pidfd is reserved but not installed yet.
2013 *
2014 * The helper doesn't perform checks on @pid which makes it useful for pidfds
2015 * created via CLONE_PIDFD where @pid has no task attached when the pidfd and
2016 * pidfd file are prepared.
2017 *
 * If this function returns successfully, the caller must either call
 * fd_install() with the returned pidfd and pidfd file to install the pidfd
 * into its file descriptor table, or release them again with put_unused_fd()
 * and fput() respectively.
2023 *
2024 * This function is useful when a pidfd must already be reserved but there
2025 * might still be points of failure afterwards and the caller wants to ensure
2026 * that no pidfd is leaked into its file descriptor table.
2027 *
2028 * Return: On success, a reserved pidfd is returned from the function and a new
2029 * pidfd file is returned in the last argument to the function. On
2030 * error, a negative error code is returned from the function and the
2031 * last argument remains unchanged.
2032 */
static int __pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret)
2034 {
2035 int pidfd;
2036 struct file *pidfd_file;
2037
2038 pidfd = get_unused_fd_flags(O_CLOEXEC);
2039 if (pidfd < 0)
2040 return pidfd;
2041
2042 pidfd_file = pidfs_alloc_file(pid, flags | O_RDWR);
2043 if (IS_ERR(pidfd_file)) {
2044 put_unused_fd(pidfd);
2045 return PTR_ERR(pidfd_file);
2046 }
2047 /*
2048 * anon_inode_getfile() ignores everything outside of the
2049 * O_ACCMODE | O_NONBLOCK mask, set PIDFD_THREAD manually.
2050 */
2051 pidfd_file->f_flags |= (flags & PIDFD_THREAD);
2052 *ret = pidfd_file;
2053 return pidfd;
2054 }
2055
2056 /**
2057 * pidfd_prepare - allocate a new pidfd_file and reserve a pidfd
2058 * @pid: the struct pid for which to create a pidfd
2059 * @flags: flags of the new @pidfd
2060 * @ret: Where to return the pidfd.
2061 *
2062 * Allocate a new file that stashes @pid and reserve a new pidfd number in the
2063 * caller's file descriptor table. The pidfd is reserved but not installed yet.
2064 *
2065 * The helper verifies that @pid is still in use, without PIDFD_THREAD the
2066 * task identified by @pid must be a thread-group leader.
2067 *
 * If this function returns successfully, the caller must either call
 * fd_install() with the returned pidfd and pidfd file to install the pidfd
 * into its file descriptor table, or release them again with put_unused_fd()
 * and fput() respectively.
2073 *
2074 * This function is useful when a pidfd must already be reserved but there
2075 * might still be points of failure afterwards and the caller wants to ensure
2076 * that no pidfd is leaked into its file descriptor table.
2077 *
2078 * Return: On success, a reserved pidfd is returned from the function and a new
2079 * pidfd file is returned in the last argument to the function. On
2080 * error, a negative error code is returned from the function and the
2081 * last argument remains unchanged.
2082 */
int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret)
2084 {
2085 bool thread = flags & PIDFD_THREAD;
2086
2087 if (!pid || !pid_has_task(pid, thread ? PIDTYPE_PID : PIDTYPE_TGID))
2088 return -EINVAL;
2089
2090 return __pidfd_prepare(pid, flags, ret);
2091 }
2092
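/*
 * Illustrative sketch (not from an in-tree caller): the reserve-then-install
 * pattern the kerneldoc above describes. The do_other_setup() helper and the
 * error value are hypothetical; pidfd_prepare(), fd_install(), put_unused_fd()
 * and fput() are the real interfaces being exercised.
 *
 *	struct file *pidfd_file;
 *	int pidfd;
 *
 *	pidfd = pidfd_prepare(pid, 0, &pidfd_file);
 *	if (pidfd < 0)
 *		return pidfd;
 *
 *	if (do_other_setup() < 0) {
 *		// Nothing was published to userspace yet, so both the reserved
 *		// fd and the pidfd file can simply be dropped.
 *		put_unused_fd(pidfd);
 *		fput(pidfd_file);
 *		return -EIO;
 *	}
 *
 *	// Point of no return: make the pidfd visible in the fd table.
 *	fd_install(pidfd, pidfd_file);
 *	return pidfd;
 */
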
static void __delayed_free_task(struct rcu_head *rhp)
2094 {
2095 struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
2096
2097 free_task(tsk);
2098 }
2099
static __always_inline void delayed_free_task(struct task_struct *tsk)
2101 {
2102 if (IS_ENABLED(CONFIG_MEMCG))
2103 call_rcu(&tsk->rcu, __delayed_free_task);
2104 else
2105 free_task(tsk);
2106 }
2107
static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk)
2109 {
2110 /* Skip if kernel thread */
2111 if (!tsk->mm)
2112 return;
2113
2114 /* Skip if spawning a thread or using vfork */
2115 if ((clone_flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK)) != CLONE_VM)
2116 return;
2117
2118 /* We need to synchronize with __set_oom_adj */
2119 mutex_lock(&oom_adj_mutex);
2120 set_bit(MMF_MULTIPROCESS, &tsk->mm->flags);
2121 /* Update the values in case they were changed after copy_signal */
2122 tsk->signal->oom_score_adj = current->signal->oom_score_adj;
2123 tsk->signal->oom_score_adj_min = current->signal->oom_score_adj_min;
2124 mutex_unlock(&oom_adj_mutex);
2125 }
2126
2127 #ifdef CONFIG_RV
static void rv_task_fork(struct task_struct *p)
2129 {
2130 int i;
2131
2132 for (i = 0; i < RV_PER_TASK_MONITORS; i++)
2133 p->rv[i].da_mon.monitoring = false;
2134 }
2135 #else
2136 #define rv_task_fork(p) do {} while (0)
2137 #endif
2138
2139 /*
2140 * This creates a new process as a copy of the old one,
2141 * but does not actually start it yet.
2142 *
2143 * It copies the registers, and all the appropriate
2144 * parts of the process environment (as per the clone
2145 * flags). The actual kick-off is left to the caller.
2146 */
__latent_entropy struct task_struct *copy_process(
2148 struct pid *pid,
2149 int trace,
2150 int node,
2151 struct kernel_clone_args *args)
2152 {
2153 int pidfd = -1, retval;
2154 struct task_struct *p;
2155 struct multiprocess_signals delayed;
2156 struct file *pidfile = NULL;
2157 const u64 clone_flags = args->flags;
2158 struct nsproxy *nsp = current->nsproxy;
2159
2160 /*
2161 * Don't allow sharing the root directory with processes in a different
2162 * namespace
2163 */
2164 if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
2165 return ERR_PTR(-EINVAL);
2166
2167 if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
2168 return ERR_PTR(-EINVAL);
2169
2170 /*
2171 * Thread groups must share signals as well, and detached threads
2172 * can only be started up within the thread group.
2173 */
2174 if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
2175 return ERR_PTR(-EINVAL);
2176
2177 /*
2178 * Shared signal handlers imply shared VM. By way of the above,
2179 * thread groups also imply shared VM. Blocking this case allows
2180 * for various simplifications in other code.
2181 */
2182 if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
2183 return ERR_PTR(-EINVAL);
2184
2185 /*
2186 * Siblings of global init remain as zombies on exit since they are
2187 * not reaped by their parent (swapper). To solve this and to avoid
2188 * multi-rooted process trees, prevent global and container-inits
2189 * from creating siblings.
2190 */
2191 if ((clone_flags & CLONE_PARENT) &&
2192 current->signal->flags & SIGNAL_UNKILLABLE)
2193 return ERR_PTR(-EINVAL);
2194
2195 /*
2196 * If the new process will be in a different pid or user namespace
2197 * do not allow it to share a thread group with the forking task.
2198 */
2199 if (clone_flags & CLONE_THREAD) {
2200 if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
2201 (task_active_pid_ns(current) != nsp->pid_ns_for_children))
2202 return ERR_PTR(-EINVAL);
2203 }
2204
2205 if (clone_flags & CLONE_PIDFD) {
2206 /*
2207 * - CLONE_DETACHED is blocked so that we can potentially
2208 * reuse it later for CLONE_PIDFD.
2209 */
2210 if (clone_flags & CLONE_DETACHED)
2211 return ERR_PTR(-EINVAL);
2212 }
2213
2214 /*
2215 * Force any signals received before this point to be delivered
2216 * before the fork happens. Collect up signals sent to multiple
2217 * processes that happen during the fork and delay them so that
2218 * they appear to happen after the fork.
2219 */
2220 sigemptyset(&delayed.signal);
2221 INIT_HLIST_NODE(&delayed.node);
2222
	spin_lock_irq(&current->sighand->siglock);
	if (!(clone_flags & CLONE_THREAD))
		hlist_add_head(&delayed.node, &current->signal->multiprocess);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
2228 retval = -ERESTARTNOINTR;
2229 if (task_sigpending(current))
2230 goto fork_out;
2231
2232 retval = -ENOMEM;
2233 p = dup_task_struct(current, node);
2234 if (!p)
2235 goto fork_out;
2236 p->flags &= ~PF_KTHREAD;
2237 if (args->kthread)
2238 p->flags |= PF_KTHREAD;
2239 if (args->user_worker) {
2240 /*
2241 * Mark us a user worker, and block any signal that isn't
2242 * fatal or STOP
2243 */
2244 p->flags |= PF_USER_WORKER;
2245 siginitsetinv(&p->blocked, sigmask(SIGKILL)|sigmask(SIGSTOP));
2246 }
2247 if (args->io_thread)
2248 p->flags |= PF_IO_WORKER;
2249
2250 if (args->name)
2251 strscpy_pad(p->comm, args->name, sizeof(p->comm));
2252
2253 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? args->child_tid : NULL;
2254 /*
2255 * Clear TID on mm_release()?
2256 */
2257 p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? args->child_tid : NULL;
2258
2259 ftrace_graph_init_task(p);
2260
2261 rt_mutex_init_task(p);
2262
2263 lockdep_assert_irqs_enabled();
2264 #ifdef CONFIG_PROVE_LOCKING
2265 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
2266 #endif
2267 retval = copy_creds(p, clone_flags);
2268 if (retval < 0)
2269 goto bad_fork_free;
2270
2271 retval = -EAGAIN;
2272 if (is_rlimit_overlimit(task_ucounts(p), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) {
2273 if (p->real_cred->user != INIT_USER &&
2274 !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
2275 goto bad_fork_cleanup_count;
2276 }
2277 current->flags &= ~PF_NPROC_EXCEEDED;
2278
2279 /*
2280 * If multiple threads are within copy_process(), then this check
2281 * triggers too late. This doesn't hurt, the check is only there
2282 * to stop root fork bombs.
2283 */
2284 retval = -EAGAIN;
2285 if (data_race(nr_threads >= max_threads))
2286 goto bad_fork_cleanup_count;
2287
2288 delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
2289 p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE | PF_NO_SETAFFINITY);
2290 p->flags |= PF_FORKNOEXEC;
2291 INIT_LIST_HEAD(&p->children);
2292 INIT_LIST_HEAD(&p->sibling);
2293 rcu_copy_process(p);
2294 p->vfork_done = NULL;
2295 spin_lock_init(&p->alloc_lock);
2296
2297 init_sigpending(&p->pending);
2298
2299 p->utime = p->stime = p->gtime = 0;
2300 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
2301 p->utimescaled = p->stimescaled = 0;
2302 #endif
2303 prev_cputime_init(&p->prev_cputime);
2304
2305 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
2306 seqcount_init(&p->vtime.seqcount);
2307 p->vtime.starttime = 0;
2308 p->vtime.state = VTIME_INACTIVE;
2309 #endif
2310
2311 #ifdef CONFIG_IO_URING
2312 p->io_uring = NULL;
2313 #endif
2314
2315 p->default_timer_slack_ns = current->timer_slack_ns;
2316
2317 #ifdef CONFIG_PSI
2318 p->psi_flags = 0;
2319 #endif
2320
2321 task_io_accounting_init(&p->ioac);
2322 acct_clear_integrals(p);
2323
2324 posix_cputimers_init(&p->posix_cputimers);
2325 tick_dep_init_task(p);
2326
2327 p->io_context = NULL;
2328 audit_set_context(p, NULL);
2329 cgroup_fork(p);
2330 if (args->kthread) {
2331 if (!set_kthread_struct(p))
2332 goto bad_fork_cleanup_delayacct;
2333 }
2334 #ifdef CONFIG_NUMA
2335 p->mempolicy = mpol_dup(p->mempolicy);
2336 if (IS_ERR(p->mempolicy)) {
2337 retval = PTR_ERR(p->mempolicy);
2338 p->mempolicy = NULL;
2339 goto bad_fork_cleanup_delayacct;
2340 }
2341 #endif
2342 #ifdef CONFIG_CPUSETS
2343 p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
2344 seqcount_spinlock_init(&p->mems_allowed_seq, &p->alloc_lock);
2345 #endif
2346 #ifdef CONFIG_TRACE_IRQFLAGS
2347 memset(&p->irqtrace, 0, sizeof(p->irqtrace));
2348 p->irqtrace.hardirq_disable_ip = _THIS_IP_;
2349 p->irqtrace.softirq_enable_ip = _THIS_IP_;
2350 p->softirqs_enabled = 1;
2351 p->softirq_context = 0;
2352 #endif
2353
2354 p->pagefault_disabled = 0;
2355
2356 #ifdef CONFIG_LOCKDEP
2357 lockdep_init_task(p);
2358 #endif
2359
2360 #ifdef CONFIG_DEBUG_MUTEXES
2361 p->blocked_on = NULL; /* not blocked yet */
2362 #endif
2363 #ifdef CONFIG_BCACHE
2364 p->sequential_io = 0;
2365 p->sequential_io_avg = 0;
2366 #endif
2367 #ifdef CONFIG_BPF_SYSCALL
2368 RCU_INIT_POINTER(p->bpf_storage, NULL);
2369 p->bpf_ctx = NULL;
2370 #endif
2371
2372 /* Perform scheduler related setup. Assign this task to a CPU. */
2373 retval = sched_fork(clone_flags, p);
2374 if (retval)
2375 goto bad_fork_cleanup_policy;
2376
2377 retval = perf_event_init_task(p, clone_flags);
2378 if (retval)
2379 goto bad_fork_sched_cancel_fork;
2380 retval = audit_alloc(p);
2381 if (retval)
2382 goto bad_fork_cleanup_perf;
2383 /* copy all the process information */
2384 shm_init_task(p);
2385 retval = security_task_alloc(p, clone_flags);
2386 if (retval)
2387 goto bad_fork_cleanup_audit;
2388 retval = copy_semundo(clone_flags, p);
2389 if (retval)
2390 goto bad_fork_cleanup_security;
2391 retval = copy_files(clone_flags, p, args->no_files);
2392 if (retval)
2393 goto bad_fork_cleanup_semundo;
2394 retval = copy_fs(clone_flags, p);
2395 if (retval)
2396 goto bad_fork_cleanup_files;
2397 retval = copy_sighand(clone_flags, p);
2398 if (retval)
2399 goto bad_fork_cleanup_fs;
2400 retval = copy_signal(clone_flags, p);
2401 if (retval)
2402 goto bad_fork_cleanup_sighand;
2403 retval = copy_mm(clone_flags, p);
2404 if (retval)
2405 goto bad_fork_cleanup_signal;
2406 retval = copy_namespaces(clone_flags, p);
2407 if (retval)
2408 goto bad_fork_cleanup_mm;
2409 retval = copy_io(clone_flags, p);
2410 if (retval)
2411 goto bad_fork_cleanup_namespaces;
2412 retval = copy_thread(p, args);
2413 if (retval)
2414 goto bad_fork_cleanup_io;
2415
2416 stackleak_task_init(p);
2417
2418 if (pid != &init_struct_pid) {
2419 pid = alloc_pid(p->nsproxy->pid_ns_for_children, args->set_tid,
2420 args->set_tid_size);
2421 if (IS_ERR(pid)) {
2422 retval = PTR_ERR(pid);
2423 goto bad_fork_cleanup_thread;
2424 }
2425 }
2426
2427 /*
2428 * This has to happen after we've potentially unshared the file
2429 * descriptor table (so that the pidfd doesn't leak into the child
2430 * if the fd table isn't shared).
2431 */
2432 if (clone_flags & CLONE_PIDFD) {
2433 int flags = (clone_flags & CLONE_THREAD) ? PIDFD_THREAD : 0;
2434
2435 /* Note that no task has been attached to @pid yet. */
2436 retval = __pidfd_prepare(pid, flags, &pidfile);
2437 if (retval < 0)
2438 goto bad_fork_free_pid;
2439 pidfd = retval;
2440
2441 retval = put_user(pidfd, args->pidfd);
2442 if (retval)
2443 goto bad_fork_put_pidfd;
2444 }
2445
2446 #ifdef CONFIG_BLOCK
2447 p->plug = NULL;
2448 #endif
2449 futex_init_task(p);
2450
2451 /*
2452 * sigaltstack should be cleared when sharing the same VM
2453 */
2454 if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
2455 sas_ss_reset(p);
2456
2457 /*
2458 * Syscall tracing and stepping should be turned off in the
2459 * child regardless of CLONE_PTRACE.
2460 */
2461 user_disable_single_step(p);
2462 clear_task_syscall_work(p, SYSCALL_TRACE);
2463 #if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
2464 clear_task_syscall_work(p, SYSCALL_EMU);
2465 #endif
2466 clear_tsk_latency_tracing(p);
2467
2468 /* ok, now we should be set up.. */
2469 p->pid = pid_nr(pid);
2470 if (clone_flags & CLONE_THREAD) {
2471 p->group_leader = current->group_leader;
2472 p->tgid = current->tgid;
2473 } else {
2474 p->group_leader = p;
2475 p->tgid = p->pid;
2476 }
2477
2478 p->nr_dirtied = 0;
2479 p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
2480 p->dirty_paused_when = 0;
2481
2482 p->pdeath_signal = 0;
2483 p->task_works = NULL;
2484 clear_posix_cputimers_work(p);
2485
2486 #ifdef CONFIG_KRETPROBES
2487 p->kretprobe_instances.first = NULL;
2488 #endif
2489 #ifdef CONFIG_RETHOOK
2490 p->rethooks.first = NULL;
2491 #endif
2492
2493 /*
2494 * Ensure that the cgroup subsystem policies allow the new process to be
2495 * forked. It should be noted that the new process's css_set can be changed
2496 * between here and cgroup_post_fork() if an organisation operation is in
2497 * progress.
2498 */
2499 retval = cgroup_can_fork(p, args);
2500 if (retval)
2501 goto bad_fork_put_pidfd;
2502
2503 /*
2504 * Now that the cgroups are pinned, re-clone the parent cgroup and put
2505 * the new task on the correct runqueue. All this *before* the task
2506 * becomes visible.
2507 *
2508 * This isn't part of ->can_fork() because while the re-cloning is
2509 * cgroup specific, it unconditionally needs to place the task on a
2510 * runqueue.
2511 */
2512 retval = sched_cgroup_fork(p, args);
2513 if (retval)
2514 goto bad_fork_cancel_cgroup;
2515
2516 /*
2517 * From this point on we must avoid any synchronous user-space
2518 * communication until we take the tasklist-lock. In particular, we do
2519 * not want user-space to be able to predict the process start-time by
2520 * stalling fork(2) after we recorded the start_time but before it is
2521 * visible to the system.
2522 */
2523
2524 p->start_time = ktime_get_ns();
2525 p->start_boottime = ktime_get_boottime_ns();
2526
2527 /*
	 * Make it visible to the rest of the system, but don't wake it up yet.
2529 * Need tasklist lock for parent etc handling!
2530 */
2531 write_lock_irq(&tasklist_lock);
2532
2533 /* CLONE_PARENT re-uses the old parent */
2534 if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
2535 p->real_parent = current->real_parent;
2536 p->parent_exec_id = current->parent_exec_id;
2537 if (clone_flags & CLONE_THREAD)
2538 p->exit_signal = -1;
2539 else
2540 p->exit_signal = current->group_leader->exit_signal;
2541 } else {
2542 p->real_parent = current;
2543 p->parent_exec_id = current->self_exec_id;
2544 p->exit_signal = args->exit_signal;
2545 }
2546
2547 klp_copy_process(p);
2548
2549 sched_core_fork(p);
2550
	spin_lock(&current->sighand->siglock);
2552
2553 rv_task_fork(p);
2554
2555 rseq_fork(p, clone_flags);
2556
2557 /* Don't start children in a dying pid namespace */
2558 if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) {
2559 retval = -ENOMEM;
2560 goto bad_fork_core_free;
2561 }
2562
2563 /* Let kill terminate clone/fork in the middle */
2564 if (fatal_signal_pending(current)) {
2565 retval = -EINTR;
2566 goto bad_fork_core_free;
2567 }
2568
2569 /* No more failure paths after this point. */
2570
2571 /*
2572 * Copy seccomp details explicitly here, in case they were changed
2573 * before holding sighand lock.
2574 */
2575 copy_seccomp(p);
2576
2577 init_task_pid_links(p);
2578 if (likely(p->pid)) {
2579 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
2580
2581 init_task_pid(p, PIDTYPE_PID, pid);
2582 if (thread_group_leader(p)) {
2583 init_task_pid(p, PIDTYPE_TGID, pid);
2584 init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
2585 init_task_pid(p, PIDTYPE_SID, task_session(current));
2586
2587 if (is_child_reaper(pid)) {
2588 ns_of_pid(pid)->child_reaper = p;
2589 p->signal->flags |= SIGNAL_UNKILLABLE;
2590 }
2591 p->signal->shared_pending.signal = delayed.signal;
2592 p->signal->tty = tty_kref_get(current->signal->tty);
2593 /*
2594 * Inherit has_child_subreaper flag under the same
2595 * tasklist_lock with adding child to the process tree
2596 * for propagate_has_child_subreaper optimization.
2597 */
2598 p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper ||
2599 p->real_parent->signal->is_child_subreaper;
2600 list_add_tail(&p->sibling, &p->real_parent->children);
2601 list_add_tail_rcu(&p->tasks, &init_task.tasks);
2602 attach_pid(p, PIDTYPE_TGID);
2603 attach_pid(p, PIDTYPE_PGID);
2604 attach_pid(p, PIDTYPE_SID);
2605 __this_cpu_inc(process_counts);
2606 } else {
2607 current->signal->nr_threads++;
2608 current->signal->quick_threads++;
			atomic_inc(&current->signal->live);
			refcount_inc(&current->signal->sigcnt);
2611 task_join_group_stop(p);
2612 list_add_tail_rcu(&p->thread_node,
2613 &p->signal->thread_head);
2614 }
2615 attach_pid(p, PIDTYPE_PID);
2616 nr_threads++;
2617 }
2618 total_forks++;
2619 hlist_del_init(&delayed.node);
	spin_unlock(&current->sighand->siglock);
2621 syscall_tracepoint_update(p);
2622 write_unlock_irq(&tasklist_lock);
2623
2624 if (pidfile)
2625 fd_install(pidfd, pidfile);
2626
2627 proc_fork_connector(p);
2628 sched_post_fork(p);
2629 cgroup_post_fork(p, args);
2630 perf_event_fork(p);
2631
2632 trace_task_newtask(p, clone_flags);
2633 uprobe_copy_process(p, clone_flags);
2634 user_events_fork(p, clone_flags);
2635
2636 copy_oom_score_adj(clone_flags, p);
2637
2638 return p;
2639
2640 bad_fork_core_free:
2641 sched_core_free(p);
	spin_unlock(&current->sighand->siglock);
2643 write_unlock_irq(&tasklist_lock);
2644 bad_fork_cancel_cgroup:
2645 cgroup_cancel_fork(p, args);
2646 bad_fork_put_pidfd:
2647 if (clone_flags & CLONE_PIDFD) {
2648 fput(pidfile);
2649 put_unused_fd(pidfd);
2650 }
2651 bad_fork_free_pid:
2652 if (pid != &init_struct_pid)
2653 free_pid(pid);
2654 bad_fork_cleanup_thread:
2655 exit_thread(p);
2656 bad_fork_cleanup_io:
2657 if (p->io_context)
2658 exit_io_context(p);
2659 bad_fork_cleanup_namespaces:
2660 exit_task_namespaces(p);
2661 bad_fork_cleanup_mm:
2662 if (p->mm) {
2663 mm_clear_owner(p->mm, p);
2664 mmput(p->mm);
2665 }
2666 bad_fork_cleanup_signal:
2667 if (!(clone_flags & CLONE_THREAD))
2668 free_signal_struct(p->signal);
2669 bad_fork_cleanup_sighand:
2670 __cleanup_sighand(p->sighand);
2671 bad_fork_cleanup_fs:
2672 exit_fs(p); /* blocking */
2673 bad_fork_cleanup_files:
2674 exit_files(p); /* blocking */
2675 bad_fork_cleanup_semundo:
2676 exit_sem(p);
2677 bad_fork_cleanup_security:
2678 security_task_free(p);
2679 bad_fork_cleanup_audit:
2680 audit_free(p);
2681 bad_fork_cleanup_perf:
2682 perf_event_free_task(p);
2683 bad_fork_sched_cancel_fork:
2684 sched_cancel_fork(p);
2685 bad_fork_cleanup_policy:
2686 lockdep_free_task(p);
2687 #ifdef CONFIG_NUMA
2688 mpol_put(p->mempolicy);
2689 #endif
2690 bad_fork_cleanup_delayacct:
2691 delayacct_tsk_free(p);
2692 bad_fork_cleanup_count:
2693 dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
2694 exit_creds(p);
2695 bad_fork_free:
2696 WRITE_ONCE(p->__state, TASK_DEAD);
2697 exit_task_stack_account(p);
2698 put_task_stack(p);
2699 delayed_free_task(p);
2700 fork_out:
	spin_lock_irq(&current->sighand->siglock);
	hlist_del_init(&delayed.node);
	spin_unlock_irq(&current->sighand->siglock);
2704 return ERR_PTR(retval);
2705 }
2706
static inline void init_idle_pids(struct task_struct *idle)
2708 {
2709 enum pid_type type;
2710
2711 for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
2712 INIT_HLIST_NODE(&idle->pid_links[type]); /* not really needed */
2713 init_task_pid(idle, type, &init_struct_pid);
2714 }
2715 }
2716
static int idle_dummy(void *dummy)
2718 {
2719 /* This function is never called */
2720 return 0;
2721 }
2722
struct task_struct * __init fork_idle(int cpu)
2724 {
2725 struct task_struct *task;
2726 struct kernel_clone_args args = {
2727 .flags = CLONE_VM,
2728 .fn = &idle_dummy,
2729 .fn_arg = NULL,
2730 .kthread = 1,
2731 .idle = 1,
2732 };
2733
2734 task = copy_process(&init_struct_pid, 0, cpu_to_node(cpu), &args);
2735 if (!IS_ERR(task)) {
2736 init_idle_pids(task);
2737 init_idle(task, cpu);
2738 }
2739
2740 return task;
2741 }
2742
2743 /*
2744 * This is like kernel_clone(), but shaved down and tailored to just
2745 * creating io_uring workers. It returns a created task, or an error pointer.
2746 * The returned task is inactive, and the caller must fire it up through
2747 * wake_up_new_task(p). All signals are blocked in the created task.
2748 */
struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node)
2750 {
2751 unsigned long flags = CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD|
2752 CLONE_IO;
2753 struct kernel_clone_args args = {
2754 .flags = ((lower_32_bits(flags) | CLONE_VM |
2755 CLONE_UNTRACED) & ~CSIGNAL),
2756 .exit_signal = (lower_32_bits(flags) & CSIGNAL),
2757 .fn = fn,
2758 .fn_arg = arg,
2759 .io_thread = 1,
2760 .user_worker = 1,
2761 };
2762
2763 return copy_process(NULL, 0, node, &args);
2764 }
2765
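/*
 * Illustrative sketch (assumption, not taken from an in-tree user): how a
 * subsystem would typically consume create_io_thread(). The worker_fn() body
 * and the surrounding error handling are made up for the example.
 *
 *	static int worker_fn(void *data)
 *	{
 *		// Runs with all signals except SIGKILL/SIGSTOP blocked.
 *		return 0;
 *	}
 *
 *	struct task_struct *tsk;
 *
 *	tsk = create_io_thread(worker_fn, data, NUMA_NO_NODE);
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	// The new task is created inactive; it only starts running here.
 *	wake_up_new_task(tsk);
 */
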
2766 /*
2767 * Ok, this is the main fork-routine.
2768 *
2769 * It copies the process, and if successful kick-starts
2770 * it and waits for it to finish using the VM if required.
2771 *
2772 * args->exit_signal is expected to be checked for sanity by the caller.
2773 */
pid_t kernel_clone(struct kernel_clone_args *args)
2775 {
2776 u64 clone_flags = args->flags;
2777 struct completion vfork;
2778 struct pid *pid;
2779 struct task_struct *p;
2780 int trace = 0;
2781 pid_t nr;
2782
2783 /*
2784 * For legacy clone() calls, CLONE_PIDFD uses the parent_tid argument
2785 * to return the pidfd. Hence, CLONE_PIDFD and CLONE_PARENT_SETTID are
2786 * mutually exclusive. With clone3() CLONE_PIDFD has grown a separate
2787 * field in struct clone_args and it still doesn't make sense to have
2788 * them both point at the same memory location. Performing this check
2789 * here has the advantage that we don't need to have a separate helper
2790 * to check for legacy clone().
2791 */
2792 if ((clone_flags & CLONE_PIDFD) &&
2793 (clone_flags & CLONE_PARENT_SETTID) &&
2794 (args->pidfd == args->parent_tid))
2795 return -EINVAL;
2796
2797 /*
2798 * Determine whether and which event to report to ptracer. When
2799 * called from kernel_thread or CLONE_UNTRACED is explicitly
2800 * requested, no event is reported; otherwise, report if the event
2801 * for the type of forking is enabled.
2802 */
2803 if (!(clone_flags & CLONE_UNTRACED)) {
2804 if (clone_flags & CLONE_VFORK)
2805 trace = PTRACE_EVENT_VFORK;
2806 else if (args->exit_signal != SIGCHLD)
2807 trace = PTRACE_EVENT_CLONE;
2808 else
2809 trace = PTRACE_EVENT_FORK;
2810
2811 if (likely(!ptrace_event_enabled(current, trace)))
2812 trace = 0;
2813 }
2814
2815 p = copy_process(NULL, trace, NUMA_NO_NODE, args);
2816 add_latent_entropy();
2817
2818 if (IS_ERR(p))
2819 return PTR_ERR(p);
2820
2821 /*
2822 * Do this prior waking up the new thread - the thread pointer
2823 * might get invalid after that point, if the thread exits quickly.
2824 */
2825 trace_sched_process_fork(current, p);
2826
2827 pid = get_task_pid(p, PIDTYPE_PID);
2828 nr = pid_vnr(pid);
2829
2830 if (clone_flags & CLONE_PARENT_SETTID)
2831 put_user(nr, args->parent_tid);
2832
2833 if (clone_flags & CLONE_VFORK) {
2834 p->vfork_done = &vfork;
2835 init_completion(&vfork);
2836 get_task_struct(p);
2837 }
2838
2839 if (IS_ENABLED(CONFIG_LRU_GEN_WALKS_MMU) && !(clone_flags & CLONE_VM)) {
2840 /* lock the task to synchronize with memcg migration */
2841 task_lock(p);
2842 lru_gen_add_mm(p->mm);
2843 task_unlock(p);
2844 }
2845
2846 wake_up_new_task(p);
2847
2848 /* forking complete and child started to run, tell ptracer */
2849 if (unlikely(trace))
2850 ptrace_event_pid(trace, pid);
2851
2852 if (clone_flags & CLONE_VFORK) {
2853 if (!wait_for_vfork_done(p, &vfork))
2854 ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
2855 }
2856
2857 put_pid(pid);
2858 return nr;
2859 }
2860
2861 /*
2862 * Create a kernel thread.
2863 */
pid_t kernel_thread(int (*fn)(void *), void *arg, const char *name,
2865 unsigned long flags)
2866 {
2867 struct kernel_clone_args args = {
2868 .flags = ((lower_32_bits(flags) | CLONE_VM |
2869 CLONE_UNTRACED) & ~CSIGNAL),
2870 .exit_signal = (lower_32_bits(flags) & CSIGNAL),
2871 .fn = fn,
2872 .fn_arg = arg,
2873 .name = name,
2874 .kthread = 1,
2875 };
2876
2877 return kernel_clone(&args);
2878 }
2879
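/*
 * Illustrative sketch (assumption): a minimal in-kernel caller. Most code
 * should prefer the kthread_create()/kthread_run() API; kernel_thread() is a
 * low-level building block. thread_fn() and do_work() are hypothetical.
 *
 *	static int thread_fn(void *arg)
 *	{
 *		do_work(arg);
 *		return 0;
 *	}
 *
 *	pid_t pid = kernel_thread(thread_fn, NULL, "my-worker",
 *				  CLONE_FS | CLONE_FILES);
 *	if (pid < 0)
 *		pr_err("failed to start worker: %d\n", (int)pid);
 */
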
2880 /*
2881 * Create a user mode thread.
2882 */
pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags)
2884 {
2885 struct kernel_clone_args args = {
2886 .flags = ((lower_32_bits(flags) | CLONE_VM |
2887 CLONE_UNTRACED) & ~CSIGNAL),
2888 .exit_signal = (lower_32_bits(flags) & CSIGNAL),
2889 .fn = fn,
2890 .fn_arg = arg,
2891 };
2892
2893 return kernel_clone(&args);
2894 }
2895
2896 #ifdef __ARCH_WANT_SYS_FORK
SYSCALL_DEFINE0(fork)
2898 {
2899 #ifdef CONFIG_MMU
2900 struct kernel_clone_args args = {
2901 .exit_signal = SIGCHLD,
2902 };
2903
2904 return kernel_clone(&args);
2905 #else
2906 /* can not support in nommu mode */
2907 return -EINVAL;
2908 #endif
2909 }
2910 #endif
2911
2912 #ifdef __ARCH_WANT_SYS_VFORK
SYSCALL_DEFINE0(vfork)
2914 {
2915 struct kernel_clone_args args = {
2916 .flags = CLONE_VFORK | CLONE_VM,
2917 .exit_signal = SIGCHLD,
2918 };
2919
2920 return kernel_clone(&args);
2921 }
2922 #endif
2923
2924 #ifdef __ARCH_WANT_SYS_CLONE
2925 #ifdef CONFIG_CLONE_BACKWARDS
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
2927 int __user *, parent_tidptr,
2928 unsigned long, tls,
2929 int __user *, child_tidptr)
2930 #elif defined(CONFIG_CLONE_BACKWARDS2)
2931 SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
2932 int __user *, parent_tidptr,
2933 int __user *, child_tidptr,
2934 unsigned long, tls)
2935 #elif defined(CONFIG_CLONE_BACKWARDS3)
2936 SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
2937 int, stack_size,
2938 int __user *, parent_tidptr,
2939 int __user *, child_tidptr,
2940 unsigned long, tls)
2941 #else
2942 SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
2943 int __user *, parent_tidptr,
2944 int __user *, child_tidptr,
2945 unsigned long, tls)
2946 #endif
2947 {
2948 struct kernel_clone_args args = {
2949 .flags = (lower_32_bits(clone_flags) & ~CSIGNAL),
2950 .pidfd = parent_tidptr,
2951 .child_tid = child_tidptr,
2952 .parent_tid = parent_tidptr,
2953 .exit_signal = (lower_32_bits(clone_flags) & CSIGNAL),
2954 .stack = newsp,
2955 .tls = tls,
2956 };
2957
2958 return kernel_clone(&args);
2959 }
2960 #endif
2961
noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
2963 struct clone_args __user *uargs,
2964 size_t usize)
2965 {
2966 int err;
2967 struct clone_args args;
2968 pid_t *kset_tid = kargs->set_tid;
2969
2970 BUILD_BUG_ON(offsetofend(struct clone_args, tls) !=
2971 CLONE_ARGS_SIZE_VER0);
2972 BUILD_BUG_ON(offsetofend(struct clone_args, set_tid_size) !=
2973 CLONE_ARGS_SIZE_VER1);
2974 BUILD_BUG_ON(offsetofend(struct clone_args, cgroup) !=
2975 CLONE_ARGS_SIZE_VER2);
2976 BUILD_BUG_ON(sizeof(struct clone_args) != CLONE_ARGS_SIZE_VER2);
2977
2978 if (unlikely(usize > PAGE_SIZE))
2979 return -E2BIG;
2980 if (unlikely(usize < CLONE_ARGS_SIZE_VER0))
2981 return -EINVAL;
2982
2983 err = copy_struct_from_user(&args, sizeof(args), uargs, usize);
2984 if (err)
2985 return err;
2986
2987 if (unlikely(args.set_tid_size > MAX_PID_NS_LEVEL))
2988 return -EINVAL;
2989
2990 if (unlikely(!args.set_tid && args.set_tid_size > 0))
2991 return -EINVAL;
2992
2993 if (unlikely(args.set_tid && args.set_tid_size == 0))
2994 return -EINVAL;
2995
2996 /*
	 * Verify that the upper 32 bits of exit_signal are unset and that
	 * it is a valid signal.
2999 */
3000 if (unlikely((args.exit_signal & ~((u64)CSIGNAL)) ||
3001 !valid_signal(args.exit_signal)))
3002 return -EINVAL;
3003
3004 if ((args.flags & CLONE_INTO_CGROUP) &&
3005 (args.cgroup > INT_MAX || usize < CLONE_ARGS_SIZE_VER2))
3006 return -EINVAL;
3007
3008 *kargs = (struct kernel_clone_args){
3009 .flags = args.flags,
3010 .pidfd = u64_to_user_ptr(args.pidfd),
3011 .child_tid = u64_to_user_ptr(args.child_tid),
3012 .parent_tid = u64_to_user_ptr(args.parent_tid),
3013 .exit_signal = args.exit_signal,
3014 .stack = args.stack,
3015 .stack_size = args.stack_size,
3016 .tls = args.tls,
3017 .set_tid_size = args.set_tid_size,
3018 .cgroup = args.cgroup,
3019 };
3020
3021 if (args.set_tid &&
3022 copy_from_user(kset_tid, u64_to_user_ptr(args.set_tid),
3023 (kargs->set_tid_size * sizeof(pid_t))))
3024 return -EFAULT;
3025
3026 kargs->set_tid = kset_tid;
3027
3028 return 0;
3029 }
3030
3031 /**
3032 * clone3_stack_valid - check and prepare stack
3033 * @kargs: kernel clone args
3034 *
3035 * Verify that the stack arguments userspace gave us are sane.
3036 * In addition, set the stack direction for userspace since it's easy for us to
3037 * determine.
3038 */
static inline bool clone3_stack_valid(struct kernel_clone_args *kargs)
3040 {
3041 if (kargs->stack == 0) {
3042 if (kargs->stack_size > 0)
3043 return false;
3044 } else {
3045 if (kargs->stack_size == 0)
3046 return false;
3047
3048 if (!access_ok((void __user *)kargs->stack, kargs->stack_size))
3049 return false;
3050
3051 #if !defined(CONFIG_STACK_GROWSUP)
3052 kargs->stack += kargs->stack_size;
3053 #endif
3054 }
3055
3056 return true;
3057 }
3058
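/*
 * Illustrative sketch (assumption, userspace side): on architectures where
 * the stack grows down, a clone3() caller passes the *lowest* address of the
 * stack mapping plus its size, and the kernel points the child at the top of
 * that range (the stack += stack_size adjustment above). The mapping size and
 * flag choices here are made up for the example.
 *
 *	size_t stack_size = 1024 * 1024;
 *	void *stack = mmap(NULL, stack_size, PROT_READ | PROT_WRITE,
 *			   MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
 *
 *	struct clone_args args = {
 *		.flags		= CLONE_VM | CLONE_FS | CLONE_FILES |
 *				  CLONE_SIGHAND | CLONE_THREAD,
 *		.stack		= (__u64)(uintptr_t)stack,
 *		.stack_size	= stack_size,
 *	};
 */
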
static bool clone3_args_valid(struct kernel_clone_args *kargs)
3060 {
3061 /* Verify that no unknown flags are passed along. */
3062 if (kargs->flags &
3063 ~(CLONE_LEGACY_FLAGS | CLONE_CLEAR_SIGHAND | CLONE_INTO_CGROUP))
3064 return false;
3065
3066 /*
3067 * - make the CLONE_DETACHED bit reusable for clone3
3068 * - make the CSIGNAL bits reusable for clone3
3069 */
3070 if (kargs->flags & (CLONE_DETACHED | (CSIGNAL & (~CLONE_NEWTIME))))
3071 return false;
3072
3073 if ((kargs->flags & (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) ==
3074 (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND))
3075 return false;
3076
3077 if ((kargs->flags & (CLONE_THREAD | CLONE_PARENT)) &&
3078 kargs->exit_signal)
3079 return false;
3080
3081 if (!clone3_stack_valid(kargs))
3082 return false;
3083
3084 return true;
3085 }
3086
3087 /**
3088 * sys_clone3 - create a new process with specific properties
3089 * @uargs: argument structure
3090 * @size: size of @uargs
3091 *
3092 * clone3() is the extensible successor to clone()/clone2().
3093 * It takes a struct as argument that is versioned by its size.
3094 *
3095 * Return: On success, a positive PID for the child process.
3096 * On error, a negative errno number.
3097 */
SYSCALL_DEFINE2(clone3, struct clone_args __user *, uargs, size_t, size)
3099 {
3100 int err;
3101
3102 struct kernel_clone_args kargs;
3103 pid_t set_tid[MAX_PID_NS_LEVEL];
3104
3105 #ifdef __ARCH_BROKEN_SYS_CLONE3
3106 #warning clone3() entry point is missing, please fix
3107 return -ENOSYS;
3108 #endif
3109
3110 kargs.set_tid = set_tid;
3111
3112 err = copy_clone_args_from_user(&kargs, uargs, size);
3113 if (err)
3114 return err;
3115
3116 if (!clone3_args_valid(&kargs))
3117 return -EINVAL;
3118
3119 return kernel_clone(&kargs);
3120 }
3121
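/*
 * Illustrative sketch (assumption, userspace side): a minimal clone3() call
 * that also requests a pidfd. glibc currently provides no dedicated wrapper,
 * so the raw syscall() interface is used; error handling is elided.
 *
 *	#include <linux/sched.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int pidfd = -1;
 *	struct clone_args args = {
 *		.flags		= CLONE_PIDFD,
 *		.pidfd		= (__u64)(uintptr_t)&pidfd,
 *		.exit_signal	= SIGCHLD,
 *	};
 *
 *	pid_t child = syscall(SYS_clone3, &args, sizeof(args));
 *	if (child == 0)
 *		_exit(0);		// child
 *	// Parent: pidfd now refers to the child and can be poll()ed or
 *	// handed to pidfd_send_signal().
 */
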
void walk_process_tree(struct task_struct *top, proc_visitor visitor, void *data)
3123 {
3124 struct task_struct *leader, *parent, *child;
3125 int res;
3126
3127 read_lock(&tasklist_lock);
3128 leader = top = top->group_leader;
3129 down:
3130 for_each_thread(leader, parent) {
3131 list_for_each_entry(child, &parent->children, sibling) {
3132 res = visitor(child, data);
3133 if (res) {
3134 if (res < 0)
3135 goto out;
3136 leader = child;
3137 goto down;
3138 }
3139 up:
3140 ;
3141 }
3142 }
3143
3144 if (leader != top) {
3145 child = leader;
3146 parent = child->real_parent;
3147 leader = parent->group_leader;
3148 goto up;
3149 }
3150 out:
3151 read_unlock(&tasklist_lock);
3152 }
3153
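/*
 * Illustrative sketch (assumption): a proc_visitor callback as consumed by
 * walk_process_tree() above. Returning 0 moves on to the next sibling, a
 * positive value descends into the visited task's children, and a negative
 * value aborts the walk. The count_descendants() use case is made up for the
 * example.
 *
 *	static int count_descendants(struct task_struct *task, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *		return 1;	// keep descending
 *	}
 *
 *	unsigned int count = 0;
 *	walk_process_tree(current, count_descendants, &count);
 */
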
3154 #ifndef ARCH_MIN_MMSTRUCT_ALIGN
3155 #define ARCH_MIN_MMSTRUCT_ALIGN 0
3156 #endif
3157
static void sighand_ctor(void *data)
3159 {
3160 struct sighand_struct *sighand = data;
3161
3162 spin_lock_init(&sighand->siglock);
3163 init_waitqueue_head(&sighand->signalfd_wqh);
3164 }
3165
void __init mm_cache_init(void)
3167 {
3168 unsigned int mm_size;
3169
3170 /*
3171 * The mm_cpumask is located at the end of mm_struct, and is
3172 * dynamically sized based on the maximum CPU number this system
3173 * can have, taking hotplug into account (nr_cpu_ids).
3174 */
3175 mm_size = sizeof(struct mm_struct) + cpumask_size() + mm_cid_size();
3176
3177 mm_cachep = kmem_cache_create_usercopy("mm_struct",
3178 mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
3179 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
3180 offsetof(struct mm_struct, saved_auxv),
3181 sizeof_field(struct mm_struct, saved_auxv),
3182 NULL);
3183 }
3184
void __init proc_caches_init(void)
3186 {
3187 sighand_cachep = kmem_cache_create("sighand_cache",
3188 sizeof(struct sighand_struct), 0,
3189 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
3190 SLAB_ACCOUNT, sighand_ctor);
3191 signal_cachep = kmem_cache_create("signal_cache",
3192 sizeof(struct signal_struct), 0,
3193 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
3194 NULL);
3195 files_cachep = kmem_cache_create("files_cache",
3196 sizeof(struct files_struct), 0,
3197 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
3198 NULL);
3199 fs_cachep = kmem_cache_create("fs_cache",
3200 sizeof(struct fs_struct), 0,
3201 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
3202 NULL);
3203
3204 vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
3205 #ifdef CONFIG_PER_VMA_LOCK
3206 vma_lock_cachep = KMEM_CACHE(vma_lock, SLAB_PANIC|SLAB_ACCOUNT);
3207 #endif
3208 mmap_init();
3209 nsproxy_cache_init();
3210 }
3211
3212 /*
3213 * Check constraints on flags passed to the unshare system call.
3214 */
static int check_unshare_flags(unsigned long unshare_flags)
3216 {
3217 if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
3218 CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
3219 CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
3220 CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP|
3221 CLONE_NEWTIME))
3222 return -EINVAL;
3223 /*
3224 * Not implemented, but pretend it works if there is nothing
3225 * to unshare. Note that unsharing the address space or the
	 * signal handlers also needs to unshare the signal queues (aka
3227 * CLONE_THREAD).
3228 */
3229 if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
3230 if (!thread_group_empty(current))
3231 return -EINVAL;
3232 }
3233 if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
		if (refcount_read(&current->sighand->count) > 1)
3235 return -EINVAL;
3236 }
3237 if (unshare_flags & CLONE_VM) {
3238 if (!current_is_single_threaded())
3239 return -EINVAL;
3240 }
3241
3242 return 0;
3243 }
3244
3245 /*
3246 * Unshare the filesystem structure if it is being shared
3247 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
3249 {
3250 struct fs_struct *fs = current->fs;
3251
3252 if (!(unshare_flags & CLONE_FS) || !fs)
3253 return 0;
3254
3255 /* don't need lock here; in the worst case we'll do useless copy */
3256 if (fs->users == 1)
3257 return 0;
3258
3259 *new_fsp = copy_fs_struct(fs);
3260 if (!*new_fsp)
3261 return -ENOMEM;
3262
3263 return 0;
3264 }
3265
3266 /*
3267 * Unshare file descriptor table if it is being shared
3268 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
3270 {
3271 struct files_struct *fd = current->files;
3272
3273 if ((unshare_flags & CLONE_FILES) &&
3274 (fd && atomic_read(&fd->count) > 1)) {
3275 fd = dup_fd(fd, NULL);
3276 if (IS_ERR(fd))
3277 return PTR_ERR(fd);
3278 *new_fdp = fd;
3279 }
3280
3281 return 0;
3282 }
3283
3284 /*
3285 * unshare allows a process to 'unshare' part of the process
3286 * context which was originally shared using clone. copy_*
3287 * functions used by kernel_clone() cannot be used here directly
3288 * because they modify an inactive task_struct that is being
3289 * constructed. Here we are modifying the current, active,
3290 * task_struct.
3291 */
int ksys_unshare(unsigned long unshare_flags)
3293 {
3294 struct fs_struct *fs, *new_fs = NULL;
3295 struct files_struct *new_fd = NULL;
3296 struct cred *new_cred = NULL;
3297 struct nsproxy *new_nsproxy = NULL;
3298 int do_sysvsem = 0;
3299 int err;
3300
3301 /*
	 * If unsharing a user namespace, we must also unshare the thread group
3303 * and unshare the filesystem root and working directories.
3304 */
3305 if (unshare_flags & CLONE_NEWUSER)
3306 unshare_flags |= CLONE_THREAD | CLONE_FS;
3307 /*
3308 * If unsharing vm, must also unshare signal handlers.
3309 */
3310 if (unshare_flags & CLONE_VM)
3311 unshare_flags |= CLONE_SIGHAND;
3312 /*
	 * If unsharing signal handlers, we must also unshare the signal queues.
3314 */
3315 if (unshare_flags & CLONE_SIGHAND)
3316 unshare_flags |= CLONE_THREAD;
3317 /*
3318 * If unsharing namespace, must also unshare filesystem information.
3319 */
3320 if (unshare_flags & CLONE_NEWNS)
3321 unshare_flags |= CLONE_FS;
3322
3323 err = check_unshare_flags(unshare_flags);
3324 if (err)
3325 goto bad_unshare_out;
3326 /*
3327 * CLONE_NEWIPC must also detach from the undolist: after switching
3328 * to a new ipc namespace, the semaphore arrays from the old
3329 * namespace are unreachable.
3330 */
3331 if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
3332 do_sysvsem = 1;
3333 err = unshare_fs(unshare_flags, &new_fs);
3334 if (err)
3335 goto bad_unshare_out;
3336 err = unshare_fd(unshare_flags, &new_fd);
3337 if (err)
3338 goto bad_unshare_cleanup_fs;
3339 err = unshare_userns(unshare_flags, &new_cred);
3340 if (err)
3341 goto bad_unshare_cleanup_fd;
3342 err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
3343 new_cred, new_fs);
3344 if (err)
3345 goto bad_unshare_cleanup_cred;
3346
3347 if (new_cred) {
3348 err = set_cred_ucounts(new_cred);
3349 if (err)
3350 goto bad_unshare_cleanup_cred;
3351 }
3352
3353 if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
3354 if (do_sysvsem) {
3355 /*
3356 * CLONE_SYSVSEM is equivalent to sys_exit().
3357 */
3358 exit_sem(current);
3359 }
3360 if (unshare_flags & CLONE_NEWIPC) {
3361 /* Orphan segments in old ns (see sem above). */
3362 exit_shm(current);
3363 shm_init_task(current);
3364 }
3365
3366 if (new_nsproxy)
3367 switch_task_namespaces(current, new_nsproxy);
3368
3369 task_lock(current);
3370
3371 if (new_fs) {
3372 fs = current->fs;
3373 spin_lock(&fs->lock);
3374 current->fs = new_fs;
3375 if (--fs->users)
3376 new_fs = NULL;
3377 else
3378 new_fs = fs;
3379 spin_unlock(&fs->lock);
3380 }
3381
3382 if (new_fd)
3383 swap(current->files, new_fd);
3384
3385 task_unlock(current);
3386
3387 if (new_cred) {
3388 /* Install the new user namespace */
3389 commit_creds(new_cred);
3390 new_cred = NULL;
3391 }
3392 }
3393
3394 perf_event_namespaces(current);
3395
3396 bad_unshare_cleanup_cred:
3397 if (new_cred)
3398 put_cred(new_cred);
3399 bad_unshare_cleanup_fd:
3400 if (new_fd)
3401 put_files_struct(new_fd);
3402
3403 bad_unshare_cleanup_fs:
3404 if (new_fs)
3405 free_fs_struct(new_fs);
3406
3407 bad_unshare_out:
3408 return err;
3409 }
3410
SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
3412 {
3413 return ksys_unshare(unshare_flags);
3414 }
3415
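/*
 * Illustrative sketch (assumption, userspace side): detaching the calling
 * process into new mount and UTS namespaces. The flag combination and error
 * handling are chosen for the example only.
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	if (unshare(CLONE_NEWNS | CLONE_NEWUTS) < 0) {
 *		// Typically requires CAP_SYS_ADMIN in the current user namespace.
 *		perror("unshare");
 *		return 1;
 *	}
 *	// Mount and hostname changes made from here on are no longer shared
 *	// with the original namespaces.
 */
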
3416 /*
3417 * Helper to unshare the files of the current task.
3418 * We don't want to expose copy_files internals to
3419 * the exec layer of the kernel.
3420 */
3421
int unshare_files(void)
3423 {
3424 struct task_struct *task = current;
3425 struct files_struct *old, *copy = NULL;
3426 int error;
3427
	error = unshare_fd(CLONE_FILES, &copy);
3429 if (error || !copy)
3430 return error;
3431
3432 old = task->files;
3433 task_lock(task);
3434 task->files = copy;
3435 task_unlock(task);
3436 put_files_struct(old);
3437 return 0;
3438 }
3439
int sysctl_max_threads(const struct ctl_table *table, int write,
3441 void *buffer, size_t *lenp, loff_t *ppos)
3442 {
3443 struct ctl_table t;
3444 int ret;
3445 int threads = max_threads;
3446 int min = 1;
3447 int max = MAX_THREADS;
3448
3449 t = *table;
3450 t.data = &threads;
3451 t.extra1 = &min;
3452 t.extra2 = &max;
3453
3454 ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
3455 if (ret || !write)
3456 return ret;
3457
3458 max_threads = threads;
3459
3460 return 0;
3461 }
3462
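/*
 * Note (assumption about the wiring, see kernel/sysctl.c): this handler backs
 * the kernel.threads-max sysctl, so the limit can be inspected and adjusted
 * from userspace, e.g.
 *
 *	cat /proc/sys/kernel/threads-max
 *	echo 100000 > /proc/sys/kernel/threads-max
 *
 * Values outside the [1, MAX_THREADS] range are rejected by the min/max
 * bounds installed above.
 */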