// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 Nadia Yvette Chambers, IBM
 * (C) 2004 Nadia Yvette Chambers, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given IDs on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, whose bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. In the worst-case
 * allocation scenario, when all but one of 1 million possible PIDs are
 * already allocated, we scan 32 list entries and at most PAGE_SIZE
 * bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/memblock.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/proc_ns.h>
#include <linux/refcount.h>
#include <linux/anon_inodes.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/idr.h>
#include <linux/pidfs.h>
#include <net/sock.h>
#include <uapi/linux/pidfd.h>

struct pid init_struct_pid = {
	.count		= REFCOUNT_INIT(1),
	.tasks		= {
		{ .first = NULL },
		{ .first = NULL },
		{ .first = NULL },
	},
	.level		= 0,
	.numbers	= { {
		.nr		= 0,
		.ns		= &init_pid_ns,
	}, }
};

static int pid_max_min = RESERVED_PIDS + 1;
static int pid_max_max = PID_MAX_LIMIT;

/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales up to 4 million PIDs at runtime.
 */
struct pid_namespace init_pid_ns = {
	.ns = NS_COMMON_INIT(init_pid_ns),
	.idr = IDR_INIT(init_pid_ns.idr),
	.pid_allocated = PIDNS_ADDING,
	.level = 0,
	.child_reaper = &init_task,
	.user_ns = &init_user_ns,
	.pid_max = PID_MAX_DEFAULT,
#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
	.memfd_noexec_scope = MEMFD_NOEXEC_SCOPE_EXEC,
#endif
};
EXPORT_SYMBOL_GPL(init_pid_ns);

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if (refcount_dec_and_test(&pid->count)) {
		pidfs_free_pid(pid);
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);
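
/*
 * Example usage (a sketch, not part of this file): references obtained
 * with get_pid() or find_get_pid() must be balanced with put_pid(); the
 * final put frees the struct pid and drops its namespace reference:
 *
 *	struct pid *p = find_get_pid(nr);
 *
 *	if (p) {
 *		... p cannot be freed while the reference is held ...
 *		put_pid(p);
 *	}
 */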

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

void free_pid(struct pid *pid)
{
	int i;
	struct pid_namespace *active_ns;

	lockdep_assert_not_held(&tasklist_lock);

	active_ns = pid->numbers[pid->level].ns;
	ns_ref_active_put(active_ns);

	spin_lock(&pidmap_lock);
	for (i = 0; i <= pid->level; i++) {
		struct upid *upid = pid->numbers + i;
		struct pid_namespace *ns = upid->ns;
		switch (--ns->pid_allocated) {
		case 2:
		case 1:
			/*
			 * When all that is left in the pid namespace is the
			 * reaper, wake up the reaper. The reaper may be
			 * sleeping in zap_pid_ns_processes().
			 */
			wake_up_process(ns->child_reaper);
			break;
		case PIDNS_ADDING:
			/* Only possible if the 1st fork fails */
			WARN_ON(READ_ONCE(ns->child_reaper));
			break;
		}

		idr_remove(&ns->idr, upid->nr);
	}
	spin_unlock(&pidmap_lock);

	pidfs_remove_pid(pid);
	call_rcu(&pid->rcu, delayed_put_pid);
}

void free_pids(struct pid **pids)
{
	int tmp;

	/*
	 * This is a candidate for batching: all pids could be freed under a
	 * single pidmap_lock acquisition.
	 */
	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (pids[tmp])
			free_pid(pids[tmp]);
}
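
/*
 * Illustrative caller pattern (a sketch, not a definition from this
 * file): pids detached under tasklist_lock are collected in a
 * zero-initialized array and freed only after the lock is dropped,
 * because free_pid() asserts that tasklist_lock is not held:
 *
 *	struct pid *pids[PIDTYPE_MAX] = { NULL };
 *
 *	write_lock_irq(&tasklist_lock);
 *	detach_pid(pids, task, PIDTYPE_PID);
 *	write_unlock_irq(&tasklist_lock);
 *	free_pids(pids);
 */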

struct pid *alloc_pid(struct pid_namespace *ns, pid_t *arg_set_tid,
		      size_t arg_set_tid_size)
{
	int set_tid[MAX_PID_NS_LEVEL + 1] = {};
	int pid_max[MAX_PID_NS_LEVEL + 1] = {};
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;
	int retval = -ENOMEM;
	bool retried_preload;

	/*
	 * arg_set_tid_size contains the size of the arg_set_tid array. Starting
	 * at the most nested currently active PID namespace, it tells alloc_pid()
	 * which PID to set for a process in that most nested PID namespace,
	 * covering up to arg_set_tid_size PID namespaces. It does not have to
	 * set the PID for a process in all nested PID namespaces, but
	 * arg_set_tid_size must never be greater than the current ns->level + 1.
	 */
	if (arg_set_tid_size > ns->level + 1)
		return ERR_PTR(-EINVAL);

	/*
	 * Prep before we take locks:
	 *
	 * 1. allocate and fill in the pid struct
	 */
	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		return ERR_PTR(retval);

	get_pid_ns(ns);
	pid->level = ns->level;
	refcount_set(&pid->count, 1);
	spin_lock_init(&pid->lock);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);
	init_waitqueue_head(&pid->wait_pidfd);
	INIT_HLIST_HEAD(&pid->inodes);
	pidfs_prepare_pid(pid);

	/*
	 * 2. permission check via checkpoint_restore_ns_capable()
	 *
	 * This also snapshots each namespace's pid_max so that later code is
	 * guaranteed to use the same value.
	 */
	for (tmp = ns, i = ns->level; i >= 0; i--) {
		pid_max[ns->level - i] = READ_ONCE(tmp->pid_max);

		if (arg_set_tid_size) {
			int tid = set_tid[ns->level - i] = arg_set_tid[ns->level - i];

			retval = -EINVAL;
			if (tid < 1 || tid >= pid_max[ns->level - i])
				goto out_abort;
			/*
			 * Also fail if a PID != 1 is requested and
			 * no PID 1 exists.
			 */
			if (tid != 1 && !tmp->child_reaper)
				goto out_abort;
			retval = -EPERM;
			if (!checkpoint_restore_ns_capable(tmp->user_ns))
				goto out_abort;
			arg_set_tid_size--;
		}

		tmp = tmp->parent;
	}

	/*
	 * Prep is done, id allocation goes here:
	 */
	retried_preload = false;
	idr_preload(GFP_KERNEL);
	spin_lock(&pidmap_lock);
	/* For the case when the previous attempt to create init failed */
	if (ns->pid_allocated == PIDNS_ADDING)
		idr_set_cursor(&ns->idr, 0);

	for (tmp = ns, i = ns->level; i >= 0;) {
		int tid = set_tid[ns->level - i];

		if (tid) {
			nr = idr_alloc(&tmp->idr, NULL, tid,
				       tid + 1, GFP_ATOMIC);
			/*
			 * If ENOSPC is returned it means that the PID is
			 * already in use. Return EEXIST in that case.
			 */
			if (nr == -ENOSPC)
				nr = -EEXIST;
		} else {
			int pid_min = 1;
			/*
			 * init really needs pid 1, but after reaching the
			 * maximum wrap back to RESERVED_PIDS
			 */
			if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS)
				pid_min = RESERVED_PIDS;

			/*
			 * Store a null pointer so find_pid_ns does not find
			 * a partially initialized PID (see below).
			 */
			nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
					      pid_max[ns->level - i], GFP_ATOMIC);
			if (nr == -ENOSPC)
				nr = -EAGAIN;
		}

		if (unlikely(nr < 0)) {
			/*
			 * Preload more memory if idr_alloc{,_cyclic}() failed with -ENOMEM.
			 *
			 * The IDR API only allows us to preload memory for one call, while we may
			 * end up doing several under pidmap_lock with GFP_ATOMIC. The situation may
			 * be salvageable with GFP_KERNEL. But make sure to not loop indefinitely if
			 * the preload did not help (the routine unfortunately returns void, so we
			 * have no idea if it got anywhere).
			 *
			 * The lock can be safely dropped and picked up as historically pid
			 * allocation for different namespaces was *not* atomic -- we try to hold on
			 * to it the entire time only for performance reasons.
			 */
			if (nr == -ENOMEM && !retried_preload) {
				spin_unlock(&pidmap_lock);
				idr_preload_end();
				retried_preload = true;
				idr_preload(GFP_KERNEL);
				spin_lock(&pidmap_lock);
				continue;
			}
			retval = nr;
			goto out_free;
		}

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
		i--;
		retried_preload = false;
	}

	/*
	 * ENOMEM is not the most obvious choice especially for the case
	 * where the child subreaper has already exited and the pid
	 * namespace denies the creation of any new processes. But ENOMEM
	 * is what we have exposed to userspace for a long time and it is
	 * documented behavior for pid namespaces. So we can't easily
	 * change it even if there were an error code better suited.
	 *
	 * This can't be done earlier because we need to preserve other
	 * error conditions.
	 *
	 * We need this even if copy_process() does the same check. If two
	 * or more tasks from the parent namespace try to inject a child
	 * into a dead namespace, one of the free_pid() calls from the
	 * copy_process() error path may try to wake up the possibly freed
	 * ns->child_reaper.
	 */
	retval = -ENOMEM;
	if (unlikely(!(ns->pid_allocated & PIDNS_ADDING)))
		goto out_free;
	for (upid = pid->numbers + ns->level; upid >= pid->numbers; --upid) {
		/* Make the PID visible to find_pid_ns. */
		idr_replace(&upid->ns->idr, pid, upid->nr);
		upid->ns->pid_allocated++;
	}
	spin_unlock(&pidmap_lock);
	idr_preload_end();
	ns_ref_active_get(ns);

	retval = pidfs_add_pid(pid);
	if (unlikely(retval)) {
		free_pid(pid);
		pid = ERR_PTR(-ENOMEM);
	}

	return pid;

out_free:
	while (++i <= ns->level) {
		upid = pid->numbers + i;
		idr_remove(&upid->ns->idr, upid->nr);
	}

	spin_unlock(&pidmap_lock);
	idr_preload_end();

out_abort:
	put_pid_ns(ns);
	kmem_cache_free(ns->pid_cachep, pid);
	return ERR_PTR(retval);
}
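
/*
 * Illustrative layout (a sketch based on the allocation loop above, with
 * hypothetical numbers): for a task created two namespace levels below
 * the initial one, pid->level == 2 and pid->numbers[] holds one upid per
 * level:
 *
 *	pid->numbers[0] = { .nr = 12345, .ns = &init_pid_ns };	// global nr
 *	pid->numbers[1] = { .nr = 142,   .ns = level-1 ns };	// middle ns
 *	pid->numbers[2] = { .nr = 2,     .ns = level-2 ns };	// most nested
 *
 * The loop fills the array from the most nested level (i == ns->level)
 * down to the init namespace (i == 0), following the parent chain.
 */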

void disable_pid_allocation(struct pid_namespace *ns)
{
	spin_lock(&pidmap_lock);
	ns->pid_allocated &= ~PIDNS_ADDING;
	spin_unlock(&pidmap_lock);
}

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	return idr_find(&ns->idr, nr);
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);

static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type)
{
	return (type == PIDTYPE_PID) ?
		&task->thread_pid :
		&task->signal->pids[type];
}

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;

	lockdep_assert_held_write(&tasklist_lock);

	pid = *task_pid_ptr(task, type);
	hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]);
}

static void __change_pid(struct pid **pids, struct task_struct *task,
			 enum pid_type type, struct pid *new)
{
	struct pid **pid_ptr, *pid;
	int tmp;

	lockdep_assert_held_write(&tasklist_lock);

	pid_ptr = task_pid_ptr(task, type);
	pid = *pid_ptr;

	hlist_del_rcu(&task->pid_links[type]);
	*pid_ptr = new;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (pid_has_task(pid, tmp))
			return;

	WARN_ON(pids[type]);
	pids[type] = pid;
}

void detach_pid(struct pid **pids, struct task_struct *task, enum pid_type type)
{
	__change_pid(pids, task, type, NULL);
}

void change_pid(struct pid **pids, struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(pids, task, type, pid);
	attach_pid(task, type);
}

void exchange_tids(struct task_struct *left, struct task_struct *right)
{
	struct pid *pid1 = left->thread_pid;
	struct pid *pid2 = right->thread_pid;
	struct hlist_head *head1 = &pid1->tasks[PIDTYPE_PID];
	struct hlist_head *head2 = &pid2->tasks[PIDTYPE_PID];

	lockdep_assert_held_write(&tasklist_lock);

	/* Swap the single-entry tid lists */
	hlists_swap_heads_rcu(head1, head2);

	/* Swap the per-task_struct pid */
	rcu_assign_pointer(left->thread_pid, pid2);
	rcu_assign_pointer(right->thread_pid, pid1);

	/* Swap the cached value */
	WRITE_ONCE(left->pid, pid_nr(pid2));
	WRITE_ONCE(right->pid, pid_nr(pid1));
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
			   enum pid_type type)
{
	WARN_ON_ONCE(type == PIDTYPE_PID);
	lockdep_assert_held_write(&tasklist_lock);
	hlist_replace_rcu(&old->pid_links[type], &new->pid_links[type]);
}

struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
					      lockdep_tasklist_lock_is_held());
		if (first)
			result = hlist_entry(first, struct task_struct, pid_links[(type)]);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "find_task_by_pid_ns() needs rcu_read_lock() protection");
	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}

struct task_struct *find_get_task_by_vpid(pid_t nr)
{
	struct task_struct *task;

	rcu_read_lock();
	task = find_task_by_vpid(nr);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	return task;
}
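
/*
 * Example usage (a sketch, not part of this file): callers that cannot
 * hold rcu_read_lock() across their whole use of the task should prefer
 * the _get_ variant above and release the reference when done:
 *
 *	struct task_struct *task = find_get_task_by_vpid(nr);
 *
 *	if (task) {
 *		... task cannot be freed while the reference is held ...
 *		put_task_struct(task);
 *	}
 */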

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	pid = get_pid(rcu_dereference(*task_pid_ptr(task, type)));
	rcu_read_unlock();
	return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);

pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);
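
/*
 * Worked example (illustrative): with the three-level pid sketched after
 * alloc_pid() above, pid_nr_ns() answers "what is this pid called in
 * namespace ns?". For ns == &init_pid_ns (level 0) it returns
 * numbers[0].nr; pid_vnr() called from the most nested namespace returns
 * numbers[2].nr. A namespace that is not on the pid's ancestor chain
 * fails the upid->ns == ns check and yields 0.
 */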

pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns)
{
	pid_t nr = 0;

	rcu_read_lock();
	if (!ns)
		ns = task_active_pid_ns(current);
	nr = pid_nr_ns(rcu_dereference(*task_pid_ptr(task, type)), ns);
	rcu_read_unlock();

	return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
	return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	return idr_get_next(&ns->idr, &nr);
}
EXPORT_SYMBOL_GPL(find_ge_pid);

struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags)
{
	CLASS(fd, f)(fd);
	struct pid *pid;

	if (fd_empty(f))
		return ERR_PTR(-EBADF);

	pid = pidfd_pid(fd_file(f));
	if (!IS_ERR(pid)) {
		get_pid(pid);
		*flags = fd_file(f)->f_flags;
	}
	return pid;
}

/**
 * pidfd_get_task() - Get the task associated with a pidfd
 *
 * @pidfd: pidfd for which to get the task
 * @flags: flags associated with this pidfd
 *
 * Return the task associated with @pidfd. The function takes a reference on
 * the returned task. The caller is responsible for releasing that reference.
 *
 * Return: On success, the task_struct associated with the pidfd.
 *	   On error, a negative errno number will be returned.
 */
struct task_struct *pidfd_get_task(int pidfd, unsigned int *flags)
{
	unsigned int f_flags = 0;
	struct pid *pid;
	struct task_struct *task;
	enum pid_type type;

	switch (pidfd) {
	case PIDFD_SELF_THREAD:
		type = PIDTYPE_PID;
		pid = get_task_pid(current, type);
		break;
	case PIDFD_SELF_THREAD_GROUP:
		type = PIDTYPE_TGID;
		pid = get_task_pid(current, type);
		break;
	default:
		pid = pidfd_get_pid(pidfd, &f_flags);
		if (IS_ERR(pid))
			return ERR_CAST(pid);
		type = PIDTYPE_TGID;
		break;
	}

	task = get_pid_task(pid, type);
	put_pid(pid);
	if (!task)
		return ERR_PTR(-ESRCH);

	*flags = f_flags;
	return task;
}
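
/*
 * Example usage (a sketch; in-kernel callers such as process_madvise()
 * follow this pattern): the returned task carries a reference that the
 * caller must drop:
 *
 *	unsigned int f_flags;
 *	struct task_struct *task = pidfd_get_task(pidfd, &f_flags);
 *
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 *	... operate on task ...
 *	put_task_struct(task);
 */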

/**
 * pidfd_create() - Create a new pid file descriptor.
 *
 * @pid:   struct pid that the pidfd will reference
 * @flags: flags to pass
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set.
 *
 * Note that this function can only be called after the fd table has
 * been unshared to avoid leaking the pidfd to the new process.
 *
 * This symbol should not be explicitly exported to loadable modules.
 *
 * Return: On success, a cloexec pidfd is returned.
 *         On error, a negative errno number will be returned.
 */
static int pidfd_create(struct pid *pid, unsigned int flags)
{
	int pidfd;
	struct file *pidfd_file;

	pidfd = pidfd_prepare(pid, flags, &pidfd_file);
	if (pidfd < 0)
		return pidfd;

	fd_install(pidfd, pidfd_file);
	return pidfd;
}

/**
 * sys_pidfd_open() - Open new pid file descriptor.
 *
 * @pid:   pid for which to retrieve a pidfd
 * @flags: flags to pass
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set for
 * the task identified by @pid. Without the PIDFD_THREAD flag the target task
 * must be a thread-group leader.
 *
 * Return: On success, a cloexec pidfd is returned.
 *         On error, a negative errno number will be returned.
 */
SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags)
{
	int fd;
	struct pid *p;

	if (flags & ~(PIDFD_NONBLOCK | PIDFD_THREAD))
		return -EINVAL;

	if (pid <= 0)
		return -EINVAL;

	p = find_get_pid(pid);
	if (!p)
		return -ESRCH;

	fd = pidfd_create(p, flags);

	put_pid(p);
	return fd;
}
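
/*
 * Userspace usage sketch (illustrative; assumes a libc that exposes
 * syscall(2) and the SYS_pidfd_open number):
 *
 *	#include <poll.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *
 *	if (pidfd >= 0) {
 *		struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
 *
 *		poll(&pfd, 1, -1);	// becomes readable when the process exits
 *		close(pidfd);
 *	}
 */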

#ifdef CONFIG_SYSCTL
static struct ctl_table_set *pid_table_root_lookup(struct ctl_table_root *root)
{
	return &task_active_pid_ns(current)->set;
}

static int set_is_seen(struct ctl_table_set *set)
{
	return &task_active_pid_ns(current)->set == set;
}

static int pid_table_root_permissions(struct ctl_table_header *head,
				      const struct ctl_table *table)
{
	struct pid_namespace *pidns =
		container_of(head->set, struct pid_namespace, set);
	int mode = table->mode;

	if (ns_capable_noaudit(pidns->user_ns, CAP_SYS_ADMIN) ||
	    uid_eq(current_euid(), make_kuid(pidns->user_ns, 0)))
		mode = (mode & S_IRWXU) >> 6;
	else if (in_egroup_p(make_kgid(pidns->user_ns, 0)))
		mode = (mode & S_IRWXG) >> 3;
	else
		mode = mode & S_IROTH;
	return (mode << 6) | (mode << 3) | mode;
}
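
/*
 * Worked example (illustrative): for the 0644 pid_max table below, a
 * caller that is CAP_SYS_ADMIN in (or maps to uid 0 of) the pid
 * namespace's user namespace is granted the owner bits:
 * (0644 & S_IRWXU) >> 6 == 6 (rw), replicated into all three slots as
 * 0666 so the grant applies regardless of which slot the VFS checks.
 * Everyone else falls through to the "other" bits and gets read-only
 * access (0444).
 */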

static void pid_table_root_set_ownership(struct ctl_table_header *head,
					 kuid_t *uid, kgid_t *gid)
{
	struct pid_namespace *pidns =
		container_of(head->set, struct pid_namespace, set);
	kuid_t ns_root_uid;
	kgid_t ns_root_gid;

	ns_root_uid = make_kuid(pidns->user_ns, 0);
	if (uid_valid(ns_root_uid))
		*uid = ns_root_uid;

	ns_root_gid = make_kgid(pidns->user_ns, 0);
	if (gid_valid(ns_root_gid))
		*gid = ns_root_gid;
}

static struct ctl_table_root pid_table_root = {
	.lookup		= pid_table_root_lookup,
	.permissions	= pid_table_root_permissions,
	.set_ownership	= pid_table_root_set_ownership,
};

static int proc_do_cad_pid(const struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	struct pid *new_pid;
	pid_t tmp_pid;
	int r;
	struct ctl_table tmp_table = *table;

	tmp_pid = pid_vnr(cad_pid);
	tmp_table.data = &tmp_pid;

	r = proc_dointvec(&tmp_table, write, buffer, lenp, ppos);
	if (r || !write)
		return r;

	new_pid = find_get_pid(tmp_pid);
	if (!new_pid)
		return -ESRCH;

	put_pid(xchg(&cad_pid, new_pid));
	return 0;
}

static const struct ctl_table pid_table[] = {
	{
		.procname	= "pid_max",
		.data		= &init_pid_ns.pid_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &pid_max_min,
		.extra2		= &pid_max_max,
	},
#ifdef CONFIG_PROC_SYSCTL
	{
		.procname	= "cad_pid",
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= proc_do_cad_pid,
	},
#endif
};
#endif

int register_pidns_sysctls(struct pid_namespace *pidns)
{
#ifdef CONFIG_SYSCTL
	struct ctl_table *tbl;

	setup_sysctl_set(&pidns->set, &pid_table_root, set_is_seen);

	tbl = kmemdup(pid_table, sizeof(pid_table), GFP_KERNEL);
	if (!tbl)
		return -ENOMEM;
	tbl->data = &pidns->pid_max;
	pidns->pid_max = min(pid_max_max, max_t(int, pidns->pid_max,
			     PIDS_PER_CPU_DEFAULT * num_possible_cpus()));

	pidns->sysctls = __register_sysctl_table(&pidns->set, "kernel", tbl,
						 ARRAY_SIZE(pid_table));
	if (!pidns->sysctls) {
		kfree(tbl);
		retire_sysctl_set(&pidns->set);
		return -ENOMEM;
	}
#endif
	return 0;
}

void unregister_pidns_sysctls(struct pid_namespace *pidns)
{
#ifdef CONFIG_SYSCTL
	const struct ctl_table *tbl;

	tbl = pidns->sysctls->ctl_table_arg;
	unregister_sysctl_table(pidns->sysctls);
	retire_sysctl_set(&pidns->set);
	kfree(tbl);
#endif
}

void __init pid_idr_init(void)
{
	/* Verify no one has done anything silly: */
	BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_ADDING);

	/* bump default and minimum pid_max based on number of cpus */
	init_pid_ns.pid_max = min(pid_max_max, max_t(int, init_pid_ns.pid_max,
				  PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
	pid_max_min = max_t(int, pid_max_min,
				PIDS_PER_CPU_MIN * num_possible_cpus());
	pr_info("pid_max: default: %u minimum: %u\n", init_pid_ns.pid_max, pid_max_min);

	idr_init(&init_pid_ns.idr);

	init_pid_ns.pid_cachep = kmem_cache_create("pid",
			struct_size_t(struct pid, numbers, 1),
			__alignof__(struct pid),
			SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT,
			NULL);
}
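
/*
 * Worked example (illustrative; assumes PIDS_PER_CPU_DEFAULT == 1024 and
 * PID_MAX_DEFAULT == 0x8000, their values at the time of writing): on a
 * 64-CPU machine the scaled value is 1024 * 64 = 65536 > 32768, so
 * pid_max is bumped to 65536 (still clamped by pid_max_max). On a 16-CPU
 * machine 1024 * 16 = 16384 < 32768, so the default stands.
 */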

static __init int pid_namespace_sysctl_init(void)
{
#ifdef CONFIG_SYSCTL
	/* "kernel" directory will have already been initialized. */
	BUG_ON(register_pidns_sysctls(&init_pid_ns));
#endif
	return 0;
}
subsys_initcall(pid_namespace_sysctl_init);

static struct file *__pidfd_fget(struct task_struct *task, int fd)
{
	struct file *file;
	int ret;

	ret = down_read_killable(&task->signal->exec_update_lock);
	if (ret)
		return ERR_PTR(ret);

	if (ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS))
		file = fget_task(task, fd);
	else
		file = ERR_PTR(-EPERM);

	up_read(&task->signal->exec_update_lock);

	if (!file) {
		/*
		 * It is possible that the target thread is exiting; it can be
		 * either:
		 * 1. before exit_signals(), which gives a real fd
		 * 2. before exit_files() takes the task_lock(), which also
		 *    gives a real fd
		 * 3. after exit_files() releases task_lock(); ->files is NULL
		 *    and the task has PF_EXITING (set in exit_signals()), so
		 *    __pidfd_fget() returns EBADF.
		 * In case 3 we get EBADF, but that really means ESRCH, since
		 * the task is currently exiting and has freed its files
		 * struct, so we fix it up.
		 */
		if (task->flags & PF_EXITING)
			file = ERR_PTR(-ESRCH);
		else
			file = ERR_PTR(-EBADF);
	}

	return file;
}

static int pidfd_getfd(struct pid *pid, int fd)
{
	struct task_struct *task;
	struct file *file;
	int ret;

	task = get_pid_task(pid, PIDTYPE_PID);
	if (!task)
		return -ESRCH;

	file = __pidfd_fget(task, fd);
	put_task_struct(task);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ret = receive_fd(file, NULL, O_CLOEXEC);
	fput(file);

	return ret;
}

/**
 * sys_pidfd_getfd() - Get a file descriptor from another process
 *
 * @pidfd:	the pidfd file descriptor of the process
 * @fd:		the file descriptor number to get
 * @flags:	flags on how to get the fd (reserved)
 *
 * This syscall gets a copy of a file descriptor from another process
 * based on the pidfd, and file descriptor number. It requires that
 * the calling process has the ability to ptrace the process represented
 * by the pidfd. The process which is having its file descriptor copied
 * is otherwise unaffected.
 *
 * Return: On success, a cloexec file descriptor is returned.
 *         On error, a negative errno number will be returned.
 */
SYSCALL_DEFINE3(pidfd_getfd, int, pidfd, int, fd,
		unsigned int, flags)
{
	struct pid *pid;

	/* flags is currently unused - make sure it's unset */
	if (flags)
		return -EINVAL;

	CLASS(fd, f)(pidfd);
	if (fd_empty(f))
		return -EBADF;

	pid = pidfd_pid(fd_file(f));
	if (IS_ERR(pid))
		return PTR_ERR(pid);

	return pidfd_getfd(pid, fd);
}
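
/*
 * Userspace usage sketch (illustrative; assumes a libc that exposes
 * syscall(2) and the SYS_pidfd_getfd number): duplicate fd 1 of the
 * process referred to by pidfd into the caller:
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int fd = syscall(SYS_pidfd_getfd, pidfd, 1, 0);
 *
 *	if (fd >= 0) {
 *		write(fd, "hi\n", 3);	// writes to the target's fd 1
 *		close(fd);
 *	}
 *
 * Per __pidfd_fget() above, the caller needs
 * PTRACE_MODE_ATTACH_REALCREDS access to the target task.
 */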
970