kernel/pid.c: lines matching the full word "pid"

 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against.
 *
 * We have a list of bitmap pages, whose bitmaps represent the PID space.
 *
 * Pid namespaces:
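
For orientation: the structures this file manipulates are declared in include/linux/pid.h. In recent kernels they look roughly like the following sketch, trimmed to the fields used below (the exact field set varies by kernel version):

struct upid {
        int nr;                         /* the numeric pid, as seen in one namespace */
        struct pid_namespace *ns;
};

struct pid {
        refcount_t count;
        unsigned int level;             /* depth of the owning pid namespace */
        spinlock_t lock;
        struct hlist_head tasks[PIDTYPE_MAX];   /* tasks using this pid */
        struct hlist_head inodes;
        wait_queue_head_t wait_pidfd;   /* pidfd pollers sleep here */
        struct rcu_head rcu;
        struct upid numbers[];          /* one entry per namespace level, 0..level */
};
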
struct pid init_struct_pid = {

 * PID-map pages start out as NULL; they get allocated upon
 * first use and are never deallocated.

void put_pid(struct pid *pid)
{
        struct pid_namespace *ns;

        if (!pid)
                return;

        ns = pid->numbers[pid->level].ns;
        if (refcount_dec_and_test(&pid->count)) {
                kmem_cache_free(ns->pid_cachep, pid);
                put_pid_ns(ns);
        }
}

static void delayed_put_pid(struct rcu_head *rhp)
{
        struct pid *pid = container_of(rhp, struct pid, rcu);
        put_pid(pid);
}
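
The split between put_pid() above and free_pid() below is the usual refcount-plus-RCU lifetime pattern: lookups such as find_pid_ns() return unreferenced pointers that are only valid under rcu_read_lock(), so the memory cannot be recycled until a grace period has elapsed. A generic kernel-context sketch of the idiom (not code from this file):

struct obj {
        refcount_t count;
        struct rcu_head rcu;
};

static void obj_free_rcu(struct rcu_head *rhp)
{
        kfree(container_of(rhp, struct obj, rcu));
}

static void obj_put(struct obj *obj)
{
        /* Safe even while RCU readers still hold a pointer to obj:
         * the memory is not reused until after a grace period. */
        if (obj && refcount_dec_and_test(&obj->count))
                call_rcu(&obj->rcu, obj_free_rcu);
}
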
void free_pid(struct pid *pid)
{
        int i;

        for (i = 0; i <= pid->level; i++) {
                struct upid *upid = pid->numbers + i;

                /* When all that is left in the pid namespace
                 * is the reaper, wake up the reaper. The reaper
                 * may be blocked in zap_pid_ns_processes().
                 */
                idr_remove(&upid->ns->idr, upid->nr);
        }

        pidfs_remove_pid(pid);
        call_rcu(&pid->rcu, delayed_put_pid);
}

struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
                      size_t set_tid_size)
{
        struct pid *pid;

        /*
         * set_tid_size contains the size of the set_tid array. Starting at
         * the most nested currently active PID namespace it tells alloc_pid()
         * which PID to set for a process in that most nested PID namespace
         * up to set_tid_size PID namespaces. It does not have to set the PID
         * for a process in all nested PID namespaces but set_tid_size must
         * never be greater than the current ns->level + 1.
         */
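
A userspace sketch of where set_tid and set_tid_size come from: clone3(2) (Linux 5.5+) forwards them to alloc_pid(). Choosing a PID requires CAP_CHECKPOINT_RESTORE or CAP_SYS_ADMIN in the owning user namespace; the PID value below is purely illustrative.

#define _GNU_SOURCE
#include <linux/sched.h>        /* struct clone_args */
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t want[1] = { 12345 };      /* requested pid in the innermost ns */
        struct clone_args args;

        memset(&args, 0, sizeof(args));
        args.exit_signal  = SIGCHLD;
        args.set_tid      = (uint64_t)(uintptr_t)want;
        args.set_tid_size = 1;          /* constrain only one namespace level */

        long child = syscall(SYS_clone3, &args, sizeof(args));
        if (child < 0) {
                perror("clone3");       /* EEXIST if 12345 is already taken */
                return 1;
        }
        if (child == 0) {               /* child: should report pid 12345 */
                printf("child pid: %d\n", getpid());
                _exit(0);
        }
        waitpid((pid_t)child, NULL, 0);
        return 0;
}
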
        pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
        if (!pid)
                return ERR_PTR(retval);

        pid->level = ns->level;

        /*
         * Also fail if a PID != 1 is requested and
         * no PID 1 exists.
         */

        /*
         * If ENOSPC is returned it means that the PID is
         * already in use. Return EEXIST in that case.
         */

        /*
         * init really needs pid 1, but after reaching the
         * maximum wrap back to RESERVED_PIDS.
         */

        /*
         * Store a null pointer so find_pid_ns does not find
         * a partially initialized PID (see below).
         */

                pid->numbers[i].nr = nr;
                pid->numbers[i].ns = tmp;
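
The numbers[] entries filled in here are what userspace observes as different PID values for the same process in different namespaces. A runnable sketch (needs root or CAP_SYS_ADMIN):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        if (unshare(CLONE_NEWPID) != 0) {
                perror("unshare");
                return 1;
        }
        pid_t child = fork();   /* first child becomes pid 1 in the new ns */
        if (child == 0) {
                /* inner view: numbers[pid->level].nr */
                printf("child sees itself as pid %d\n", getpid());
                _exit(0);
        }
        /* outer view: numbers[level - 1].nr, a different value */
        printf("parent sees the child as pid %d\n", child);
        waitpid(child, NULL, 0);
        return 0;
}
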
        /*
         * ENOMEM is not the most obvious choice especially for the case
         * where the child subreaper has already exited and the pid
         * namespace denies the creation of any new processes. But ENOMEM
         * is what we have exposed to userspace for a long time and it is
         * documented behavior for pid namespaces. So we can't easily
         * change it even if there were an error code better suited.
         */

        refcount_set(&pid->count, 1);
        spin_lock_init(&pid->lock);
        for (type = 0; type < PIDTYPE_MAX; ++type)
                INIT_HLIST_HEAD(&pid->tasks[type]);

        init_waitqueue_head(&pid->wait_pidfd);
        INIT_HLIST_HEAD(&pid->inodes);

        upid = pid->numbers + ns->level;
        pidfs_add_pid(pid);
        for ( ; upid >= pid->numbers; --upid) {
                /* Make the PID visible to find_pid_ns. */
                idr_replace(&upid->ns->idr, pid, upid->nr);
        }

        return pid;
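
This pairs with the idr_alloc(..., NULL, ...) calls earlier: the slot is reserved while still invisible, and idr_replace() publishes the fully built object, so a concurrent find_pid_ns() under RCU can never observe a half-initialized pid. The same two-phase pattern in isolation (generic kernel-context sketch; struct obj and demo_lock are assumptions of the example):

struct obj {
        int id;
        /* ... */
};

static DEFINE_SPINLOCK(demo_lock);

static int obj_publish(struct idr *idr, struct obj *obj)
{
        int id;

        idr_preload(GFP_KERNEL);
        spin_lock(&demo_lock);
        /* Phase 1: reserve an id, but keep the slot NULL (invisible). */
        id = idr_alloc(idr, NULL, 1, 0, GFP_ATOMIC);
        spin_unlock(&demo_lock);
        idr_preload_end();
        if (id < 0)
                return id;

        obj->id = id;           /* ...finish initializing obj... */

        spin_lock(&demo_lock);
        /* Phase 2: publish; idr_find() now returns the complete object. */
        idr_replace(idr, obj, id);
        spin_unlock(&demo_lock);
        return 0;
}
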
out_free:
        while (++i <= ns->level) {
                upid = pid->numbers + i;
                idr_remove(&upid->ns->idr, upid->nr);
        }

        /* On failure to allocate the first pid, reset the state */
        if (ns->pid_allocated == PIDNS_ADDING)
                idr_set_cursor(&ns->idr, 0);

        kmem_cache_free(ns->pid_cachep, pid);

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
        return idr_find(&ns->idr, nr);
}

struct pid *find_vpid(int nr)
{
        return find_pid_ns(nr, task_active_pid_ns(current));
}
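
Typical caller pattern for these lookups: the returned struct pid is unreferenced, so it is only stable inside an RCU read-side section, and anything that must outlive the section needs its own reference. A kernel-context sketch (the caller signal_by_vnr is made up; send_sig() and pid_task() are real helpers):

static int signal_by_vnr(pid_t nr)
{
        struct task_struct *task;
        int ret = -ESRCH;

        rcu_read_lock();
        task = pid_task(find_vpid(nr), PIDTYPE_PID);
        if (task)
                ret = send_sig(SIGUSR1, task, 0);
        rcu_read_unlock();
        return ret;
}
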
static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type)
{
        return (type == PIDTYPE_PID) ?
                &task->thread_pid :
                &task->signal->pids[type];
}

void attach_pid(struct task_struct *task, enum pid_type type)
{
        struct pid *pid = *task_pid_ptr(task, type);

        hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]);
}

static void __change_pid(struct task_struct *task, enum pid_type type,
                         struct pid *new)
{
        struct pid **pid_ptr = task_pid_ptr(task, type);
        struct pid *pid;
        int tmp;

        pid = *pid_ptr;

        hlist_del_rcu(&task->pid_links[type]);
        *pid_ptr = new;

        if (type == PIDTYPE_PID) {
                WARN_ON_ONCE(pid_has_task(pid, PIDTYPE_PID));
                wake_up_all(&pid->wait_pidfd);
        }

        for (tmp = PIDTYPE_MAX; --tmp >= 0; )
                if (pid_has_task(pid, tmp))
                        return;

        free_pid(pid);
}
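
The wake_up_all(&pid->wait_pidfd) above is what makes a pidfd readable once the task it refers to has exited, since __change_pid() runs as the dying task is detached. This is observable from userspace (runnable sketch, Linux 5.3+):

#define _GNU_SOURCE
#include <poll.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t child = fork();
        if (child == 0) {
                sleep(1);
                _exit(0);
        }
        int pidfd = syscall(SYS_pidfd_open, child, 0);
        if (pidfd < 0) {
                perror("pidfd_open");
                return 1;
        }
        struct pollfd p = { .fd = pidfd, .events = POLLIN };
        poll(&p, 1, -1);        /* returns once the child has exited */
        printf("child %d exited\n", (int)child);
        waitpid(child, NULL, 0);
        close(pidfd);
        return 0;
}
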
void change_pid(struct task_struct *task, enum pid_type type,
                struct pid *pid)
{
        __change_pid(task, type, pid);
        attach_pid(task, type);
}

void exchange_tids(struct task_struct *left, struct task_struct *right)
{
        struct pid *pid1 = left->thread_pid;
        struct pid *pid2 = right->thread_pid;

        /* Swap the per task_struct pid */
        rcu_assign_pointer(left->thread_pid, pid2);
        rcu_assign_pointer(right->thread_pid, pid1);

        /* Swap the cached value */
        WRITE_ONCE(left->pid, pid_nr(pid2));
        WRITE_ONCE(right->pid, pid_nr(pid1));
}

struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
        struct task_struct *result = NULL;

        if (pid) {
                struct hlist_node *first;

                first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
                                              lockdep_tasklist_lock_is_held());
                if (first)
                        result = hlist_entry(first, struct task_struct,
                                             pid_links[(type)]);
        }
        return result;
}

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
        struct pid *pid;

        rcu_read_lock();
        pid = get_pid(rcu_dereference(*task_pid_ptr(task, type)));
        rcu_read_unlock();
        return pid;
}

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
        struct task_struct *result;

        rcu_read_lock();
        result = pid_task(pid, type);
        if (result)
                get_task_struct(result);
        rcu_read_unlock();
        return result;
}

struct pid *find_get_pid(pid_t nr)
{
        struct pid *pid;

        rcu_read_lock();
        pid = get_pid(find_vpid(nr));
        rcu_read_unlock();

        return pid;
}
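
Taken together, these helpers impose a strict get/put discipline on callers that stash references past the RCU section. A kernel-context sketch of the canonical shape (demo_lookup is a made-up caller):

static void demo_lookup(pid_t nr)
{
        struct pid *pid = find_get_pid(nr);     /* referenced, or NULL */
        struct task_struct *task;

        if (!pid)
                return;
        task = get_pid_task(pid, PIDTYPE_PID);  /* referenced, or NULL */
        put_pid(pid);
        if (task) {
                pr_info("found task %s\n", task->comm);
                put_task_struct(task);
        }
}
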
pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
        struct upid *upid;
        pid_t nr = 0;

        if (pid && ns->level <= pid->level) {
                upid = &pid->numbers[ns->level];
                if (upid->ns == ns)
                        nr = upid->nr;
        }
        return nr;
}

pid_t pid_vnr(struct pid *pid)
{
        return pid_nr_ns(pid, task_active_pid_ns(current));
}
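
pid_nr_ns() is direction-sensitive: a namespace can name any pid at its own level or deeper, but a pid has no number in namespaces nested more deeply than its own level, and the result there is 0. A kernel-context sketch (print_pid_views is made up) that dumps every view of one pid:

static void print_pid_views(struct pid *pid)
{
        int i;

        for (i = 0; i <= pid->level; i++) {
                struct upid *upid = &pid->numbers[i];

                pr_info("level %d: pid %d in ns inum %u\n",
                        i, upid->nr, upid->ns->ns.inum);
        }
}
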
/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
        return idr_get_next(&ns->idr, &nr);
}
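
This is what lets /proc readdir restart cheaply at an arbitrary position. A kernel-context sketch (walk_pids is made up) of the iteration pattern it enables:

static void walk_pids(struct pid_namespace *ns)
{
        struct pid *pid;
        int nr = 0;

        rcu_read_lock();
        while ((pid = find_ge_pid(nr, ns)) != NULL) {
                nr = pid_nr_ns(pid, ns);
                pr_info("pid %d\n", nr);
                nr++;           /* resume strictly after this entry */
        }
        rcu_read_unlock();
}
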
struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags)
{
        struct pid *pid;
        CLASS(fd, f)(fd);

        if (fd_empty(f))
                return ERR_PTR(-EBADF);

        pid = pidfd_pid(fd_file(f));
        if (!IS_ERR(pid)) {
                get_pid(pid);
                *flags = fd_file(f)->f_flags;
        }
        return pid;
}

struct task_struct *pidfd_get_task(int pidfd, unsigned int *flags)
{
        unsigned int f_flags = 0;
        struct pid *pid;
        struct task_struct *task;

        pid = pidfd_get_pid(pidfd, &f_flags);
        if (IS_ERR(pid))
                return ERR_CAST(pid);

        task = get_pid_task(pid, PIDTYPE_TGID);
        put_pid(pid);
        if (!task)
                return ERR_PTR(-ESRCH);

        *flags = f_flags;
        return task;
}

/**
 * pidfd_create() - Create a new pid file descriptor.
 * @pid: struct pid that the pidfd will reference
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set.
 */
static int pidfd_create(struct pid *pid, unsigned int flags)
{
        int pidfd;
        struct file *pidfd_file;

        pidfd = pidfd_prepare(pid, flags, &pidfd_file);
        if (pidfd < 0)
                return pidfd;

        fd_install(pidfd, pidfd_file);
        return pidfd;
}

/**
 * sys_pidfd_open() - Open new pid file descriptor.
 * @pid: pid for which to retrieve a pidfd
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set for
 * the task identified by @pid. Without PIDFD_THREAD flag the target task
 * must be a thread-group leader.
 */
SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags)
{
        int fd;
        struct pid *p;

        if (pid <= 0)
                return -EINVAL;

        p = find_get_pid(pid);
        if (!p)
                return -ESRCH;

        fd = pidfd_create(p, flags);

        put_pid(p);
        return fd;
}
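
From userspace, the syscall composes with pidfd_send_signal(2) for race-free signalling: an open pidfd pins the struct pid, so even if the numeric PID is recycled the signal cannot hit the wrong process. Runnable sketch:

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(int argc, char *argv[])
{
        if (argc != 2) {
                fprintf(stderr, "usage: %s <pid>\n", argv[0]);
                return 1;
        }
        int pidfd = syscall(SYS_pidfd_open, atoi(argv[1]), 0);
        if (pidfd < 0) {
                perror("pidfd_open");
                return 1;
        }
        if (syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0) < 0) {
                perror("pidfd_send_signal");
                return 1;
        }
        close(pidfd);
        return 0;
}
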
        init_pid_ns.pid_cachep = kmem_cache_create("pid",
                        struct_size_t(struct pid, numbers, 1),
                        __alignof__(struct pid),
                        SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT,
                        NULL);
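
The trailing flexible array is why the slab object size is computed with struct_size_t(): a pid allocated in a namespace at depth N carries N+1 struct upid entries, so each namespace level needs its own cache (see create_pid_cachep() in kernel/pid_namespace.c). A sketch of that per-level sizing; the helper name pid_cache_for_level is hypothetical:

static struct kmem_cache *pid_cache_for_level(unsigned int level)
{
        /* the cache keeps a pointer to the name, so it must stay allocated */
        char *name = kasprintf(GFP_KERNEL, "pid_%u", level + 1);

        if (!name)
                return NULL;
        /* object size = header plus (level + 1) upid slots */
        return kmem_cache_create(name,
                        struct_size_t(struct pid, numbers, level + 1),
                        __alignof__(struct pid),
                        SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL);
}
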
static int pidfd_getfd(struct pid *pid, int fd)
{
        struct task_struct *task;

        task = get_pid_task(pid, PIDTYPE_PID);
        if (!task)
                return -ESRCH;

SYSCALL_DEFINE3(pidfd_getfd, int, pidfd, int, fd,
                unsigned int, flags)
{
        struct pid *pid;
        CLASS(fd, f)(pidfd);

        if (fd_empty(f))
                return -EBADF;

        pid = pidfd_pid(fd_file(f));
        if (IS_ERR(pid))
                return PTR_ERR(pid);

        return pidfd_getfd(pid, fd);
}
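
Userspace sketch of the syscall this implements: pidfd_getfd(2) (Linux 5.6+) duplicates a file descriptor out of another process; the caller needs ptrace-attach permission over the target.

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(int argc, char *argv[])
{
        if (argc != 3) {
                fprintf(stderr, "usage: %s <pid> <fd>\n", argv[0]);
                return 1;
        }
        int pidfd = syscall(SYS_pidfd_open, atoi(argv[1]), 0);
        if (pidfd < 0) {
                perror("pidfd_open");
                return 1;
        }
        int fd = syscall(SYS_pidfd_getfd, pidfd, atoi(argv[2]), 0);
        if (fd < 0) {
                perror("pidfd_getfd");
                return 1;
        }
        printf("duplicated remote fd as local fd %d\n", fd);
        close(fd);
        close(pidfd);
        return 0;
}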