// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 Nadia Yvette Chambers, IBM
 * (C) 2004 Nadia Yvette Chambers, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given ID's on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, which bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario when all but one out of 1 million PIDs possible are
 * allocated already: the scanning of 32 list entries and at most PAGE_SIZE
 * bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
 *
 * Pid namespaces:
 * (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 * (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 * Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/memblock.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/proc_ns.h>
#include <linux/refcount.h>
#include <linux/anon_inodes.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/idr.h>
#include <linux/pidfs.h>
#include <net/sock.h>
#include <uapi/linux/pidfd.h>

struct pid init_struct_pid = {
	.count		= REFCOUNT_INIT(1),
	.tasks		= {
		{ .first = NULL },
		{ .first = NULL },
		{ .first = NULL },
	},
	.level		= 0,
	.numbers	= { {
		.nr		= 0,
		.ns		= &init_pid_ns,
	}, }
};

static int pid_max_min = RESERVED_PIDS + 1;
static int pid_max_max = PID_MAX_LIMIT;

/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales to up to 4 million PIDs, runtime.
 */
struct pid_namespace init_pid_ns = {
	.ns		= NS_COMMON_INIT(init_pid_ns),
	.idr		= IDR_INIT(init_pid_ns.idr),
	.pid_allocated	= PIDNS_ADDING,
	.level		= 0,
	.child_reaper	= &init_task,
	.user_ns	= &init_user_ns,
	.pid_max	= PID_MAX_DEFAULT,
#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
	.memfd_noexec_scope = MEMFD_NOEXEC_SCOPE_EXEC,
#endif
};
EXPORT_SYMBOL_GPL(init_pid_ns);

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if (refcount_dec_and_test(&pid->count)) {
		pidfs_free_pid(pid);
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

void free_pid(struct pid *pid)
{
	int i;
	struct pid_namespace *active_ns;

	lockdep_assert_not_held(&tasklist_lock);

	active_ns = pid->numbers[pid->level].ns;
	ns_ref_active_put(active_ns);

	spin_lock(&pidmap_lock);
	for (i = 0; i <= pid->level; i++) {
		struct upid *upid = pid->numbers + i;
		struct pid_namespace *ns = upid->ns;
		switch (--ns->pid_allocated) {
		case 2:
		case 1:
			/* When all that is left in the pid namespace
			 * is the reaper wake up the reaper. The reaper
			 * may be sleeping in zap_pid_ns_processes().
			 */
			wake_up_process(READ_ONCE(ns->child_reaper));
			break;
		case PIDNS_ADDING:
			/* Handle a fork failure of the first process */
			WARN_ON(ns->child_reaper);
			ns->pid_allocated = 0;
			break;
		}

		idr_remove(&ns->idr, upid->nr);
	}
	spin_unlock(&pidmap_lock);

	pidfs_remove_pid(pid);
	call_rcu(&pid->rcu, delayed_put_pid);
}

void free_pids(struct pid **pids)
{
	int tmp;

	/*
	 * This can batch pidmap_lock.
	 */
	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (pids[tmp])
			free_pid(pids[tmp]);
}
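/*
 * Reference-counting sketch (illustrative only; both helpers are defined in
 * this file, get_task_pid() further below): a caller that wants a struct pid
 * to outlive the RCU read side pairs a get with a put, and the final
 * put_pid() above both frees the pid and drops the namespace reference taken
 * at allocation time:
 *
 *	struct pid *pid = get_task_pid(task, PIDTYPE_PID);	// +1 reference
 *	...pid stays valid here, even if the task exits...
 *	put_pid(pid);						// may free it
 */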
struct pid *alloc_pid(struct pid_namespace *ns, pid_t *arg_set_tid,
		      size_t arg_set_tid_size)
{
	int set_tid[MAX_PID_NS_LEVEL + 1] = {};
	int pid_max[MAX_PID_NS_LEVEL + 1] = {};
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;
	int retval = -ENOMEM;
	bool retried_preload;

	/*
	 * arg_set_tid_size contains the size of the arg_set_tid array. Starting at
	 * the most nested currently active PID namespace it tells alloc_pid()
	 * which PID to set for a process in that most nested PID namespace
	 * up to arg_set_tid_size PID namespaces. It does not have to set the PID
	 * for a process in all nested PID namespaces but arg_set_tid_size must
	 * never be greater than the current ns->level + 1.
	 */
	if (arg_set_tid_size > ns->level + 1)
		return ERR_PTR(-EINVAL);

	/*
	 * Prep before we take locks:
	 *
	 * 1. allocate and fill in pid struct
	 */
	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		return ERR_PTR(retval);

	get_pid_ns(ns);
	pid->level = ns->level;
	refcount_set(&pid->count, 1);
	spin_lock_init(&pid->lock);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);
	init_waitqueue_head(&pid->wait_pidfd);
	INIT_HLIST_HEAD(&pid->inodes);
	pidfs_prepare_pid(pid);

	/*
	 * 2. validate the requested TIDs and do the
	 *    checkpoint_restore_ns_capable() permission check
	 *
	 * This also stores the pid_max found for each namespace to make sure
	 * the used value is the same should later code need it.
	 */
	for (tmp = ns, i = ns->level; i >= 0; i--) {
		pid_max[ns->level - i] = READ_ONCE(tmp->pid_max);

		if (arg_set_tid_size) {
			int tid = set_tid[ns->level - i] = arg_set_tid[ns->level - i];

			retval = -EINVAL;
			if (tid < 1 || tid >= pid_max[ns->level - i])
				goto out_abort;
			retval = -EPERM;
			if (!checkpoint_restore_ns_capable(tmp->user_ns))
				goto out_abort;
			arg_set_tid_size--;
		}

		tmp = tmp->parent;
	}

	/*
	 * Prep is done, id allocation goes here:
	 */
	retried_preload = false;
	idr_preload(GFP_KERNEL);
	spin_lock(&pidmap_lock);
	for (tmp = ns, i = ns->level; i >= 0;) {
		int tid = set_tid[ns->level - i];

		if (tid) {
			nr = idr_alloc(&tmp->idr, NULL, tid,
				       tid + 1, GFP_ATOMIC);
			/*
			 * If ENOSPC is returned it means that the PID is
			 * already in use. Return EEXIST in that case.
			 */
			if (nr == -ENOSPC)
				nr = -EEXIST;
		} else {
			int pid_min = 1;
			/*
			 * init really needs pid 1, but after reaching the
			 * maximum wrap back to RESERVED_PIDS
			 */
			if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS)
				pid_min = RESERVED_PIDS;

			/*
			 * Store a null pointer so find_pid_ns does not find
			 * a partially initialized PID (see below).
			 */
			nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
					      pid_max[ns->level - i], GFP_ATOMIC);
			if (nr == -ENOSPC)
				nr = -EAGAIN;
		}

		if (unlikely(nr < 0)) {
			/*
			 * Preload more memory if idr_alloc{,_cyclic} failed with -ENOMEM.
			 *
			 * The IDR API only allows us to preload memory for one call, while
			 * we may end up doing several under pidmap_lock with GFP_ATOMIC.
			 * The situation may be salvageable with GFP_KERNEL. But make sure
			 * to not loop indefinitely if preload did not help (the routine
			 * unfortunately returns void, so we have no idea if it got
			 * anywhere).
			 *
			 * The lock can be safely dropped and picked up as historically
			 * pid allocation for different namespaces was *not* atomic -- we
			 * try to hold on to it the entire time only for performance
			 * reasons.
			 */
			if (nr == -ENOMEM && !retried_preload) {
				spin_unlock(&pidmap_lock);
				idr_preload_end();
				retried_preload = true;
				idr_preload(GFP_KERNEL);
				spin_lock(&pidmap_lock);
				continue;
			}
			retval = nr;
			goto out_free;
		}

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		i--;
		retried_preload = false;

		/*
		 * PID 1 (init) must be created first.
		 */
		if (!READ_ONCE(tmp->child_reaper) && nr != 1) {
			retval = -EINVAL;
			goto out_free;
		}

		tmp = tmp->parent;
	}

	/*
	 * ENOMEM is not the most obvious choice especially for the case
	 * where the child subreaper has already exited and the pid
	 * namespace denies the creation of any new processes. But ENOMEM
	 * is what we have exposed to userspace for a long time and it is
	 * documented behavior for pid namespaces. So we can't easily
	 * change it even if there were an error code better suited.
	 *
	 * This can't be done earlier because we need to preserve other
	 * error conditions.
	 */
	retval = -ENOMEM;
	if (unlikely(!(ns->pid_allocated & PIDNS_ADDING)))
		goto out_free;
	for (upid = pid->numbers + ns->level; upid >= pid->numbers; --upid) {
		/* Make the PID visible to find_pid_ns. */
		idr_replace(&upid->ns->idr, pid, upid->nr);
		upid->ns->pid_allocated++;
	}
	spin_unlock(&pidmap_lock);
	idr_preload_end();
	ns_ref_active_get(ns);

	retval = pidfs_add_pid(pid);
	if (unlikely(retval)) {
		free_pid(pid);
		pid = ERR_PTR(-ENOMEM);
	}

	return pid;

out_free:
	while (++i <= ns->level) {
		upid = pid->numbers + i;
		idr_remove(&upid->ns->idr, upid->nr);
	}

	/* On failure to allocate the first pid, reset the state */
	if (ns->pid_allocated == PIDNS_ADDING)
		idr_set_cursor(&ns->idr, 0);

	spin_unlock(&pidmap_lock);
	idr_preload_end();

out_abort:
	put_pid_ns(ns);
	kmem_cache_free(ns->pid_cachep, pid);
	return ERR_PTR(retval);
}
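/*
 * Illustrative userspace sketch of the set_tid path above; a minimal,
 * assumption-laden example, not kernel code. clone3(2) is what feeds
 * arg_set_tid: index 0 is the TID in the most nested namespace, matching the
 * ns->level - i indexing in alloc_pid(). The caller needs
 * CAP_CHECKPOINT_RESTORE (or CAP_SYS_ADMIN) in each affected user namespace;
 * error handling is elided:
 *
 *	pid_t tids[] = { 7, 31496 };	// [0]: innermost ns, [1]: its parent
 *	struct clone_args args = {
 *		.set_tid	= (__u64)(uintptr_t)tids,
 *		.set_tid_size	= 2,	// must not exceed ns->level + 1
 *		.exit_signal	= SIGCHLD,
 *	};
 *	pid_t child = syscall(__NR_clone3, &args, sizeof(args));
 */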
void disable_pid_allocation(struct pid_namespace *ns)
{
	spin_lock(&pidmap_lock);
	ns->pid_allocated &= ~PIDNS_ADDING;
	spin_unlock(&pidmap_lock);
}

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	return idr_find(&ns->idr, nr);
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);

static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type)
{
	return (type == PIDTYPE_PID) ?
		&task->thread_pid :
		&task->signal->pids[type];
}

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;

	lockdep_assert_held_write(&tasklist_lock);

	pid = *task_pid_ptr(task, type);
	hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]);
}

static void __change_pid(struct pid **pids, struct task_struct *task,
			 enum pid_type type, struct pid *new)
{
	struct pid **pid_ptr, *pid;
	int tmp;

	lockdep_assert_held_write(&tasklist_lock);

	pid_ptr = task_pid_ptr(task, type);
	pid = *pid_ptr;

	hlist_del_rcu(&task->pid_links[type]);
	*pid_ptr = new;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (pid_has_task(pid, tmp))
			return;

	WARN_ON(pids[type]);
	pids[type] = pid;
}

void detach_pid(struct pid **pids, struct task_struct *task, enum pid_type type)
{
	__change_pid(pids, task, type, NULL);
}

void change_pid(struct pid **pids, struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(pids, task, type, pid);
	attach_pid(task, type);
}

void exchange_tids(struct task_struct *left, struct task_struct *right)
{
	struct pid *pid1 = left->thread_pid;
	struct pid *pid2 = right->thread_pid;
	struct hlist_head *head1 = &pid1->tasks[PIDTYPE_PID];
	struct hlist_head *head2 = &pid2->tasks[PIDTYPE_PID];

	lockdep_assert_held_write(&tasklist_lock);

	/* Swap the single entry tid lists */
	hlists_swap_heads_rcu(head1, head2);

	/* Swap the per task_struct pid */
	rcu_assign_pointer(left->thread_pid, pid2);
	rcu_assign_pointer(right->thread_pid, pid1);

	/* Swap the cached value */
	WRITE_ONCE(left->pid, pid_nr(pid2));
	WRITE_ONCE(right->pid, pid_nr(pid1));
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
		  enum pid_type type)
{
	WARN_ON_ONCE(type == PIDTYPE_PID);
	lockdep_assert_held_write(&tasklist_lock);
	hlist_replace_rcu(&old->pid_links[type], &new->pid_links[type]);
}
struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
					      lockdep_tasklist_lock_is_held());
		if (first)
			result = hlist_entry(first, struct task_struct, pid_links[(type)]);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "find_task_by_pid_ns() needs rcu_read_lock() protection");
	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}

struct task_struct *find_get_task_by_vpid(pid_t nr)
{
	struct task_struct *task;

	rcu_read_lock();
	task = find_task_by_vpid(nr);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	return task;
}

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	pid = get_pid(rcu_dereference(*task_pid_ptr(task, type)));
	rcu_read_unlock();
	return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);

pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);
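/*
 * Worked example of the level checks above (illustrative, values invented):
 * a task forked one namespace deep has pid->level == 1 and two upids,
 * numbers[0] = { .nr = 31496, .ns = &init_pid_ns } and
 * numbers[1] = { .nr = 7, .ns = child_ns }. Then
 * pid_nr_ns(pid, &init_pid_ns) returns 31496, pid_nr_ns(pid, child_ns)
 * returns 7, and for an unrelated namespace the upid->ns comparison fails
 * and 0 is returned.
 */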
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns)
{
	pid_t nr = 0;

	rcu_read_lock();
	if (!ns)
		ns = task_active_pid_ns(current);
	nr = pid_nr_ns(rcu_dereference(*task_pid_ptr(task, type)), ns);
	rcu_read_unlock();

	return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
	return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	return idr_get_next(&ns->idr, &nr);
}
EXPORT_SYMBOL_GPL(find_ge_pid);

struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags)
{
	CLASS(fd, f)(fd);
	struct pid *pid;

	if (fd_empty(f))
		return ERR_PTR(-EBADF);

	pid = pidfd_pid(fd_file(f));
	if (!IS_ERR(pid)) {
		get_pid(pid);
		*flags = fd_file(f)->f_flags;
	}
	return pid;
}

/**
 * pidfd_get_task() - Get the task associated with a pidfd
 *
 * @pidfd: pidfd for which to get the task
 * @flags: flags associated with this pidfd
 *
 * Return the task associated with @pidfd. The function takes a reference on
 * the returned task. The caller is responsible for releasing that reference.
 *
 * Return: On success, the task_struct associated with the pidfd.
 *	   On error, a negative errno number will be returned.
 */
struct task_struct *pidfd_get_task(int pidfd, unsigned int *flags)
{
	unsigned int f_flags = 0;
	struct pid *pid;
	struct task_struct *task;
	enum pid_type type;

	switch (pidfd) {
	case PIDFD_SELF_THREAD:
		type = PIDTYPE_PID;
		pid = get_task_pid(current, type);
		break;
	case PIDFD_SELF_THREAD_GROUP:
		type = PIDTYPE_TGID;
		pid = get_task_pid(current, type);
		break;
	default:
		pid = pidfd_get_pid(pidfd, &f_flags);
		if (IS_ERR(pid))
			return ERR_CAST(pid);
		type = PIDTYPE_TGID;
		break;
	}

	task = get_pid_task(pid, type);
	put_pid(pid);
	if (!task)
		return ERR_PTR(-ESRCH);

	*flags = f_flags;
	return task;
}

/**
 * pidfd_create() - Create a new pid file descriptor.
 *
 * @pid:   struct pid that the pidfd will reference
 * @flags: flags to pass
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set.
 *
 * Note that this function can only be called after the fd table has
 * been unshared to avoid leaking the pidfd to the new process.
 *
 * This symbol should not be explicitly exported to loadable modules.
 *
 * Return: On success, a cloexec pidfd is returned.
 *	   On error, a negative errno number will be returned.
 */
static int pidfd_create(struct pid *pid, unsigned int flags)
{
	int pidfd;
	struct file *pidfd_file;

	pidfd = pidfd_prepare(pid, flags, &pidfd_file);
	if (pidfd < 0)
		return pidfd;

	fd_install(pidfd, pidfd_file);
	return pidfd;
}

/**
 * sys_pidfd_open() - Open new pid file descriptor.
 *
 * @pid:   pid for which to retrieve a pidfd
 * @flags: flags to pass
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set for
 * the task identified by @pid. Without the PIDFD_THREAD flag the target task
 * must be a thread-group leader.
 *
 * Return: On success, a cloexec pidfd is returned.
 *	   On error, a negative errno number will be returned.
 */
SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags)
{
	int fd;
	struct pid *p;

	if (flags & ~(PIDFD_NONBLOCK | PIDFD_THREAD))
		return -EINVAL;

	if (pid <= 0)
		return -EINVAL;

	p = find_get_pid(pid);
	if (!p)
		return -ESRCH;

	fd = pidfd_create(p, flags);

	put_pid(p);
	return fd;
}
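/*
 * Illustrative userspace sketch (an assumption-laden example, not part of
 * this file): the returned pidfd can be polled and becomes readable once the
 * target exits, which avoids the PID-reuse races inherent to raw PIDs:
 *
 *	int pidfd = syscall(__NR_pidfd_open, pid, 0);
 *	struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
 *	poll(&pfd, 1, -1);	// returns once the target has exited
 */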
#ifdef CONFIG_SYSCTL
static struct ctl_table_set *pid_table_root_lookup(struct ctl_table_root *root)
{
	return &task_active_pid_ns(current)->set;
}

static int set_is_seen(struct ctl_table_set *set)
{
	return &task_active_pid_ns(current)->set == set;
}

static int pid_table_root_permissions(struct ctl_table_header *head,
				      const struct ctl_table *table)
{
	struct pid_namespace *pidns =
		container_of(head->set, struct pid_namespace, set);
	int mode = table->mode;

	if (ns_capable_noaudit(pidns->user_ns, CAP_SYS_ADMIN) ||
	    uid_eq(current_euid(), make_kuid(pidns->user_ns, 0)))
		mode = (mode & S_IRWXU) >> 6;
	else if (in_egroup_p(make_kgid(pidns->user_ns, 0)))
		mode = (mode & S_IRWXG) >> 3;
	else
		mode = mode & S_IROTH;
	return (mode << 6) | (mode << 3) | mode;
}

static void pid_table_root_set_ownership(struct ctl_table_header *head,
					 kuid_t *uid, kgid_t *gid)
{
	struct pid_namespace *pidns =
		container_of(head->set, struct pid_namespace, set);
	kuid_t ns_root_uid;
	kgid_t ns_root_gid;

	ns_root_uid = make_kuid(pidns->user_ns, 0);
	if (uid_valid(ns_root_uid))
		*uid = ns_root_uid;

	ns_root_gid = make_kgid(pidns->user_ns, 0);
	if (gid_valid(ns_root_gid))
		*gid = ns_root_gid;
}

static struct ctl_table_root pid_table_root = {
	.lookup		= pid_table_root_lookup,
	.permissions	= pid_table_root_permissions,
	.set_ownership	= pid_table_root_set_ownership,
};

static int proc_do_cad_pid(const struct ctl_table *table, int write, void *buffer,
			   size_t *lenp, loff_t *ppos)
{
	struct pid *new_pid;
	pid_t tmp_pid;
	int r;
	struct ctl_table tmp_table = *table;

	tmp_pid = pid_vnr(cad_pid);
	tmp_table.data = &tmp_pid;

	r = proc_dointvec(&tmp_table, write, buffer, lenp, ppos);
	if (r || !write)
		return r;

	new_pid = find_get_pid(tmp_pid);
	if (!new_pid)
		return -ESRCH;

	put_pid(xchg(&cad_pid, new_pid));
	return 0;
}

static const struct ctl_table pid_table[] = {
	{
		.procname	= "pid_max",
		.data		= &init_pid_ns.pid_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &pid_max_min,
		.extra2		= &pid_max_max,
	},
#ifdef CONFIG_PROC_SYSCTL
	{
		.procname	= "cad_pid",
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= proc_do_cad_pid,
	},
#endif
};
#endif

int register_pidns_sysctls(struct pid_namespace *pidns)
{
#ifdef CONFIG_SYSCTL
	struct ctl_table *tbl;

	setup_sysctl_set(&pidns->set, &pid_table_root, set_is_seen);

	tbl = kmemdup(pid_table, sizeof(pid_table), GFP_KERNEL);
	if (!tbl)
		return -ENOMEM;
	tbl->data = &pidns->pid_max;
	pidns->pid_max = min(pid_max_max, max_t(int, pidns->pid_max,
			     PIDS_PER_CPU_DEFAULT * num_possible_cpus()));

	pidns->sysctls = __register_sysctl_table(&pidns->set, "kernel", tbl,
						 ARRAY_SIZE(pid_table));
	if (!pidns->sysctls) {
		kfree(tbl);
		retire_sysctl_set(&pidns->set);
		return -ENOMEM;
	}
#endif
	return 0;
}
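/*
 * Note on scope: the registration above points tbl->data at the namespace's
 * own pid_max, and pid_table_root_lookup() resolves the sysctl set of the
 * caller's active pid namespace, so /proc/sys/kernel/pid_max is per pid
 * namespace. Illustrative shell usage (assumes root in the target namespace;
 * 4194304 is PID_MAX_LIMIT on typical 64-bit configs):
 *
 *	echo 4194304 > /proc/sys/kernel/pid_max
 */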
void unregister_pidns_sysctls(struct pid_namespace *pidns)
{
#ifdef CONFIG_SYSCTL
	const struct ctl_table *tbl;

	tbl = pidns->sysctls->ctl_table_arg;
	unregister_sysctl_table(pidns->sysctls);
	retire_sysctl_set(&pidns->set);
	kfree(tbl);
#endif
}

void __init pid_idr_init(void)
{
	/* Verify no one has done anything silly: */
	BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_ADDING);

	/* bump default and minimum pid_max based on number of cpus */
	init_pid_ns.pid_max = min(pid_max_max, max_t(int, init_pid_ns.pid_max,
				  PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
	pid_max_min = max_t(int, pid_max_min,
			    PIDS_PER_CPU_MIN * num_possible_cpus());
	pr_info("pid_max: default: %u minimum: %u\n", init_pid_ns.pid_max, pid_max_min);

	idr_init(&init_pid_ns.idr);

	init_pid_ns.pid_cachep = kmem_cache_create("pid",
			struct_size_t(struct pid, numbers, 1),
			__alignof__(struct pid),
			SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT,
			NULL);
}

static __init int pid_namespace_sysctl_init(void)
{
#ifdef CONFIG_SYSCTL
	/* "kernel" directory will have already been initialized. */
	BUG_ON(register_pidns_sysctls(&init_pid_ns));
#endif
	return 0;
}
subsys_initcall(pid_namespace_sysctl_init);

static struct file *__pidfd_fget(struct task_struct *task, int fd)
{
	struct file *file;
	int ret;

	ret = down_read_killable(&task->signal->exec_update_lock);
	if (ret)
		return ERR_PTR(ret);

	if (ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS))
		file = fget_task(task, fd);
	else
		file = ERR_PTR(-EPERM);

	up_read(&task->signal->exec_update_lock);

	if (!file) {
		/*
		 * It is possible that the target thread is exiting; it can be
		 * either:
		 * 1. before exit_signals(), which gives a real fd
		 * 2. before exit_files() takes the task_lock(), which gives a real fd
		 * 3. after exit_files() releases task_lock(), ->files is NULL;
		 *    this has PF_EXITING, since it was set in exit_signals(),
		 *    __pidfd_fget() returns EBADF.
		 * In case 3 we get EBADF, but that really means ESRCH, since
		 * the task is currently exiting and has freed its files
		 * struct, so we fix it up.
		 */
		if (task->flags & PF_EXITING)
			file = ERR_PTR(-ESRCH);
		else
			file = ERR_PTR(-EBADF);
	}

	return file;
}

static int pidfd_getfd(struct pid *pid, int fd)
{
	struct task_struct *task;
	struct file *file;
	int ret;

	task = get_pid_task(pid, PIDTYPE_PID);
	if (!task)
		return -ESRCH;

	file = __pidfd_fget(task, fd);
	put_task_struct(task);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ret = receive_fd(file, NULL, O_CLOEXEC);
	fput(file);

	return ret;
}

/**
 * sys_pidfd_getfd() - Get a file descriptor from another process
 *
 * @pidfd: the pidfd file descriptor of the process
 * @fd:    the file descriptor number to get
 * @flags: flags on how to get the fd (reserved)
 *
 * This syscall gets a copy of a file descriptor from another process
 * based on the pidfd, and file descriptor number. It requires that
 * the calling process has the ability to ptrace the process represented
 * by the pidfd. The process which is having its file descriptor copied
 * is otherwise unaffected.
 *
 * Return: On success, a cloexec file descriptor is returned.
 *	   On error, a negative errno number will be returned.
 */
SYSCALL_DEFINE3(pidfd_getfd, int, pidfd, int, fd,
		unsigned int, flags)
{
	struct pid *pid;

	/* flags is currently unused - make sure it's unset */
	if (flags)
		return -EINVAL;

	CLASS(fd, f)(pidfd);
	if (fd_empty(f))
		return -EBADF;

	pid = pidfd_pid(fd_file(f));
	if (IS_ERR(pid))
		return PTR_ERR(pid);

	return pidfd_getfd(pid, fd);
}
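/*
 * Illustrative userspace sketch (assumptions: pidfd refers to a target the
 * caller may ptrace, and fd 3 is open in the target): the copy is installed
 * close-on-exec in the caller, per the O_CLOEXEC passed to receive_fd()
 * above:
 *
 *	int localfd = syscall(__NR_pidfd_getfd, pidfd, 3, 0);
 *	if (localfd < 0)
 *		perror("pidfd_getfd");
 */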