// SPDX-License-Identifier: GPL-2.0-only
/*
 * Pid namespaces
 *
 * Authors:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/syscalls.h>
#include <linux/cred.h>
#include <linux/err.h>
#include <linux/acct.h>
#include <linux/slab.h>
#include <linux/proc_ns.h>
#include <linux/reboot.h>
#include <linux/export.h>
#include <linux/sched/task.h>
#include <linux/sched/signal.h>
#include <linux/idr.h>
#include "pid_sysctl.h"

static DEFINE_MUTEX(pid_caches_mutex);
static struct kmem_cache *pid_ns_cachep;
/* Write once array, filled from the beginning. */
static struct kmem_cache *pid_cache[MAX_PID_NS_LEVEL];

/*
 * creates the kmem cache to allocate pids from.
 * @level: pid namespace level
 */
static struct kmem_cache *create_pid_cachep(unsigned int level)
{
	/* Level 0 is init_pid_ns.pid_cachep */
	struct kmem_cache **pkc = &pid_cache[level - 1];
	struct kmem_cache *kc;
	char name[4 + 10 + 1];
	unsigned int len;

	kc = READ_ONCE(*pkc);
	if (kc)
		return kc;

	snprintf(name, sizeof(name), "pid_%u", level + 1);
	len = sizeof(struct pid) + level * sizeof(struct upid);
	mutex_lock(&pid_caches_mutex);
	/* Name collision forces to do allocation under mutex. */
	if (!*pkc)
		*pkc = kmem_cache_create(name, len, 0,
					 SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL);
	mutex_unlock(&pid_caches_mutex);
	/* current can fail, but someone else can succeed. */
	return READ_ONCE(*pkc);
}
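
/*
 * Sizing note for create_pid_cachep(): a pid that lives in a namespace at
 * nesting depth "level" carries one struct upid (a numeric id plus the
 * owning namespace) for that namespace and for each ancestor namespace,
 * so deeper levels need larger objects and therefore their own slab
 * cache. A rough sketch of the arithmetic (illustrative only; the exact
 * struct pid layout depends on the kernel version):
 *
 *	level 0: sizeof(struct pid)                           (init_pid_ns)
 *	level 1: sizeof(struct pid) + 1 * sizeof(struct upid) ("pid_2")
 *	level 2: sizeof(struct pid) + 2 * sizeof(struct upid) ("pid_3")
 *
 * The name buffer is sized 4 + 10 + 1: "pid_", up to ten digits of %u,
 * and the trailing NUL.
 */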

static struct ucounts *inc_pid_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_PID_NAMESPACES);
}

static void dec_pid_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_PID_NAMESPACES);
}

static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns,
	struct pid_namespace *parent_pid_ns)
{
	struct pid_namespace *ns;
	unsigned int level = parent_pid_ns->level + 1;
	struct ucounts *ucounts;
	int err;

	err = -EINVAL;
	if (!in_userns(parent_pid_ns->user_ns, user_ns))
		goto out;

	err = -ENOSPC;
	if (level > MAX_PID_NS_LEVEL)
		goto out;
	ucounts = inc_pid_namespaces(user_ns);
	if (!ucounts)
		goto out;

	err = -ENOMEM;
	ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL);
	if (ns == NULL)
		goto out_dec;

	idr_init(&ns->idr);

	ns->pid_cachep = create_pid_cachep(level);
	if (ns->pid_cachep == NULL)
		goto out_free_idr;

	err = ns_alloc_inum(&ns->ns);
	if (err)
		goto out_free_idr;
	ns->ns.ops = &pidns_operations;

	refcount_set(&ns->ns.count, 1);
	ns->level = level;
	ns->parent = get_pid_ns(parent_pid_ns);
	ns->user_ns = get_user_ns(user_ns);
	ns->ucounts = ucounts;
	ns->pid_allocated = PIDNS_ADDING;

	initialize_memfd_noexec_scope(ns);

	return ns;

out_free_idr:
	idr_destroy(&ns->idr);
	kmem_cache_free(pid_ns_cachep, ns);
out_dec:
	dec_pid_namespaces(ucounts);
out:
	return ERR_PTR(err);
}

static void delayed_free_pidns(struct rcu_head *p)
{
	struct pid_namespace *ns = container_of(p, struct pid_namespace, rcu);

	dec_pid_namespaces(ns->ucounts);
	put_user_ns(ns->user_ns);

	kmem_cache_free(pid_ns_cachep, ns);
}

static void destroy_pid_namespace(struct pid_namespace *ns)
{
	ns_free_inum(&ns->ns);

	idr_destroy(&ns->idr);
	call_rcu(&ns->rcu, delayed_free_pidns);
}

struct pid_namespace *copy_pid_ns(unsigned long flags,
	struct user_namespace *user_ns, struct pid_namespace *old_ns)
{
	if (!(flags & CLONE_NEWPID))
		return get_pid_ns(old_ns);
	if (task_active_pid_ns(current) != old_ns)
		return ERR_PTR(-EINVAL);
	return create_pid_namespace(user_ns, old_ns);
}

void put_pid_ns(struct pid_namespace *ns)
{
	struct pid_namespace *parent;

	while (ns != &init_pid_ns) {
		parent = ns->parent;
		if (!refcount_dec_and_test(&ns->ns.count))
			break;
		destroy_pid_namespace(ns);
		ns = parent;
	}
}
EXPORT_SYMBOL_GPL(put_pid_ns);
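
/*
 * A note on the loop in put_pid_ns() above: create_pid_namespace() makes
 * each namespace hold a reference on its parent (get_pid_ns(parent_pid_ns)),
 * so dropping the last reference on a child may in turn drop the last
 * reference on its parent. Walking the parent chain iteratively releases
 * such a stack of namespaces without recursing, which matters because
 * nesting can be up to MAX_PID_NS_LEVEL deep.
 */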

void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
	int nr;
	int rc;
	struct task_struct *task, *me = current;
	int init_pids = thread_group_leader(me) ? 1 : 2;
	struct pid *pid;

	/* Don't allow any more processes into the pid namespace */
	disable_pid_allocation(pid_ns);

	/*
	 * Ignore SIGCHLD causing any terminated children to autoreap.
	 * This speeds up the namespace shutdown, plus see the comment
	 * below.
	 */
	spin_lock_irq(&me->sighand->siglock);
	me->sighand->action[SIGCHLD - 1].sa.sa_handler = SIG_IGN;
	spin_unlock_irq(&me->sighand->siglock);

	/*
	 * The last thread in the cgroup-init thread group is terminating.
	 * Find the remaining pids in the namespace, signal them and wait
	 * for them to exit.
	 *
	 * Note: this signals each thread in the namespace - even those
	 *	 that belong to the same thread group. To avoid this, we
	 *	 would have to walk the entire tasklist looking for
	 *	 processes in this namespace, but that could be
	 *	 unnecessarily expensive if the pid namespace has just a
	 *	 few processes. Or we would need to maintain a tasklist
	 *	 for each pid namespace.
	 */
	rcu_read_lock();
	read_lock(&tasklist_lock);
	nr = 2;
	idr_for_each_entry_continue(&pid_ns->idr, pid, nr) {
		task = pid_task(pid, PIDTYPE_PID);
		if (task && !__fatal_signal_pending(task))
			group_send_sig_info(SIGKILL, SEND_SIG_PRIV, task, PIDTYPE_MAX);
	}
	read_unlock(&tasklist_lock);
	rcu_read_unlock();

	/*
	 * Reap the EXIT_ZOMBIE children we had before we ignored SIGCHLD.
	 * kernel_wait4() will also block until our children traced from the
	 * parent namespace are detached and become EXIT_DEAD.
	 */
	do {
		clear_thread_flag(TIF_SIGPENDING);
		rc = kernel_wait4(-1, NULL, __WALL, NULL);
	} while (rc != -ECHILD);

	/*
	 * kernel_wait4() misses EXIT_DEAD children, and EXIT_ZOMBIE
	 * processes whose parent processes are outside of the pid
	 * namespace. Such processes are created with setns()+fork().
	 *
	 * If those EXIT_ZOMBIE processes are not reaped by their
	 * parents before their parents exit, they will be reparented
	 * to pid_ns->child_reaper. Thus pid_ns->child_reaper needs to
	 * stay valid until they all go away.
	 *
	 * The code relies on the pid_ns->child_reaper ignoring
	 * SIGCHLD to cause those EXIT_ZOMBIE processes to be
	 * autoreaped if reparented.
	 *
	 * Semantically it is also desirable to wait for EXIT_ZOMBIE
	 * processes before allowing the child_reaper to be reaped, as
	 * that gives the invariant that when the init process of a
	 * pid namespace is reaped all of the processes in the pid
	 * namespace are gone.
	 *
	 * Once all of the other tasks are gone from the pid_namespace
	 * free_pid() will awaken this task.
	 */
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (pid_ns->pid_allocated == init_pids)
			break;
		/*
		 * Release tasks_rcu_exit_srcu to avoid the following deadlock:
		 *
		 * 1) TASK A unshare(CLONE_NEWPID)
		 * 2) TASK A fork() twice -> TASK B (child reaper for new ns)
		 *    and TASK C
		 * 3) TASK B exits, kills TASK C, waits for TASK A to reap it
		 * 4) TASK A calls synchronize_rcu_tasks()
		 *    -> synchronize_srcu(tasks_rcu_exit_srcu)
		 * 5) *DEADLOCK*
		 *
		 * It is considered safe to release tasks_rcu_exit_srcu here
		 * because we assume the current task can not be concurrently
		 * reaped at this point.
		 */
		exit_tasks_rcu_stop();
		schedule();
		exit_tasks_rcu_start();
	}
	__set_current_state(TASK_RUNNING);

	if (pid_ns->reboot)
		current->signal->group_exit_code = pid_ns->reboot;

	acct_exit_ns(pid_ns);
	return;
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static int pid_ns_ctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(current);
	struct ctl_table tmp = *table;
	int ret, next;

	if (write && !checkpoint_restore_ns_capable(pid_ns->user_ns))
		return -EPERM;

	/*
	 * Writing directly to the ns' last_pid field is OK, since this
	 * field is volatile in a living namespace anyway, and code writing
	 * to it should synchronize its usage by external means.
	 */

	next = idr_get_cursor(&pid_ns->idr) - 1;

	tmp.data = &next;
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	if (!ret && write)
		idr_set_cursor(&pid_ns->idr, next + 1);

	return ret;
}

extern int pid_max;
static struct ctl_table pid_ns_ctl_table[] = {
	{
		.procname = "ns_last_pid",
		.maxlen = sizeof(int),
		.mode = 0666, /* permissions are checked in the handler */
		.proc_handler = pid_ns_ctl_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = &pid_max,
	},
	{ }
};
static struct ctl_path kern_path[] = { { .procname = "kernel", }, { } };
#endif	/* CONFIG_CHECKPOINT_RESTORE */

int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
{
	if (pid_ns == &init_pid_ns)
		return 0;

	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART2:
	case LINUX_REBOOT_CMD_RESTART:
		pid_ns->reboot = SIGHUP;
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
	case LINUX_REBOOT_CMD_HALT:
		pid_ns->reboot = SIGINT;
		break;
	default:
		return -EINVAL;
	}

	read_lock(&tasklist_lock);
	send_sig(SIGKILL, pid_ns->child_reaper, 1);
	read_unlock(&tasklist_lock);

	do_exit(0);

	/* Not reached */
	return 0;
}
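
/*
 * Usage note for reboot_pid_ns(): a reboot(2) issued by the init task of a
 * nested pid namespace does not reboot the machine. It records SIGHUP
 * (restart) or SIGINT (halt/power-off) in pid_ns->reboot, SIGKILLs the
 * namespace init, and zap_pid_ns_processes() later stores that value in
 * group_exit_code, so from the parent namespace the init task appears to
 * have been killed by that signal. A hypothetical container manager could
 * tell the two cases apart roughly like this (illustrative userspace
 * sketch, not kernel code):
 *
 *	int status;
 *	waitpid(container_init_pid, &status, 0);
 *	if (WIFSIGNALED(status) && WTERMSIG(status) == SIGHUP)
 *		restart_container();
 *	else if (WIFSIGNALED(status) && WTERMSIG(status) == SIGINT)
 *		halt_container();
 *
 * container_init_pid, restart_container() and halt_container() are
 * placeholders for the manager's own state and logic.
 */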

static inline struct pid_namespace *to_pid_ns(struct ns_common *ns)
{
	return container_of(ns, struct pid_namespace, ns);
}

static struct ns_common *pidns_get(struct task_struct *task)
{
	struct pid_namespace *ns;

	rcu_read_lock();
	ns = task_active_pid_ns(task);
	if (ns)
		get_pid_ns(ns);
	rcu_read_unlock();

	return ns ? &ns->ns : NULL;
}

static struct ns_common *pidns_for_children_get(struct task_struct *task)
{
	struct pid_namespace *ns = NULL;

	task_lock(task);
	if (task->nsproxy) {
		ns = task->nsproxy->pid_ns_for_children;
		get_pid_ns(ns);
	}
	task_unlock(task);

	if (ns) {
		read_lock(&tasklist_lock);
		if (!ns->child_reaper) {
			put_pid_ns(ns);
			ns = NULL;
		}
		read_unlock(&tasklist_lock);
	}

	return ns ? &ns->ns : NULL;
}

static void pidns_put(struct ns_common *ns)
{
	put_pid_ns(to_pid_ns(ns));
}

static int pidns_install(struct nsset *nsset, struct ns_common *ns)
{
	struct nsproxy *nsproxy = nsset->nsproxy;
	struct pid_namespace *active = task_active_pid_ns(current);
	struct pid_namespace *ancestor, *new = to_pid_ns(ns);

	if (!ns_capable(new->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * Only allow entering the current active pid namespace
	 * or a child of the current active pid namespace.
	 *
	 * This is required for fork to return a usable pid value and
	 * this maintains the property that processes and their
	 * children can not escape their current pid namespace.
	 */
	if (new->level < active->level)
		return -EINVAL;

	ancestor = new;
	while (ancestor->level > active->level)
		ancestor = ancestor->parent;
	if (ancestor != active)
		return -EINVAL;

	put_pid_ns(nsproxy->pid_ns_for_children);
	nsproxy->pid_ns_for_children = get_pid_ns(new);
	return 0;
}

static struct ns_common *pidns_get_parent(struct ns_common *ns)
{
	struct pid_namespace *active = task_active_pid_ns(current);
	struct pid_namespace *pid_ns, *p;

	/* See if the parent is in the current namespace */
	pid_ns = p = to_pid_ns(ns)->parent;
	for (;;) {
		if (!p)
			return ERR_PTR(-EPERM);
		if (p == active)
			break;
		p = p->parent;
	}

	return &get_pid_ns(pid_ns)->ns;
}

static struct user_namespace *pidns_owner(struct ns_common *ns)
{
	return to_pid_ns(ns)->user_ns;
}

const struct proc_ns_operations pidns_operations = {
	.name		= "pid",
	.type		= CLONE_NEWPID,
	.get		= pidns_get,
	.put		= pidns_put,
	.install	= pidns_install,
	.owner		= pidns_owner,
	.get_parent	= pidns_get_parent,
};

const struct proc_ns_operations pidns_for_children_operations = {
	.name		= "pid_for_children",
	.real_ns_name	= "pid",
	.type		= CLONE_NEWPID,
	.get		= pidns_for_children_get,
	.put		= pidns_put,
	.install	= pidns_install,
	.owner		= pidns_owner,
	.get_parent	= pidns_get_parent,
};

static __init int pid_namespaces_init(void)
{
	pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC | SLAB_ACCOUNT);

#ifdef CONFIG_CHECKPOINT_RESTORE
	register_sysctl_paths(kern_path, pid_ns_ctl_table);
#endif

	register_pid_ns_sysctl_table_vm();
	return 0;
}

__initcall(pid_namespaces_init);
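
/*
 * Usage note for pidns_install() above: setns(2) with CLONE_NEWPID does not
 * move the caller itself into the target pid namespace; it only replaces
 * nsproxy->pid_ns_for_children, because the caller's own pid cannot change.
 * Children created afterwards get their pids in the target namespace. A
 * hypothetical caller (illustrative userspace sketch, not kernel code):
 *
 *	int fd = open("/proc/<pid>/ns/pid", O_RDONLY); // target namespace
 *	if (setns(fd, CLONE_NEWPID) == 0 && fork() == 0) {
 *		// The child now lives in the target pid namespace;
 *		// the parent's own pid namespace is unchanged.
 *		execv(...);
 *	}
 *
 * "<pid>" and execv(...) are placeholders; error handling is omitted.
 */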