1 /* 2 * Generic pidhash and scalable, time-bounded PID allocator 3 * 4 * (C) 2002-2003 Nadia Yvette Chambers, IBM 5 * (C) 2004 Nadia Yvette Chambers, Oracle 6 * (C) 2002-2004 Ingo Molnar, Red Hat 7 * 8 * pid-structures are backing objects for tasks sharing a given ID to chain 9 * against. There is very little to them aside from hashing them and 10 * parking tasks using given ID's on a list. 11 * 12 * The hash is always changed with the tasklist_lock write-acquired, 13 * and the hash is only accessed with the tasklist_lock at least 14 * read-acquired, so there's no additional SMP locking needed here. 15 * 16 * We have a list of bitmap pages, which bitmaps represent the PID space. 17 * Allocating and freeing PIDs is completely lockless. The worst-case 18 * allocation scenario when all but one out of 1 million PIDs possible are 19 * allocated already: the scanning of 32 list entries and at most PAGE_SIZE 20 * bytes. The typical fastpath is a single successful setbit. Freeing is O(1). 21 * 22 * Pid namespaces: 23 * (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc. 24 * (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM 25 * Many thanks to Oleg Nesterov for comments and help 26 * 27 */ 28 29 #include <linux/mm.h> 30 #include <linux/export.h> 31 #include <linux/slab.h> 32 #include <linux/init.h> 33 #include <linux/rculist.h> 34 #include <linux/bootmem.h> 35 #include <linux/hash.h> 36 #include <linux/pid_namespace.h> 37 #include <linux/init_task.h> 38 #include <linux/syscalls.h> 39 #include <linux/proc_ns.h> 40 #include <linux/proc_fs.h> 41 #include <linux/sched/task.h> 42 #include <linux/idr.h> 43 44 struct pid init_struct_pid = { 45 .count = ATOMIC_INIT(1), 46 .tasks = { 47 { .first = NULL }, 48 { .first = NULL }, 49 { .first = NULL }, 50 }, 51 .level = 0, 52 .numbers = { { 53 .nr = 0, 54 .ns = &init_pid_ns, 55 }, } 56 }; 57 58 int pid_max = PID_MAX_DEFAULT; 59 60 #define RESERVED_PIDS 300 61 62 int pid_max_min = RESERVED_PIDS + 1; 63 int pid_max_max = PID_MAX_LIMIT; 64 65 /* 66 * PID-map pages start out as NULL, they get allocated upon 67 * first use and are never deallocated. This way a low pid_max 68 * value does not cause lots of bitmaps to be allocated, but 69 * the scheme scales to up to 4 million PIDs, runtime. 70 */ 71 struct pid_namespace init_pid_ns = { 72 .kref = KREF_INIT(2), 73 .idr = IDR_INIT(init_pid_ns.idr), 74 .pid_allocated = PIDNS_ADDING, 75 .level = 0, 76 .child_reaper = &init_task, 77 .user_ns = &init_user_ns, 78 .ns.inum = PROC_PID_INIT_INO, 79 #ifdef CONFIG_PID_NS 80 .ns.ops = &pidns_operations, 81 #endif 82 }; 83 EXPORT_SYMBOL_GPL(init_pid_ns); 84 85 /* 86 * Note: disable interrupts while the pidmap_lock is held as an 87 * interrupt might come in and do read_lock(&tasklist_lock). 88 * 89 * If we don't disable interrupts there is a nasty deadlock between 90 * detach_pid()->free_pid() and another cpu that does 91 * spin_lock(&pidmap_lock) followed by an interrupt routine that does 92 * read_lock(&tasklist_lock); 93 * 94 * After we clean up the tasklist_lock and know there are no 95 * irq handlers that take it we can leave the interrupts enabled. 96 * For now it is easier to be safe than to prove it can't happen. 97 */ 98 99 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock); 100 101 void put_pid(struct pid *pid) 102 { 103 struct pid_namespace *ns; 104 105 if (!pid) 106 return; 107 108 ns = pid->numbers[pid->level].ns; 109 if ((atomic_read(&pid->count) == 1) || 110 atomic_dec_and_test(&pid->count)) { 111 kmem_cache_free(ns->pid_cachep, pid); 112 put_pid_ns(ns); 113 } 114 } 115 EXPORT_SYMBOL_GPL(put_pid); 116 117 static void delayed_put_pid(struct rcu_head *rhp) 118 { 119 struct pid *pid = container_of(rhp, struct pid, rcu); 120 put_pid(pid); 121 } 122 123 void free_pid(struct pid *pid) 124 { 125 /* We can be called with write_lock_irq(&tasklist_lock) held */ 126 int i; 127 unsigned long flags; 128 129 spin_lock_irqsave(&pidmap_lock, flags); 130 for (i = 0; i <= pid->level; i++) { 131 struct upid *upid = pid->numbers + i; 132 struct pid_namespace *ns = upid->ns; 133 switch (--ns->pid_allocated) { 134 case 2: 135 case 1: 136 /* When all that is left in the pid namespace 137 * is the reaper wake up the reaper. The reaper 138 * may be sleeping in zap_pid_ns_processes(). 139 */ 140 wake_up_process(ns->child_reaper); 141 break; 142 case PIDNS_ADDING: 143 /* Handle a fork failure of the first process */ 144 WARN_ON(ns->child_reaper); 145 ns->pid_allocated = 0; 146 /* fall through */ 147 case 0: 148 schedule_work(&ns->proc_work); 149 break; 150 } 151 152 idr_remove(&ns->idr, upid->nr); 153 } 154 spin_unlock_irqrestore(&pidmap_lock, flags); 155 156 call_rcu(&pid->rcu, delayed_put_pid); 157 } 158 159 struct pid *alloc_pid(struct pid_namespace *ns) 160 { 161 struct pid *pid; 162 enum pid_type type; 163 int i, nr; 164 struct pid_namespace *tmp; 165 struct upid *upid; 166 int retval = -ENOMEM; 167 168 pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL); 169 if (!pid) 170 return ERR_PTR(retval); 171 172 tmp = ns; 173 pid->level = ns->level; 174 175 for (i = ns->level; i >= 0; i--) { 176 int pid_min = 1; 177 178 idr_preload(GFP_KERNEL); 179 spin_lock_irq(&pidmap_lock); 180 181 /* 182 * init really needs pid 1, but after reaching the maximum 183 * wrap back to RESERVED_PIDS 184 */ 185 if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS) 186 pid_min = RESERVED_PIDS; 187 188 /* 189 * Store a null pointer so find_pid_ns does not find 190 * a partially initialized PID (see below). 191 */ 192 nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min, 193 pid_max, GFP_ATOMIC); 194 spin_unlock_irq(&pidmap_lock); 195 idr_preload_end(); 196 197 if (nr < 0) { 198 retval = nr; 199 goto out_free; 200 } 201 202 pid->numbers[i].nr = nr; 203 pid->numbers[i].ns = tmp; 204 tmp = tmp->parent; 205 } 206 207 if (unlikely(is_child_reaper(pid))) { 208 if (pid_ns_prepare_proc(ns)) 209 goto out_free; 210 } 211 212 get_pid_ns(ns); 213 atomic_set(&pid->count, 1); 214 for (type = 0; type < PIDTYPE_MAX; ++type) 215 INIT_HLIST_HEAD(&pid->tasks[type]); 216 217 upid = pid->numbers + ns->level; 218 spin_lock_irq(&pidmap_lock); 219 if (!(ns->pid_allocated & PIDNS_ADDING)) 220 goto out_unlock; 221 for ( ; upid >= pid->numbers; --upid) { 222 /* Make the PID visible to find_pid_ns. */ 223 idr_replace(&upid->ns->idr, pid, upid->nr); 224 upid->ns->pid_allocated++; 225 } 226 spin_unlock_irq(&pidmap_lock); 227 228 return pid; 229 230 out_unlock: 231 spin_unlock_irq(&pidmap_lock); 232 put_pid_ns(ns); 233 234 out_free: 235 spin_lock_irq(&pidmap_lock); 236 while (++i <= ns->level) 237 idr_remove(&ns->idr, (pid->numbers + i)->nr); 238 239 /* On failure to allocate the first pid, reset the state */ 240 if (ns->pid_allocated == PIDNS_ADDING) 241 idr_set_cursor(&ns->idr, 0); 242 243 spin_unlock_irq(&pidmap_lock); 244 245 kmem_cache_free(ns->pid_cachep, pid); 246 return ERR_PTR(retval); 247 } 248 249 void disable_pid_allocation(struct pid_namespace *ns) 250 { 251 spin_lock_irq(&pidmap_lock); 252 ns->pid_allocated &= ~PIDNS_ADDING; 253 spin_unlock_irq(&pidmap_lock); 254 } 255 256 struct pid *find_pid_ns(int nr, struct pid_namespace *ns) 257 { 258 return idr_find(&ns->idr, nr); 259 } 260 EXPORT_SYMBOL_GPL(find_pid_ns); 261 262 struct pid *find_vpid(int nr) 263 { 264 return find_pid_ns(nr, task_active_pid_ns(current)); 265 } 266 EXPORT_SYMBOL_GPL(find_vpid); 267 268 /* 269 * attach_pid() must be called with the tasklist_lock write-held. 270 */ 271 void attach_pid(struct task_struct *task, enum pid_type type) 272 { 273 struct pid_link *link = &task->pids[type]; 274 hlist_add_head_rcu(&link->node, &link->pid->tasks[type]); 275 } 276 277 static void __change_pid(struct task_struct *task, enum pid_type type, 278 struct pid *new) 279 { 280 struct pid_link *link; 281 struct pid *pid; 282 int tmp; 283 284 link = &task->pids[type]; 285 pid = link->pid; 286 287 hlist_del_rcu(&link->node); 288 link->pid = new; 289 290 for (tmp = PIDTYPE_MAX; --tmp >= 0; ) 291 if (!hlist_empty(&pid->tasks[tmp])) 292 return; 293 294 free_pid(pid); 295 } 296 297 void detach_pid(struct task_struct *task, enum pid_type type) 298 { 299 __change_pid(task, type, NULL); 300 } 301 302 void change_pid(struct task_struct *task, enum pid_type type, 303 struct pid *pid) 304 { 305 __change_pid(task, type, pid); 306 attach_pid(task, type); 307 } 308 309 /* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */ 310 void transfer_pid(struct task_struct *old, struct task_struct *new, 311 enum pid_type type) 312 { 313 new->pids[type].pid = old->pids[type].pid; 314 hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node); 315 } 316 317 struct task_struct *pid_task(struct pid *pid, enum pid_type type) 318 { 319 struct task_struct *result = NULL; 320 if (pid) { 321 struct hlist_node *first; 322 first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]), 323 lockdep_tasklist_lock_is_held()); 324 if (first) 325 result = hlist_entry(first, struct task_struct, pids[(type)].node); 326 } 327 return result; 328 } 329 EXPORT_SYMBOL(pid_task); 330 331 /* 332 * Must be called under rcu_read_lock(). 333 */ 334 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns) 335 { 336 RCU_LOCKDEP_WARN(!rcu_read_lock_held(), 337 "find_task_by_pid_ns() needs rcu_read_lock() protection"); 338 return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID); 339 } 340 341 struct task_struct *find_task_by_vpid(pid_t vnr) 342 { 343 return find_task_by_pid_ns(vnr, task_active_pid_ns(current)); 344 } 345 346 struct task_struct *find_get_task_by_vpid(pid_t nr) 347 { 348 struct task_struct *task; 349 350 rcu_read_lock(); 351 task = find_task_by_vpid(nr); 352 if (task) 353 get_task_struct(task); 354 rcu_read_unlock(); 355 356 return task; 357 } 358 359 struct pid *get_task_pid(struct task_struct *task, enum pid_type type) 360 { 361 struct pid *pid; 362 rcu_read_lock(); 363 if (type != PIDTYPE_PID) 364 task = task->group_leader; 365 pid = get_pid(rcu_dereference(task->pids[type].pid)); 366 rcu_read_unlock(); 367 return pid; 368 } 369 EXPORT_SYMBOL_GPL(get_task_pid); 370 371 struct task_struct *get_pid_task(struct pid *pid, enum pid_type type) 372 { 373 struct task_struct *result; 374 rcu_read_lock(); 375 result = pid_task(pid, type); 376 if (result) 377 get_task_struct(result); 378 rcu_read_unlock(); 379 return result; 380 } 381 EXPORT_SYMBOL_GPL(get_pid_task); 382 383 struct pid *find_get_pid(pid_t nr) 384 { 385 struct pid *pid; 386 387 rcu_read_lock(); 388 pid = get_pid(find_vpid(nr)); 389 rcu_read_unlock(); 390 391 return pid; 392 } 393 EXPORT_SYMBOL_GPL(find_get_pid); 394 395 pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns) 396 { 397 struct upid *upid; 398 pid_t nr = 0; 399 400 if (pid && ns->level <= pid->level) { 401 upid = &pid->numbers[ns->level]; 402 if (upid->ns == ns) 403 nr = upid->nr; 404 } 405 return nr; 406 } 407 EXPORT_SYMBOL_GPL(pid_nr_ns); 408 409 pid_t pid_vnr(struct pid *pid) 410 { 411 return pid_nr_ns(pid, task_active_pid_ns(current)); 412 } 413 EXPORT_SYMBOL_GPL(pid_vnr); 414 415 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, 416 struct pid_namespace *ns) 417 { 418 pid_t nr = 0; 419 420 rcu_read_lock(); 421 if (!ns) 422 ns = task_active_pid_ns(current); 423 if (likely(pid_alive(task))) { 424 if (type != PIDTYPE_PID) { 425 if (type == __PIDTYPE_TGID) 426 type = PIDTYPE_PID; 427 428 task = task->group_leader; 429 } 430 nr = pid_nr_ns(rcu_dereference(task->pids[type].pid), ns); 431 } 432 rcu_read_unlock(); 433 434 return nr; 435 } 436 EXPORT_SYMBOL(__task_pid_nr_ns); 437 438 struct pid_namespace *task_active_pid_ns(struct task_struct *tsk) 439 { 440 return ns_of_pid(task_pid(tsk)); 441 } 442 EXPORT_SYMBOL_GPL(task_active_pid_ns); 443 444 /* 445 * Used by proc to find the first pid that is greater than or equal to nr. 446 * 447 * If there is a pid at nr this function is exactly the same as find_pid_ns. 448 */ 449 struct pid *find_ge_pid(int nr, struct pid_namespace *ns) 450 { 451 return idr_get_next(&ns->idr, &nr); 452 } 453 454 void __init pid_idr_init(void) 455 { 456 /* Verify no one has done anything silly: */ 457 BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_ADDING); 458 459 /* bump default and minimum pid_max based on number of cpus */ 460 pid_max = min(pid_max_max, max_t(int, pid_max, 461 PIDS_PER_CPU_DEFAULT * num_possible_cpus())); 462 pid_max_min = max_t(int, pid_max_min, 463 PIDS_PER_CPU_MIN * num_possible_cpus()); 464 pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min); 465 466 idr_init(&init_pid_ns.idr); 467 468 init_pid_ns.pid_cachep = KMEM_CACHE(pid, 469 SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT); 470 } 471