/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 William Irwin, IBM
 * (C) 2004 William Irwin, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given IDs on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, which bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario when all but one out of 1 million PIDs possible are
 * allocated already: the scanning of 32 list entries and at most PAGE_SIZE
 * bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>

#define pid_hashfn(nr, ns)	\
	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
static struct hlist_head *pid_hash;
static unsigned int pidhash_shift = 4;
struct pid init_struct_pid = INIT_STRUCT_PID;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

#define BITS_PER_PAGE		(PAGE_SIZE*8)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)

static inline int mk_pid(struct pid_namespace *pid_ns,
		struct pidmap *map, int off)
{
	return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
}

#define find_next_offset(map, off)					\
	find_next_zero_bit((map)->page, BITS_PER_PAGE, off)

/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales up to 4 million PIDs at runtime.
 */
struct pid_namespace init_pid_ns = {
	.kref = {
		.refcount       = ATOMIC_INIT(2),
	},
	.pidmap = {
		[ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
	},
	.last_pid = 0,
	.level = 0,
	.child_reaper = &init_task,
};
EXPORT_SYMBOL_GPL(init_pid_ns);

int is_container_init(struct task_struct *tsk)
{
	int ret = 0;
	struct pid *pid;

	rcu_read_lock();
	pid = task_pid(tsk);
	if (pid != NULL && pid->numbers[pid->level].nr == 1)
		ret = 1;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(is_container_init);
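/*
 * Illustrative sketch: how a PID value maps onto the bitmap pages,
 * assuming 4 KiB pages (so BITS_PER_PAGE == 32768) and the hypothetical
 * value nr = 40000, which lands in the second bitmap page:
 *
 *	struct pidmap *map = &pid_ns->pidmap[40000 / BITS_PER_PAGE]; // pidmap[1]
 *	int offset = 40000 & BITS_PER_PAGE_MASK;                     // 7232
 *
 * mk_pid() computes the inverse: (map - pid_ns->pidmap) * BITS_PER_PAGE
 * + offset == 1 * 32768 + 7232 == 40000. free_pidmap() below performs
 * the same split to clear the bit of a released PID.
 */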
/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

static void free_pidmap(struct upid *upid)
{
	int nr = upid->nr;
	struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE;
	int offset = nr & BITS_PER_PAGE_MASK;

	clear_bit(offset, map->page);
	atomic_inc(&map->nr_free);
}
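/*
 * A minimal sketch of how the allocator below behaves, assuming
 * pid_max == 32768 and a hypothetical namespace state:
 *
 *	pid_ns->last_pid == 4000	-> one test_and_set_bit() on bit 4001
 *	pid_ns->last_pid == 32767	-> wraps and retries from RESERVED_PIDS
 *
 * Setting and clearing bits is lockless; pidmap_lock is only taken to
 * install a freshly allocated bitmap page without leaking a duplicate.
 */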
static int alloc_pidmap(struct pid_namespace *pid_ns)
{
	int i, offset, max_scan, pid, last = pid_ns->last_pid;
	struct pidmap *map;

	pid = last + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;
	offset = pid & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
	max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
	for (i = 0; i <= max_scan; ++i) {
		if (unlikely(!map->page)) {
			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irq(&pidmap_lock);
			if (!map->page) {
				map->page = page;
				page = NULL;
			}
			spin_unlock_irq(&pidmap_lock);
			kfree(page);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->nr_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->nr_free);
					pid_ns->last_pid = pid;
					return pid;
				}
				offset = find_next_offset(map, offset);
				pid = mk_pid(pid_ns, map, offset);
				/*
				 * find_next_offset() found a bit, the pid from it
				 * is in-bounds, and if we fell back to the last
				 * bitmap block and the final block was the same
				 * as the starting point, pid is before last_pid.
				 */
			} while (offset < BITS_PER_PAGE && pid < pid_max &&
					(i != max_scan || pid < last ||
					    !((last+1) & BITS_PER_PAGE_MASK)));
		}
		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
			++map;
			offset = 0;
		} else {
			map = &pid_ns->pidmap[0];
			offset = RESERVED_PIDS;
			if (unlikely(last == offset))
				break;
		}
		pid = mk_pid(pid_ns, map, offset);
	}
	return -1;
}

int next_pidmap(struct pid_namespace *pid_ns, int last)
{
	int offset;
	struct pidmap *map, *end;

	offset = (last + 1) & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
	for (; map < end; map++, offset = 0) {
		if (unlikely(!map->page))
			continue;
		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
		if (offset < BITS_PER_PAGE)
			return mk_pid(pid_ns, map, offset);
	}
	return -1;
}

void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++)
		hlist_del_rcu(&pid->numbers[i].pid_chain);
	spin_unlock_irqrestore(&pidmap_lock, flags);

	for (i = 0; i <= pid->level; i++)
		free_pidmap(pid->numbers + i);

	call_rcu(&pid->rcu, delayed_put_pid);
}

struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		goto out;

	tmp = ns;
	for (i = ns->level; i >= 0; i--) {
		nr = alloc_pidmap(tmp);
		if (nr < 0)
			goto out_free;

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	get_pid_ns(ns);
	pid->level = ns->level;
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	upid = pid->numbers + ns->level;
	spin_lock_irq(&pidmap_lock);
	for ( ; upid >= pid->numbers; --upid)
		hlist_add_head_rcu(&upid->pid_chain,
				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
	spin_unlock_irq(&pidmap_lock);

out:
	return pid;

out_free:
	while (++i <= ns->level)
		free_pidmap(pid->numbers + i);

	kmem_cache_free(ns->pid_cachep, pid);
	pid = NULL;
	goto out;
}

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	struct hlist_node *elem;
	struct upid *pnr;

	hlist_for_each_entry_rcu(pnr, elem,
			&pid_hash[pid_hashfn(nr, ns)], pid_chain)
		if (pnr->nr == nr && pnr->ns == ns)
			return container_of(pnr, struct pid,
					numbers[ns->level]);

	return NULL;
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, current->nsproxy->pid_ns);
}
EXPORT_SYMBOL_GPL(find_vpid);
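/*
 * The lookups above are only RCU-safe; a hypothetical caller that wants
 * a task from a numeric PID would bracket them like this:
 *
 *	rcu_read_lock();
 *	pid = find_vpid(nr);
 *	task = pid ? pid_task(pid, PIDTYPE_PID) : NULL;
 *	rcu_read_unlock();
 *
 * Neither pointer may be used after rcu_read_unlock() unless a
 * reference was taken first, which is exactly what find_get_pid()
 * and get_pid_task() below do.
 */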
/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	struct pid_link *link;

	link = &task->pids[type];
	link->pid = pid;
	hlist_add_head_rcu(&link->node, &pid->tasks[type]);
}

static void __change_pid(struct task_struct *task, enum pid_type type,
			struct pid *new)
{
	struct pid_link *link;
	struct pid *pid;
	int tmp;

	link = &task->pids[type];
	pid = link->pid;

	hlist_del_rcu(&link->node);
	link->pid = new;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
	__change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(task, type, pid);
	attach_pid(task, type, pid);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
			enum pid_type type)
{
	new->pids[type].pid = old->pids[type].pid;
	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
}

struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference_check(pid->tasks[type].first,
					      rcu_read_lock_held() ||
					      lockdep_tasklist_lock_is_held());
		if (first)
			result = hlist_entry(first, struct task_struct, pids[(type)].node);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
}

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	if (type != PIDTYPE_PID)
		task = task->group_leader;
	pid = get_pid(task->pids[type].pid);
	rcu_read_unlock();
	return pid;
}

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}

pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, current->nsproxy->pid_ns);
}
EXPORT_SYMBOL_GPL(pid_vnr);
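/*
 * Worked example for pid_nr_ns()/pid_vnr(), assuming a hypothetical
 * struct pid allocated at namespace level 1:
 *
 *	pid->numbers[0] == { .nr = 12345, .ns = &init_pid_ns }
 *	pid->numbers[1] == { .nr = 7,     .ns = child_ns }
 *
 *	pid_nr_ns(pid, &init_pid_ns)	-> 12345
 *	pid_nr_ns(pid, child_ns)	-> 7
 *	pid_nr_ns(pid, unrelated_ns)	-> 0 (pid not visible there)
 *
 * pid_vnr() is simply pid_nr_ns() against the caller's own namespace.
 */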
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns)
{
	pid_t nr = 0;

	rcu_read_lock();
	if (!ns)
		ns = current->nsproxy->pid_ns;
	if (likely(pid_alive(task))) {
		if (type != PIDTYPE_PID)
			task = task->group_leader;
		nr = pid_nr_ns(task->pids[type].pid, ns);
	}
	rcu_read_unlock();

	return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_tgid(tsk), ns);
}
EXPORT_SYMBOL(task_tgid_nr_ns);

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
	return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	struct pid *pid;

	do {
		pid = find_pid_ns(nr, ns);
		if (pid)
			break;
		nr = next_pidmap(ns, nr);
	} while (nr > 0);

	return pid;
}

/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine.  From a minimum of 16 slots up to 4096 slots at one gigabyte or
 * more.
 */
void __init pidhash_init(void)
{
	int i, pidhash_size;

	pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
					   HASH_EARLY | HASH_SMALL,
					   &pidhash_shift, NULL, 4096);
	pidhash_size = 1 << pidhash_shift;

	for (i = 0; i < pidhash_size; i++)
		INIT_HLIST_HEAD(&pid_hash[i]);
}

void __init pidmap_init(void)
{
	/* bump default and minimum pid_max based on number of cpus */
	pid_max = min(pid_max_max, max_t(int, pid_max,
				PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
	pid_max_min = max_t(int, pid_max_min,
				PIDS_PER_CPU_MIN * num_possible_cpus());
	pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);

	init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	/* Reserve PID 0.  We never call free_pidmap(0) */
	set_bit(0, init_pid_ns.pidmap[0].page);
	atomic_dec(&init_pid_ns.pidmap[0].nr_free);

	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}
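/*
 * A sketch of the intended boot-time call order, as driven by
 * start_kernel(): pidhash_init() runs early, while only the bootmem
 * allocator is available (hence HASH_EARLY), and pidmap_init() runs
 * later, once kzalloc() and KMEM_CACHE() work:
 *
 *	pidhash_init();		// size and zero the PID hash table
 *	...
 *	pidmap_init();		// reserve PID 0, create the pid slab cache
 */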