/*
 *  linux/fs/proc/base.c
 *
 *  Copyright (C) 1991, 1992 Linus Torvalds
 *
 *  proc base directory handling functions
 *
 *  1999, Al Viro. Rewritten. Now it covers the whole per-process part.
 *  Instead of using magical inumbers to determine the kind of object
 *  we allocate and fill in-core inodes upon lookup. They don't even
 *  go into icache. We cache the reference to task_struct upon lookup too.
 *  Eventually it should become a filesystem in its own right. We don't use
 *  the rest of procfs anymore.
 *
 *
 *  Changelog:
 *  17-Jan-2005
 *	Allan Bezerra
 *	Bruna Moreira <bruna.moreira@indt.org.br>
 *	Edjard Mota <edjard.mota@indt.org.br>
 *	Ilias Biris <ilias.biris@indt.org.br>
 *	Mauricio Lin <mauricio.lin@indt.org.br>
 *
 *  Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT
 *
 *  A new process specific entry (smaps) included in /proc. It shows the
 *  size of rss for each memory area. The maps entry lacks information
 *  about physical memory size (rss) for each mapped file, i.e.,
 *  rss information for executables and library files.
 *  This additional information is useful for any tools that need to know
 *  about physical memory consumption for a process specific library.
 *
 *  Changelog:
 *  21-Feb-2005
 *  Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT
 *  Pud inclusion in the page table walking.
 *
 *  ChangeLog:
 *  10-Mar-2005
 *  10LE Instituto Nokia de Tecnologia - INdT:
 *  A better way to walk through the page table, as suggested by Hugh Dickins.
 *
 *  Simo Piiroinen <simo.piiroinen@nokia.com>:
 *  Smaps information related to shared, private, clean and dirty pages.
 *
 *  Paul Mundt <paul.mundt@nokia.com>:
 *  Overall revision of smaps.
 */

#include <asm/uaccess.h>

#include <linux/errno.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/string.h>
#include <linux/seq_file.h>
#include <linux/namei.h>
#include <linux/mnt_namespace.h>
#include <linux/mm.h>
#include <linux/rcupdate.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/cpuset.h>
#include <linux/audit.h>
#include <linux/poll.h>
#include <linux/nsproxy.h>
#include <linux/oom.h>
#include "internal.h"

/* NOTE:
 *	Implementing inode permission operations in /proc is almost
 *	certainly an error.  Permission checks need to happen during
 *	each system call, not at open time.  The reason is that most of
 *	what we wish to check for permissions in /proc varies at runtime.
 *
 *	The classic example of a problem is opening file descriptors
 *	in /proc for a task before it execs a suid executable.
 */


/* Worst case buffer size needed for holding an integer.
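 * (Presumably sized for a sign, ten digits, a trailing newline and a NUL:
 *  "-2147483648\n" plus the terminator is 13 bytes.)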
*/ 89 #define PROC_NUMBUF 13 90 91 struct pid_entry { 92 char *name; 93 int len; 94 mode_t mode; 95 const struct inode_operations *iop; 96 const struct file_operations *fop; 97 union proc_op op; 98 }; 99 100 #define NOD(NAME, MODE, IOP, FOP, OP) { \ 101 .name = (NAME), \ 102 .len = sizeof(NAME) - 1, \ 103 .mode = MODE, \ 104 .iop = IOP, \ 105 .fop = FOP, \ 106 .op = OP, \ 107 } 108 109 #define DIR(NAME, MODE, OTYPE) \ 110 NOD(NAME, (S_IFDIR|(MODE)), \ 111 &proc_##OTYPE##_inode_operations, &proc_##OTYPE##_operations, \ 112 {} ) 113 #define LNK(NAME, OTYPE) \ 114 NOD(NAME, (S_IFLNK|S_IRWXUGO), \ 115 &proc_pid_link_inode_operations, NULL, \ 116 { .proc_get_link = &proc_##OTYPE##_link } ) 117 #define REG(NAME, MODE, OTYPE) \ 118 NOD(NAME, (S_IFREG|(MODE)), NULL, \ 119 &proc_##OTYPE##_operations, {}) 120 #define INF(NAME, MODE, OTYPE) \ 121 NOD(NAME, (S_IFREG|(MODE)), \ 122 NULL, &proc_info_file_operations, \ 123 { .proc_read = &proc_##OTYPE } ) 124 125 int maps_protect; 126 EXPORT_SYMBOL(maps_protect); 127 128 static struct fs_struct *get_fs_struct(struct task_struct *task) 129 { 130 struct fs_struct *fs; 131 task_lock(task); 132 fs = task->fs; 133 if(fs) 134 atomic_inc(&fs->count); 135 task_unlock(task); 136 return fs; 137 } 138 139 static int get_nr_threads(struct task_struct *tsk) 140 { 141 /* Must be called with the rcu_read_lock held */ 142 unsigned long flags; 143 int count = 0; 144 145 if (lock_task_sighand(tsk, &flags)) { 146 count = atomic_read(&tsk->signal->count); 147 unlock_task_sighand(tsk, &flags); 148 } 149 return count; 150 } 151 152 static int proc_cwd_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt) 153 { 154 struct task_struct *task = get_proc_task(inode); 155 struct fs_struct *fs = NULL; 156 int result = -ENOENT; 157 158 if (task) { 159 fs = get_fs_struct(task); 160 put_task_struct(task); 161 } 162 if (fs) { 163 read_lock(&fs->lock); 164 *mnt = mntget(fs->pwdmnt); 165 *dentry = dget(fs->pwd); 166 read_unlock(&fs->lock); 167 result = 0; 168 put_fs_struct(fs); 169 } 170 return result; 171 } 172 173 static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt) 174 { 175 struct task_struct *task = get_proc_task(inode); 176 struct fs_struct *fs = NULL; 177 int result = -ENOENT; 178 179 if (task) { 180 fs = get_fs_struct(task); 181 put_task_struct(task); 182 } 183 if (fs) { 184 read_lock(&fs->lock); 185 *mnt = mntget(fs->rootmnt); 186 *dentry = dget(fs->root); 187 read_unlock(&fs->lock); 188 result = 0; 189 put_fs_struct(fs); 190 } 191 return result; 192 } 193 194 #define MAY_PTRACE(task) \ 195 (task == current || \ 196 (task->parent == current && \ 197 (task->ptrace & PT_PTRACED) && \ 198 (task->state == TASK_STOPPED || task->state == TASK_TRACED) && \ 199 security_ptrace(current,task) == 0)) 200 201 static int proc_pid_environ(struct task_struct *task, char * buffer) 202 { 203 int res = 0; 204 struct mm_struct *mm = get_task_mm(task); 205 if (mm) { 206 unsigned int len; 207 208 res = -ESRCH; 209 if (!ptrace_may_attach(task)) 210 goto out; 211 212 len = mm->env_end - mm->env_start; 213 if (len > PAGE_SIZE) 214 len = PAGE_SIZE; 215 res = access_process_vm(task, mm->env_start, buffer, len, 0); 216 out: 217 mmput(mm); 218 } 219 return res; 220 } 221 222 static int proc_pid_cmdline(struct task_struct *task, char * buffer) 223 { 224 int res = 0; 225 unsigned int len; 226 struct mm_struct *mm = get_task_mm(task); 227 if (!mm) 228 goto out; 229 if (!mm->arg_end) 230 goto out_mm; /* Shh! 
No looking before we're done */ 231 232 len = mm->arg_end - mm->arg_start; 233 234 if (len > PAGE_SIZE) 235 len = PAGE_SIZE; 236 237 res = access_process_vm(task, mm->arg_start, buffer, len, 0); 238 239 // If the nul at the end of args has been overwritten, then 240 // assume application is using setproctitle(3). 241 if (res > 0 && buffer[res-1] != '\0' && len < PAGE_SIZE) { 242 len = strnlen(buffer, res); 243 if (len < res) { 244 res = len; 245 } else { 246 len = mm->env_end - mm->env_start; 247 if (len > PAGE_SIZE - res) 248 len = PAGE_SIZE - res; 249 res += access_process_vm(task, mm->env_start, buffer+res, len, 0); 250 res = strnlen(buffer, res); 251 } 252 } 253 out_mm: 254 mmput(mm); 255 out: 256 return res; 257 } 258 259 static int proc_pid_auxv(struct task_struct *task, char *buffer) 260 { 261 int res = 0; 262 struct mm_struct *mm = get_task_mm(task); 263 if (mm) { 264 unsigned int nwords = 0; 265 do 266 nwords += 2; 267 while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */ 268 res = nwords * sizeof(mm->saved_auxv[0]); 269 if (res > PAGE_SIZE) 270 res = PAGE_SIZE; 271 memcpy(buffer, mm->saved_auxv, res); 272 mmput(mm); 273 } 274 return res; 275 } 276 277 278 #ifdef CONFIG_KALLSYMS 279 /* 280 * Provides a wchan file via kallsyms in a proper one-value-per-file format. 281 * Returns the resolved symbol. If that fails, simply return the address. 282 */ 283 static int proc_pid_wchan(struct task_struct *task, char *buffer) 284 { 285 unsigned long wchan; 286 char symname[KSYM_NAME_LEN]; 287 288 wchan = get_wchan(task); 289 290 if (lookup_symbol_name(wchan, symname) < 0) 291 return sprintf(buffer, "%lu", wchan); 292 else 293 return sprintf(buffer, "%s", symname); 294 } 295 #endif /* CONFIG_KALLSYMS */ 296 297 #ifdef CONFIG_SCHEDSTATS 298 /* 299 * Provides /proc/PID/schedstat 300 */ 301 static int proc_pid_schedstat(struct task_struct *task, char *buffer) 302 { 303 return sprintf(buffer, "%llu %llu %lu\n", 304 task->sched_info.cpu_time, 305 task->sched_info.run_delay, 306 task->sched_info.pcnt); 307 } 308 #endif 309 310 /* The badness from the OOM killer */ 311 unsigned long badness(struct task_struct *p, unsigned long uptime); 312 static int proc_oom_score(struct task_struct *task, char *buffer) 313 { 314 unsigned long points; 315 struct timespec uptime; 316 317 do_posix_clock_monotonic_gettime(&uptime); 318 read_lock(&tasklist_lock); 319 points = badness(task, uptime.tv_sec); 320 read_unlock(&tasklist_lock); 321 return sprintf(buffer, "%lu\n", points); 322 } 323 324 /************************************************************************/ 325 /* Here the fs part begins */ 326 /************************************************************************/ 327 328 /* permission checks */ 329 static int proc_fd_access_allowed(struct inode *inode) 330 { 331 struct task_struct *task; 332 int allowed = 0; 333 /* Allow access to a task's file descriptors if it is us or we 334 * may use ptrace attach to the process and find out that 335 * information. 
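 * (Concretely, the check below just asks ptrace_may_attach(); callers such
 *  as proc_pid_follow_link() and proc_pid_readlink() repeat it on every
 *  call instead of caching the result, per the NOTE at the top of this file.)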
336 */ 337 task = get_proc_task(inode); 338 if (task) { 339 allowed = ptrace_may_attach(task); 340 put_task_struct(task); 341 } 342 return allowed; 343 } 344 345 static int proc_setattr(struct dentry *dentry, struct iattr *attr) 346 { 347 int error; 348 struct inode *inode = dentry->d_inode; 349 350 if (attr->ia_valid & ATTR_MODE) 351 return -EPERM; 352 353 error = inode_change_ok(inode, attr); 354 if (!error) 355 error = inode_setattr(inode, attr); 356 return error; 357 } 358 359 static const struct inode_operations proc_def_inode_operations = { 360 .setattr = proc_setattr, 361 }; 362 363 extern struct seq_operations mounts_op; 364 struct proc_mounts { 365 struct seq_file m; 366 int event; 367 }; 368 369 static int mounts_open(struct inode *inode, struct file *file) 370 { 371 struct task_struct *task = get_proc_task(inode); 372 struct mnt_namespace *ns = NULL; 373 struct proc_mounts *p; 374 int ret = -EINVAL; 375 376 if (task) { 377 task_lock(task); 378 if (task->nsproxy) { 379 ns = task->nsproxy->mnt_ns; 380 if (ns) 381 get_mnt_ns(ns); 382 } 383 task_unlock(task); 384 put_task_struct(task); 385 } 386 387 if (ns) { 388 ret = -ENOMEM; 389 p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL); 390 if (p) { 391 file->private_data = &p->m; 392 ret = seq_open(file, &mounts_op); 393 if (!ret) { 394 p->m.private = ns; 395 p->event = ns->event; 396 return 0; 397 } 398 kfree(p); 399 } 400 put_mnt_ns(ns); 401 } 402 return ret; 403 } 404 405 static int mounts_release(struct inode *inode, struct file *file) 406 { 407 struct seq_file *m = file->private_data; 408 struct mnt_namespace *ns = m->private; 409 put_mnt_ns(ns); 410 return seq_release(inode, file); 411 } 412 413 static unsigned mounts_poll(struct file *file, poll_table *wait) 414 { 415 struct proc_mounts *p = file->private_data; 416 struct mnt_namespace *ns = p->m.private; 417 unsigned res = 0; 418 419 poll_wait(file, &ns->poll, wait); 420 421 spin_lock(&vfsmount_lock); 422 if (p->event != ns->event) { 423 p->event = ns->event; 424 res = POLLERR; 425 } 426 spin_unlock(&vfsmount_lock); 427 428 return res; 429 } 430 431 static const struct file_operations proc_mounts_operations = { 432 .open = mounts_open, 433 .read = seq_read, 434 .llseek = seq_lseek, 435 .release = mounts_release, 436 .poll = mounts_poll, 437 }; 438 439 extern struct seq_operations mountstats_op; 440 static int mountstats_open(struct inode *inode, struct file *file) 441 { 442 int ret = seq_open(file, &mountstats_op); 443 444 if (!ret) { 445 struct seq_file *m = file->private_data; 446 struct mnt_namespace *mnt_ns = NULL; 447 struct task_struct *task = get_proc_task(inode); 448 449 if (task) { 450 task_lock(task); 451 if (task->nsproxy) 452 mnt_ns = task->nsproxy->mnt_ns; 453 if (mnt_ns) 454 get_mnt_ns(mnt_ns); 455 task_unlock(task); 456 put_task_struct(task); 457 } 458 459 if (mnt_ns) 460 m->private = mnt_ns; 461 else { 462 seq_release(inode, file); 463 ret = -EINVAL; 464 } 465 } 466 return ret; 467 } 468 469 static const struct file_operations proc_mountstats_operations = { 470 .open = mountstats_open, 471 .read = seq_read, 472 .llseek = seq_lseek, 473 .release = mounts_release, 474 }; 475 476 #define PROC_BLOCK_SIZE (3*1024) /* 4K page size but our output routines use some slack for overruns */ 477 478 static ssize_t proc_info_read(struct file * file, char __user * buf, 479 size_t count, loff_t *ppos) 480 { 481 struct inode * inode = file->f_path.dentry->d_inode; 482 unsigned long page; 483 ssize_t length; 484 struct task_struct *task = get_proc_task(inode); 485 486 length = 
-ESRCH; 487 if (!task) 488 goto out_no_task; 489 490 if (count > PROC_BLOCK_SIZE) 491 count = PROC_BLOCK_SIZE; 492 493 length = -ENOMEM; 494 if (!(page = __get_free_page(GFP_KERNEL))) 495 goto out; 496 497 length = PROC_I(inode)->op.proc_read(task, (char*)page); 498 499 if (length >= 0) 500 length = simple_read_from_buffer(buf, count, ppos, (char *)page, length); 501 free_page(page); 502 out: 503 put_task_struct(task); 504 out_no_task: 505 return length; 506 } 507 508 static const struct file_operations proc_info_file_operations = { 509 .read = proc_info_read, 510 }; 511 512 static int mem_open(struct inode* inode, struct file* file) 513 { 514 file->private_data = (void*)((long)current->self_exec_id); 515 return 0; 516 } 517 518 static ssize_t mem_read(struct file * file, char __user * buf, 519 size_t count, loff_t *ppos) 520 { 521 struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode); 522 char *page; 523 unsigned long src = *ppos; 524 int ret = -ESRCH; 525 struct mm_struct *mm; 526 527 if (!task) 528 goto out_no_task; 529 530 if (!MAY_PTRACE(task) || !ptrace_may_attach(task)) 531 goto out; 532 533 ret = -ENOMEM; 534 page = (char *)__get_free_page(GFP_USER); 535 if (!page) 536 goto out; 537 538 ret = 0; 539 540 mm = get_task_mm(task); 541 if (!mm) 542 goto out_free; 543 544 ret = -EIO; 545 546 if (file->private_data != (void*)((long)current->self_exec_id)) 547 goto out_put; 548 549 ret = 0; 550 551 while (count > 0) { 552 int this_len, retval; 553 554 this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count; 555 retval = access_process_vm(task, src, page, this_len, 0); 556 if (!retval || !MAY_PTRACE(task) || !ptrace_may_attach(task)) { 557 if (!ret) 558 ret = -EIO; 559 break; 560 } 561 562 if (copy_to_user(buf, page, retval)) { 563 ret = -EFAULT; 564 break; 565 } 566 567 ret += retval; 568 src += retval; 569 buf += retval; 570 count -= retval; 571 } 572 *ppos = src; 573 574 out_put: 575 mmput(mm); 576 out_free: 577 free_page((unsigned long) page); 578 out: 579 put_task_struct(task); 580 out_no_task: 581 return ret; 582 } 583 584 #define mem_write NULL 585 586 #ifndef mem_write 587 /* This is a security hazard */ 588 static ssize_t mem_write(struct file * file, const char __user *buf, 589 size_t count, loff_t *ppos) 590 { 591 int copied; 592 char *page; 593 struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode); 594 unsigned long dst = *ppos; 595 596 copied = -ESRCH; 597 if (!task) 598 goto out_no_task; 599 600 if (!MAY_PTRACE(task) || !ptrace_may_attach(task)) 601 goto out; 602 603 copied = -ENOMEM; 604 page = (char *)__get_free_page(GFP_USER); 605 if (!page) 606 goto out; 607 608 copied = 0; 609 while (count > 0) { 610 int this_len, retval; 611 612 this_len = (count > PAGE_SIZE) ? 
PAGE_SIZE : count; 613 if (copy_from_user(page, buf, this_len)) { 614 copied = -EFAULT; 615 break; 616 } 617 retval = access_process_vm(task, dst, page, this_len, 1); 618 if (!retval) { 619 if (!copied) 620 copied = -EIO; 621 break; 622 } 623 copied += retval; 624 buf += retval; 625 dst += retval; 626 count -= retval; 627 } 628 *ppos = dst; 629 free_page((unsigned long) page); 630 out: 631 put_task_struct(task); 632 out_no_task: 633 return copied; 634 } 635 #endif 636 637 static loff_t mem_lseek(struct file * file, loff_t offset, int orig) 638 { 639 switch (orig) { 640 case 0: 641 file->f_pos = offset; 642 break; 643 case 1: 644 file->f_pos += offset; 645 break; 646 default: 647 return -EINVAL; 648 } 649 force_successful_syscall_return(); 650 return file->f_pos; 651 } 652 653 static const struct file_operations proc_mem_operations = { 654 .llseek = mem_lseek, 655 .read = mem_read, 656 .write = mem_write, 657 .open = mem_open, 658 }; 659 660 static ssize_t oom_adjust_read(struct file *file, char __user *buf, 661 size_t count, loff_t *ppos) 662 { 663 struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode); 664 char buffer[PROC_NUMBUF]; 665 size_t len; 666 int oom_adjust; 667 668 if (!task) 669 return -ESRCH; 670 oom_adjust = task->oomkilladj; 671 put_task_struct(task); 672 673 len = snprintf(buffer, sizeof(buffer), "%i\n", oom_adjust); 674 675 return simple_read_from_buffer(buf, count, ppos, buffer, len); 676 } 677 678 static ssize_t oom_adjust_write(struct file *file, const char __user *buf, 679 size_t count, loff_t *ppos) 680 { 681 struct task_struct *task; 682 char buffer[PROC_NUMBUF], *end; 683 int oom_adjust; 684 685 memset(buffer, 0, sizeof(buffer)); 686 if (count > sizeof(buffer) - 1) 687 count = sizeof(buffer) - 1; 688 if (copy_from_user(buffer, buf, count)) 689 return -EFAULT; 690 oom_adjust = simple_strtol(buffer, &end, 0); 691 if ((oom_adjust < OOM_ADJUST_MIN || oom_adjust > OOM_ADJUST_MAX) && 692 oom_adjust != OOM_DISABLE) 693 return -EINVAL; 694 if (*end == '\n') 695 end++; 696 task = get_proc_task(file->f_path.dentry->d_inode); 697 if (!task) 698 return -ESRCH; 699 if (oom_adjust < task->oomkilladj && !capable(CAP_SYS_RESOURCE)) { 700 put_task_struct(task); 701 return -EACCES; 702 } 703 task->oomkilladj = oom_adjust; 704 put_task_struct(task); 705 if (end - buffer == 0) 706 return -EIO; 707 return end - buffer; 708 } 709 710 static const struct file_operations proc_oom_adjust_operations = { 711 .read = oom_adjust_read, 712 .write = oom_adjust_write, 713 }; 714 715 #ifdef CONFIG_MMU 716 static ssize_t clear_refs_write(struct file *file, const char __user *buf, 717 size_t count, loff_t *ppos) 718 { 719 struct task_struct *task; 720 char buffer[PROC_NUMBUF], *end; 721 struct mm_struct *mm; 722 723 memset(buffer, 0, sizeof(buffer)); 724 if (count > sizeof(buffer) - 1) 725 count = sizeof(buffer) - 1; 726 if (copy_from_user(buffer, buf, count)) 727 return -EFAULT; 728 if (!simple_strtol(buffer, &end, 0)) 729 return -EINVAL; 730 if (*end == '\n') 731 end++; 732 task = get_proc_task(file->f_path.dentry->d_inode); 733 if (!task) 734 return -ESRCH; 735 mm = get_task_mm(task); 736 if (mm) { 737 clear_refs_smap(mm); 738 mmput(mm); 739 } 740 put_task_struct(task); 741 if (end - buffer == 0) 742 return -EIO; 743 return end - buffer; 744 } 745 746 static struct file_operations proc_clear_refs_operations = { 747 .write = clear_refs_write, 748 }; 749 #endif 750 751 #ifdef CONFIG_AUDITSYSCALL 752 #define TMPBUFLEN 21 753 static ssize_t proc_loginuid_read(struct file * file, char 
__user * buf, 754 size_t count, loff_t *ppos) 755 { 756 struct inode * inode = file->f_path.dentry->d_inode; 757 struct task_struct *task = get_proc_task(inode); 758 ssize_t length; 759 char tmpbuf[TMPBUFLEN]; 760 761 if (!task) 762 return -ESRCH; 763 length = scnprintf(tmpbuf, TMPBUFLEN, "%u", 764 audit_get_loginuid(task->audit_context)); 765 put_task_struct(task); 766 return simple_read_from_buffer(buf, count, ppos, tmpbuf, length); 767 } 768 769 static ssize_t proc_loginuid_write(struct file * file, const char __user * buf, 770 size_t count, loff_t *ppos) 771 { 772 struct inode * inode = file->f_path.dentry->d_inode; 773 char *page, *tmp; 774 ssize_t length; 775 uid_t loginuid; 776 777 if (!capable(CAP_AUDIT_CONTROL)) 778 return -EPERM; 779 780 if (current != pid_task(proc_pid(inode), PIDTYPE_PID)) 781 return -EPERM; 782 783 if (count >= PAGE_SIZE) 784 count = PAGE_SIZE - 1; 785 786 if (*ppos != 0) { 787 /* No partial writes. */ 788 return -EINVAL; 789 } 790 page = (char*)__get_free_page(GFP_USER); 791 if (!page) 792 return -ENOMEM; 793 length = -EFAULT; 794 if (copy_from_user(page, buf, count)) 795 goto out_free_page; 796 797 page[count] = '\0'; 798 loginuid = simple_strtoul(page, &tmp, 10); 799 if (tmp == page) { 800 length = -EINVAL; 801 goto out_free_page; 802 803 } 804 length = audit_set_loginuid(current, loginuid); 805 if (likely(length == 0)) 806 length = count; 807 808 out_free_page: 809 free_page((unsigned long) page); 810 return length; 811 } 812 813 static const struct file_operations proc_loginuid_operations = { 814 .read = proc_loginuid_read, 815 .write = proc_loginuid_write, 816 }; 817 #endif 818 819 #ifdef CONFIG_FAULT_INJECTION 820 static ssize_t proc_fault_inject_read(struct file * file, char __user * buf, 821 size_t count, loff_t *ppos) 822 { 823 struct task_struct *task = get_proc_task(file->f_dentry->d_inode); 824 char buffer[PROC_NUMBUF]; 825 size_t len; 826 int make_it_fail; 827 828 if (!task) 829 return -ESRCH; 830 make_it_fail = task->make_it_fail; 831 put_task_struct(task); 832 833 len = snprintf(buffer, sizeof(buffer), "%i\n", make_it_fail); 834 835 return simple_read_from_buffer(buf, count, ppos, buffer, len); 836 } 837 838 static ssize_t proc_fault_inject_write(struct file * file, 839 const char __user * buf, size_t count, loff_t *ppos) 840 { 841 struct task_struct *task; 842 char buffer[PROC_NUMBUF], *end; 843 int make_it_fail; 844 845 if (!capable(CAP_SYS_RESOURCE)) 846 return -EPERM; 847 memset(buffer, 0, sizeof(buffer)); 848 if (count > sizeof(buffer) - 1) 849 count = sizeof(buffer) - 1; 850 if (copy_from_user(buffer, buf, count)) 851 return -EFAULT; 852 make_it_fail = simple_strtol(buffer, &end, 0); 853 if (*end == '\n') 854 end++; 855 task = get_proc_task(file->f_dentry->d_inode); 856 if (!task) 857 return -ESRCH; 858 task->make_it_fail = make_it_fail; 859 put_task_struct(task); 860 if (end - buffer == 0) 861 return -EIO; 862 return end - buffer; 863 } 864 865 static const struct file_operations proc_fault_inject_operations = { 866 .read = proc_fault_inject_read, 867 .write = proc_fault_inject_write, 868 }; 869 #endif 870 871 #ifdef CONFIG_SCHED_DEBUG 872 /* 873 * Print out various scheduling related per-task fields: 874 */ 875 static int sched_show(struct seq_file *m, void *v) 876 { 877 struct inode *inode = m->private; 878 struct task_struct *p; 879 880 WARN_ON(!inode); 881 882 p = get_proc_task(inode); 883 if (!p) 884 return -ESRCH; 885 proc_sched_show_task(p, m); 886 887 put_task_struct(p); 888 889 return 0; 890 } 891 892 static ssize_t 893 
sched_write(struct file *file, const char __user *buf, 894 size_t count, loff_t *offset) 895 { 896 struct inode *inode = file->f_path.dentry->d_inode; 897 struct task_struct *p; 898 899 WARN_ON(!inode); 900 901 p = get_proc_task(inode); 902 if (!p) 903 return -ESRCH; 904 proc_sched_set_task(p); 905 906 put_task_struct(p); 907 908 return count; 909 } 910 911 static int sched_open(struct inode *inode, struct file *filp) 912 { 913 int ret; 914 915 ret = single_open(filp, sched_show, NULL); 916 if (!ret) { 917 struct seq_file *m = filp->private_data; 918 919 m->private = inode; 920 } 921 return ret; 922 } 923 924 static const struct file_operations proc_pid_sched_operations = { 925 .open = sched_open, 926 .read = seq_read, 927 .write = sched_write, 928 .llseek = seq_lseek, 929 .release = seq_release, 930 }; 931 932 #endif 933 934 static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd) 935 { 936 struct inode *inode = dentry->d_inode; 937 int error = -EACCES; 938 939 /* We don't need a base pointer in the /proc filesystem */ 940 path_release(nd); 941 942 /* Are we allowed to snoop on the tasks file descriptors? */ 943 if (!proc_fd_access_allowed(inode)) 944 goto out; 945 946 error = PROC_I(inode)->op.proc_get_link(inode, &nd->dentry, &nd->mnt); 947 nd->last_type = LAST_BIND; 948 out: 949 return ERR_PTR(error); 950 } 951 952 static int do_proc_readlink(struct dentry *dentry, struct vfsmount *mnt, 953 char __user *buffer, int buflen) 954 { 955 struct inode * inode; 956 char *tmp = (char*)__get_free_page(GFP_KERNEL), *path; 957 int len; 958 959 if (!tmp) 960 return -ENOMEM; 961 962 inode = dentry->d_inode; 963 path = d_path(dentry, mnt, tmp, PAGE_SIZE); 964 len = PTR_ERR(path); 965 if (IS_ERR(path)) 966 goto out; 967 len = tmp + PAGE_SIZE - 1 - path; 968 969 if (len > buflen) 970 len = buflen; 971 if (copy_to_user(buffer, path, len)) 972 len = -EFAULT; 973 out: 974 free_page((unsigned long)tmp); 975 return len; 976 } 977 978 static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int buflen) 979 { 980 int error = -EACCES; 981 struct inode *inode = dentry->d_inode; 982 struct dentry *de; 983 struct vfsmount *mnt = NULL; 984 985 /* Are we allowed to snoop on the tasks file descriptors? */ 986 if (!proc_fd_access_allowed(inode)) 987 goto out; 988 989 error = PROC_I(inode)->op.proc_get_link(inode, &de, &mnt); 990 if (error) 991 goto out; 992 993 error = do_proc_readlink(de, mnt, buffer, buflen); 994 dput(de); 995 mntput(mnt); 996 out: 997 return error; 998 } 999 1000 static const struct inode_operations proc_pid_link_inode_operations = { 1001 .readlink = proc_pid_readlink, 1002 .follow_link = proc_pid_follow_link, 1003 .setattr = proc_setattr, 1004 }; 1005 1006 1007 /* building an inode */ 1008 1009 static int task_dumpable(struct task_struct *task) 1010 { 1011 int dumpable = 0; 1012 struct mm_struct *mm; 1013 1014 task_lock(task); 1015 mm = task->mm; 1016 if (mm) 1017 dumpable = mm->dumpable; 1018 task_unlock(task); 1019 if(dumpable == 1) 1020 return 1; 1021 return 0; 1022 } 1023 1024 1025 static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *task) 1026 { 1027 struct inode * inode; 1028 struct proc_inode *ei; 1029 1030 /* We need a new inode */ 1031 1032 inode = new_inode(sb); 1033 if (!inode) 1034 goto out; 1035 1036 /* Common stuff */ 1037 ei = PROC_I(inode); 1038 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; 1039 inode->i_op = &proc_def_inode_operations; 1040 1041 /* 1042 * grab the reference to task. 
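 * (Assumption: get_task_pid() returns NULL once the task has exited and
 *  been detached from its pid, which is what the !ei->pid check below
 *  handles.)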
1043 */ 1044 ei->pid = get_task_pid(task, PIDTYPE_PID); 1045 if (!ei->pid) 1046 goto out_unlock; 1047 1048 inode->i_uid = 0; 1049 inode->i_gid = 0; 1050 if (task_dumpable(task)) { 1051 inode->i_uid = task->euid; 1052 inode->i_gid = task->egid; 1053 } 1054 security_task_to_inode(task, inode); 1055 1056 out: 1057 return inode; 1058 1059 out_unlock: 1060 iput(inode); 1061 return NULL; 1062 } 1063 1064 static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) 1065 { 1066 struct inode *inode = dentry->d_inode; 1067 struct task_struct *task; 1068 generic_fillattr(inode, stat); 1069 1070 rcu_read_lock(); 1071 stat->uid = 0; 1072 stat->gid = 0; 1073 task = pid_task(proc_pid(inode), PIDTYPE_PID); 1074 if (task) { 1075 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) || 1076 task_dumpable(task)) { 1077 stat->uid = task->euid; 1078 stat->gid = task->egid; 1079 } 1080 } 1081 rcu_read_unlock(); 1082 return 0; 1083 } 1084 1085 /* dentry stuff */ 1086 1087 /* 1088 * Exceptional case: normally we are not allowed to unhash a busy 1089 * directory. In this case, however, we can do it - no aliasing problems 1090 * due to the way we treat inodes. 1091 * 1092 * Rewrite the inode's ownerships here because the owning task may have 1093 * performed a setuid(), etc. 1094 * 1095 * Before the /proc/pid/status file was created the only way to read 1096 * the effective uid of a /process was to stat /proc/pid. Reading 1097 * /proc/pid/status is slow enough that procps and other packages 1098 * kept stating /proc/pid. To keep the rules in /proc simple I have 1099 * made this apply to all per process world readable and executable 1100 * directories. 1101 */ 1102 static int pid_revalidate(struct dentry *dentry, struct nameidata *nd) 1103 { 1104 struct inode *inode = dentry->d_inode; 1105 struct task_struct *task = get_proc_task(inode); 1106 if (task) { 1107 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) || 1108 task_dumpable(task)) { 1109 inode->i_uid = task->euid; 1110 inode->i_gid = task->egid; 1111 } else { 1112 inode->i_uid = 0; 1113 inode->i_gid = 0; 1114 } 1115 inode->i_mode &= ~(S_ISUID | S_ISGID); 1116 security_task_to_inode(task, inode); 1117 put_task_struct(task); 1118 return 1; 1119 } 1120 d_drop(dentry); 1121 return 0; 1122 } 1123 1124 static int pid_delete_dentry(struct dentry * dentry) 1125 { 1126 /* Is the task we represent dead? 1127 * If so, then don't put the dentry on the lru list, 1128 * kill it immediately. 1129 */ 1130 return !proc_pid(dentry->d_inode)->tasks[PIDTYPE_PID].first; 1131 } 1132 1133 static struct dentry_operations pid_dentry_operations = 1134 { 1135 .d_revalidate = pid_revalidate, 1136 .d_delete = pid_delete_dentry, 1137 }; 1138 1139 /* Lookups */ 1140 1141 typedef struct dentry *instantiate_t(struct inode *, struct dentry *, 1142 struct task_struct *, const void *); 1143 1144 /* 1145 * Fill a directory entry. 1146 * 1147 * If possible create the dcache entry and derive our inode number and 1148 * file type from dcache entry. 1149 * 1150 * Since all of the proc inode numbers are dynamically generated, the inode 1151 * numbers do not exist until the inode is cache. This means creating the 1152 * the dcache entry in readdir is necessary to keep the inode numbers 1153 * reported by readdir in sync with the inode numbers reported 1154 * by stat. 
1155 */ 1156 static int proc_fill_cache(struct file *filp, void *dirent, filldir_t filldir, 1157 char *name, int len, 1158 instantiate_t instantiate, struct task_struct *task, const void *ptr) 1159 { 1160 struct dentry *child, *dir = filp->f_path.dentry; 1161 struct inode *inode; 1162 struct qstr qname; 1163 ino_t ino = 0; 1164 unsigned type = DT_UNKNOWN; 1165 1166 qname.name = name; 1167 qname.len = len; 1168 qname.hash = full_name_hash(name, len); 1169 1170 child = d_lookup(dir, &qname); 1171 if (!child) { 1172 struct dentry *new; 1173 new = d_alloc(dir, &qname); 1174 if (new) { 1175 child = instantiate(dir->d_inode, new, task, ptr); 1176 if (child) 1177 dput(new); 1178 else 1179 child = new; 1180 } 1181 } 1182 if (!child || IS_ERR(child) || !child->d_inode) 1183 goto end_instantiate; 1184 inode = child->d_inode; 1185 if (inode) { 1186 ino = inode->i_ino; 1187 type = inode->i_mode >> 12; 1188 } 1189 dput(child); 1190 end_instantiate: 1191 if (!ino) 1192 ino = find_inode_number(dir, &qname); 1193 if (!ino) 1194 ino = 1; 1195 return filldir(dirent, name, len, filp->f_pos, ino, type); 1196 } 1197 1198 static unsigned name_to_int(struct dentry *dentry) 1199 { 1200 const char *name = dentry->d_name.name; 1201 int len = dentry->d_name.len; 1202 unsigned n = 0; 1203 1204 if (len > 1 && *name == '0') 1205 goto out; 1206 while (len-- > 0) { 1207 unsigned c = *name++ - '0'; 1208 if (c > 9) 1209 goto out; 1210 if (n >= (~0U-9)/10) 1211 goto out; 1212 n *= 10; 1213 n += c; 1214 } 1215 return n; 1216 out: 1217 return ~0U; 1218 } 1219 1220 #define PROC_FDINFO_MAX 64 1221 1222 static int proc_fd_info(struct inode *inode, struct dentry **dentry, 1223 struct vfsmount **mnt, char *info) 1224 { 1225 struct task_struct *task = get_proc_task(inode); 1226 struct files_struct *files = NULL; 1227 struct file *file; 1228 int fd = proc_fd(inode); 1229 1230 if (task) { 1231 files = get_files_struct(task); 1232 put_task_struct(task); 1233 } 1234 if (files) { 1235 /* 1236 * We are not taking a ref to the file structure, so we must 1237 * hold ->file_lock. 
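 * (fcheck_files() is only safe under ->file_lock or rcu_read_lock(); holding
 *  the spinlock here also prevents the descriptor from being closed under us
 *  while f_pos and f_flags are copied out.)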
1238 */ 1239 spin_lock(&files->file_lock); 1240 file = fcheck_files(files, fd); 1241 if (file) { 1242 if (mnt) 1243 *mnt = mntget(file->f_path.mnt); 1244 if (dentry) 1245 *dentry = dget(file->f_path.dentry); 1246 if (info) 1247 snprintf(info, PROC_FDINFO_MAX, 1248 "pos:\t%lli\n" 1249 "flags:\t0%o\n", 1250 (long long) file->f_pos, 1251 file->f_flags); 1252 spin_unlock(&files->file_lock); 1253 put_files_struct(files); 1254 return 0; 1255 } 1256 spin_unlock(&files->file_lock); 1257 put_files_struct(files); 1258 } 1259 return -ENOENT; 1260 } 1261 1262 static int proc_fd_link(struct inode *inode, struct dentry **dentry, 1263 struct vfsmount **mnt) 1264 { 1265 return proc_fd_info(inode, dentry, mnt, NULL); 1266 } 1267 1268 static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd) 1269 { 1270 struct inode *inode = dentry->d_inode; 1271 struct task_struct *task = get_proc_task(inode); 1272 int fd = proc_fd(inode); 1273 struct files_struct *files; 1274 1275 if (task) { 1276 files = get_files_struct(task); 1277 if (files) { 1278 rcu_read_lock(); 1279 if (fcheck_files(files, fd)) { 1280 rcu_read_unlock(); 1281 put_files_struct(files); 1282 if (task_dumpable(task)) { 1283 inode->i_uid = task->euid; 1284 inode->i_gid = task->egid; 1285 } else { 1286 inode->i_uid = 0; 1287 inode->i_gid = 0; 1288 } 1289 inode->i_mode &= ~(S_ISUID | S_ISGID); 1290 security_task_to_inode(task, inode); 1291 put_task_struct(task); 1292 return 1; 1293 } 1294 rcu_read_unlock(); 1295 put_files_struct(files); 1296 } 1297 put_task_struct(task); 1298 } 1299 d_drop(dentry); 1300 return 0; 1301 } 1302 1303 static struct dentry_operations tid_fd_dentry_operations = 1304 { 1305 .d_revalidate = tid_fd_revalidate, 1306 .d_delete = pid_delete_dentry, 1307 }; 1308 1309 static struct dentry *proc_fd_instantiate(struct inode *dir, 1310 struct dentry *dentry, struct task_struct *task, const void *ptr) 1311 { 1312 unsigned fd = *(const unsigned *)ptr; 1313 struct file *file; 1314 struct files_struct *files; 1315 struct inode *inode; 1316 struct proc_inode *ei; 1317 struct dentry *error = ERR_PTR(-ENOENT); 1318 1319 inode = proc_pid_make_inode(dir->i_sb, task); 1320 if (!inode) 1321 goto out; 1322 ei = PROC_I(inode); 1323 ei->fd = fd; 1324 files = get_files_struct(task); 1325 if (!files) 1326 goto out_iput; 1327 inode->i_mode = S_IFLNK; 1328 1329 /* 1330 * We are not taking a ref to the file structure, so we must 1331 * hold ->file_lock. 
1332 */ 1333 spin_lock(&files->file_lock); 1334 file = fcheck_files(files, fd); 1335 if (!file) 1336 goto out_unlock; 1337 if (file->f_mode & 1) 1338 inode->i_mode |= S_IRUSR | S_IXUSR; 1339 if (file->f_mode & 2) 1340 inode->i_mode |= S_IWUSR | S_IXUSR; 1341 spin_unlock(&files->file_lock); 1342 put_files_struct(files); 1343 1344 inode->i_op = &proc_pid_link_inode_operations; 1345 inode->i_size = 64; 1346 ei->op.proc_get_link = proc_fd_link; 1347 dentry->d_op = &tid_fd_dentry_operations; 1348 d_add(dentry, inode); 1349 /* Close the race of the process dying before we return the dentry */ 1350 if (tid_fd_revalidate(dentry, NULL)) 1351 error = NULL; 1352 1353 out: 1354 return error; 1355 out_unlock: 1356 spin_unlock(&files->file_lock); 1357 put_files_struct(files); 1358 out_iput: 1359 iput(inode); 1360 goto out; 1361 } 1362 1363 static struct dentry *proc_lookupfd_common(struct inode *dir, 1364 struct dentry *dentry, 1365 instantiate_t instantiate) 1366 { 1367 struct task_struct *task = get_proc_task(dir); 1368 unsigned fd = name_to_int(dentry); 1369 struct dentry *result = ERR_PTR(-ENOENT); 1370 1371 if (!task) 1372 goto out_no_task; 1373 if (fd == ~0U) 1374 goto out; 1375 1376 result = instantiate(dir, dentry, task, &fd); 1377 out: 1378 put_task_struct(task); 1379 out_no_task: 1380 return result; 1381 } 1382 1383 static int proc_readfd_common(struct file * filp, void * dirent, 1384 filldir_t filldir, instantiate_t instantiate) 1385 { 1386 struct dentry *dentry = filp->f_path.dentry; 1387 struct inode *inode = dentry->d_inode; 1388 struct task_struct *p = get_proc_task(inode); 1389 unsigned int fd, tid, ino; 1390 int retval; 1391 struct files_struct * files; 1392 struct fdtable *fdt; 1393 1394 retval = -ENOENT; 1395 if (!p) 1396 goto out_no_task; 1397 retval = 0; 1398 tid = p->pid; 1399 1400 fd = filp->f_pos; 1401 switch (fd) { 1402 case 0: 1403 if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0) 1404 goto out; 1405 filp->f_pos++; 1406 case 1: 1407 ino = parent_ino(dentry); 1408 if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0) 1409 goto out; 1410 filp->f_pos++; 1411 default: 1412 files = get_files_struct(p); 1413 if (!files) 1414 goto out; 1415 rcu_read_lock(); 1416 fdt = files_fdtable(files); 1417 for (fd = filp->f_pos-2; 1418 fd < fdt->max_fds; 1419 fd++, filp->f_pos++) { 1420 char name[PROC_NUMBUF]; 1421 int len; 1422 1423 if (!fcheck_files(files, fd)) 1424 continue; 1425 rcu_read_unlock(); 1426 1427 len = snprintf(name, sizeof(name), "%d", fd); 1428 if (proc_fill_cache(filp, dirent, filldir, 1429 name, len, instantiate, 1430 p, &fd) < 0) { 1431 rcu_read_lock(); 1432 break; 1433 } 1434 rcu_read_lock(); 1435 } 1436 rcu_read_unlock(); 1437 put_files_struct(files); 1438 } 1439 out: 1440 put_task_struct(p); 1441 out_no_task: 1442 return retval; 1443 } 1444 1445 static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry, 1446 struct nameidata *nd) 1447 { 1448 return proc_lookupfd_common(dir, dentry, proc_fd_instantiate); 1449 } 1450 1451 static int proc_readfd(struct file *filp, void *dirent, filldir_t filldir) 1452 { 1453 return proc_readfd_common(filp, dirent, filldir, proc_fd_instantiate); 1454 } 1455 1456 static ssize_t proc_fdinfo_read(struct file *file, char __user *buf, 1457 size_t len, loff_t *ppos) 1458 { 1459 char tmp[PROC_FDINFO_MAX]; 1460 int err = proc_fd_info(file->f_path.dentry->d_inode, NULL, NULL, tmp); 1461 if (!err) 1462 err = simple_read_from_buffer(buf, len, ppos, tmp, strlen(tmp)); 1463 return err; 1464 } 1465 1466 static const struct 
file_operations proc_fdinfo_file_operations = { 1467 .open = nonseekable_open, 1468 .read = proc_fdinfo_read, 1469 }; 1470 1471 static const struct file_operations proc_fd_operations = { 1472 .read = generic_read_dir, 1473 .readdir = proc_readfd, 1474 }; 1475 1476 /* 1477 * /proc/pid/fd needs a special permission handler so that a process can still 1478 * access /proc/self/fd after it has executed a setuid(). 1479 */ 1480 static int proc_fd_permission(struct inode *inode, int mask, 1481 struct nameidata *nd) 1482 { 1483 int rv; 1484 1485 rv = generic_permission(inode, mask, NULL); 1486 if (rv == 0) 1487 return 0; 1488 if (task_pid(current) == proc_pid(inode)) 1489 rv = 0; 1490 return rv; 1491 } 1492 1493 /* 1494 * proc directories can do almost nothing.. 1495 */ 1496 static const struct inode_operations proc_fd_inode_operations = { 1497 .lookup = proc_lookupfd, 1498 .permission = proc_fd_permission, 1499 .setattr = proc_setattr, 1500 }; 1501 1502 static struct dentry *proc_fdinfo_instantiate(struct inode *dir, 1503 struct dentry *dentry, struct task_struct *task, const void *ptr) 1504 { 1505 unsigned fd = *(unsigned *)ptr; 1506 struct inode *inode; 1507 struct proc_inode *ei; 1508 struct dentry *error = ERR_PTR(-ENOENT); 1509 1510 inode = proc_pid_make_inode(dir->i_sb, task); 1511 if (!inode) 1512 goto out; 1513 ei = PROC_I(inode); 1514 ei->fd = fd; 1515 inode->i_mode = S_IFREG | S_IRUSR; 1516 inode->i_fop = &proc_fdinfo_file_operations; 1517 dentry->d_op = &tid_fd_dentry_operations; 1518 d_add(dentry, inode); 1519 /* Close the race of the process dying before we return the dentry */ 1520 if (tid_fd_revalidate(dentry, NULL)) 1521 error = NULL; 1522 1523 out: 1524 return error; 1525 } 1526 1527 static struct dentry *proc_lookupfdinfo(struct inode *dir, 1528 struct dentry *dentry, 1529 struct nameidata *nd) 1530 { 1531 return proc_lookupfd_common(dir, dentry, proc_fdinfo_instantiate); 1532 } 1533 1534 static int proc_readfdinfo(struct file *filp, void *dirent, filldir_t filldir) 1535 { 1536 return proc_readfd_common(filp, dirent, filldir, 1537 proc_fdinfo_instantiate); 1538 } 1539 1540 static const struct file_operations proc_fdinfo_operations = { 1541 .read = generic_read_dir, 1542 .readdir = proc_readfdinfo, 1543 }; 1544 1545 /* 1546 * proc directories can do almost nothing.. 
1547 */ 1548 static const struct inode_operations proc_fdinfo_inode_operations = { 1549 .lookup = proc_lookupfdinfo, 1550 .setattr = proc_setattr, 1551 }; 1552 1553 1554 static struct dentry *proc_pident_instantiate(struct inode *dir, 1555 struct dentry *dentry, struct task_struct *task, const void *ptr) 1556 { 1557 const struct pid_entry *p = ptr; 1558 struct inode *inode; 1559 struct proc_inode *ei; 1560 struct dentry *error = ERR_PTR(-EINVAL); 1561 1562 inode = proc_pid_make_inode(dir->i_sb, task); 1563 if (!inode) 1564 goto out; 1565 1566 ei = PROC_I(inode); 1567 inode->i_mode = p->mode; 1568 if (S_ISDIR(inode->i_mode)) 1569 inode->i_nlink = 2; /* Use getattr to fix if necessary */ 1570 if (p->iop) 1571 inode->i_op = p->iop; 1572 if (p->fop) 1573 inode->i_fop = p->fop; 1574 ei->op = p->op; 1575 dentry->d_op = &pid_dentry_operations; 1576 d_add(dentry, inode); 1577 /* Close the race of the process dying before we return the dentry */ 1578 if (pid_revalidate(dentry, NULL)) 1579 error = NULL; 1580 out: 1581 return error; 1582 } 1583 1584 static struct dentry *proc_pident_lookup(struct inode *dir, 1585 struct dentry *dentry, 1586 const struct pid_entry *ents, 1587 unsigned int nents) 1588 { 1589 struct inode *inode; 1590 struct dentry *error; 1591 struct task_struct *task = get_proc_task(dir); 1592 const struct pid_entry *p, *last; 1593 1594 error = ERR_PTR(-ENOENT); 1595 inode = NULL; 1596 1597 if (!task) 1598 goto out_no_task; 1599 1600 /* 1601 * Yes, it does not scale. And it should not. Don't add 1602 * new entries into /proc/<tgid>/ without very good reasons. 1603 */ 1604 last = &ents[nents - 1]; 1605 for (p = ents; p <= last; p++) { 1606 if (p->len != dentry->d_name.len) 1607 continue; 1608 if (!memcmp(dentry->d_name.name, p->name, p->len)) 1609 break; 1610 } 1611 if (p > last) 1612 goto out; 1613 1614 error = proc_pident_instantiate(dir, dentry, task, p); 1615 out: 1616 put_task_struct(task); 1617 out_no_task: 1618 return error; 1619 } 1620 1621 static int proc_pident_fill_cache(struct file *filp, void *dirent, 1622 filldir_t filldir, struct task_struct *task, const struct pid_entry *p) 1623 { 1624 return proc_fill_cache(filp, dirent, filldir, p->name, p->len, 1625 proc_pident_instantiate, task, p); 1626 } 1627 1628 static int proc_pident_readdir(struct file *filp, 1629 void *dirent, filldir_t filldir, 1630 const struct pid_entry *ents, unsigned int nents) 1631 { 1632 int i; 1633 int pid; 1634 struct dentry *dentry = filp->f_path.dentry; 1635 struct inode *inode = dentry->d_inode; 1636 struct task_struct *task = get_proc_task(inode); 1637 const struct pid_entry *p, *last; 1638 ino_t ino; 1639 int ret; 1640 1641 ret = -ENOENT; 1642 if (!task) 1643 goto out_no_task; 1644 1645 ret = 0; 1646 pid = task->pid; 1647 i = filp->f_pos; 1648 switch (i) { 1649 case 0: 1650 ino = inode->i_ino; 1651 if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0) 1652 goto out; 1653 i++; 1654 filp->f_pos++; 1655 /* fall through */ 1656 case 1: 1657 ino = parent_ino(dentry); 1658 if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0) 1659 goto out; 1660 i++; 1661 filp->f_pos++; 1662 /* fall through */ 1663 default: 1664 i -= 2; 1665 if (i >= nents) { 1666 ret = 1; 1667 goto out; 1668 } 1669 p = ents + i; 1670 last = &ents[nents - 1]; 1671 while (p <= last) { 1672 if (proc_pident_fill_cache(filp, dirent, filldir, task, p) < 0) 1673 goto out; 1674 filp->f_pos++; 1675 p++; 1676 } 1677 } 1678 1679 ret = 1; 1680 out: 1681 put_task_struct(task); 1682 out_no_task: 1683 return ret; 1684 } 1685 1686 #ifdef CONFIG_SECURITY 1687 
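/*
 * /proc/<pid>/attr/<name>: the task's security (LSM) attributes.  Reads and
 * writes below are forwarded to security_getprocattr() and
 * security_setprocattr(), keyed on the dentry name.
 */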
static ssize_t proc_pid_attr_read(struct file * file, char __user * buf, 1688 size_t count, loff_t *ppos) 1689 { 1690 struct inode * inode = file->f_path.dentry->d_inode; 1691 char *p = NULL; 1692 ssize_t length; 1693 struct task_struct *task = get_proc_task(inode); 1694 1695 if (!task) 1696 return -ESRCH; 1697 1698 length = security_getprocattr(task, 1699 (char*)file->f_path.dentry->d_name.name, 1700 &p); 1701 put_task_struct(task); 1702 if (length > 0) 1703 length = simple_read_from_buffer(buf, count, ppos, p, length); 1704 kfree(p); 1705 return length; 1706 } 1707 1708 static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf, 1709 size_t count, loff_t *ppos) 1710 { 1711 struct inode * inode = file->f_path.dentry->d_inode; 1712 char *page; 1713 ssize_t length; 1714 struct task_struct *task = get_proc_task(inode); 1715 1716 length = -ESRCH; 1717 if (!task) 1718 goto out_no_task; 1719 if (count > PAGE_SIZE) 1720 count = PAGE_SIZE; 1721 1722 /* No partial writes. */ 1723 length = -EINVAL; 1724 if (*ppos != 0) 1725 goto out; 1726 1727 length = -ENOMEM; 1728 page = (char*)__get_free_page(GFP_USER); 1729 if (!page) 1730 goto out; 1731 1732 length = -EFAULT; 1733 if (copy_from_user(page, buf, count)) 1734 goto out_free; 1735 1736 length = security_setprocattr(task, 1737 (char*)file->f_path.dentry->d_name.name, 1738 (void*)page, count); 1739 out_free: 1740 free_page((unsigned long) page); 1741 out: 1742 put_task_struct(task); 1743 out_no_task: 1744 return length; 1745 } 1746 1747 static const struct file_operations proc_pid_attr_operations = { 1748 .read = proc_pid_attr_read, 1749 .write = proc_pid_attr_write, 1750 }; 1751 1752 static const struct pid_entry attr_dir_stuff[] = { 1753 REG("current", S_IRUGO|S_IWUGO, pid_attr), 1754 REG("prev", S_IRUGO, pid_attr), 1755 REG("exec", S_IRUGO|S_IWUGO, pid_attr), 1756 REG("fscreate", S_IRUGO|S_IWUGO, pid_attr), 1757 REG("keycreate", S_IRUGO|S_IWUGO, pid_attr), 1758 REG("sockcreate", S_IRUGO|S_IWUGO, pid_attr), 1759 }; 1760 1761 static int proc_attr_dir_readdir(struct file * filp, 1762 void * dirent, filldir_t filldir) 1763 { 1764 return proc_pident_readdir(filp,dirent,filldir, 1765 attr_dir_stuff,ARRAY_SIZE(attr_dir_stuff)); 1766 } 1767 1768 static const struct file_operations proc_attr_dir_operations = { 1769 .read = generic_read_dir, 1770 .readdir = proc_attr_dir_readdir, 1771 }; 1772 1773 static struct dentry *proc_attr_dir_lookup(struct inode *dir, 1774 struct dentry *dentry, struct nameidata *nd) 1775 { 1776 return proc_pident_lookup(dir, dentry, 1777 attr_dir_stuff, ARRAY_SIZE(attr_dir_stuff)); 1778 } 1779 1780 static const struct inode_operations proc_attr_dir_inode_operations = { 1781 .lookup = proc_attr_dir_lookup, 1782 .getattr = pid_getattr, 1783 .setattr = proc_setattr, 1784 }; 1785 1786 #endif 1787 1788 /* 1789 * /proc/self: 1790 */ 1791 static int proc_self_readlink(struct dentry *dentry, char __user *buffer, 1792 int buflen) 1793 { 1794 char tmp[PROC_NUMBUF]; 1795 sprintf(tmp, "%d", current->tgid); 1796 return vfs_readlink(dentry,buffer,buflen,tmp); 1797 } 1798 1799 static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd) 1800 { 1801 char tmp[PROC_NUMBUF]; 1802 sprintf(tmp, "%d", current->tgid); 1803 return ERR_PTR(vfs_follow_link(nd,tmp)); 1804 } 1805 1806 static const struct inode_operations proc_self_inode_operations = { 1807 .readlink = proc_self_readlink, 1808 .follow_link = proc_self_follow_link, 1809 }; 1810 1811 /* 1812 * proc base 1813 * 1814 * These are the directory entries in the 
root directory of /proc 1815 * that properly belong to the /proc filesystem, as they describe 1816 * describe something that is process related. 1817 */ 1818 static const struct pid_entry proc_base_stuff[] = { 1819 NOD("self", S_IFLNK|S_IRWXUGO, 1820 &proc_self_inode_operations, NULL, {}), 1821 }; 1822 1823 /* 1824 * Exceptional case: normally we are not allowed to unhash a busy 1825 * directory. In this case, however, we can do it - no aliasing problems 1826 * due to the way we treat inodes. 1827 */ 1828 static int proc_base_revalidate(struct dentry *dentry, struct nameidata *nd) 1829 { 1830 struct inode *inode = dentry->d_inode; 1831 struct task_struct *task = get_proc_task(inode); 1832 if (task) { 1833 put_task_struct(task); 1834 return 1; 1835 } 1836 d_drop(dentry); 1837 return 0; 1838 } 1839 1840 static struct dentry_operations proc_base_dentry_operations = 1841 { 1842 .d_revalidate = proc_base_revalidate, 1843 .d_delete = pid_delete_dentry, 1844 }; 1845 1846 static struct dentry *proc_base_instantiate(struct inode *dir, 1847 struct dentry *dentry, struct task_struct *task, const void *ptr) 1848 { 1849 const struct pid_entry *p = ptr; 1850 struct inode *inode; 1851 struct proc_inode *ei; 1852 struct dentry *error = ERR_PTR(-EINVAL); 1853 1854 /* Allocate the inode */ 1855 error = ERR_PTR(-ENOMEM); 1856 inode = new_inode(dir->i_sb); 1857 if (!inode) 1858 goto out; 1859 1860 /* Initialize the inode */ 1861 ei = PROC_I(inode); 1862 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; 1863 1864 /* 1865 * grab the reference to the task. 1866 */ 1867 ei->pid = get_task_pid(task, PIDTYPE_PID); 1868 if (!ei->pid) 1869 goto out_iput; 1870 1871 inode->i_uid = 0; 1872 inode->i_gid = 0; 1873 inode->i_mode = p->mode; 1874 if (S_ISDIR(inode->i_mode)) 1875 inode->i_nlink = 2; 1876 if (S_ISLNK(inode->i_mode)) 1877 inode->i_size = 64; 1878 if (p->iop) 1879 inode->i_op = p->iop; 1880 if (p->fop) 1881 inode->i_fop = p->fop; 1882 ei->op = p->op; 1883 dentry->d_op = &proc_base_dentry_operations; 1884 d_add(dentry, inode); 1885 error = NULL; 1886 out: 1887 return error; 1888 out_iput: 1889 iput(inode); 1890 goto out; 1891 } 1892 1893 static struct dentry *proc_base_lookup(struct inode *dir, struct dentry *dentry) 1894 { 1895 struct dentry *error; 1896 struct task_struct *task = get_proc_task(dir); 1897 const struct pid_entry *p, *last; 1898 1899 error = ERR_PTR(-ENOENT); 1900 1901 if (!task) 1902 goto out_no_task; 1903 1904 /* Lookup the directory entry */ 1905 last = &proc_base_stuff[ARRAY_SIZE(proc_base_stuff) - 1]; 1906 for (p = proc_base_stuff; p <= last; p++) { 1907 if (p->len != dentry->d_name.len) 1908 continue; 1909 if (!memcmp(dentry->d_name.name, p->name, p->len)) 1910 break; 1911 } 1912 if (p > last) 1913 goto out; 1914 1915 error = proc_base_instantiate(dir, dentry, task, p); 1916 1917 out: 1918 put_task_struct(task); 1919 out_no_task: 1920 return error; 1921 } 1922 1923 static int proc_base_fill_cache(struct file *filp, void *dirent, 1924 filldir_t filldir, struct task_struct *task, const struct pid_entry *p) 1925 { 1926 return proc_fill_cache(filp, dirent, filldir, p->name, p->len, 1927 proc_base_instantiate, task, p); 1928 } 1929 1930 #ifdef CONFIG_TASK_IO_ACCOUNTING 1931 static int proc_pid_io_accounting(struct task_struct *task, char *buffer) 1932 { 1933 return sprintf(buffer, 1934 #ifdef CONFIG_TASK_XACCT 1935 "rchar: %llu\n" 1936 "wchar: %llu\n" 1937 "syscr: %llu\n" 1938 "syscw: %llu\n" 1939 #endif 1940 "read_bytes: %llu\n" 1941 "write_bytes: %llu\n" 1942 
"cancelled_write_bytes: %llu\n", 1943 #ifdef CONFIG_TASK_XACCT 1944 (unsigned long long)task->rchar, 1945 (unsigned long long)task->wchar, 1946 (unsigned long long)task->syscr, 1947 (unsigned long long)task->syscw, 1948 #endif 1949 (unsigned long long)task->ioac.read_bytes, 1950 (unsigned long long)task->ioac.write_bytes, 1951 (unsigned long long)task->ioac.cancelled_write_bytes); 1952 } 1953 #endif 1954 1955 /* 1956 * Thread groups 1957 */ 1958 static const struct file_operations proc_task_operations; 1959 static const struct inode_operations proc_task_inode_operations; 1960 1961 static const struct pid_entry tgid_base_stuff[] = { 1962 DIR("task", S_IRUGO|S_IXUGO, task), 1963 DIR("fd", S_IRUSR|S_IXUSR, fd), 1964 DIR("fdinfo", S_IRUSR|S_IXUSR, fdinfo), 1965 INF("environ", S_IRUSR, pid_environ), 1966 INF("auxv", S_IRUSR, pid_auxv), 1967 INF("status", S_IRUGO, pid_status), 1968 #ifdef CONFIG_SCHED_DEBUG 1969 REG("sched", S_IRUGO|S_IWUSR, pid_sched), 1970 #endif 1971 INF("cmdline", S_IRUGO, pid_cmdline), 1972 INF("stat", S_IRUGO, tgid_stat), 1973 INF("statm", S_IRUGO, pid_statm), 1974 REG("maps", S_IRUGO, maps), 1975 #ifdef CONFIG_NUMA 1976 REG("numa_maps", S_IRUGO, numa_maps), 1977 #endif 1978 REG("mem", S_IRUSR|S_IWUSR, mem), 1979 LNK("cwd", cwd), 1980 LNK("root", root), 1981 LNK("exe", exe), 1982 REG("mounts", S_IRUGO, mounts), 1983 REG("mountstats", S_IRUSR, mountstats), 1984 #ifdef CONFIG_MMU 1985 REG("clear_refs", S_IWUSR, clear_refs), 1986 REG("smaps", S_IRUGO, smaps), 1987 #endif 1988 #ifdef CONFIG_SECURITY 1989 DIR("attr", S_IRUGO|S_IXUGO, attr_dir), 1990 #endif 1991 #ifdef CONFIG_KALLSYMS 1992 INF("wchan", S_IRUGO, pid_wchan), 1993 #endif 1994 #ifdef CONFIG_SCHEDSTATS 1995 INF("schedstat", S_IRUGO, pid_schedstat), 1996 #endif 1997 #ifdef CONFIG_CPUSETS 1998 REG("cpuset", S_IRUGO, cpuset), 1999 #endif 2000 INF("oom_score", S_IRUGO, oom_score), 2001 REG("oom_adj", S_IRUGO|S_IWUSR, oom_adjust), 2002 #ifdef CONFIG_AUDITSYSCALL 2003 REG("loginuid", S_IWUSR|S_IRUGO, loginuid), 2004 #endif 2005 #ifdef CONFIG_FAULT_INJECTION 2006 REG("make-it-fail", S_IRUGO|S_IWUSR, fault_inject), 2007 #endif 2008 #ifdef CONFIG_TASK_IO_ACCOUNTING 2009 INF("io", S_IRUGO, pid_io_accounting), 2010 #endif 2011 }; 2012 2013 static int proc_tgid_base_readdir(struct file * filp, 2014 void * dirent, filldir_t filldir) 2015 { 2016 return proc_pident_readdir(filp,dirent,filldir, 2017 tgid_base_stuff,ARRAY_SIZE(tgid_base_stuff)); 2018 } 2019 2020 static const struct file_operations proc_tgid_base_operations = { 2021 .read = generic_read_dir, 2022 .readdir = proc_tgid_base_readdir, 2023 }; 2024 2025 static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd){ 2026 return proc_pident_lookup(dir, dentry, 2027 tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff)); 2028 } 2029 2030 static const struct inode_operations proc_tgid_base_inode_operations = { 2031 .lookup = proc_tgid_base_lookup, 2032 .getattr = pid_getattr, 2033 .setattr = proc_setattr, 2034 }; 2035 2036 /** 2037 * proc_flush_task - Remove dcache entries for @task from the /proc dcache. 2038 * 2039 * @task: task that should be flushed. 2040 * 2041 * Looks in the dcache for 2042 * /proc/@pid 2043 * /proc/@tgid/task/@pid 2044 * if either directory is present flushes it and all of it'ts children 2045 * from the dcache. 2046 * 2047 * It is safe and reasonable to cache /proc entries for a task until 2048 * that task exits. 
After that they just clog up the dcache with 2049 * useless entries, possibly causing useful dcache entries to be 2050 * flushed instead. This routine is proved to flush those useless 2051 * dcache entries at process exit time. 2052 * 2053 * NOTE: This routine is just an optimization so it does not guarantee 2054 * that no dcache entries will exist at process exit time it 2055 * just makes it very unlikely that any will persist. 2056 */ 2057 void proc_flush_task(struct task_struct *task) 2058 { 2059 struct dentry *dentry, *leader, *dir; 2060 char buf[PROC_NUMBUF]; 2061 struct qstr name; 2062 2063 name.name = buf; 2064 name.len = snprintf(buf, sizeof(buf), "%d", task->pid); 2065 dentry = d_hash_and_lookup(proc_mnt->mnt_root, &name); 2066 if (dentry) { 2067 shrink_dcache_parent(dentry); 2068 d_drop(dentry); 2069 dput(dentry); 2070 } 2071 2072 if (thread_group_leader(task)) 2073 goto out; 2074 2075 name.name = buf; 2076 name.len = snprintf(buf, sizeof(buf), "%d", task->tgid); 2077 leader = d_hash_and_lookup(proc_mnt->mnt_root, &name); 2078 if (!leader) 2079 goto out; 2080 2081 name.name = "task"; 2082 name.len = strlen(name.name); 2083 dir = d_hash_and_lookup(leader, &name); 2084 if (!dir) 2085 goto out_put_leader; 2086 2087 name.name = buf; 2088 name.len = snprintf(buf, sizeof(buf), "%d", task->pid); 2089 dentry = d_hash_and_lookup(dir, &name); 2090 if (dentry) { 2091 shrink_dcache_parent(dentry); 2092 d_drop(dentry); 2093 dput(dentry); 2094 } 2095 2096 dput(dir); 2097 out_put_leader: 2098 dput(leader); 2099 out: 2100 return; 2101 } 2102 2103 static struct dentry *proc_pid_instantiate(struct inode *dir, 2104 struct dentry * dentry, 2105 struct task_struct *task, const void *ptr) 2106 { 2107 struct dentry *error = ERR_PTR(-ENOENT); 2108 struct inode *inode; 2109 2110 inode = proc_pid_make_inode(dir->i_sb, task); 2111 if (!inode) 2112 goto out; 2113 2114 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO; 2115 inode->i_op = &proc_tgid_base_inode_operations; 2116 inode->i_fop = &proc_tgid_base_operations; 2117 inode->i_flags|=S_IMMUTABLE; 2118 inode->i_nlink = 5; 2119 #ifdef CONFIG_SECURITY 2120 inode->i_nlink += 1; 2121 #endif 2122 2123 dentry->d_op = &pid_dentry_operations; 2124 2125 d_add(dentry, inode); 2126 /* Close the race of the process dying before we return the dentry */ 2127 if (pid_revalidate(dentry, NULL)) 2128 error = NULL; 2129 out: 2130 return error; 2131 } 2132 2133 struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd) 2134 { 2135 struct dentry *result = ERR_PTR(-ENOENT); 2136 struct task_struct *task; 2137 unsigned tgid; 2138 2139 result = proc_base_lookup(dir, dentry); 2140 if (!IS_ERR(result) || PTR_ERR(result) != -ENOENT) 2141 goto out; 2142 2143 tgid = name_to_int(dentry); 2144 if (tgid == ~0U) 2145 goto out; 2146 2147 rcu_read_lock(); 2148 task = find_task_by_pid(tgid); 2149 if (task) 2150 get_task_struct(task); 2151 rcu_read_unlock(); 2152 if (!task) 2153 goto out; 2154 2155 result = proc_pid_instantiate(dir, dentry, task, NULL); 2156 put_task_struct(task); 2157 out: 2158 return result; 2159 } 2160 2161 /* 2162 * Find the first task with tgid >= tgid 2163 * 2164 */ 2165 static struct task_struct *next_tgid(unsigned int tgid) 2166 { 2167 struct task_struct *task; 2168 struct pid *pid; 2169 2170 rcu_read_lock(); 2171 retry: 2172 task = NULL; 2173 pid = find_ge_pid(tgid); 2174 if (pid) { 2175 tgid = pid->nr + 1; 2176 task = pid_task(pid, PIDTYPE_PID); 2177 /* What we to know is if the pid we have find is the 2178 * pid of a 
/*
 * Find the first task whose tgid is >= the requested tgid.
 */
static struct task_struct *next_tgid(unsigned int tgid)
{
	struct task_struct *task;
	struct pid *pid;

	rcu_read_lock();
retry:
	task = NULL;
	pid = find_ge_pid(tgid);
	if (pid) {
		tgid = pid->nr + 1;
		task = pid_task(pid, PIDTYPE_PID);
		/* What we want to know is whether the pid we have
		 * found is the pid of a thread_group_leader.  Testing
		 * whether the task is a thread_group_leader is the
		 * obvious thing to do, but there is a window when that
		 * fails, due to the pid transfer logic in de_thread().
		 *
		 * So we perform the straightforward test of seeing
		 * if the pid we have found is the pid of a thread
		 * group leader, and don't worry if the task we have
		 * found doesn't happen to be a thread group leader
		 * itself; for readdir that distinction doesn't matter.
		 */
		if (!task || !has_group_leader_pid(task))
			goto retry;
		get_task_struct(task);
	}
	rcu_read_unlock();
	return task;
}

#define TGID_OFFSET (FIRST_PROCESS_ENTRY + ARRAY_SIZE(proc_base_stuff))

static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
	struct task_struct *task, int tgid)
{
	char name[PROC_NUMBUF];
	int len = snprintf(name, sizeof(name), "%d", tgid);
	return proc_fill_cache(filp, dirent, filldir, name, len,
				proc_pid_instantiate, task, NULL);
}

/* for the /proc/ directory itself, after non-process stuff has been done */
int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
	unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY;
	struct task_struct *reaper = get_proc_task(filp->f_path.dentry->d_inode);
	struct task_struct *task;
	int tgid;

	if (!reaper)
		goto out_no_task;

	for (; nr < ARRAY_SIZE(proc_base_stuff); filp->f_pos++, nr++) {
		const struct pid_entry *p = &proc_base_stuff[nr];
		if (proc_base_fill_cache(filp, dirent, filldir, reaper, p) < 0)
			goto out;
	}

	tgid = filp->f_pos - TGID_OFFSET;
	for (task = next_tgid(tgid);
	     task;
	     put_task_struct(task), task = next_tgid(tgid + 1)) {
		tgid = task->pid;
		filp->f_pos = tgid + TGID_OFFSET;
		if (proc_pid_fill_cache(filp, dirent, filldir, task, tgid) < 0) {
			put_task_struct(task);
			goto out;
		}
	}
	filp->f_pos = PID_MAX_LIMIT + TGID_OFFSET;
out:
	put_task_struct(reaper);
out_no_task:
	return 0;
}
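/*
 * How the readdir cursor above is laid out: positions in
 * [FIRST_PROCESS_ENTRY, TGID_OFFSET) index the static proc_base_stuff
 * entries, and every position at or above TGID_OFFSET encodes the thread
 * group currently being emitted as "tgid + TGID_OFFSET".  For example, if
 * filldir runs out of user buffer while emitting tgid 300, f_pos has
 * already been advanced to 300 + TGID_OFFSET, so the next readdir call
 * recomputes tgid = 300 and retries that group via next_tgid(300)
 * (assuming it still exists); successfully emitted groups continue with
 * next_tgid(tgid + 1).
 */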
/*
 * Tasks
 */
static const struct pid_entry tid_base_stuff[] = {
	DIR("fd", S_IRUSR|S_IXUSR, fd),
	DIR("fdinfo", S_IRUSR|S_IXUSR, fdinfo),
	INF("environ", S_IRUSR, pid_environ),
	INF("auxv", S_IRUSR, pid_auxv),
	INF("status", S_IRUGO, pid_status),
#ifdef CONFIG_SCHED_DEBUG
	REG("sched", S_IRUGO|S_IWUSR, pid_sched),
#endif
	INF("cmdline", S_IRUGO, pid_cmdline),
	INF("stat", S_IRUGO, tid_stat),
	INF("statm", S_IRUGO, pid_statm),
	REG("maps", S_IRUGO, maps),
#ifdef CONFIG_NUMA
	REG("numa_maps", S_IRUGO, numa_maps),
#endif
	REG("mem", S_IRUSR|S_IWUSR, mem),
	LNK("cwd", cwd),
	LNK("root", root),
	LNK("exe", exe),
	REG("mounts", S_IRUGO, mounts),
#ifdef CONFIG_MMU
	REG("clear_refs", S_IWUSR, clear_refs),
	REG("smaps", S_IRUGO, smaps),
#endif
#ifdef CONFIG_SECURITY
	DIR("attr", S_IRUGO|S_IXUGO, attr_dir),
#endif
#ifdef CONFIG_KALLSYMS
	INF("wchan", S_IRUGO, pid_wchan),
#endif
#ifdef CONFIG_SCHEDSTATS
	INF("schedstat", S_IRUGO, pid_schedstat),
#endif
#ifdef CONFIG_CPUSETS
	REG("cpuset", S_IRUGO, cpuset),
#endif
	INF("oom_score", S_IRUGO, oom_score),
	REG("oom_adj", S_IRUGO|S_IWUSR, oom_adjust),
#ifdef CONFIG_AUDITSYSCALL
	REG("loginuid", S_IWUSR|S_IRUGO, loginuid),
#endif
#ifdef CONFIG_FAULT_INJECTION
	REG("make-it-fail", S_IRUGO|S_IWUSR, fault_inject),
#endif
};

static int proc_tid_base_readdir(struct file * filp,
			     void * dirent, filldir_t filldir)
{
	return proc_pident_readdir(filp, dirent, filldir,
				   tid_base_stuff, ARRAY_SIZE(tid_base_stuff));
}

static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
	return proc_pident_lookup(dir, dentry,
				  tid_base_stuff, ARRAY_SIZE(tid_base_stuff));
}

static const struct file_operations proc_tid_base_operations = {
	.read		= generic_read_dir,
	.readdir	= proc_tid_base_readdir,
};

static const struct inode_operations proc_tid_base_inode_operations = {
	.lookup		= proc_tid_base_lookup,
	.getattr	= pid_getattr,
	.setattr	= proc_setattr,
};

static struct dentry *proc_task_instantiate(struct inode *dir,
	struct dentry *dentry, struct task_struct *task, const void *ptr)
{
	struct dentry *error = ERR_PTR(-ENOENT);
	struct inode *inode;

	inode = proc_pid_make_inode(dir->i_sb, task);
	if (!inode)
		goto out;

	inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
	inode->i_op = &proc_tid_base_inode_operations;
	inode->i_fop = &proc_tid_base_operations;
	inode->i_flags |= S_IMMUTABLE;
	inode->i_nlink = 4;
#ifdef CONFIG_SECURITY
	inode->i_nlink += 1;
#endif

	dentry->d_op = &pid_dentry_operations;

	d_add(dentry, inode);
	/* Close the race of the process dying before we return the dentry */
	if (pid_revalidate(dentry, NULL))
		error = NULL;
out:
	return error;
}

static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
{
	struct dentry *result = ERR_PTR(-ENOENT);
	struct task_struct *task;
	struct task_struct *leader = get_proc_task(dir);
	unsigned tid;

	if (!leader)
		goto out_no_task;

	tid = name_to_int(dentry);
	if (tid == ~0U)
		goto out;

	rcu_read_lock();
	task = find_task_by_pid(tid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();
	if (!task)
		goto out;
	if (leader->tgid != task->tgid)
		goto out_drop_task;

	result = proc_task_instantiate(dir, dentry, task, NULL);
out_drop_task:
	put_task_struct(task);
out:
	put_task_struct(leader);
out_no_task:
	return result;
}
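/*
 * Note that proc_task_lookup() above only accepts tids belonging to the
 * thread group that owns the directory: the leader->tgid != task->tgid
 * check means that, for instance, opening "/proc/<pid>/task/<tid>" with a
 * tid from some other process fails with -ENOENT even though that tid
 * exists in the system.
 */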
/*
 * Find the first tid of a thread group to return to user space.
 *
 * Usually this is just the thread group leader, but if the user's
 * buffer was too small or there was a seek into the middle of the
 * directory we have more work to do.
 *
 * In the case of a short read we start with find_task_by_pid.
 *
 * In the case of a seek we start with the leader and walk nr
 * threads past it.
 */
static struct task_struct *first_tid(struct task_struct *leader,
		int tid, int nr)
{
	struct task_struct *pos;

	rcu_read_lock();
	/* Attempt to start with the pid of a thread */
	if (tid && (nr > 0)) {
		pos = find_task_by_pid(tid);
		if (pos && (pos->group_leader == leader))
			goto found;
	}

	/* If nr exceeds the number of threads there is nothing to do */
	pos = NULL;
	if (nr && nr >= get_nr_threads(leader))
		goto out;

	/* If we haven't found our starting place yet start
	 * with the leader and walk nr threads forward.
	 */
	for (pos = leader; nr > 0; --nr) {
		pos = next_thread(pos);
		if (pos == leader) {
			pos = NULL;
			goto out;
		}
	}
found:
	get_task_struct(pos);
out:
	rcu_read_unlock();
	return pos;
}

/*
 * Find the next thread in the thread list.
 * Return NULL if there is an error or no next thread.
 *
 * The reference to the input task_struct is released.
 */
static struct task_struct *next_tid(struct task_struct *start)
{
	struct task_struct *pos = NULL;

	rcu_read_lock();
	if (pid_alive(start)) {
		pos = next_thread(start);
		if (thread_group_leader(pos))
			pos = NULL;
		else
			get_task_struct(pos);
	}
	rcu_read_unlock();
	put_task_struct(start);
	return pos;
}

static int proc_task_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
	struct task_struct *task, int tid)
{
	char name[PROC_NUMBUF];
	int len = snprintf(name, sizeof(name), "%d", tid);
	return proc_fill_cache(filp, dirent, filldir, name, len,
				proc_task_instantiate, task, NULL);
}
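/*
 * first_tid() and next_tid() form a small iteration protocol: first_tid()
 * returns a referenced task (or NULL), and each next_tid() call drops the
 * reference on the task it was given and returns the next thread, again
 * referenced.  A caller therefore holds at most one thread reference at a
 * time, which is exactly how the readdir loop below uses them.
 */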
/* for the /proc/TGID/task/ directories */
static int proc_task_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct task_struct *leader = NULL;
	struct task_struct *task;
	int retval = -ENOENT;
	ino_t ino;
	int tid;
	unsigned long pos = filp->f_pos;	/* avoiding "long long" filp->f_pos */

	task = get_proc_task(inode);
	if (!task)
		goto out_no_task;
	rcu_read_lock();
	if (pid_alive(task)) {
		leader = task->group_leader;
		get_task_struct(leader);
	}
	rcu_read_unlock();
	put_task_struct(task);
	if (!leader)
		goto out_no_task;
	retval = 0;

	switch (pos) {
	case 0:
		ino = inode->i_ino;
		if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0)
			goto out;
		pos++;
		/* fall through */
	case 1:
		ino = parent_ino(dentry);
		if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0)
			goto out;
		pos++;
		/* fall through */
	}

	/* f_version caches the tid value that the last readdir call couldn't
	 * return.  lseek aka telldir automagically resets f_version to 0.
	 */
	tid = filp->f_version;
	filp->f_version = 0;
	for (task = first_tid(leader, tid, pos - 2);
	     task;
	     task = next_tid(task), pos++) {
		tid = task->pid;
		if (proc_task_fill_cache(filp, dirent, filldir, task, tid) < 0) {
			/* returning this tid failed, save it as the first
			 * tid for the next readdir call */
			filp->f_version = tid;
			put_task_struct(task);
			break;
		}
	}
out:
	filp->f_pos = pos;
	put_task_struct(leader);
out_no_task:
	return retval;
}

static int proc_task_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct task_struct *p = get_proc_task(inode);

	generic_fillattr(inode, stat);

	if (p) {
		rcu_read_lock();
		stat->nlink += get_nr_threads(p);
		rcu_read_unlock();
		put_task_struct(p);
	}

	return 0;
}

static const struct inode_operations proc_task_inode_operations = {
	.lookup		= proc_task_lookup,
	.getattr	= proc_task_getattr,
	.setattr	= proc_setattr,
};

static const struct file_operations proc_task_operations = {
	.read		= generic_read_dir,
	.readdir	= proc_task_readdir,
};
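/*
 * A minimal user-space sketch (illustrative only, not built as part of this
 * file) of the interface proc_task_readdir() serves: listing
 * /proc/<pid>/task yields ".", ".." and one numeric entry per thread in
 * the group.
 */
#if 0
#include <dirent.h>
#include <stdio.h>

int main(int argc, char **argv)
{
	char path[64];
	struct dirent *de;
	DIR *dir;

	snprintf(path, sizeof(path), "/proc/%s/task",
		 argc > 1 ? argv[1] : "self");
	dir = opendir(path);
	if (!dir) {
		perror(path);
		return 1;
	}
	while ((de = readdir(dir)) != NULL)
		printf("%s\n", de->d_name);	/* ".", "..", then one tid per thread */
	closedir(dir);
	return 0;
}
#endif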