/*
 * linux/fs/proc/base.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * proc base directory handling functions
 *
 * 1999, Al Viro. Rewritten. Now it covers the whole per-process part.
 * Instead of using magical inumbers to determine the kind of object
 * we allocate and fill in-core inodes upon lookup. They don't even
 * go into icache. We cache the reference to task_struct upon lookup too.
 * Eventually it should become a filesystem in its own right. We don't use the
 * rest of procfs anymore.
 *
 *
 * Changelog:
 * 17-Jan-2005
 * Allan Bezerra
 * Bruna Moreira <bruna.moreira@indt.org.br>
 * Edjard Mota <edjard.mota@indt.org.br>
 * Ilias Biris <ilias.biris@indt.org.br>
 * Mauricio Lin <mauricio.lin@indt.org.br>
 *
 * Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT
 *
 * A new process-specific entry (smaps) included in /proc. It shows the
 * size of rss for each memory area. The maps entry lacks information
 * about physical memory size (rss) for each mapped file, i.e.,
 * rss information for executables and library files.
 * This additional information is useful for any tools that need to know
 * about physical memory consumption for a process-specific library.
 *
 * Changelog:
 * 21-Feb-2005
 * Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT
 * Pud inclusion in the page table walking.
 *
 * ChangeLog:
 * 10-Mar-2005
 * 10LE Instituto Nokia de Tecnologia - INdT:
 * A better way to walk through the page table as suggested by Hugh Dickins.
 *
 * Simo Piiroinen <simo.piiroinen@nokia.com>:
 * Smaps information related to shared, private, clean and dirty pages.
 *
 * Paul Mundt <paul.mundt@nokia.com>:
 * Overall revision of smaps.
 */

#include <asm/uaccess.h>

#include <linux/errno.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/string.h>
#include <linux/seq_file.h>
#include <linux/namei.h>
#include <linux/mnt_namespace.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/rcupdate.h>
#include <linux/kallsyms.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/seccomp.h>
#include <linux/cpuset.h>
#include <linux/audit.h>
#include <linux/poll.h>
#include <linux/nsproxy.h>
#include <linux/oom.h>
#include "internal.h"

/* NOTE:
 *	Implementing inode permission operations in /proc is almost
 *	certainly an error.  Permission checks need to happen during
 *	each system call, not at open time.  The reason is that most of
 *	what we wish to check for permissions in /proc varies at runtime.
 *
 *	The classic example of a problem is opening file descriptors
 *	in /proc for a task before it execs a suid executable.
 */


/* Worst case buffer size needed for holding an integer.
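 * A signed 32-bit value needs at most a sign character plus ten decimal
 * digits; together with a trailing '\n' and the terminating NUL that is
 * where the value 13 below comes from.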
*/ 90 #define PROC_NUMBUF 13 91 92 struct pid_entry { 93 int len; 94 char *name; 95 mode_t mode; 96 const struct inode_operations *iop; 97 const struct file_operations *fop; 98 union proc_op op; 99 }; 100 101 #define NOD(NAME, MODE, IOP, FOP, OP) { \ 102 .len = sizeof(NAME) - 1, \ 103 .name = (NAME), \ 104 .mode = MODE, \ 105 .iop = IOP, \ 106 .fop = FOP, \ 107 .op = OP, \ 108 } 109 110 #define DIR(NAME, MODE, OTYPE) \ 111 NOD(NAME, (S_IFDIR|(MODE)), \ 112 &proc_##OTYPE##_inode_operations, &proc_##OTYPE##_operations, \ 113 {} ) 114 #define LNK(NAME, OTYPE) \ 115 NOD(NAME, (S_IFLNK|S_IRWXUGO), \ 116 &proc_pid_link_inode_operations, NULL, \ 117 { .proc_get_link = &proc_##OTYPE##_link } ) 118 #define REG(NAME, MODE, OTYPE) \ 119 NOD(NAME, (S_IFREG|(MODE)), NULL, \ 120 &proc_##OTYPE##_operations, {}) 121 #define INF(NAME, MODE, OTYPE) \ 122 NOD(NAME, (S_IFREG|(MODE)), \ 123 NULL, &proc_info_file_operations, \ 124 { .proc_read = &proc_##OTYPE } ) 125 126 static struct fs_struct *get_fs_struct(struct task_struct *task) 127 { 128 struct fs_struct *fs; 129 task_lock(task); 130 fs = task->fs; 131 if(fs) 132 atomic_inc(&fs->count); 133 task_unlock(task); 134 return fs; 135 } 136 137 static int get_nr_threads(struct task_struct *tsk) 138 { 139 /* Must be called with the rcu_read_lock held */ 140 unsigned long flags; 141 int count = 0; 142 143 if (lock_task_sighand(tsk, &flags)) { 144 count = atomic_read(&tsk->signal->count); 145 unlock_task_sighand(tsk, &flags); 146 } 147 return count; 148 } 149 150 static int proc_cwd_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt) 151 { 152 struct task_struct *task = get_proc_task(inode); 153 struct fs_struct *fs = NULL; 154 int result = -ENOENT; 155 156 if (task) { 157 fs = get_fs_struct(task); 158 put_task_struct(task); 159 } 160 if (fs) { 161 read_lock(&fs->lock); 162 *mnt = mntget(fs->pwdmnt); 163 *dentry = dget(fs->pwd); 164 read_unlock(&fs->lock); 165 result = 0; 166 put_fs_struct(fs); 167 } 168 return result; 169 } 170 171 static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt) 172 { 173 struct task_struct *task = get_proc_task(inode); 174 struct fs_struct *fs = NULL; 175 int result = -ENOENT; 176 177 if (task) { 178 fs = get_fs_struct(task); 179 put_task_struct(task); 180 } 181 if (fs) { 182 read_lock(&fs->lock); 183 *mnt = mntget(fs->rootmnt); 184 *dentry = dget(fs->root); 185 read_unlock(&fs->lock); 186 result = 0; 187 put_fs_struct(fs); 188 } 189 return result; 190 } 191 192 #define MAY_PTRACE(task) \ 193 (task == current || \ 194 (task->parent == current && \ 195 (task->ptrace & PT_PTRACED) && \ 196 (task->state == TASK_STOPPED || task->state == TASK_TRACED) && \ 197 security_ptrace(current,task) == 0)) 198 199 static int proc_pid_environ(struct task_struct *task, char * buffer) 200 { 201 int res = 0; 202 struct mm_struct *mm = get_task_mm(task); 203 if (mm) { 204 unsigned int len = mm->env_end - mm->env_start; 205 if (len > PAGE_SIZE) 206 len = PAGE_SIZE; 207 res = access_process_vm(task, mm->env_start, buffer, len, 0); 208 if (!ptrace_may_attach(task)) 209 res = -ESRCH; 210 mmput(mm); 211 } 212 return res; 213 } 214 215 static int proc_pid_cmdline(struct task_struct *task, char * buffer) 216 { 217 int res = 0; 218 unsigned int len; 219 struct mm_struct *mm = get_task_mm(task); 220 if (!mm) 221 goto out; 222 if (!mm->arg_end) 223 goto out_mm; /* Shh! 
No looking before we're done */ 224 225 len = mm->arg_end - mm->arg_start; 226 227 if (len > PAGE_SIZE) 228 len = PAGE_SIZE; 229 230 res = access_process_vm(task, mm->arg_start, buffer, len, 0); 231 232 // If the nul at the end of args has been overwritten, then 233 // assume application is using setproctitle(3). 234 if (res > 0 && buffer[res-1] != '\0' && len < PAGE_SIZE) { 235 len = strnlen(buffer, res); 236 if (len < res) { 237 res = len; 238 } else { 239 len = mm->env_end - mm->env_start; 240 if (len > PAGE_SIZE - res) 241 len = PAGE_SIZE - res; 242 res += access_process_vm(task, mm->env_start, buffer+res, len, 0); 243 res = strnlen(buffer, res); 244 } 245 } 246 out_mm: 247 mmput(mm); 248 out: 249 return res; 250 } 251 252 static int proc_pid_auxv(struct task_struct *task, char *buffer) 253 { 254 int res = 0; 255 struct mm_struct *mm = get_task_mm(task); 256 if (mm) { 257 unsigned int nwords = 0; 258 do 259 nwords += 2; 260 while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */ 261 res = nwords * sizeof(mm->saved_auxv[0]); 262 if (res > PAGE_SIZE) 263 res = PAGE_SIZE; 264 memcpy(buffer, mm->saved_auxv, res); 265 mmput(mm); 266 } 267 return res; 268 } 269 270 271 #ifdef CONFIG_KALLSYMS 272 /* 273 * Provides a wchan file via kallsyms in a proper one-value-per-file format. 274 * Returns the resolved symbol. If that fails, simply return the address. 275 */ 276 static int proc_pid_wchan(struct task_struct *task, char *buffer) 277 { 278 char *modname; 279 const char *sym_name; 280 unsigned long wchan, size, offset; 281 char namebuf[KSYM_NAME_LEN+1]; 282 283 wchan = get_wchan(task); 284 285 sym_name = kallsyms_lookup(wchan, &size, &offset, &modname, namebuf); 286 if (sym_name) 287 return sprintf(buffer, "%s", sym_name); 288 return sprintf(buffer, "%lu", wchan); 289 } 290 #endif /* CONFIG_KALLSYMS */ 291 292 #ifdef CONFIG_SCHEDSTATS 293 /* 294 * Provides /proc/PID/schedstat 295 */ 296 static int proc_pid_schedstat(struct task_struct *task, char *buffer) 297 { 298 return sprintf(buffer, "%lu %lu %lu\n", 299 task->sched_info.cpu_time, 300 task->sched_info.run_delay, 301 task->sched_info.pcnt); 302 } 303 #endif 304 305 /* The badness from the OOM killer */ 306 unsigned long badness(struct task_struct *p, unsigned long uptime); 307 static int proc_oom_score(struct task_struct *task, char *buffer) 308 { 309 unsigned long points; 310 struct timespec uptime; 311 312 do_posix_clock_monotonic_gettime(&uptime); 313 points = badness(task, uptime.tv_sec); 314 return sprintf(buffer, "%lu\n", points); 315 } 316 317 /************************************************************************/ 318 /* Here the fs part begins */ 319 /************************************************************************/ 320 321 /* permission checks */ 322 static int proc_fd_access_allowed(struct inode *inode) 323 { 324 struct task_struct *task; 325 int allowed = 0; 326 /* Allow access to a task's file descriptors if it is us or we 327 * may use ptrace attach to the process and find out that 328 * information. 
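 * (ptrace_may_attach() performs essentially the same permission checks as
 *  an actual PTRACE_ATTACH - uid/gid match or CAP_SYS_PTRACE, plus a
 *  dumpability check - without actually attaching, so the rules for
 *  /proc/<pid>/fd match the rules for ptrace.)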
329 */ 330 task = get_proc_task(inode); 331 if (task) { 332 allowed = ptrace_may_attach(task); 333 put_task_struct(task); 334 } 335 return allowed; 336 } 337 338 static int proc_setattr(struct dentry *dentry, struct iattr *attr) 339 { 340 int error; 341 struct inode *inode = dentry->d_inode; 342 343 if (attr->ia_valid & ATTR_MODE) 344 return -EPERM; 345 346 error = inode_change_ok(inode, attr); 347 if (!error) { 348 error = security_inode_setattr(dentry, attr); 349 if (!error) 350 error = inode_setattr(inode, attr); 351 } 352 return error; 353 } 354 355 static const struct inode_operations proc_def_inode_operations = { 356 .setattr = proc_setattr, 357 }; 358 359 extern struct seq_operations mounts_op; 360 struct proc_mounts { 361 struct seq_file m; 362 int event; 363 }; 364 365 static int mounts_open(struct inode *inode, struct file *file) 366 { 367 struct task_struct *task = get_proc_task(inode); 368 struct mnt_namespace *ns = NULL; 369 struct proc_mounts *p; 370 int ret = -EINVAL; 371 372 if (task) { 373 task_lock(task); 374 if (task->nsproxy) { 375 ns = task->nsproxy->mnt_ns; 376 if (ns) 377 get_mnt_ns(ns); 378 } 379 task_unlock(task); 380 put_task_struct(task); 381 } 382 383 if (ns) { 384 ret = -ENOMEM; 385 p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL); 386 if (p) { 387 file->private_data = &p->m; 388 ret = seq_open(file, &mounts_op); 389 if (!ret) { 390 p->m.private = ns; 391 p->event = ns->event; 392 return 0; 393 } 394 kfree(p); 395 } 396 put_mnt_ns(ns); 397 } 398 return ret; 399 } 400 401 static int mounts_release(struct inode *inode, struct file *file) 402 { 403 struct seq_file *m = file->private_data; 404 struct mnt_namespace *ns = m->private; 405 put_mnt_ns(ns); 406 return seq_release(inode, file); 407 } 408 409 static unsigned mounts_poll(struct file *file, poll_table *wait) 410 { 411 struct proc_mounts *p = file->private_data; 412 struct mnt_namespace *ns = p->m.private; 413 unsigned res = 0; 414 415 poll_wait(file, &ns->poll, wait); 416 417 spin_lock(&vfsmount_lock); 418 if (p->event != ns->event) { 419 p->event = ns->event; 420 res = POLLERR; 421 } 422 spin_unlock(&vfsmount_lock); 423 424 return res; 425 } 426 427 static const struct file_operations proc_mounts_operations = { 428 .open = mounts_open, 429 .read = seq_read, 430 .llseek = seq_lseek, 431 .release = mounts_release, 432 .poll = mounts_poll, 433 }; 434 435 extern struct seq_operations mountstats_op; 436 static int mountstats_open(struct inode *inode, struct file *file) 437 { 438 int ret = seq_open(file, &mountstats_op); 439 440 if (!ret) { 441 struct seq_file *m = file->private_data; 442 struct mnt_namespace *mnt_ns = NULL; 443 struct task_struct *task = get_proc_task(inode); 444 445 if (task) { 446 task_lock(task); 447 if (task->nsproxy) 448 mnt_ns = task->nsproxy->mnt_ns; 449 if (mnt_ns) 450 get_mnt_ns(mnt_ns); 451 task_unlock(task); 452 put_task_struct(task); 453 } 454 455 if (mnt_ns) 456 m->private = mnt_ns; 457 else { 458 seq_release(inode, file); 459 ret = -EINVAL; 460 } 461 } 462 return ret; 463 } 464 465 static const struct file_operations proc_mountstats_operations = { 466 .open = mountstats_open, 467 .read = seq_read, 468 .llseek = seq_lseek, 469 .release = mounts_release, 470 }; 471 472 #define PROC_BLOCK_SIZE (3*1024) /* 4K page size but our output routines use some slack for overruns */ 473 474 static ssize_t proc_info_read(struct file * file, char __user * buf, 475 size_t count, loff_t *ppos) 476 { 477 struct inode * inode = file->f_path.dentry->d_inode; 478 unsigned long page; 479 ssize_t 
length; 480 struct task_struct *task = get_proc_task(inode); 481 482 length = -ESRCH; 483 if (!task) 484 goto out_no_task; 485 486 if (count > PROC_BLOCK_SIZE) 487 count = PROC_BLOCK_SIZE; 488 489 length = -ENOMEM; 490 if (!(page = __get_free_page(GFP_KERNEL))) 491 goto out; 492 493 length = PROC_I(inode)->op.proc_read(task, (char*)page); 494 495 if (length >= 0) 496 length = simple_read_from_buffer(buf, count, ppos, (char *)page, length); 497 free_page(page); 498 out: 499 put_task_struct(task); 500 out_no_task: 501 return length; 502 } 503 504 static const struct file_operations proc_info_file_operations = { 505 .read = proc_info_read, 506 }; 507 508 static int mem_open(struct inode* inode, struct file* file) 509 { 510 file->private_data = (void*)((long)current->self_exec_id); 511 return 0; 512 } 513 514 static ssize_t mem_read(struct file * file, char __user * buf, 515 size_t count, loff_t *ppos) 516 { 517 struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode); 518 char *page; 519 unsigned long src = *ppos; 520 int ret = -ESRCH; 521 struct mm_struct *mm; 522 523 if (!task) 524 goto out_no_task; 525 526 if (!MAY_PTRACE(task) || !ptrace_may_attach(task)) 527 goto out; 528 529 ret = -ENOMEM; 530 page = (char *)__get_free_page(GFP_USER); 531 if (!page) 532 goto out; 533 534 ret = 0; 535 536 mm = get_task_mm(task); 537 if (!mm) 538 goto out_free; 539 540 ret = -EIO; 541 542 if (file->private_data != (void*)((long)current->self_exec_id)) 543 goto out_put; 544 545 ret = 0; 546 547 while (count > 0) { 548 int this_len, retval; 549 550 this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count; 551 retval = access_process_vm(task, src, page, this_len, 0); 552 if (!retval || !MAY_PTRACE(task) || !ptrace_may_attach(task)) { 553 if (!ret) 554 ret = -EIO; 555 break; 556 } 557 558 if (copy_to_user(buf, page, retval)) { 559 ret = -EFAULT; 560 break; 561 } 562 563 ret += retval; 564 src += retval; 565 buf += retval; 566 count -= retval; 567 } 568 *ppos = src; 569 570 out_put: 571 mmput(mm); 572 out_free: 573 free_page((unsigned long) page); 574 out: 575 put_task_struct(task); 576 out_no_task: 577 return ret; 578 } 579 580 #define mem_write NULL 581 582 #ifndef mem_write 583 /* This is a security hazard */ 584 static ssize_t mem_write(struct file * file, const char __user *buf, 585 size_t count, loff_t *ppos) 586 { 587 int copied; 588 char *page; 589 struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode); 590 unsigned long dst = *ppos; 591 592 copied = -ESRCH; 593 if (!task) 594 goto out_no_task; 595 596 if (!MAY_PTRACE(task) || !ptrace_may_attach(task)) 597 goto out; 598 599 copied = -ENOMEM; 600 page = (char *)__get_free_page(GFP_USER); 601 if (!page) 602 goto out; 603 604 copied = 0; 605 while (count > 0) { 606 int this_len, retval; 607 608 this_len = (count > PAGE_SIZE) ? 
PAGE_SIZE : count; 609 if (copy_from_user(page, buf, this_len)) { 610 copied = -EFAULT; 611 break; 612 } 613 retval = access_process_vm(task, dst, page, this_len, 1); 614 if (!retval) { 615 if (!copied) 616 copied = -EIO; 617 break; 618 } 619 copied += retval; 620 buf += retval; 621 dst += retval; 622 count -= retval; 623 } 624 *ppos = dst; 625 free_page((unsigned long) page); 626 out: 627 put_task_struct(task); 628 out_no_task: 629 return copied; 630 } 631 #endif 632 633 static loff_t mem_lseek(struct file * file, loff_t offset, int orig) 634 { 635 switch (orig) { 636 case 0: 637 file->f_pos = offset; 638 break; 639 case 1: 640 file->f_pos += offset; 641 break; 642 default: 643 return -EINVAL; 644 } 645 force_successful_syscall_return(); 646 return file->f_pos; 647 } 648 649 static const struct file_operations proc_mem_operations = { 650 .llseek = mem_lseek, 651 .read = mem_read, 652 .write = mem_write, 653 .open = mem_open, 654 }; 655 656 static ssize_t oom_adjust_read(struct file *file, char __user *buf, 657 size_t count, loff_t *ppos) 658 { 659 struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode); 660 char buffer[PROC_NUMBUF]; 661 size_t len; 662 int oom_adjust; 663 loff_t __ppos = *ppos; 664 665 if (!task) 666 return -ESRCH; 667 oom_adjust = task->oomkilladj; 668 put_task_struct(task); 669 670 len = snprintf(buffer, sizeof(buffer), "%i\n", oom_adjust); 671 if (__ppos >= len) 672 return 0; 673 if (count > len-__ppos) 674 count = len-__ppos; 675 if (copy_to_user(buf, buffer + __ppos, count)) 676 return -EFAULT; 677 *ppos = __ppos + count; 678 return count; 679 } 680 681 static ssize_t oom_adjust_write(struct file *file, const char __user *buf, 682 size_t count, loff_t *ppos) 683 { 684 struct task_struct *task; 685 char buffer[PROC_NUMBUF], *end; 686 int oom_adjust; 687 688 memset(buffer, 0, sizeof(buffer)); 689 if (count > sizeof(buffer) - 1) 690 count = sizeof(buffer) - 1; 691 if (copy_from_user(buffer, buf, count)) 692 return -EFAULT; 693 oom_adjust = simple_strtol(buffer, &end, 0); 694 if ((oom_adjust < OOM_ADJUST_MIN || oom_adjust > OOM_ADJUST_MAX) && 695 oom_adjust != OOM_DISABLE) 696 return -EINVAL; 697 if (*end == '\n') 698 end++; 699 task = get_proc_task(file->f_path.dentry->d_inode); 700 if (!task) 701 return -ESRCH; 702 if (oom_adjust < task->oomkilladj && !capable(CAP_SYS_RESOURCE)) { 703 put_task_struct(task); 704 return -EACCES; 705 } 706 task->oomkilladj = oom_adjust; 707 put_task_struct(task); 708 if (end - buffer == 0) 709 return -EIO; 710 return end - buffer; 711 } 712 713 static const struct file_operations proc_oom_adjust_operations = { 714 .read = oom_adjust_read, 715 .write = oom_adjust_write, 716 }; 717 718 static ssize_t clear_refs_write(struct file *file, const char __user *buf, 719 size_t count, loff_t *ppos) 720 { 721 struct task_struct *task; 722 char buffer[PROC_NUMBUF], *end; 723 struct mm_struct *mm; 724 725 memset(buffer, 0, sizeof(buffer)); 726 if (count > sizeof(buffer) - 1) 727 count = sizeof(buffer) - 1; 728 if (copy_from_user(buffer, buf, count)) 729 return -EFAULT; 730 if (!simple_strtol(buffer, &end, 0)) 731 return -EINVAL; 732 if (*end == '\n') 733 end++; 734 task = get_proc_task(file->f_path.dentry->d_inode); 735 if (!task) 736 return -ESRCH; 737 mm = get_task_mm(task); 738 if (mm) { 739 clear_refs_smap(mm); 740 mmput(mm); 741 } 742 put_task_struct(task); 743 if (end - buffer == 0) 744 return -EIO; 745 return end - buffer; 746 } 747 748 static struct file_operations proc_clear_refs_operations = { 749 .write = clear_refs_write, 
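        /* Write-only.  A typical (illustrative) use is
         *      echo 1 > /proc/<pid>/clear_refs
         * followed by re-reading /proc/<pid>/smaps, to estimate which pages
         * the task has touched since the reset. */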
750 }; 751 752 #ifdef CONFIG_AUDITSYSCALL 753 #define TMPBUFLEN 21 754 static ssize_t proc_loginuid_read(struct file * file, char __user * buf, 755 size_t count, loff_t *ppos) 756 { 757 struct inode * inode = file->f_path.dentry->d_inode; 758 struct task_struct *task = get_proc_task(inode); 759 ssize_t length; 760 char tmpbuf[TMPBUFLEN]; 761 762 if (!task) 763 return -ESRCH; 764 length = scnprintf(tmpbuf, TMPBUFLEN, "%u", 765 audit_get_loginuid(task->audit_context)); 766 put_task_struct(task); 767 return simple_read_from_buffer(buf, count, ppos, tmpbuf, length); 768 } 769 770 static ssize_t proc_loginuid_write(struct file * file, const char __user * buf, 771 size_t count, loff_t *ppos) 772 { 773 struct inode * inode = file->f_path.dentry->d_inode; 774 char *page, *tmp; 775 ssize_t length; 776 uid_t loginuid; 777 778 if (!capable(CAP_AUDIT_CONTROL)) 779 return -EPERM; 780 781 if (current != pid_task(proc_pid(inode), PIDTYPE_PID)) 782 return -EPERM; 783 784 if (count >= PAGE_SIZE) 785 count = PAGE_SIZE - 1; 786 787 if (*ppos != 0) { 788 /* No partial writes. */ 789 return -EINVAL; 790 } 791 page = (char*)__get_free_page(GFP_USER); 792 if (!page) 793 return -ENOMEM; 794 length = -EFAULT; 795 if (copy_from_user(page, buf, count)) 796 goto out_free_page; 797 798 page[count] = '\0'; 799 loginuid = simple_strtoul(page, &tmp, 10); 800 if (tmp == page) { 801 length = -EINVAL; 802 goto out_free_page; 803 804 } 805 length = audit_set_loginuid(current, loginuid); 806 if (likely(length == 0)) 807 length = count; 808 809 out_free_page: 810 free_page((unsigned long) page); 811 return length; 812 } 813 814 static const struct file_operations proc_loginuid_operations = { 815 .read = proc_loginuid_read, 816 .write = proc_loginuid_write, 817 }; 818 #endif 819 820 #ifdef CONFIG_SECCOMP 821 static ssize_t seccomp_read(struct file *file, char __user *buf, 822 size_t count, loff_t *ppos) 823 { 824 struct task_struct *tsk = get_proc_task(file->f_dentry->d_inode); 825 char __buf[20]; 826 loff_t __ppos = *ppos; 827 size_t len; 828 829 if (!tsk) 830 return -ESRCH; 831 /* no need to print the trailing zero, so use only len */ 832 len = sprintf(__buf, "%u\n", tsk->seccomp.mode); 833 put_task_struct(tsk); 834 if (__ppos >= len) 835 return 0; 836 if (count > len - __ppos) 837 count = len - __ppos; 838 if (copy_to_user(buf, __buf + __ppos, count)) 839 return -EFAULT; 840 *ppos = __ppos + count; 841 return count; 842 } 843 844 static ssize_t seccomp_write(struct file *file, const char __user *buf, 845 size_t count, loff_t *ppos) 846 { 847 struct task_struct *tsk = get_proc_task(file->f_dentry->d_inode); 848 char __buf[20], *end; 849 unsigned int seccomp_mode; 850 ssize_t result; 851 852 result = -ESRCH; 853 if (!tsk) 854 goto out_no_task; 855 856 /* can set it only once to be even more secure */ 857 result = -EPERM; 858 if (unlikely(tsk->seccomp.mode)) 859 goto out; 860 861 result = -EFAULT; 862 memset(__buf, 0, sizeof(__buf)); 863 count = min(count, sizeof(__buf) - 1); 864 if (copy_from_user(__buf, buf, count)) 865 goto out; 866 867 seccomp_mode = simple_strtoul(__buf, &end, 0); 868 if (*end == '\n') 869 end++; 870 result = -EINVAL; 871 if (seccomp_mode && seccomp_mode <= NR_SECCOMP_MODES) { 872 tsk->seccomp.mode = seccomp_mode; 873 set_tsk_thread_flag(tsk, TIF_SECCOMP); 874 } else 875 goto out; 876 result = -EIO; 877 if (unlikely(!(end - __buf))) 878 goto out; 879 result = end - __buf; 880 out: 881 put_task_struct(tsk); 882 out_no_task: 883 return result; 884 } 885 886 static const struct file_operations 
proc_seccomp_operations = { 887 .read = seccomp_read, 888 .write = seccomp_write, 889 }; 890 #endif /* CONFIG_SECCOMP */ 891 892 #ifdef CONFIG_FAULT_INJECTION 893 static ssize_t proc_fault_inject_read(struct file * file, char __user * buf, 894 size_t count, loff_t *ppos) 895 { 896 struct task_struct *task = get_proc_task(file->f_dentry->d_inode); 897 char buffer[PROC_NUMBUF]; 898 size_t len; 899 int make_it_fail; 900 loff_t __ppos = *ppos; 901 902 if (!task) 903 return -ESRCH; 904 make_it_fail = task->make_it_fail; 905 put_task_struct(task); 906 907 len = snprintf(buffer, sizeof(buffer), "%i\n", make_it_fail); 908 if (__ppos >= len) 909 return 0; 910 if (count > len-__ppos) 911 count = len-__ppos; 912 if (copy_to_user(buf, buffer + __ppos, count)) 913 return -EFAULT; 914 *ppos = __ppos + count; 915 return count; 916 } 917 918 static ssize_t proc_fault_inject_write(struct file * file, 919 const char __user * buf, size_t count, loff_t *ppos) 920 { 921 struct task_struct *task; 922 char buffer[PROC_NUMBUF], *end; 923 int make_it_fail; 924 925 if (!capable(CAP_SYS_RESOURCE)) 926 return -EPERM; 927 memset(buffer, 0, sizeof(buffer)); 928 if (count > sizeof(buffer) - 1) 929 count = sizeof(buffer) - 1; 930 if (copy_from_user(buffer, buf, count)) 931 return -EFAULT; 932 make_it_fail = simple_strtol(buffer, &end, 0); 933 if (*end == '\n') 934 end++; 935 task = get_proc_task(file->f_dentry->d_inode); 936 if (!task) 937 return -ESRCH; 938 task->make_it_fail = make_it_fail; 939 put_task_struct(task); 940 if (end - buffer == 0) 941 return -EIO; 942 return end - buffer; 943 } 944 945 static const struct file_operations proc_fault_inject_operations = { 946 .read = proc_fault_inject_read, 947 .write = proc_fault_inject_write, 948 }; 949 #endif 950 951 static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd) 952 { 953 struct inode *inode = dentry->d_inode; 954 int error = -EACCES; 955 956 /* We don't need a base pointer in the /proc filesystem */ 957 path_release(nd); 958 959 /* Are we allowed to snoop on the tasks file descriptors? */ 960 if (!proc_fd_access_allowed(inode)) 961 goto out; 962 963 error = PROC_I(inode)->op.proc_get_link(inode, &nd->dentry, &nd->mnt); 964 nd->last_type = LAST_BIND; 965 out: 966 return ERR_PTR(error); 967 } 968 969 static int do_proc_readlink(struct dentry *dentry, struct vfsmount *mnt, 970 char __user *buffer, int buflen) 971 { 972 struct inode * inode; 973 char *tmp = (char*)__get_free_page(GFP_KERNEL), *path; 974 int len; 975 976 if (!tmp) 977 return -ENOMEM; 978 979 inode = dentry->d_inode; 980 path = d_path(dentry, mnt, tmp, PAGE_SIZE); 981 len = PTR_ERR(path); 982 if (IS_ERR(path)) 983 goto out; 984 len = tmp + PAGE_SIZE - 1 - path; 985 986 if (len > buflen) 987 len = buflen; 988 if (copy_to_user(buffer, path, len)) 989 len = -EFAULT; 990 out: 991 free_page((unsigned long)tmp); 992 return len; 993 } 994 995 static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int buflen) 996 { 997 int error = -EACCES; 998 struct inode *inode = dentry->d_inode; 999 struct dentry *de; 1000 struct vfsmount *mnt = NULL; 1001 1002 /* Are we allowed to snoop on the tasks file descriptors? 
*/ 1003 if (!proc_fd_access_allowed(inode)) 1004 goto out; 1005 1006 error = PROC_I(inode)->op.proc_get_link(inode, &de, &mnt); 1007 if (error) 1008 goto out; 1009 1010 error = do_proc_readlink(de, mnt, buffer, buflen); 1011 dput(de); 1012 mntput(mnt); 1013 out: 1014 return error; 1015 } 1016 1017 static const struct inode_operations proc_pid_link_inode_operations = { 1018 .readlink = proc_pid_readlink, 1019 .follow_link = proc_pid_follow_link, 1020 .setattr = proc_setattr, 1021 }; 1022 1023 1024 /* building an inode */ 1025 1026 static int task_dumpable(struct task_struct *task) 1027 { 1028 int dumpable = 0; 1029 struct mm_struct *mm; 1030 1031 task_lock(task); 1032 mm = task->mm; 1033 if (mm) 1034 dumpable = mm->dumpable; 1035 task_unlock(task); 1036 if(dumpable == 1) 1037 return 1; 1038 return 0; 1039 } 1040 1041 1042 static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *task) 1043 { 1044 struct inode * inode; 1045 struct proc_inode *ei; 1046 1047 /* We need a new inode */ 1048 1049 inode = new_inode(sb); 1050 if (!inode) 1051 goto out; 1052 1053 /* Common stuff */ 1054 ei = PROC_I(inode); 1055 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; 1056 inode->i_op = &proc_def_inode_operations; 1057 1058 /* 1059 * grab the reference to task. 1060 */ 1061 ei->pid = get_task_pid(task, PIDTYPE_PID); 1062 if (!ei->pid) 1063 goto out_unlock; 1064 1065 inode->i_uid = 0; 1066 inode->i_gid = 0; 1067 if (task_dumpable(task)) { 1068 inode->i_uid = task->euid; 1069 inode->i_gid = task->egid; 1070 } 1071 security_task_to_inode(task, inode); 1072 1073 out: 1074 return inode; 1075 1076 out_unlock: 1077 iput(inode); 1078 return NULL; 1079 } 1080 1081 static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) 1082 { 1083 struct inode *inode = dentry->d_inode; 1084 struct task_struct *task; 1085 generic_fillattr(inode, stat); 1086 1087 rcu_read_lock(); 1088 stat->uid = 0; 1089 stat->gid = 0; 1090 task = pid_task(proc_pid(inode), PIDTYPE_PID); 1091 if (task) { 1092 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) || 1093 task_dumpable(task)) { 1094 stat->uid = task->euid; 1095 stat->gid = task->egid; 1096 } 1097 } 1098 rcu_read_unlock(); 1099 return 0; 1100 } 1101 1102 /* dentry stuff */ 1103 1104 /* 1105 * Exceptional case: normally we are not allowed to unhash a busy 1106 * directory. In this case, however, we can do it - no aliasing problems 1107 * due to the way we treat inodes. 1108 * 1109 * Rewrite the inode's ownerships here because the owning task may have 1110 * performed a setuid(), etc. 1111 * 1112 * Before the /proc/pid/status file was created the only way to read 1113 * the effective uid of a /process was to stat /proc/pid. Reading 1114 * /proc/pid/status is slow enough that procps and other packages 1115 * kept stating /proc/pid. To keep the rules in /proc simple I have 1116 * made this apply to all per process world readable and executable 1117 * directories. 
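 * For example, if the task calls setuid() and drops privileges, the next
 * lookup of one of its /proc entries passes through this revalidate path
 * and re-stamps i_uid/i_gid from the task's current euid/egid.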
 */
static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
{
        struct inode *inode = dentry->d_inode;
        struct task_struct *task = get_proc_task(inode);
        if (task) {
                if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
                    task_dumpable(task)) {
                        inode->i_uid = task->euid;
                        inode->i_gid = task->egid;
                } else {
                        inode->i_uid = 0;
                        inode->i_gid = 0;
                }
                inode->i_mode &= ~(S_ISUID | S_ISGID);
                security_task_to_inode(task, inode);
                put_task_struct(task);
                return 1;
        }
        d_drop(dentry);
        return 0;
}

static int pid_delete_dentry(struct dentry * dentry)
{
        /* Is the task we represent dead?
         * If so, then don't put the dentry on the lru list,
         * kill it immediately.
         */
        return !proc_pid(dentry->d_inode)->tasks[PIDTYPE_PID].first;
}

static struct dentry_operations pid_dentry_operations =
{
        .d_revalidate = pid_revalidate,
        .d_delete = pid_delete_dentry,
};

/* Lookups */

typedef struct dentry *instantiate_t(struct inode *, struct dentry *, struct task_struct *, void *);

/*
 * Fill a directory entry.
 *
 * If possible create the dcache entry and derive our inode number and
 * file type from the dcache entry.
 *
 * Since all of the proc inode numbers are dynamically generated, the inode
 * numbers do not exist until the inode is cached.  This means creating
 * the dcache entry in readdir is necessary to keep the inode numbers
 * reported by readdir in sync with the inode numbers reported
 * by stat.
 */
static int proc_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
        char *name, int len,
        instantiate_t instantiate, struct task_struct *task, void *ptr)
{
        struct dentry *child, *dir = filp->f_path.dentry;
        struct inode *inode;
        struct qstr qname;
        ino_t ino = 0;
        unsigned type = DT_UNKNOWN;

        qname.name = name;
        qname.len = len;
        qname.hash = full_name_hash(name, len);

        child = d_lookup(dir, &qname);
        if (!child) {
                struct dentry *new;
                new = d_alloc(dir, &qname);
                if (new) {
                        child = instantiate(dir->d_inode, new, task, ptr);
                        if (child)
                                dput(new);
                        else
                                child = new;
                }
        }
        if (!child || IS_ERR(child) || !child->d_inode)
                goto end_instantiate;
        inode = child->d_inode;
        if (inode) {
                ino = inode->i_ino;
                type = inode->i_mode >> 12;
        }
        dput(child);
end_instantiate:
        if (!ino)
                ino = find_inode_number(dir, &qname);
        if (!ino)
                ino = 1;
        return filldir(dirent, name, len, filp->f_pos, ino, type);
}

static unsigned name_to_int(struct dentry *dentry)
{
        const char *name = dentry->d_name.name;
        int len = dentry->d_name.len;
        unsigned n = 0;

        if (len > 1 && *name == '0')
                goto out;
        while (len-- > 0) {
                unsigned c = *name++ - '0';
                if (c > 9)
                        goto out;
                if (n >= (~0U-9)/10)
                        goto out;
                n *= 10;
                n += c;
        }
        return n;
out:
        return ~0U;
}

static int proc_fd_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
{
        struct task_struct *task = get_proc_task(inode);
        struct files_struct *files = NULL;
        struct file *file;
        int fd = proc_fd(inode);

        if (task) {
                files = get_files_struct(task);
                put_task_struct(task);
        }
        if (files)
{ 1248 /* 1249 * We are not taking a ref to the file structure, so we must 1250 * hold ->file_lock. 1251 */ 1252 spin_lock(&files->file_lock); 1253 file = fcheck_files(files, fd); 1254 if (file) { 1255 *mnt = mntget(file->f_path.mnt); 1256 *dentry = dget(file->f_path.dentry); 1257 spin_unlock(&files->file_lock); 1258 put_files_struct(files); 1259 return 0; 1260 } 1261 spin_unlock(&files->file_lock); 1262 put_files_struct(files); 1263 } 1264 return -ENOENT; 1265 } 1266 1267 static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd) 1268 { 1269 struct inode *inode = dentry->d_inode; 1270 struct task_struct *task = get_proc_task(inode); 1271 int fd = proc_fd(inode); 1272 struct files_struct *files; 1273 1274 if (task) { 1275 files = get_files_struct(task); 1276 if (files) { 1277 rcu_read_lock(); 1278 if (fcheck_files(files, fd)) { 1279 rcu_read_unlock(); 1280 put_files_struct(files); 1281 if (task_dumpable(task)) { 1282 inode->i_uid = task->euid; 1283 inode->i_gid = task->egid; 1284 } else { 1285 inode->i_uid = 0; 1286 inode->i_gid = 0; 1287 } 1288 inode->i_mode &= ~(S_ISUID | S_ISGID); 1289 security_task_to_inode(task, inode); 1290 put_task_struct(task); 1291 return 1; 1292 } 1293 rcu_read_unlock(); 1294 put_files_struct(files); 1295 } 1296 put_task_struct(task); 1297 } 1298 d_drop(dentry); 1299 return 0; 1300 } 1301 1302 static struct dentry_operations tid_fd_dentry_operations = 1303 { 1304 .d_revalidate = tid_fd_revalidate, 1305 .d_delete = pid_delete_dentry, 1306 }; 1307 1308 static struct dentry *proc_fd_instantiate(struct inode *dir, 1309 struct dentry *dentry, struct task_struct *task, void *ptr) 1310 { 1311 unsigned fd = *(unsigned *)ptr; 1312 struct file *file; 1313 struct files_struct *files; 1314 struct inode *inode; 1315 struct proc_inode *ei; 1316 struct dentry *error = ERR_PTR(-ENOENT); 1317 1318 inode = proc_pid_make_inode(dir->i_sb, task); 1319 if (!inode) 1320 goto out; 1321 ei = PROC_I(inode); 1322 ei->fd = fd; 1323 files = get_files_struct(task); 1324 if (!files) 1325 goto out_iput; 1326 inode->i_mode = S_IFLNK; 1327 1328 /* 1329 * We are not taking a ref to the file structure, so we must 1330 * hold ->file_lock. 
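 * (fcheck_files() is only safe against the fd table being resized or the
 *  entry being cleared while ->file_lock or rcu_read_lock() is held.)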
1331 */ 1332 spin_lock(&files->file_lock); 1333 file = fcheck_files(files, fd); 1334 if (!file) 1335 goto out_unlock; 1336 if (file->f_mode & 1) 1337 inode->i_mode |= S_IRUSR | S_IXUSR; 1338 if (file->f_mode & 2) 1339 inode->i_mode |= S_IWUSR | S_IXUSR; 1340 spin_unlock(&files->file_lock); 1341 put_files_struct(files); 1342 1343 inode->i_op = &proc_pid_link_inode_operations; 1344 inode->i_size = 64; 1345 ei->op.proc_get_link = proc_fd_link; 1346 dentry->d_op = &tid_fd_dentry_operations; 1347 d_add(dentry, inode); 1348 /* Close the race of the process dying before we return the dentry */ 1349 if (tid_fd_revalidate(dentry, NULL)) 1350 error = NULL; 1351 1352 out: 1353 return error; 1354 out_unlock: 1355 spin_unlock(&files->file_lock); 1356 put_files_struct(files); 1357 out_iput: 1358 iput(inode); 1359 goto out; 1360 } 1361 1362 static struct dentry *proc_lookupfd(struct inode * dir, struct dentry * dentry, struct nameidata *nd) 1363 { 1364 struct task_struct *task = get_proc_task(dir); 1365 unsigned fd = name_to_int(dentry); 1366 struct dentry *result = ERR_PTR(-ENOENT); 1367 1368 if (!task) 1369 goto out_no_task; 1370 if (fd == ~0U) 1371 goto out; 1372 1373 result = proc_fd_instantiate(dir, dentry, task, &fd); 1374 out: 1375 put_task_struct(task); 1376 out_no_task: 1377 return result; 1378 } 1379 1380 static int proc_fd_fill_cache(struct file *filp, void *dirent, filldir_t filldir, 1381 struct task_struct *task, int fd) 1382 { 1383 char name[PROC_NUMBUF]; 1384 int len = snprintf(name, sizeof(name), "%d", fd); 1385 return proc_fill_cache(filp, dirent, filldir, name, len, 1386 proc_fd_instantiate, task, &fd); 1387 } 1388 1389 static int proc_readfd(struct file * filp, void * dirent, filldir_t filldir) 1390 { 1391 struct dentry *dentry = filp->f_path.dentry; 1392 struct inode *inode = dentry->d_inode; 1393 struct task_struct *p = get_proc_task(inode); 1394 unsigned int fd, tid, ino; 1395 int retval; 1396 struct files_struct * files; 1397 struct fdtable *fdt; 1398 1399 retval = -ENOENT; 1400 if (!p) 1401 goto out_no_task; 1402 retval = 0; 1403 tid = p->pid; 1404 1405 fd = filp->f_pos; 1406 switch (fd) { 1407 case 0: 1408 if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0) 1409 goto out; 1410 filp->f_pos++; 1411 case 1: 1412 ino = parent_ino(dentry); 1413 if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0) 1414 goto out; 1415 filp->f_pos++; 1416 default: 1417 files = get_files_struct(p); 1418 if (!files) 1419 goto out; 1420 rcu_read_lock(); 1421 fdt = files_fdtable(files); 1422 for (fd = filp->f_pos-2; 1423 fd < fdt->max_fds; 1424 fd++, filp->f_pos++) { 1425 1426 if (!fcheck_files(files, fd)) 1427 continue; 1428 rcu_read_unlock(); 1429 1430 if (proc_fd_fill_cache(filp, dirent, filldir, p, fd) < 0) { 1431 rcu_read_lock(); 1432 break; 1433 } 1434 rcu_read_lock(); 1435 } 1436 rcu_read_unlock(); 1437 put_files_struct(files); 1438 } 1439 out: 1440 put_task_struct(p); 1441 out_no_task: 1442 return retval; 1443 } 1444 1445 static const struct file_operations proc_fd_operations = { 1446 .read = generic_read_dir, 1447 .readdir = proc_readfd, 1448 }; 1449 1450 /* 1451 * proc directories can do almost nothing.. 
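 * Only lookup and setattr are provided; there is no create, unlink, mkdir,
 * rename, etc., since the entries are generated from the task's fd table.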
1452 */ 1453 static const struct inode_operations proc_fd_inode_operations = { 1454 .lookup = proc_lookupfd, 1455 .setattr = proc_setattr, 1456 }; 1457 1458 static struct dentry *proc_pident_instantiate(struct inode *dir, 1459 struct dentry *dentry, struct task_struct *task, void *ptr) 1460 { 1461 struct pid_entry *p = ptr; 1462 struct inode *inode; 1463 struct proc_inode *ei; 1464 struct dentry *error = ERR_PTR(-EINVAL); 1465 1466 inode = proc_pid_make_inode(dir->i_sb, task); 1467 if (!inode) 1468 goto out; 1469 1470 ei = PROC_I(inode); 1471 inode->i_mode = p->mode; 1472 if (S_ISDIR(inode->i_mode)) 1473 inode->i_nlink = 2; /* Use getattr to fix if necessary */ 1474 if (p->iop) 1475 inode->i_op = p->iop; 1476 if (p->fop) 1477 inode->i_fop = p->fop; 1478 ei->op = p->op; 1479 dentry->d_op = &pid_dentry_operations; 1480 d_add(dentry, inode); 1481 /* Close the race of the process dying before we return the dentry */ 1482 if (pid_revalidate(dentry, NULL)) 1483 error = NULL; 1484 out: 1485 return error; 1486 } 1487 1488 static struct dentry *proc_pident_lookup(struct inode *dir, 1489 struct dentry *dentry, 1490 struct pid_entry *ents, 1491 unsigned int nents) 1492 { 1493 struct inode *inode; 1494 struct dentry *error; 1495 struct task_struct *task = get_proc_task(dir); 1496 struct pid_entry *p, *last; 1497 1498 error = ERR_PTR(-ENOENT); 1499 inode = NULL; 1500 1501 if (!task) 1502 goto out_no_task; 1503 1504 /* 1505 * Yes, it does not scale. And it should not. Don't add 1506 * new entries into /proc/<tgid>/ without very good reasons. 1507 */ 1508 last = &ents[nents - 1]; 1509 for (p = ents; p <= last; p++) { 1510 if (p->len != dentry->d_name.len) 1511 continue; 1512 if (!memcmp(dentry->d_name.name, p->name, p->len)) 1513 break; 1514 } 1515 if (p > last) 1516 goto out; 1517 1518 error = proc_pident_instantiate(dir, dentry, task, p); 1519 out: 1520 put_task_struct(task); 1521 out_no_task: 1522 return error; 1523 } 1524 1525 static int proc_pident_fill_cache(struct file *filp, void *dirent, filldir_t filldir, 1526 struct task_struct *task, struct pid_entry *p) 1527 { 1528 return proc_fill_cache(filp, dirent, filldir, p->name, p->len, 1529 proc_pident_instantiate, task, p); 1530 } 1531 1532 static int proc_pident_readdir(struct file *filp, 1533 void *dirent, filldir_t filldir, 1534 struct pid_entry *ents, unsigned int nents) 1535 { 1536 int i; 1537 int pid; 1538 struct dentry *dentry = filp->f_path.dentry; 1539 struct inode *inode = dentry->d_inode; 1540 struct task_struct *task = get_proc_task(inode); 1541 struct pid_entry *p, *last; 1542 ino_t ino; 1543 int ret; 1544 1545 ret = -ENOENT; 1546 if (!task) 1547 goto out_no_task; 1548 1549 ret = 0; 1550 pid = task->pid; 1551 i = filp->f_pos; 1552 switch (i) { 1553 case 0: 1554 ino = inode->i_ino; 1555 if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0) 1556 goto out; 1557 i++; 1558 filp->f_pos++; 1559 /* fall through */ 1560 case 1: 1561 ino = parent_ino(dentry); 1562 if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0) 1563 goto out; 1564 i++; 1565 filp->f_pos++; 1566 /* fall through */ 1567 default: 1568 i -= 2; 1569 if (i >= nents) { 1570 ret = 1; 1571 goto out; 1572 } 1573 p = ents + i; 1574 last = &ents[nents - 1]; 1575 while (p <= last) { 1576 if (proc_pident_fill_cache(filp, dirent, filldir, task, p) < 0) 1577 goto out; 1578 filp->f_pos++; 1579 p++; 1580 } 1581 } 1582 1583 ret = 1; 1584 out: 1585 put_task_struct(task); 1586 out_no_task: 1587 return ret; 1588 } 1589 1590 #ifdef CONFIG_SECURITY 1591 static ssize_t proc_pid_attr_read(struct file * file, 
char __user * buf, 1592 size_t count, loff_t *ppos) 1593 { 1594 struct inode * inode = file->f_path.dentry->d_inode; 1595 char *p = NULL; 1596 ssize_t length; 1597 struct task_struct *task = get_proc_task(inode); 1598 1599 if (!task) 1600 return -ESRCH; 1601 1602 length = security_getprocattr(task, 1603 (char*)file->f_path.dentry->d_name.name, 1604 &p); 1605 put_task_struct(task); 1606 if (length > 0) 1607 length = simple_read_from_buffer(buf, count, ppos, p, length); 1608 kfree(p); 1609 return length; 1610 } 1611 1612 static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf, 1613 size_t count, loff_t *ppos) 1614 { 1615 struct inode * inode = file->f_path.dentry->d_inode; 1616 char *page; 1617 ssize_t length; 1618 struct task_struct *task = get_proc_task(inode); 1619 1620 length = -ESRCH; 1621 if (!task) 1622 goto out_no_task; 1623 if (count > PAGE_SIZE) 1624 count = PAGE_SIZE; 1625 1626 /* No partial writes. */ 1627 length = -EINVAL; 1628 if (*ppos != 0) 1629 goto out; 1630 1631 length = -ENOMEM; 1632 page = (char*)__get_free_page(GFP_USER); 1633 if (!page) 1634 goto out; 1635 1636 length = -EFAULT; 1637 if (copy_from_user(page, buf, count)) 1638 goto out_free; 1639 1640 length = security_setprocattr(task, 1641 (char*)file->f_path.dentry->d_name.name, 1642 (void*)page, count); 1643 out_free: 1644 free_page((unsigned long) page); 1645 out: 1646 put_task_struct(task); 1647 out_no_task: 1648 return length; 1649 } 1650 1651 static const struct file_operations proc_pid_attr_operations = { 1652 .read = proc_pid_attr_read, 1653 .write = proc_pid_attr_write, 1654 }; 1655 1656 static struct pid_entry attr_dir_stuff[] = { 1657 REG("current", S_IRUGO|S_IWUGO, pid_attr), 1658 REG("prev", S_IRUGO, pid_attr), 1659 REG("exec", S_IRUGO|S_IWUGO, pid_attr), 1660 REG("fscreate", S_IRUGO|S_IWUGO, pid_attr), 1661 REG("keycreate", S_IRUGO|S_IWUGO, pid_attr), 1662 REG("sockcreate", S_IRUGO|S_IWUGO, pid_attr), 1663 }; 1664 1665 static int proc_attr_dir_readdir(struct file * filp, 1666 void * dirent, filldir_t filldir) 1667 { 1668 return proc_pident_readdir(filp,dirent,filldir, 1669 attr_dir_stuff,ARRAY_SIZE(attr_dir_stuff)); 1670 } 1671 1672 static const struct file_operations proc_attr_dir_operations = { 1673 .read = generic_read_dir, 1674 .readdir = proc_attr_dir_readdir, 1675 }; 1676 1677 static struct dentry *proc_attr_dir_lookup(struct inode *dir, 1678 struct dentry *dentry, struct nameidata *nd) 1679 { 1680 return proc_pident_lookup(dir, dentry, 1681 attr_dir_stuff, ARRAY_SIZE(attr_dir_stuff)); 1682 } 1683 1684 static const struct inode_operations proc_attr_dir_inode_operations = { 1685 .lookup = proc_attr_dir_lookup, 1686 .getattr = pid_getattr, 1687 .setattr = proc_setattr, 1688 }; 1689 1690 #endif 1691 1692 /* 1693 * /proc/self: 1694 */ 1695 static int proc_self_readlink(struct dentry *dentry, char __user *buffer, 1696 int buflen) 1697 { 1698 char tmp[PROC_NUMBUF]; 1699 sprintf(tmp, "%d", current->tgid); 1700 return vfs_readlink(dentry,buffer,buflen,tmp); 1701 } 1702 1703 static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd) 1704 { 1705 char tmp[PROC_NUMBUF]; 1706 sprintf(tmp, "%d", current->tgid); 1707 return ERR_PTR(vfs_follow_link(nd,tmp)); 1708 } 1709 1710 static const struct inode_operations proc_self_inode_operations = { 1711 .readlink = proc_self_readlink, 1712 .follow_link = proc_self_follow_link, 1713 }; 1714 1715 /* 1716 * proc base 1717 * 1718 * These are the directory entries in the root directory of /proc 1719 * that properly belong to the 
/proc filesystem, as they describe something that is process related.
 */
static struct pid_entry proc_base_stuff[] = {
        NOD("self", S_IFLNK|S_IRWXUGO,
                &proc_self_inode_operations, NULL, {}),
};

/*
 * Exceptional case: normally we are not allowed to unhash a busy
 * directory. In this case, however, we can do it - no aliasing problems
 * due to the way we treat inodes.
 */
static int proc_base_revalidate(struct dentry *dentry, struct nameidata *nd)
{
        struct inode *inode = dentry->d_inode;
        struct task_struct *task = get_proc_task(inode);
        if (task) {
                put_task_struct(task);
                return 1;
        }
        d_drop(dentry);
        return 0;
}

static struct dentry_operations proc_base_dentry_operations =
{
        .d_revalidate = proc_base_revalidate,
        .d_delete = pid_delete_dentry,
};

static struct dentry *proc_base_instantiate(struct inode *dir,
        struct dentry *dentry, struct task_struct *task, void *ptr)
{
        struct pid_entry *p = ptr;
        struct inode *inode;
        struct proc_inode *ei;
        struct dentry *error = ERR_PTR(-EINVAL);

        /* Allocate the inode */
        error = ERR_PTR(-ENOMEM);
        inode = new_inode(dir->i_sb);
        if (!inode)
                goto out;

        /* Initialize the inode */
        ei = PROC_I(inode);
        inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

        /*
         * grab the reference to the task.
         */
        ei->pid = get_task_pid(task, PIDTYPE_PID);
        if (!ei->pid)
                goto out_iput;

        inode->i_uid = 0;
        inode->i_gid = 0;
        inode->i_mode = p->mode;
        if (S_ISDIR(inode->i_mode))
                inode->i_nlink = 2;
        if (S_ISLNK(inode->i_mode))
                inode->i_size = 64;
        if (p->iop)
                inode->i_op = p->iop;
        if (p->fop)
                inode->i_fop = p->fop;
        ei->op = p->op;
        dentry->d_op = &proc_base_dentry_operations;
        d_add(dentry, inode);
        error = NULL;
out:
        return error;
out_iput:
        iput(inode);
        goto out;
}

static struct dentry *proc_base_lookup(struct inode *dir, struct dentry *dentry)
{
        struct dentry *error;
        struct task_struct *task = get_proc_task(dir);
        struct pid_entry *p, *last;

        error = ERR_PTR(-ENOENT);

        if (!task)
                goto out_no_task;

        /* Lookup the directory entry */
        last = &proc_base_stuff[ARRAY_SIZE(proc_base_stuff) - 1];
        for (p = proc_base_stuff; p <= last; p++) {
                if (p->len != dentry->d_name.len)
                        continue;
                if (!memcmp(dentry->d_name.name, p->name, p->len))
                        break;
        }
        if (p > last)
                goto out;

        error = proc_base_instantiate(dir, dentry, task, p);

out:
        put_task_struct(task);
out_no_task:
        return error;
}

static int proc_base_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
        struct task_struct *task, struct pid_entry *p)
{
        return proc_fill_cache(filp, dirent, filldir, p->name, p->len,
                        proc_base_instantiate, task, p);
}

#ifdef CONFIG_TASK_IO_ACCOUNTING
static int proc_pid_io_accounting(struct task_struct *task, char *buffer)
{
        return sprintf(buffer,
#ifdef CONFIG_TASK_XACCT
                        "rchar: %llu\n"
                        "wchar: %llu\n"
                        "syscr: %llu\n"
                        "syscw: %llu\n"
#endif
                        "read_bytes: %llu\n"
                        "write_bytes: %llu\n"
                        "cancelled_write_bytes: %llu\n",
#ifdef CONFIG_TASK_XACCT
                        (unsigned long long)task->rchar,
                        (unsigned long long)task->wchar,
                        (unsigned long long)task->syscr,
                        (unsigned long long)task->syscw,
#endif
                        (unsigned long long)task->ioac.read_bytes,
                        (unsigned long long)task->ioac.write_bytes,
                        (unsigned long long)task->ioac.cancelled_write_bytes);
}
#endif

/*
 * Thread groups
 */
static const struct file_operations proc_task_operations;
static const struct inode_operations proc_task_inode_operations;

static struct pid_entry tgid_base_stuff[] = {
        DIR("task", S_IRUGO|S_IXUGO, task),
        DIR("fd", S_IRUSR|S_IXUSR, fd),
        INF("environ", S_IRUSR, pid_environ),
        INF("auxv", S_IRUSR, pid_auxv),
        INF("status", S_IRUGO, pid_status),
        INF("cmdline", S_IRUGO, pid_cmdline),
        INF("stat", S_IRUGO, tgid_stat),
        INF("statm", S_IRUGO, pid_statm),
        REG("maps", S_IRUGO, maps),
#ifdef CONFIG_NUMA
        REG("numa_maps", S_IRUGO, numa_maps),
#endif
        REG("mem", S_IRUSR|S_IWUSR, mem),
#ifdef CONFIG_SECCOMP
        REG("seccomp", S_IRUSR|S_IWUSR, seccomp),
#endif
        LNK("cwd", cwd),
        LNK("root", root),
        LNK("exe", exe),
        REG("mounts", S_IRUGO, mounts),
        REG("mountstats", S_IRUSR, mountstats),
#ifdef CONFIG_MMU
        REG("clear_refs", S_IWUSR, clear_refs),
        REG("smaps", S_IRUGO, smaps),
#endif
#ifdef CONFIG_SECURITY
        DIR("attr", S_IRUGO|S_IXUGO, attr_dir),
#endif
#ifdef CONFIG_KALLSYMS
        INF("wchan", S_IRUGO, pid_wchan),
#endif
#ifdef CONFIG_SCHEDSTATS
        INF("schedstat", S_IRUGO, pid_schedstat),
#endif
#ifdef CONFIG_CPUSETS
        REG("cpuset", S_IRUGO, cpuset),
#endif
        INF("oom_score", S_IRUGO, oom_score),
        REG("oom_adj", S_IRUGO|S_IWUSR, oom_adjust),
#ifdef CONFIG_AUDITSYSCALL
        REG("loginuid", S_IWUSR|S_IRUGO, loginuid),
#endif
#ifdef CONFIG_FAULT_INJECTION
        REG("make-it-fail", S_IRUGO|S_IWUSR, fault_inject),
#endif
#ifdef CONFIG_TASK_IO_ACCOUNTING
        INF("io", S_IRUGO, pid_io_accounting),
#endif
};

static int proc_tgid_base_readdir(struct file * filp,
                             void * dirent, filldir_t filldir)
{
        return proc_pident_readdir(filp,dirent,filldir,
                                   tgid_base_stuff,ARRAY_SIZE(tgid_base_stuff));
}

static const struct file_operations proc_tgid_base_operations = {
        .read = generic_read_dir,
        .readdir = proc_tgid_base_readdir,
};

static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
        return proc_pident_lookup(dir, dentry,
                                  tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff));
}

static const struct inode_operations proc_tgid_base_inode_operations = {
        .lookup = proc_tgid_base_lookup,
        .getattr = pid_getattr,
        .setattr = proc_setattr,
};

/**
 * proc_flush_task - Remove dcache entries for @task from the /proc dcache.
 *
 * @task: task that should be flushed.
 *
 * Looks in the dcache for
 * /proc/@pid
 * /proc/@tgid/task/@pid
 * if either directory is present, flushes it and all of its children
 * from the dcache.
 *
 * It is safe and reasonable to cache /proc entries for a task until
 * that task exits.  After that they just clog up the dcache with
 * useless entries, possibly causing useful dcache entries to be
 * flushed instead.  This routine is provided to flush those useless
 * dcache entries at process exit time.
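 * It is called from release_task(), when the task is finally reaped, so
 * the stale entries disappear as soon as the task itself does.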
 *
 * NOTE: This routine is just an optimization, so it does not guarantee
 *       that no dcache entries will exist at process exit time; it
 *       just makes it very unlikely that any will persist.
 */
void proc_flush_task(struct task_struct *task)
{
        struct dentry *dentry, *leader, *dir;
        char buf[PROC_NUMBUF];
        struct qstr name;

        name.name = buf;
        name.len = snprintf(buf, sizeof(buf), "%d", task->pid);
        dentry = d_hash_and_lookup(proc_mnt->mnt_root, &name);
        if (dentry) {
                shrink_dcache_parent(dentry);
                d_drop(dentry);
                dput(dentry);
        }

        if (thread_group_leader(task))
                goto out;

        name.name = buf;
        name.len = snprintf(buf, sizeof(buf), "%d", task->tgid);
        leader = d_hash_and_lookup(proc_mnt->mnt_root, &name);
        if (!leader)
                goto out;

        name.name = "task";
        name.len = strlen(name.name);
        dir = d_hash_and_lookup(leader, &name);
        if (!dir)
                goto out_put_leader;

        name.name = buf;
        name.len = snprintf(buf, sizeof(buf), "%d", task->pid);
        dentry = d_hash_and_lookup(dir, &name);
        if (dentry) {
                shrink_dcache_parent(dentry);
                d_drop(dentry);
                dput(dentry);
        }

        dput(dir);
out_put_leader:
        dput(leader);
out:
        return;
}

static struct dentry *proc_pid_instantiate(struct inode *dir,
                                           struct dentry * dentry,
                                           struct task_struct *task, void *ptr)
{
        struct dentry *error = ERR_PTR(-ENOENT);
        struct inode *inode;

        inode = proc_pid_make_inode(dir->i_sb, task);
        if (!inode)
                goto out;

        inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
        inode->i_op = &proc_tgid_base_inode_operations;
        inode->i_fop = &proc_tgid_base_operations;
        inode->i_flags|=S_IMMUTABLE;
        inode->i_nlink = 4;
#ifdef CONFIG_SECURITY
        inode->i_nlink += 1;
#endif

        dentry->d_op = &pid_dentry_operations;

        d_add(dentry, inode);
        /* Close the race of the process dying before we return the dentry */
        if (pid_revalidate(dentry, NULL))
                error = NULL;
out:
        return error;
}

struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
{
        struct dentry *result = ERR_PTR(-ENOENT);
        struct task_struct *task;
        unsigned tgid;

        result = proc_base_lookup(dir, dentry);
        if (!IS_ERR(result) || PTR_ERR(result) != -ENOENT)
                goto out;

        tgid = name_to_int(dentry);
        if (tgid == ~0U)
                goto out;

        rcu_read_lock();
        task = find_task_by_pid(tgid);
        if (task)
                get_task_struct(task);
        rcu_read_unlock();
        if (!task)
                goto out;

        result = proc_pid_instantiate(dir, dentry, task, NULL);
        put_task_struct(task);
out:
        return result;
}

/*
 * Find the first task with tgid >= tgid
 *
 */
static struct task_struct *next_tgid(unsigned int tgid)
{
        struct task_struct *task;
        struct pid *pid;

        rcu_read_lock();
retry:
        task = NULL;
        pid = find_ge_pid(tgid);
        if (pid) {
                tgid = pid->nr + 1;
                task = pid_task(pid, PIDTYPE_PID);
                /* What we want to know is whether the pid we have found is
                 * the pid of a thread_group_leader.  Testing for the task
                 * being a thread_group_leader is the obvious thing
                 * to do, but there is a window when it fails, due to
                 * the pid transfer logic in de_thread.
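                 * (During exec, de_thread() can hand the group-leader pid
                 *  over to the execing thread, so for a brief moment the
                 *  task reachable from that pid is not yet marked as a
                 *  group leader.)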
2085 * 2086 * So we perform the straight forward test of seeing 2087 * if the pid we have found is the pid of a thread 2088 * group leader, and don't worry if the task we have 2089 * found doesn't happen to be a thread group leader. 2090 * As we don't care in the case of readdir. 2091 */ 2092 if (!task || !has_group_leader_pid(task)) 2093 goto retry; 2094 get_task_struct(task); 2095 } 2096 rcu_read_unlock(); 2097 return task; 2098 } 2099 2100 #define TGID_OFFSET (FIRST_PROCESS_ENTRY + ARRAY_SIZE(proc_base_stuff)) 2101 2102 static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldir, 2103 struct task_struct *task, int tgid) 2104 { 2105 char name[PROC_NUMBUF]; 2106 int len = snprintf(name, sizeof(name), "%d", tgid); 2107 return proc_fill_cache(filp, dirent, filldir, name, len, 2108 proc_pid_instantiate, task, NULL); 2109 } 2110 2111 /* for the /proc/ directory itself, after non-process stuff has been done */ 2112 int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir) 2113 { 2114 unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY; 2115 struct task_struct *reaper = get_proc_task(filp->f_path.dentry->d_inode); 2116 struct task_struct *task; 2117 int tgid; 2118 2119 if (!reaper) 2120 goto out_no_task; 2121 2122 for (; nr < ARRAY_SIZE(proc_base_stuff); filp->f_pos++, nr++) { 2123 struct pid_entry *p = &proc_base_stuff[nr]; 2124 if (proc_base_fill_cache(filp, dirent, filldir, reaper, p) < 0) 2125 goto out; 2126 } 2127 2128 tgid = filp->f_pos - TGID_OFFSET; 2129 for (task = next_tgid(tgid); 2130 task; 2131 put_task_struct(task), task = next_tgid(tgid + 1)) { 2132 tgid = task->pid; 2133 filp->f_pos = tgid + TGID_OFFSET; 2134 if (proc_pid_fill_cache(filp, dirent, filldir, task, tgid) < 0) { 2135 put_task_struct(task); 2136 goto out; 2137 } 2138 } 2139 filp->f_pos = PID_MAX_LIMIT + TGID_OFFSET; 2140 out: 2141 put_task_struct(reaper); 2142 out_no_task: 2143 return 0; 2144 } 2145 2146 /* 2147 * Tasks 2148 */ 2149 static struct pid_entry tid_base_stuff[] = { 2150 DIR("fd", S_IRUSR|S_IXUSR, fd), 2151 INF("environ", S_IRUSR, pid_environ), 2152 INF("auxv", S_IRUSR, pid_auxv), 2153 INF("status", S_IRUGO, pid_status), 2154 INF("cmdline", S_IRUGO, pid_cmdline), 2155 INF("stat", S_IRUGO, tid_stat), 2156 INF("statm", S_IRUGO, pid_statm), 2157 REG("maps", S_IRUGO, maps), 2158 #ifdef CONFIG_NUMA 2159 REG("numa_maps", S_IRUGO, numa_maps), 2160 #endif 2161 REG("mem", S_IRUSR|S_IWUSR, mem), 2162 #ifdef CONFIG_SECCOMP 2163 REG("seccomp", S_IRUSR|S_IWUSR, seccomp), 2164 #endif 2165 LNK("cwd", cwd), 2166 LNK("root", root), 2167 LNK("exe", exe), 2168 REG("mounts", S_IRUGO, mounts), 2169 #ifdef CONFIG_MMU 2170 REG("clear_refs", S_IWUSR, clear_refs), 2171 REG("smaps", S_IRUGO, smaps), 2172 #endif 2173 #ifdef CONFIG_SECURITY 2174 DIR("attr", S_IRUGO|S_IXUGO, attr_dir), 2175 #endif 2176 #ifdef CONFIG_KALLSYMS 2177 INF("wchan", S_IRUGO, pid_wchan), 2178 #endif 2179 #ifdef CONFIG_SCHEDSTATS 2180 INF("schedstat", S_IRUGO, pid_schedstat), 2181 #endif 2182 #ifdef CONFIG_CPUSETS 2183 REG("cpuset", S_IRUGO, cpuset), 2184 #endif 2185 INF("oom_score", S_IRUGO, oom_score), 2186 REG("oom_adj", S_IRUGO|S_IWUSR, oom_adjust), 2187 #ifdef CONFIG_AUDITSYSCALL 2188 REG("loginuid", S_IWUSR|S_IRUGO, loginuid), 2189 #endif 2190 #ifdef CONFIG_FAULT_INJECTION 2191 REG("make-it-fail", S_IRUGO|S_IWUSR, fault_inject), 2192 #endif 2193 }; 2194 2195 static int proc_tid_base_readdir(struct file * filp, 2196 void * dirent, filldir_t filldir) 2197 { 2198 return proc_pident_readdir(filp,dirent,filldir, 2199 
				   tid_base_stuff, ARRAY_SIZE(tid_base_stuff));
}

static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
	return proc_pident_lookup(dir, dentry,
				  tid_base_stuff, ARRAY_SIZE(tid_base_stuff));
}

static const struct file_operations proc_tid_base_operations = {
	.read		= generic_read_dir,
	.readdir	= proc_tid_base_readdir,
};

static const struct inode_operations proc_tid_base_inode_operations = {
	.lookup		= proc_tid_base_lookup,
	.getattr	= pid_getattr,
	.setattr	= proc_setattr,
};

static struct dentry *proc_task_instantiate(struct inode *dir,
	struct dentry *dentry, struct task_struct *task, void *ptr)
{
	struct dentry *error = ERR_PTR(-ENOENT);
	struct inode *inode;

	inode = proc_pid_make_inode(dir->i_sb, task);
	if (!inode)
		goto out;

	inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
	inode->i_op = &proc_tid_base_inode_operations;
	inode->i_fop = &proc_tid_base_operations;
	inode->i_flags |= S_IMMUTABLE;
	inode->i_nlink = 3;
#ifdef CONFIG_SECURITY
	inode->i_nlink += 1;
#endif

	dentry->d_op = &pid_dentry_operations;

	d_add(dentry, inode);
	/* Close the race of the process dying before we return the dentry */
	if (pid_revalidate(dentry, NULL))
		error = NULL;
out:
	return error;
}

static struct dentry *proc_task_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
	struct dentry *result = ERR_PTR(-ENOENT);
	struct task_struct *task;
	struct task_struct *leader = get_proc_task(dir);
	unsigned tid;

	if (!leader)
		goto out_no_task;

	tid = name_to_int(dentry);
	if (tid == ~0U)
		goto out;

	rcu_read_lock();
	task = find_task_by_pid(tid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();
	if (!task)
		goto out;
	if (leader->tgid != task->tgid)
		goto out_drop_task;

	result = proc_task_instantiate(dir, dentry, task, NULL);
out_drop_task:
	put_task_struct(task);
out:
	put_task_struct(leader);
out_no_task:
	return result;
}

/*
 * Find the first tid of a thread group to return to user space.
 *
 * Usually this is just the thread group leader, but if the user's
 * buffer was too small or there was a seek into the middle of the
 * directory, we have more work to do.
 *
 * In the case of a short read we start with find_task_by_pid.
 *
 * In the case of a seek we start with the leader and walk nr
 * threads past it.
 */
static struct task_struct *first_tid(struct task_struct *leader,
		int tid, int nr)
{
	struct task_struct *pos;

	rcu_read_lock();
	/* Attempt to start with the pid of a thread */
	if (tid && (nr > 0)) {
		pos = find_task_by_pid(tid);
		if (pos && (pos->group_leader == leader))
			goto found;
	}

	/* If nr exceeds the number of threads there is nothing to do */
	pos = NULL;
	if (nr && nr >= get_nr_threads(leader))
		goto out;

	/* If we haven't found our starting place yet, start
	 * with the leader and walk nr threads forward.
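	 * If we wrap all the way around back to the leader, nr was
	 * past the end of the thread list and we return NULL.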
	 */
	for (pos = leader; nr > 0; --nr) {
		pos = next_thread(pos);
		if (pos == leader) {
			pos = NULL;
			goto out;
		}
	}
found:
	get_task_struct(pos);
out:
	rcu_read_unlock();
	return pos;
}

/*
 * Find the next thread in the thread list.
 * Return NULL if there is an error or no next thread.
 *
 * The reference to the input task_struct is released.
 */
static struct task_struct *next_tid(struct task_struct *start)
{
	struct task_struct *pos = NULL;

	rcu_read_lock();
	if (pid_alive(start)) {
		pos = next_thread(start);
		if (thread_group_leader(pos))
			pos = NULL;
		else
			get_task_struct(pos);
	}
	rcu_read_unlock();
	put_task_struct(start);
	return pos;
}

static int proc_task_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
	struct task_struct *task, int tid)
{
	char name[PROC_NUMBUF];
	int len = snprintf(name, sizeof(name), "%d", tid);
	return proc_fill_cache(filp, dirent, filldir, name, len,
				proc_task_instantiate, task, NULL);
}

/* for the /proc/TGID/task/ directories */
static int proc_task_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct task_struct *leader = NULL;
	struct task_struct *task;
	int retval = -ENOENT;
	ino_t ino;
	int tid;
	unsigned long pos = filp->f_pos;	/* avoiding "long long" filp->f_pos */

	task = get_proc_task(inode);
	if (!task)
		goto out_no_task;
	rcu_read_lock();
	if (pid_alive(task)) {
		leader = task->group_leader;
		get_task_struct(leader);
	}
	rcu_read_unlock();
	put_task_struct(task);
	if (!leader)
		goto out_no_task;
	retval = 0;

	switch (pos) {
	case 0:
		ino = inode->i_ino;
		if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0)
			goto out;
		pos++;
		/* fall through */
	case 1:
		ino = parent_ino(dentry);
		if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0)
			goto out;
		pos++;
		/* fall through */
	}

	/* f_version caches the tid value that the last readdir call couldn't
	 * return. lseek (aka seekdir) automagically resets f_version to 0.
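	 * On the next pass, first_tid() uses the saved tid to restart
	 * at the exact thread that did not fit, instead of rescanning
	 * from the group leader.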
	 */
	tid = filp->f_version;
	filp->f_version = 0;
	for (task = first_tid(leader, tid, pos - 2);
	     task;
	     task = next_tid(task), pos++) {
		tid = task->pid;
		if (proc_task_fill_cache(filp, dirent, filldir, task, tid) < 0) {
			/* returning this tid failed, save it as the first
			 * tid for the next readdir call */
			filp->f_version = tid;
			put_task_struct(task);
			break;
		}
	}
out:
	filp->f_pos = pos;
	put_task_struct(leader);
out_no_task:
	return retval;
}

static int proc_task_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct task_struct *p = get_proc_task(inode);

	generic_fillattr(inode, stat);

	if (p) {
		rcu_read_lock();
		stat->nlink += get_nr_threads(p);
		rcu_read_unlock();
		put_task_struct(p);
	}

	return 0;
}

static const struct inode_operations proc_task_inode_operations = {
	.lookup		= proc_task_lookup,
	.getattr	= proc_task_getattr,
	.setattr	= proc_setattr,
};

static const struct file_operations proc_task_operations = {
	.read		= generic_read_dir,
	.readdir	= proc_task_readdir,
};