/*
 * proc/fs/generic.c --- generic routines for the proc-fs
 *
 * This file contains generic proc-fs routines for handling
 * directories and files.
 *
 * Copyright (C) 1991, 1992 Linus Torvalds.
 * Copyright (C) 1997 Theodore Ts'o
 */

#include <linux/errno.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mount.h>
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/namei.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <asm/uaccess.h>

#include "internal.h"

DEFINE_SPINLOCK(proc_subdir_lock);

static int proc_match(unsigned int len, const char *name, struct proc_dir_entry *de)
{
	if (de->namelen != len)
		return 0;
	return !memcmp(name, de->name, len);
}

/* buffer size is one page but our output routines use some slack for overruns */
#define PROC_BLOCK_SIZE	(PAGE_SIZE - 1024)

static ssize_t
__proc_file_read(struct file *file, char __user *buf, size_t nbytes,
		 loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	char *page;
	ssize_t retval = 0;
	int eof = 0;
	ssize_t n, count;
	char *start;
	struct proc_dir_entry *dp;
	unsigned long long pos;

	/*
	 * Gaah, please just use "seq_file" instead. The legacy /proc
	 * interfaces cut loff_t down to off_t for reads, and ignore
	 * the offset entirely for writes..
	 */
	pos = *ppos;
	if (pos > MAX_NON_LFS)
		return 0;
	if (nbytes > MAX_NON_LFS - pos)
		nbytes = MAX_NON_LFS - pos;

	dp = PDE(inode);
	if (!(page = (char *) __get_free_page(GFP_TEMPORARY)))
		return -ENOMEM;

	while ((nbytes > 0) && !eof) {
		count = min_t(size_t, PROC_BLOCK_SIZE, nbytes);

		start = NULL;
		if (dp->read_proc) {
			/*
			 * How to be a proc read function
			 * ------------------------------
			 * Prototype:
			 *    int f(char *buffer, char **start, off_t offset,
			 *          int count, int *peof, void *dat)
			 *
			 * Assume that the buffer is "count" bytes in size.
			 *
			 * If you know you have supplied all the data you
			 * have, set *peof.
			 *
			 * You have three ways to return data:
			 * 0) Leave *start = NULL.  (This is the default.)
			 *    Put the data of the requested offset at that
			 *    offset within the buffer.  Return the number (n)
			 *    of bytes there are from the beginning of the
			 *    buffer up to the last byte of data.  If the
			 *    number of supplied bytes (= n - offset) is
			 *    greater than zero and you didn't signal eof
			 *    and the reader is prepared to take more data
			 *    you will be called again with the requested
			 *    offset advanced by the number of bytes
			 *    absorbed.  This interface is useful for files
			 *    no larger than the buffer.
			 * 1) Set *start = an unsigned long value less than
			 *    the buffer address but greater than zero.
			 *    Put the data of the requested offset at the
			 *    beginning of the buffer.  Return the number of
			 *    bytes of data placed there.  If this number is
			 *    greater than zero and you didn't signal eof
			 *    and the reader is prepared to take more data
			 *    you will be called again with the requested
			 *    offset advanced by *start.  This interface is
			 *    useful when you have a large file consisting
			 *    of a series of blocks which you want to count
			 *    and return as wholes.
			 *    (Hack by Paul.Russell@rustcorp.com.au)
			 * 2) Set *start = an address within the buffer.
			 *    Put the data of the requested offset at *start.
			 *    Return the number of bytes of data placed there.
			 *    If this number is greater than zero and you
			 *    didn't signal eof and the reader is prepared to
			 *    take more data you will be called again with the
			 *    requested offset advanced by the number of bytes
			 *    absorbed.
			 *
			 * A minimal example of style 0) appears at the end
			 * of this file.
			 */
			n = dp->read_proc(page, &start, *ppos,
					  count, &eof, dp->data);
		} else
			break;

		if (n == 0)	/* end of file */
			break;
		if (n < 0) {	/* error */
			if (retval == 0)
				retval = n;
			break;
		}

		if (start == NULL) {
			if (n > PAGE_SIZE) {
				printk(KERN_ERR
				       "proc_file_read: Apparent buffer overflow!\n");
				n = PAGE_SIZE;
			}
			n -= *ppos;
			if (n <= 0)
				break;
			if (n > count)
				n = count;
			start = page + *ppos;
		} else if (start < page) {
			if (n > PAGE_SIZE) {
				printk(KERN_ERR
				       "proc_file_read: Apparent buffer overflow!\n");
				n = PAGE_SIZE;
			}
			if (n > count) {
				/*
				 * Don't reduce n because doing so might
				 * cut off part of a data block.
				 */
				printk(KERN_WARNING
				       "proc_file_read: Read count exceeded\n");
			}
		} else /* start >= page */ {
			unsigned long startoff = (unsigned long)(start - page);
			if (n > (PAGE_SIZE - startoff)) {
				printk(KERN_ERR
				       "proc_file_read: Apparent buffer overflow!\n");
				n = PAGE_SIZE - startoff;
			}
			if (n > count)
				n = count;
		}

		n -= copy_to_user(buf, start < page ? page : start, n);
		if (n == 0) {
			if (retval == 0)
				retval = -EFAULT;
			break;
		}

		*ppos += start < page ? (unsigned long)start : n;
		nbytes -= n;
		buf += n;
		retval += n;
	}
	free_page((unsigned long) page);
	return retval;
}

static ssize_t
proc_file_read(struct file *file, char __user *buf, size_t nbytes,
	       loff_t *ppos)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	ssize_t rv = -EIO;

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	spin_unlock(&pde->pde_unload_lock);

	rv = __proc_file_read(file, buf, nbytes, ppos);

	pde_users_dec(pde);
	return rv;
}

static ssize_t
proc_file_write(struct file *file, const char __user *buffer,
		size_t count, loff_t *ppos)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	ssize_t rv = -EIO;

	if (pde->write_proc) {
		spin_lock(&pde->pde_unload_lock);
		if (!pde->proc_fops) {
			spin_unlock(&pde->pde_unload_lock);
			return rv;
		}
		pde->pde_users++;
		spin_unlock(&pde->pde_unload_lock);

		/* FIXME: does this routine need ppos?  probably... */
		rv = pde->write_proc(file, buffer, count, pde->data);
		pde_users_dec(pde);
	}
	return rv;
}

static loff_t
proc_file_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t retval = -EINVAL;

	switch (orig) {
	case 1:
		offset += file->f_pos;
		/* fallthrough */
	case 0:
		if (offset < 0 || offset > MAX_NON_LFS)
			break;
		file->f_pos = retval = offset;
	}
	return retval;
}

static const struct file_operations proc_file_operations = {
	.llseek		= proc_file_lseek,
	.read		= proc_file_read,
	.write		= proc_file_write,
};

static int proc_notify_change(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = dentry->d_inode;
	struct proc_dir_entry *de = PDE(inode);
	int error;

	error = inode_change_ok(inode, iattr);
	if (error)
		return error;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		error = vmtruncate(inode, iattr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);

	de->uid = inode->i_uid;
	de->gid = inode->i_gid;
	de->mode = inode->i_mode;
	return 0;
}

static int proc_getattr(struct vfsmount *mnt, struct dentry *dentry,
			struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct proc_dir_entry *de = PROC_I(inode)->pde;

	if (de && de->nlink)
		set_nlink(inode, de->nlink);

	generic_fillattr(inode, stat);
	return 0;
}

static const struct inode_operations proc_file_inode_operations = {
	.setattr	= proc_notify_change,
};

/*
 * This function parses a name such as "tty/driver/serial", and
 * returns the struct proc_dir_entry for "/proc/tty/driver", and
 * returns "serial" in residual.
 */
static int __xlate_proc_name(const char *name, struct proc_dir_entry **ret,
			     const char **residual)
{
	const char *cp = name, *next;
	struct proc_dir_entry *de;
	unsigned int len;

	de = *ret;
	if (!de)
		de = &proc_root;

	while (1) {
		next = strchr(cp, '/');
		if (!next)
			break;

		len = next - cp;
		for (de = de->subdir; de ; de = de->next) {
			if (proc_match(len, cp, de))
				break;
		}
		if (!de) {
			WARN(1, "name '%s'\n", name);
			return -ENOENT;
		}
		cp += len + 1;
	}
	*residual = cp;
	*ret = de;
	return 0;
}

static int xlate_proc_name(const char *name, struct proc_dir_entry **ret,
			   const char **residual)
{
	int rv;

	spin_lock(&proc_subdir_lock);
	rv = __xlate_proc_name(name, ret, residual);
	spin_unlock(&proc_subdir_lock);
	return rv;
}

static DEFINE_IDA(proc_inum_ida);
static DEFINE_SPINLOCK(proc_inum_lock); /* protects the above */

#define PROC_DYNAMIC_FIRST 0xF0000000U

/*
 * Allocate an inode number between PROC_DYNAMIC_FIRST and
 * 0xffffffff, store it in *inum and return 0, or return a
 * negative errno on failure.
 */
int proc_alloc_inum(unsigned int *inum)
{
	unsigned int i;
	int error;

retry:
	if (!ida_pre_get(&proc_inum_ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&proc_inum_lock);
	error = ida_get_new(&proc_inum_ida, &i);
	spin_unlock(&proc_inum_lock);
	if (error == -EAGAIN)
		goto retry;
	else if (error)
		return error;

	if (i > UINT_MAX - PROC_DYNAMIC_FIRST) {
		spin_lock(&proc_inum_lock);
		ida_remove(&proc_inum_ida, i);
		spin_unlock(&proc_inum_lock);
		return -ENOSPC;
	}
	*inum = PROC_DYNAMIC_FIRST + i;
	return 0;
}

void proc_free_inum(unsigned int inum)
{
	spin_lock(&proc_inum_lock);
	ida_remove(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST);
	spin_unlock(&proc_inum_lock);
}

static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, PDE(dentry->d_inode)->data);
	return NULL;
}

static const struct inode_operations proc_link_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= proc_follow_link,
};

/*
 * As some entries in /proc are volatile, we want to
 * get rid of unused dentries. This could be made
 * smarter: we could keep a "volatile" flag in the
 * inode to indicate which ones to keep.
 */
static int proc_delete_dentry(const struct dentry *dentry)
{
	return 1;
}

static const struct dentry_operations proc_dentry_operations =
{
	.d_delete	= proc_delete_dentry,
};

/*
 * Don't create negative dentries here, return -ENOENT by hand
 * instead.
 */
struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir,
		struct dentry *dentry)
{
	struct inode *inode = NULL;
	int error = -ENOENT;

	spin_lock(&proc_subdir_lock);
	for (de = de->subdir; de ; de = de->next) {
		if (de->namelen != dentry->d_name.len)
			continue;
		if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
			pde_get(de);
			spin_unlock(&proc_subdir_lock);
			error = -ENOMEM;
			inode = proc_get_inode(dir->i_sb, de);
			goto out_unlock;
		}
	}
	spin_unlock(&proc_subdir_lock);
out_unlock:

	if (inode) {
		d_set_d_op(dentry, &proc_dentry_operations);
		d_add(dentry, inode);
		return NULL;
	}
	if (de)
		pde_put(de);
	return ERR_PTR(error);
}

struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
		unsigned int flags)
{
	return proc_lookup_de(PDE(dir), dir, dentry);
}

/*
 * This returns non-zero if at EOF, so that the /proc
 * root directory can use this and check if it should
 * continue with the <pid> entries..
 *
 * Note that the VFS-layer doesn't care about the return
 * value of the readdir() call, as long as it's non-negative
 * for success..
 */
int proc_readdir_de(struct proc_dir_entry *de, struct file *filp, void *dirent,
		filldir_t filldir)
{
	unsigned int ino;
	int i;
	struct inode *inode = filp->f_path.dentry->d_inode;
	int ret = 0;

	ino = inode->i_ino;
	i = filp->f_pos;
	switch (i) {
		case 0:
			if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
				goto out;
			i++;
			filp->f_pos++;
			/* fall through */
		case 1:
			if (filldir(dirent, "..", 2, i,
				    parent_ino(filp->f_path.dentry),
				    DT_DIR) < 0)
				goto out;
			i++;
			filp->f_pos++;
			/* fall through */
		default:
			spin_lock(&proc_subdir_lock);
			de = de->subdir;
			i -= 2;
			for (;;) {
				if (!de) {
					ret = 1;
					spin_unlock(&proc_subdir_lock);
					goto out;
				}
				if (!i)
					break;
				de = de->next;
				i--;
			}

			do {
				struct proc_dir_entry *next;

				/* filldir passes info to user space */
				pde_get(de);
				spin_unlock(&proc_subdir_lock);
				if (filldir(dirent, de->name, de->namelen, filp->f_pos,
					    de->low_ino, de->mode >> 12) < 0) {
					pde_put(de);
					goto out;
				}
				spin_lock(&proc_subdir_lock);
				filp->f_pos++;
				next = de->next;
				pde_put(de);
				de = next;
			} while (de);
			spin_unlock(&proc_subdir_lock);
	}
	ret = 1;
out:
	return ret;
}

int proc_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct inode *inode = filp->f_path.dentry->d_inode;

	return proc_readdir_de(PDE(inode), filp, dirent, filldir);
}

/*
 * These are the generic /proc directory operations. They
 * use the in-memory "struct proc_dir_entry" tree to parse
 * the /proc directory.
 */
static const struct file_operations proc_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= proc_readdir,
};

/*
 * proc directories can do almost nothing..
 */
static const struct inode_operations proc_dir_inode_operations = {
	.lookup		= proc_lookup,
	.getattr	= proc_getattr,
	.setattr	= proc_notify_change,
};

static int proc_register(struct proc_dir_entry *dir, struct proc_dir_entry *dp)
{
	struct proc_dir_entry *tmp;
	int ret;

	ret = proc_alloc_inum(&dp->low_ino);
	if (ret)
		return ret;

	if (S_ISDIR(dp->mode)) {
		if (dp->proc_iops == NULL) {
			dp->proc_fops = &proc_dir_operations;
			dp->proc_iops = &proc_dir_inode_operations;
		}
		dir->nlink++;
	} else if (S_ISLNK(dp->mode)) {
		if (dp->proc_iops == NULL)
			dp->proc_iops = &proc_link_inode_operations;
	} else if (S_ISREG(dp->mode)) {
		if (dp->proc_fops == NULL)
			dp->proc_fops = &proc_file_operations;
		if (dp->proc_iops == NULL)
			dp->proc_iops = &proc_file_inode_operations;
	}

	spin_lock(&proc_subdir_lock);

	for (tmp = dir->subdir; tmp; tmp = tmp->next)
		if (strcmp(tmp->name, dp->name) == 0) {
			WARN(1, KERN_WARNING "proc_dir_entry '%s/%s' already registered\n",
				dir->name, dp->name);
			break;
		}

	dp->next = dir->subdir;
	dp->parent = dir;
	dir->subdir = dp;
	spin_unlock(&proc_subdir_lock);

	return 0;
}

static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
					    const char *name,
					    umode_t mode,
					    nlink_t nlink)
{
	struct proc_dir_entry *ent = NULL;
	const char *fn = name;
	unsigned int len;

	/* make sure name is valid */
	if (!name || !strlen(name))
		goto out;

	if (xlate_proc_name(name, parent, &fn) != 0)
		goto out;

	/* At this point there must not be any '/' characters beyond *fn */
	if (strchr(fn, '/'))
		goto out;

	len = strlen(fn);

	ent = kzalloc(sizeof(struct proc_dir_entry) + len + 1, GFP_KERNEL);
	if (!ent)
		goto out;

	memcpy(ent->name, fn, len + 1);
	ent->namelen = len;
	ent->mode = mode;
	ent->nlink = nlink;
	atomic_set(&ent->count, 1);
	spin_lock_init(&ent->pde_unload_lock);
	INIT_LIST_HEAD(&ent->pde_openers);
out:
	return ent;
}

struct proc_dir_entry *proc_symlink(const char *name,
		struct proc_dir_entry *parent, const char *dest)
{
	struct proc_dir_entry *ent;

	ent = __proc_create(&parent, name,
			    (S_IFLNK | S_IRUGO | S_IWUGO | S_IXUGO), 1);

	if (ent) {
		ent->data = kmalloc((ent->size = strlen(dest)) + 1, GFP_KERNEL);
		if (ent->data) {
			strcpy((char *)ent->data, dest);
			if (proc_register(parent, ent) < 0) {
				kfree(ent->data);
				kfree(ent);
				ent = NULL;
			}
		} else {
			kfree(ent);
			ent = NULL;
		}
	}
	return ent;
}
EXPORT_SYMBOL(proc_symlink);

struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
		struct proc_dir_entry *parent)
{
	struct proc_dir_entry *ent;

	ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
	if (ent) {
		if (proc_register(parent, ent) < 0) {
			kfree(ent);
			ent = NULL;
		}
	}
	return ent;
}
EXPORT_SYMBOL(proc_mkdir_mode);

struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name,
		struct proc_dir_entry *parent)
{
	struct proc_dir_entry *ent;

	ent = __proc_create(&parent, name, S_IFDIR | S_IRUGO | S_IXUGO, 2);
	if (ent) {
		ent->data = net;
		if (proc_register(parent, ent) < 0) {
			kfree(ent);
			ent = NULL;
		}
	}
	return ent;
}
EXPORT_SYMBOL_GPL(proc_net_mkdir);

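/*
 * Example: a minimal sketch of how a caller might use the helpers above to
 * build a small /proc subtree.  Kept under "#if 0" as an illustration only;
 * the names example_dir, example_tree_init() and the "example" entries are
 * hypothetical and are not created anywhere in this file.
 */
#if 0
static struct proc_dir_entry *example_dir;

static int __init example_tree_init(void)
{
	/* /proc/example, world-readable and searchable */
	example_dir = proc_mkdir_mode("example", S_IRUGO | S_IXUGO, NULL);
	if (!example_dir)
		return -ENOMEM;

	/* /proc/example/version -> /proc/version */
	if (!proc_symlink("version", example_dir, "/proc/version")) {
		remove_proc_entry("example", NULL);
		return -ENOMEM;
	}
	return 0;
}
#endif
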
struct proc_dir_entry *proc_mkdir(const char *name,
		struct proc_dir_entry *parent)
{
	return proc_mkdir_mode(name, S_IRUGO | S_IXUGO, parent);
}
EXPORT_SYMBOL(proc_mkdir);

struct proc_dir_entry *create_proc_entry(const char *name, umode_t mode,
					 struct proc_dir_entry *parent)
{
	struct proc_dir_entry *ent;
	nlink_t nlink;

	if (S_ISDIR(mode)) {
		if ((mode & S_IALLUGO) == 0)
			mode |= S_IRUGO | S_IXUGO;
		nlink = 2;
	} else {
		if ((mode & S_IFMT) == 0)
			mode |= S_IFREG;
		if ((mode & S_IALLUGO) == 0)
			mode |= S_IRUGO;
		nlink = 1;
	}

	ent = __proc_create(&parent, name, mode, nlink);
	if (ent) {
		if (proc_register(parent, ent) < 0) {
			kfree(ent);
			ent = NULL;
		}
	}
	return ent;
}
EXPORT_SYMBOL(create_proc_entry);

struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
					struct proc_dir_entry *parent,
					const struct file_operations *proc_fops,
					void *data)
{
	struct proc_dir_entry *pde;
	nlink_t nlink;

	if (S_ISDIR(mode)) {
		if ((mode & S_IALLUGO) == 0)
			mode |= S_IRUGO | S_IXUGO;
		nlink = 2;
	} else {
		if ((mode & S_IFMT) == 0)
			mode |= S_IFREG;
		if ((mode & S_IALLUGO) == 0)
			mode |= S_IRUGO;
		nlink = 1;
	}

	pde = __proc_create(&parent, name, mode, nlink);
	if (!pde)
		goto out;
	pde->proc_fops = proc_fops;
	pde->data = data;
	if (proc_register(parent, pde) < 0)
		goto out_free;
	return pde;
out_free:
	kfree(pde);
out:
	return NULL;
}
EXPORT_SYMBOL(proc_create_data);

static void free_proc_entry(struct proc_dir_entry *de)
{
	proc_free_inum(de->low_ino);

	if (S_ISLNK(de->mode))
		kfree(de->data);
	kfree(de);
}

void pde_put(struct proc_dir_entry *pde)
{
	if (atomic_dec_and_test(&pde->count))
		free_proc_entry(pde);
}

/*
 * Remove a /proc entry and free it if it's not currently in use.
 */
void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
{
	struct proc_dir_entry **p;
	struct proc_dir_entry *de = NULL;
	const char *fn = name;
	unsigned int len;

	spin_lock(&proc_subdir_lock);
	if (__xlate_proc_name(name, &parent, &fn) != 0) {
		spin_unlock(&proc_subdir_lock);
		return;
	}
	len = strlen(fn);

	for (p = &parent->subdir; *p; p = &(*p)->next) {
		if (proc_match(len, fn, *p)) {
			de = *p;
			*p = de->next;
			de->next = NULL;
			break;
		}
	}
	spin_unlock(&proc_subdir_lock);
	if (!de) {
		WARN(1, "name '%s'\n", name);
		return;
	}

	spin_lock(&de->pde_unload_lock);
	/*
	 * Stop accepting new callers into module. If you're
	 * dynamically allocating ->proc_fops, save a pointer somewhere.
	 */
	de->proc_fops = NULL;
	/* Wait until all existing callers into module are done. */
	if (de->pde_users > 0) {
		DECLARE_COMPLETION_ONSTACK(c);

		if (!de->pde_unload_completion)
			de->pde_unload_completion = &c;

		spin_unlock(&de->pde_unload_lock);

		wait_for_completion(de->pde_unload_completion);

		spin_lock(&de->pde_unload_lock);
	}

	while (!list_empty(&de->pde_openers)) {
		struct pde_opener *pdeo;

		pdeo = list_first_entry(&de->pde_openers, struct pde_opener, lh);
		list_del(&pdeo->lh);
		spin_unlock(&de->pde_unload_lock);
		pdeo->release(pdeo->inode, pdeo->file);
		kfree(pdeo);
		spin_lock(&de->pde_unload_lock);
	}
	spin_unlock(&de->pde_unload_lock);

	if (S_ISDIR(de->mode))
		parent->nlink--;
	de->nlink = 0;
	WARN(de->subdir, KERN_WARNING "%s: removing non-empty directory "
			"'%s/%s', leaking at least '%s'\n", __func__,
			de->parent->name, de->name, de->subdir->name);
	pde_put(de);
}
EXPORT_SYMBOL(remove_proc_entry);
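
/*
 * Example: a minimal read_proc callback following style 0) of the protocol
 * documented in __proc_file_read() above, wired up with create_proc_entry().
 * This is a sketch only, kept under "#if 0" so it is never compiled; the
 * names example_value, example_read_proc() and example_read_init() are
 * hypothetical and not entries this file actually provides.
 */
#if 0
static int example_value = 42;

static int example_read_proc(char *page, char **start, off_t off,
			     int count, int *eof, void *data)
{
	int len;

	/*
	 * Style 0: leave *start NULL and write the whole contents at the
	 * beginning of the buffer; __proc_file_read() skips "off" bytes
	 * itself before copying to user space.
	 */
	len = snprintf(page, PAGE_SIZE, "value: %d\n", *(int *)data);

	/* Everything fits in one page, so signal end of file. */
	*eof = 1;

	/* Return the number of bytes from the start of the buffer. */
	return len;
}

static int __init example_read_init(void)
{
	struct proc_dir_entry *entry;

	entry = create_proc_entry("example_value", S_IRUGO, NULL);
	if (!entry)
		return -ENOMEM;
	entry->read_proc = example_read_proc;
	entry->data = &example_value;
	return 0;
}
#endif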
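
/*
 * Example: the seq_file style recommended by the comment at the top of
 * __proc_file_read(), using proc_create_data().  A sketch only (kept under
 * "#if 0"): it assumes an additional #include <linux/seq_file.h>, and the
 * example_seq_* names are hypothetical.
 */
#if 0
static int example_seq_value = 42;

static int example_seq_show(struct seq_file *m, void *v)
{
	/* m->private is the pointer handed to single_open() below */
	seq_printf(m, "value: %d\n", *(int *)m->private);
	return 0;
}

static int example_seq_open(struct inode *inode, struct file *file)
{
	/* PDE(inode)->data is whatever was passed to proc_create_data() */
	return single_open(file, example_seq_show, PDE(inode)->data);
}

static const struct file_operations example_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= example_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init example_seq_init(void)
{
	if (!proc_create_data("example_seq", S_IRUGO, NULL,
			      &example_seq_fops, &example_seq_value))
		return -ENOMEM;
	return 0;
}
#endif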