/*
 * proc/fs/generic.c --- generic routines for the proc-fs
 *
 * This file contains generic proc-fs routines for handling
 * directories and files.
 *
 * Copyright (C) 1991, 1992 Linus Torvalds.
 * Copyright (C) 1997 Theodore Ts'o
 */

#include <linux/errno.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/namei.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <asm/uaccess.h>

#include "internal.h"

/* Protects the ->subdir / ->next sibling chains of every proc_dir_entry. */
DEFINE_SPINLOCK(proc_subdir_lock);

/*
 * Return non-zero iff @de's name is exactly the @len bytes at @name.
 * Lengths are compared first so memcmp() never reads past either name.
 */
static int proc_match(int len, const char *name, struct proc_dir_entry *de)
{
	if (de->namelen != len)
		return 0;
	return !memcmp(name, de->name, len);
}

/* buffer size is one page but our output routines use some slack for overruns */
#define PROC_BLOCK_SIZE	(PAGE_SIZE - 1024)

/*
 * ->read for legacy proc entries that supply a ->read_proc callback.
 * Repeatedly calls ->read_proc into a scratch page and copies the result
 * to user space, interpreting the callback's *start convention as
 * documented in the large comment below.  Offsets are clamped to
 * MAX_NON_LFS (legacy off_t range).
 */
static ssize_t
proc_file_read(struct file *file, char __user *buf, size_t nbytes,
	       loff_t *ppos)
{
	struct inode * inode = file->f_path.dentry->d_inode;
	char *page;			/* one scratch page filled by ->read_proc */
	ssize_t retval=0;		/* bytes copied so far, or first error */
	int eof=0;			/* set by ->read_proc when it has no more data */
	ssize_t n, count;
	char *start;
	struct proc_dir_entry * dp;
	unsigned long long pos;

	/*
	 * Gaah, please just use "seq_file" instead. The legacy /proc
	 * interfaces cut loff_t down to off_t for reads, and ignore
	 * the offset entirely for writes..
	 */
	pos = *ppos;
	if (pos > MAX_NON_LFS)
		return 0;
	if (nbytes > MAX_NON_LFS - pos)
		nbytes = MAX_NON_LFS - pos;

	dp = PDE(inode);
	if (!(page = (char*) __get_free_page(GFP_TEMPORARY)))
		return -ENOMEM;

	while ((nbytes > 0) && !eof) {
		/* never ask the callback for more than the page (minus slack) */
		count = min_t(size_t, PROC_BLOCK_SIZE, nbytes);

		start = NULL;
		if (dp->read_proc) {
			/*
			 * How to be a proc read function
			 * ------------------------------
			 * Prototype:
			 *    int f(char *buffer, char **start, off_t offset,
			 *          int count, int *peof, void *dat)
			 *
			 * Assume that the buffer is "count" bytes in size.
			 *
			 * If you know you have supplied all the data you
			 * have, set *peof.
			 *
			 * You have three ways to return data:
			 * 0) Leave *start = NULL.  (This is the default.)
			 *    Put the data of the requested offset at that
			 *    offset within the buffer.  Return the number (n)
			 *    of bytes there are from the beginning of the
			 *    buffer up to the last byte of data.  If the
			 *    number of supplied bytes (= n - offset) is
			 *    greater than zero and you didn't signal eof
			 *    and the reader is prepared to take more data
			 *    you will be called again with the requested
			 *    offset advanced by the number of bytes
			 *    absorbed.  This interface is useful for files
			 *    no larger than the buffer.
			 * 1) Set *start = an unsigned long value less than
			 *    the buffer address but greater than zero.
			 *    Put the data of the requested offset at the
			 *    beginning of the buffer.  Return the number of
			 *    bytes of data placed there.  If this number is
			 *    greater than zero and you didn't signal eof
			 *    and the reader is prepared to take more data
			 *    you will be called again with the requested
			 *    offset advanced by *start.  This interface is
			 *    useful when you have a large file consisting
			 *    of a series of blocks which you want to count
			 *    and return as wholes.
			 *    (Hack by Paul.Russell@rustcorp.com.au)
			 * 2) Set *start = an address within the buffer.
			 *    Put the data of the requested offset at *start.
			 *    Return the number of bytes of data placed there.
			 *    If this number is greater than zero and you
			 *    didn't signal eof and the reader is prepared to
			 *    take more data you will be called again with the
			 *    requested offset advanced by the number of bytes
			 *    absorbed.
			 */
			n = dp->read_proc(page, &start, *ppos,
					  count, &eof, dp->data);
		} else
			break;

		if (n == 0)   /* end of file */
			break;
		if (n < 0) {  /* error */
			if (retval == 0)
				retval = n;
			break;
		}

		if (start == NULL) {
			/* case 0 above: data lives at offset *ppos in the page */
			if (n > PAGE_SIZE) {
				printk(KERN_ERR
				       "proc_file_read: Apparent buffer overflow!\n");
				n = PAGE_SIZE;
			}
			n -= *ppos;
			if (n <= 0)
				break;
			if (n > count)
				n = count;
			start = page + *ppos;
		} else if (start < page) {
			/* case 1 above: *start is a block count, data at page start */
			if (n > PAGE_SIZE) {
				printk(KERN_ERR
				       "proc_file_read: Apparent buffer overflow!\n");
				n = PAGE_SIZE;
			}
			if (n > count) {
				/*
				 * Don't reduce n because doing so might
				 * cut off part of a data block.
				 */
				printk(KERN_WARNING
				       "proc_file_read: Read count exceeded\n");
			}
		} else /* start >= page */ {
			/* case 2 above: data begins at *start within the page */
			unsigned long startoff = (unsigned long)(start - page);
			if (n > (PAGE_SIZE - startoff)) {
				printk(KERN_ERR
				       "proc_file_read: Apparent buffer overflow!\n");
				n = PAGE_SIZE - startoff;
			}
			if (n > count)
				n = count;
		}

		/* copy_to_user returns the number of bytes NOT copied */
		n -= copy_to_user(buf, start < page ? page : start, n);
		if (n == 0) {
			if (retval == 0)
				retval = -EFAULT;
			break;
		}

		/* advance by block count (case 1) or by bytes copied otherwise */
		*ppos += start < page ? (unsigned long)start : n;
		nbytes -= n;
		buf += n;
		retval += n;
	}
	free_page((unsigned long) page);
	return retval;
}

/*
 * ->write for legacy proc entries: forwards straight to ->write_proc,
 * ignoring the file offset (see FIXME below); -EIO if no handler is set.
 */
static ssize_t
proc_file_write(struct file *file, const char __user *buffer,
		size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct proc_dir_entry * dp;

	dp = PDE(inode);

	if (!dp->write_proc)
		return -EIO;

	/* FIXME: does this routine need ppos?  probably... */
	return dp->write_proc(file, buffer, count, dp->data);
}

/*
 * ->llseek: supports SEEK_SET (0) and SEEK_CUR (1) only, and keeps the
 * resulting offset within [0, MAX_NON_LFS] to match the read path above.
 */
static loff_t
proc_file_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t retval = -EINVAL;
	switch (orig) {
	case 1:
		offset += file->f_pos;
	/* fallthrough */
	case 0:
		if (offset < 0 || offset > MAX_NON_LFS)
			break;
		file->f_pos = retval = offset;
	}
	return retval;
}

static const struct file_operations proc_file_operations = {
	.llseek		= proc_file_lseek,
	.read		= proc_file_read,
	.write		= proc_file_write,
};

/*
 * ->setattr: apply the change to the inode, then mirror uid/gid/mode
 * into the proc_dir_entry so future inode instantiations keep them.
 */
static int proc_notify_change(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = dentry->d_inode;
	struct proc_dir_entry *de = PDE(inode);
	int error;

	error = inode_change_ok(inode, iattr);
	if (error)
		goto out;

	error = inode_setattr(inode, iattr);
	if (error)
		goto out;

	de->uid = inode->i_uid;
	de->gid = inode->i_gid;
	de->mode = inode->i_mode;
out:
	return error;
}

/*
 * ->getattr: refresh i_nlink from the proc_dir_entry (it can change as
 * subdirectories are added/removed) before filling in the kstat.
 */
static int proc_getattr(struct vfsmount *mnt, struct dentry *dentry,
			struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct proc_dir_entry *de = PROC_I(inode)->pde;
	if (de && de->nlink)
		inode->i_nlink = de->nlink;

	generic_fillattr(inode, stat);
	return 0;
}

static const struct inode_operations proc_file_inode_operations = {
	.setattr	= proc_notify_change,
};

/*
 * This function parses a
name such as "tty/driver/serial", and
 * returns the struct proc_dir_entry for "/proc/tty/driver", and
 * returns "serial" in residual.
 *
 * On entry *ret may name a starting directory (NULL means proc_root);
 * on success it points at the deepest directory found.  Returns 0 or
 * -ENOENT if some intermediate component does not exist.  The whole
 * walk runs under proc_subdir_lock.
 */
static int xlate_proc_name(const char *name,
			   struct proc_dir_entry **ret, const char **residual)
{
	const char *cp = name, *next;
	struct proc_dir_entry *de;
	int len;
	int rtn = 0;

	de = *ret;
	if (!de)
		de = &proc_root;

	spin_lock(&proc_subdir_lock);
	while (1) {
		next = strchr(cp, '/');
		if (!next)
			break;

		/* match the component [cp, next) against de's children */
		len = next - cp;
		for (de = de->subdir; de ; de = de->next) {
			if (proc_match(len, cp, de))
				break;
		}
		if (!de) {
			rtn = -ENOENT;
			goto out;
		}
		cp += len + 1;
	}
	*residual = cp;
	*ret = de;
out:
	spin_unlock(&proc_subdir_lock);
	return rtn;
}

static DEFINE_IDA(proc_inum_ida);
static DEFINE_SPINLOCK(proc_inum_lock); /* protects the above */

#define PROC_DYNAMIC_FIRST 0xF0000000U

/*
 * Return an inode number between PROC_DYNAMIC_FIRST and
 * 0xffffffff, or zero on failure.
 */
static unsigned int get_inode_number(void)
{
	unsigned int i;
	int error;

retry:
	/* preload the IDA; -EAGAIN from ida_get_new means "preload again" */
	if (ida_pre_get(&proc_inum_ida, GFP_KERNEL) == 0)
		return 0;

	spin_lock(&proc_inum_lock);
	error = ida_get_new(&proc_inum_ida, &i);
	spin_unlock(&proc_inum_lock);
	if (error == -EAGAIN)
		goto retry;
	else if (error)
		return 0;

	/* reject ids whose offset form would wrap past 0xffffffff */
	if (i > UINT_MAX - PROC_DYNAMIC_FIRST) {
		spin_lock(&proc_inum_lock);
		ida_remove(&proc_inum_ida, i);
		spin_unlock(&proc_inum_lock);
		return 0;
	}
	return PROC_DYNAMIC_FIRST + i;
}

/* Give an inode number obtained from get_inode_number() back to the IDA. */
static void release_inode_number(unsigned int inum)
{
	spin_lock(&proc_inum_lock);
	ida_remove(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST);
	spin_unlock(&proc_inum_lock);
}

/*
 * ->follow_link for proc symlinks: the target string is stored in the
 * entry's ->data (allocated by proc_symlink()).
 */
static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, PDE(dentry->d_inode)->data);
	return NULL;
}

static const struct inode_operations proc_link_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= proc_follow_link,
};

/*
 * As some entries in /proc are volatile, we want to
 * get rid of unused dentries. This could be made
 * smarter: we could keep a "volatile" flag in the
 * inode to indicate which ones to keep.
 */
static int proc_delete_dentry(struct dentry * dentry)
{
	/* always drop unused proc dentries (see comment above) */
	return 1;
}

static struct dentry_operations proc_dentry_operations =
{
	.d_delete	= proc_delete_dentry,
};

/*
 * Don't create negative dentries here, return -ENOENT by hand
 * instead.
 */
struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir,
		struct dentry *dentry)
{
	struct inode *inode = NULL;
	int error = -ENOENT;

	spin_lock(&proc_subdir_lock);
	for (de = de->subdir; de ; de = de->next) {
		if (de->namelen != dentry->d_name.len)
			continue;
		if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
			unsigned int ino;

			ino = de->low_ino;
			/*
			 * Pin the entry before dropping the lock; the
			 * reference is handed to the inode by
			 * proc_get_inode(), or dropped below on failure.
			 */
			de_get(de);
			spin_unlock(&proc_subdir_lock);
			error = -EINVAL;
			inode = proc_get_inode(dir->i_sb, ino, de);
			goto out_unlock;
		}
	}
	spin_unlock(&proc_subdir_lock);
out_unlock:

	if (inode) {
		dentry->d_op = &proc_dentry_operations;
		d_add(dentry, inode);
		return NULL;
	}
	/* lookup matched but inode creation failed: drop our reference */
	if (de)
		de_put(de);
	return ERR_PTR(error);
}

/* VFS ->lookup entry point: resolve @dentry among PDE(dir)'s children. */
struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
		struct nameidata *nd)
{
	return proc_lookup_de(PDE(dir), dir, dentry);
}

/*
 * This returns non-zero if at EOF, so that the /proc
 * root directory can use this and check if it should
 * continue with the <pid> entries..
 *
 * Note that the VFS-layer doesn't care about the return
 * value of the readdir() call, as long as it's non-negative
 * for success..
 */
int proc_readdir_de(struct proc_dir_entry *de, struct file *filp, void *dirent,
		filldir_t filldir)
{
	unsigned int ino;
	int i;
	struct inode *inode = filp->f_path.dentry->d_inode;
	int ret = 0;

	ino = inode->i_ino;
	i = filp->f_pos;
	switch (i) {
	case 0:
		/* f_pos 0: emit "." */
		if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
			goto out;
		i++;
		filp->f_pos++;
		/* fall through */
	case 1:
		/* f_pos 1: emit ".." */
		if (filldir(dirent, "..", 2, i,
			    parent_ino(filp->f_path.dentry),
			    DT_DIR) < 0)
			goto out;
		i++;
		filp->f_pos++;
		/* fall through */
	default:
		/* f_pos >= 2: walk to the (f_pos - 2)'th child */
		spin_lock(&proc_subdir_lock);
		de = de->subdir;
		i -= 2;
		for (;;) {
			if (!de) {
				ret = 1;
				spin_unlock(&proc_subdir_lock);
				goto out;
			}
			if (!i)
				break;
			de = de->next;
			i--;
		}

		do {
			struct proc_dir_entry *next;

			/* filldir passes info to user space */
			de_get(de);
			/*
			 * Drop the lock around the callback (it may sleep /
			 * fault); the reference pins de, and ->next is only
			 * re-read after the lock is retaken.
			 */
			spin_unlock(&proc_subdir_lock);
			if (filldir(dirent, de->name, de->namelen, filp->f_pos,
				    de->low_ino, de->mode >> 12) < 0) {
				de_put(de);
				goto out;
			}
			spin_lock(&proc_subdir_lock);
			filp->f_pos++;
			next = de->next;
			de_put(de);
			de = next;
		} while (de);
		spin_unlock(&proc_subdir_lock);
	}
	ret = 1;
out:
	return ret;
}

/* VFS ->readdir entry point for proc directories. */
int proc_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct inode *inode = filp->f_path.dentry->d_inode;

	return proc_readdir_de(PDE(inode), filp, dirent, filldir);
}

/*
 * These are the generic /proc directory operations. They
 * use the in-memory "struct proc_dir_entry" tree to parse
 * the /proc directory.
 */
static const struct file_operations proc_dir_operations = {
	.llseek			= generic_file_llseek,
	.read			= generic_read_dir,
	.readdir		= proc_readdir,
};

/*
 * proc directories can do almost nothing..
 */
static const struct inode_operations proc_dir_inode_operations = {
	.lookup		= proc_lookup,
	.getattr	= proc_getattr,
	.setattr	= proc_notify_change,
};

/*
 * Assign @dp an inode number, fill in default fops/iops for its file
 * type (unless the caller supplied its own), and link it onto @dir's
 * child list.  Returns 0, or -EAGAIN if no inode number was available.
 * A duplicate name is warned about but still registered.
 */
static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
{
	unsigned int i;
	struct proc_dir_entry *tmp;

	i = get_inode_number();
	if (i == 0)
		return -EAGAIN;
	dp->low_ino = i;

	if (S_ISDIR(dp->mode)) {
		if (dp->proc_iops == NULL) {
			dp->proc_fops = &proc_dir_operations;
			dp->proc_iops = &proc_dir_inode_operations;
		}
		dir->nlink++;
	} else if (S_ISLNK(dp->mode)) {
		if (dp->proc_iops == NULL)
			dp->proc_iops = &proc_link_inode_operations;
	} else if (S_ISREG(dp->mode)) {
		if (dp->proc_fops == NULL)
			dp->proc_fops = &proc_file_operations;
		if (dp->proc_iops == NULL)
			dp->proc_iops = &proc_file_inode_operations;
	}

	spin_lock(&proc_subdir_lock);

	for (tmp = dir->subdir; tmp; tmp = tmp->next)
		if (strcmp(tmp->name, dp->name) == 0) {
			WARN(1, KERN_WARNING "proc_dir_entry '%s/%s' already registered\n",
				dir->name, dp->name);
			break;
		}

	/* push onto the head of dir's child list */
	dp->next = dir->subdir;
	dp->parent = dir;
	dir->subdir = dp;
	spin_unlock(&proc_subdir_lock);

	return 0;
}

/*
 * Allocate and initialise a proc_dir_entry for @name under *@parent
 * (resolving any path components via xlate_proc_name, which updates
 * *@parent to the final directory).  The entry's name is stored in the
 * same allocation, immediately after the struct.  Returns the entry or
 * NULL; the entry is NOT yet registered.
 */
static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
					  const char *name,
					  mode_t mode,
					  nlink_t nlink)
{
	struct proc_dir_entry *ent = NULL;
	const char *fn = name;
	int len;

	/* make sure name is valid */
	if (!name || !strlen(name)) goto out;

	if (xlate_proc_name(name, parent, &fn) != 0)
		goto out;

	/* At this point there must not be any '/' characters beyond *fn */
	if (strchr(fn, '/'))
		goto out;

	len = strlen(fn);

	/* one allocation: struct followed by the NUL-terminated name */
	ent = kmalloc(sizeof(struct proc_dir_entry) + len + 1, GFP_KERNEL);
	if (!ent) goto out;

	memset(ent, 0, sizeof(struct proc_dir_entry));
	memcpy(((char *) ent) + sizeof(struct proc_dir_entry), fn, len + 1);
	ent->name = ((char *) ent) + sizeof(*ent);
	ent->namelen = len;
	ent->mode = mode;
	ent->nlink = nlink;
	atomic_set(&ent->count, 1);
	ent->pde_users = 0;
	spin_lock_init(&ent->pde_unload_lock);
	ent->pde_unload_completion = NULL;
	INIT_LIST_HEAD(&ent->pde_openers);
out:
	return ent;
}

/*
 * Create a symlink @name -> @dest under @parent.  The target string is
 * copied into ->data (freed again by free_proc_entry for S_ISLNK
 * entries).  Returns the entry or NULL on any failure.
 */
struct proc_dir_entry *proc_symlink(const char *name,
		struct proc_dir_entry *parent, const char *dest)
{
	struct proc_dir_entry *ent;

	ent = __proc_create(&parent, name,
			  (S_IFLNK | S_IRUGO | S_IWUGO | S_IXUGO),1);

	if (ent) {
		ent->data = kmalloc((ent->size=strlen(dest))+1, GFP_KERNEL);
		if (ent->data) {
			strcpy((char*)ent->data,dest);
			if (proc_register(parent, ent) < 0) {
				kfree(ent->data);
				kfree(ent);
				ent = NULL;
			}
		} else {
			kfree(ent);
			ent = NULL;
		}
	}
	return ent;
}

/* Create a directory @name with permission bits @mode under @parent. */
struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode,
		struct proc_dir_entry *parent)
{
	struct proc_dir_entry *ent;

	ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
	if (ent) {
		if (proc_register(parent, ent) < 0) {
			kfree(ent);
			ent = NULL;
		}
	}
	return ent;
}

/*
 * Like proc_mkdir(), but stashes @net in ->data so per-namespace code
 * can recover its struct net from the entry.
 */
struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name,
		struct proc_dir_entry *parent)
{
	struct proc_dir_entry *ent;

	ent = __proc_create(&parent, name, S_IFDIR | S_IRUGO | S_IXUGO, 2);
	if (ent) {
		ent->data = net;
		if (proc_register(parent, ent) < 0) {
			kfree(ent);
			ent = NULL;
		}
	}
	return ent;
}
EXPORT_SYMBOL_GPL(proc_net_mkdir);

/* Create a directory @name under @parent with default r-x permissions. */
struct proc_dir_entry *proc_mkdir(const char *name,
		struct proc_dir_entry *parent)
{
	return proc_mkdir_mode(name, S_IRUGO | S_IXUGO, parent);
}

/*
 * Legacy creation interface: create and register an entry of the given
 * @mode under @parent, filling in default type and permission bits when
 * the caller left them zero.  Returns the entry or NULL.
 */
struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
					 struct proc_dir_entry *parent)
{
	struct proc_dir_entry *ent;
	nlink_t nlink;

	if (S_ISDIR(mode)) {
		if ((mode & S_IALLUGO) == 0)
			mode |= S_IRUGO | S_IXUGO;
		nlink = 2;
	} else {
		if ((mode & S_IFMT) == 0)
			mode |= S_IFREG;
		if ((mode & S_IALLUGO) == 0)
			mode |= S_IRUGO;
		nlink = 1;
	}

	ent = __proc_create(&parent, name, mode, nlink);
	if (ent) {
		if (proc_register(parent, ent) < 0) {
			kfree(ent);
			ent = NULL;
		}
	}
	return ent;
}

/*
 * Preferred creation interface: like create_proc_entry() but installs
 * @proc_fops and @data before registration, so the entry is never
 * visible with default ops.  Returns the entry or NULL.
 */
struct proc_dir_entry *proc_create_data(const char *name, mode_t mode,
					struct proc_dir_entry *parent,
					const struct file_operations *proc_fops,
					void *data)
{
	struct proc_dir_entry *pde;
	nlink_t nlink;

	if (S_ISDIR(mode)) {
		if ((mode & S_IALLUGO) == 0)
			mode |= S_IRUGO | S_IXUGO;
		nlink = 2;
	} else {
		if ((mode & S_IFMT) == 0)
			mode |= S_IFREG;
		if ((mode & S_IALLUGO) == 0)
			mode |= S_IRUGO;
		nlink = 1;
	}

	pde = __proc_create(&parent, name, mode, nlink);
	if (!pde)
		goto out;
	pde->proc_fops = proc_fops;
	pde->data = data;
	if (proc_register(parent, pde) < 0)
		goto out_free;
	return pde;
out_free:
	kfree(pde);
out:
	return NULL;
}

/*
 * Final teardown of an entry: return its inode number to the IDA and
 * free it (plus the symlink target stored in ->data, if any).  Entries
 * below PROC_DYNAMIC_FIRST are static and never freed.
 */
void free_proc_entry(struct proc_dir_entry *de)
{
	unsigned int ino = de->low_ino;

	if (ino < PROC_DYNAMIC_FIRST)
		return;

	release_inode_number(ino);

	if (S_ISLNK(de->mode))
		kfree(de->data);
	kfree(de);
}

/*
 * Remove a /proc entry and free it if it's not currently in use.
 */
void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
{
	struct proc_dir_entry **p;
	struct proc_dir_entry *de = NULL;
	const char *fn = name;
	int len;

	if (xlate_proc_name(name, &parent, &fn) != 0)
		return;
	len = strlen(fn);

	/* unlink the entry from its parent's child list */
	spin_lock(&proc_subdir_lock);
	for (p = &parent->subdir; *p; p=&(*p)->next ) {
		if (proc_match(len, fn, *p)) {
			de = *p;
			*p = de->next;
			de->next = NULL;
			break;
		}
	}
	spin_unlock(&proc_subdir_lock);
	if (!de)
		return;

	spin_lock(&de->pde_unload_lock);
	/*
	 * Stop accepting new callers into module. If you're
	 * dynamically allocating ->proc_fops, save a pointer somewhere.
	 */
	de->proc_fops = NULL;
	/* Wait until all existing callers into module are done. */
	if (de->pde_users > 0) {
		DECLARE_COMPLETION_ONSTACK(c);

		if (!de->pde_unload_completion)
			de->pde_unload_completion = &c;

		spin_unlock(&de->pde_unload_lock);

		/* the last in-flight user signals this completion */
		wait_for_completion(de->pde_unload_completion);

		goto continue_removing;
	}
	spin_unlock(&de->pde_unload_lock);

continue_removing:
	/* force ->release on every file still holding this entry open */
	spin_lock(&de->pde_unload_lock);
	while (!list_empty(&de->pde_openers)) {
		struct pde_opener *pdeo;

		pdeo = list_first_entry(&de->pde_openers, struct pde_opener, lh);
		list_del(&pdeo->lh);
		/* drop the lock around the callback; it may sleep */
		spin_unlock(&de->pde_unload_lock);
		pdeo->release(pdeo->inode, pdeo->file);
		kfree(pdeo);
		spin_lock(&de->pde_unload_lock);
	}
	spin_unlock(&de->pde_unload_lock);

	if (S_ISDIR(de->mode))
		parent->nlink--;
	de->nlink = 0;
	WARN(de->subdir, KERN_WARNING "%s: removing non-empty directory "
			"'%s/%s', leaking at least '%s'\n", __func__,
			de->parent->name, de->name, de->subdir->name);
	/* drop the creation reference; free now unless inodes still pin it */
	if (atomic_dec_and_test(&de->count))
		free_proc_entry(de);
}