/*
 * fs/dcache.c
 *
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 */

/*
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */

#include <linux/syscalls.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <asm/uaccess.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include "internal.h"


int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(dcache_lock);

static struct kmem_cache *dentry_cache __read_mostly;

#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))

/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.
 */
#define D_HASHBITS	d_hash_shift
#define D_HASHMASK	d_hash_mask

static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;
static struct hlist_head *dentry_hashtable __read_mostly;
static LIST_HEAD(dentry_unused);

/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

static void __d_free(struct dentry *dentry)
{
	if (dname_external(dentry))
		kfree(dentry->d_name.name);
	kmem_cache_free(dentry_cache, dentry);
}

static void d_callback(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
	__d_free(dentry);
}

/*
 * no dcache_lock, please. The caller must decrement dentry_stat.nr_dentry
 * inside dcache_lock.
 */
static void d_free(struct dentry *dentry)
{
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);
	/* if dentry was never inserted into hash, immediate free is OK */
	if (hlist_unhashed(&dentry->d_hash))
		__d_free(dentry);
	else
		call_rcu(&dentry->d_u.d_rcu, d_callback);
}
/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined.
 * Called with dcache_lock and per dentry lock held, drops both.
 */
static void dentry_iput(struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	if (inode) {
		dentry->d_inode = NULL;
		list_del_init(&dentry->d_alias);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_lock);
		if (!inode->i_nlink)
			fsnotify_inoderemove(inode);
		if (dentry->d_op && dentry->d_op->d_iput)
			dentry->d_op->d_iput(dentry, inode);
		else
			iput(inode);
	} else {
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_lock);
	}
}

/**
 * d_kill - kill dentry and return parent
 * @dentry: dentry to kill
 *
 * Called with dcache_lock and d_lock, releases both. The dentry must
 * already be unhashed and removed from the LRU.
 *
 * If this is the root of the dentry tree, return NULL.
 */
static struct dentry *d_kill(struct dentry *dentry)
{
	struct dentry *parent;

	list_del(&dentry->d_u.d_child);
	dentry_stat.nr_dentry--;	/* For d_free, below */
	/* drops the locks, at that point nobody can reach this dentry */
	dentry_iput(dentry);
	parent = dentry->d_parent;
	d_free(dentry);
	return dentry == parent ? NULL : parent;
}

/*
 * This is dput
 *
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */

/*
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 *
 * no dcache lock, please.
 */

void dput(struct dentry *dentry)
{
	if (!dentry)
		return;

repeat:
	if (atomic_read(&dentry->d_count) == 1)
		might_sleep();
	if (!atomic_dec_and_lock(&dentry->d_count, &dcache_lock))
		return;

	spin_lock(&dentry->d_lock);
	if (atomic_read(&dentry->d_count)) {
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_lock);
		return;
	}

	/*
	 * AV: ->d_delete() is _NOT_ allowed to block now.
	 */
	if (dentry->d_op && dentry->d_op->d_delete) {
		if (dentry->d_op->d_delete(dentry))
			goto unhash_it;
	}
	/* Unreachable? Get rid of it */
	if (d_unhashed(dentry))
		goto kill_it;
	if (list_empty(&dentry->d_lru)) {
		dentry->d_flags |= DCACHE_REFERENCED;
		list_add(&dentry->d_lru, &dentry_unused);
		dentry_stat.nr_unused++;
	}
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_lock);
	return;

unhash_it:
	__d_drop(dentry);
kill_it:
	/* If dentry was on the d_lru list
	 * delete it from there
	 */
	if (!list_empty(&dentry->d_lru)) {
		list_del(&dentry->d_lru);
		dentry_stat.nr_unused--;
	}
	dentry = d_kill(dentry);
	if (dentry)
		goto repeat;
}
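/*
 * Illustrative sketch (not part of the original file): the usual
 * reference-counting discipline around dput(). Any code that stashes a
 * dentry pointer pins it with dget() and releases it with dput() when
 * done; the final dput() is what may park the dentry on the unused
 * list or free it via d_kill(). The names example_hold(),
 * example_release() and struct example_ctx are hypothetical.
 */
#if 0	/* example only, not compiled */
struct example_ctx {
	struct dentry *dentry;
};

static void example_hold(struct example_ctx *ctx, struct dentry *dentry)
{
	ctx->dentry = dget(dentry);	/* pin: d_count++ */
}

static void example_release(struct example_ctx *ctx)
{
	dput(ctx->dentry);		/* may sleep; no dcache_lock held */
	ctx->dentry = NULL;
}
#endif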
/**
 * d_invalidate - invalidate a dentry
 * @dentry: dentry to invalidate
 *
 * Try to invalidate the dentry if it turns out to be
 * possible. If there are other dentries that can be
 * reached through this one we can't delete it and we
 * return -EBUSY. On success we return 0.
 *
 * no dcache lock.
 */

int d_invalidate(struct dentry *dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dcache_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dcache_lock);
		return 0;
	}
	/*
	 * Check whether to do a partial shrink_dcache
	 * to get rid of unused child entries.
	 */
	if (!list_empty(&dentry->d_subdirs)) {
		spin_unlock(&dcache_lock);
		shrink_dcache_parent(dentry);
		spin_lock(&dcache_lock);
	}

	/*
	 * Somebody else still using it?
	 *
	 * If it's a directory, we can't drop it
	 * for fear of somebody re-populating it
	 * with children (even though dropping it
	 * would make it unreachable from the root,
	 * we might still populate it if it was a
	 * working directory or similar).
	 */
	spin_lock(&dentry->d_lock);
	if (atomic_read(&dentry->d_count) > 1) {
		if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dcache_lock);
			return -EBUSY;
		}
	}

	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_lock);
	return 0;
}

/* This should be called _only_ with dcache_lock held */

static inline struct dentry *__dget_locked(struct dentry *dentry)
{
	atomic_inc(&dentry->d_count);
	if (!list_empty(&dentry->d_lru)) {
		dentry_stat.nr_unused--;
		list_del_init(&dentry->d_lru);
	}
	return dentry;
}

struct dentry *dget_locked(struct dentry *dentry)
{
	return __dget_locked(dentry);
}

/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 * @want_discon: flag, used by d_splice_alias, to request
 *	that only a DISCONNECTED alias be returned.
 *
 * If inode has a hashed alias, or is a directory and has any alias,
 * acquire the reference to alias and return it. Otherwise return NULL.
 * Notice that if inode is a directory there can be only one alias and
 * it can be unhashed only if it has no children, or if it is the root
 * of a filesystem.
 *
 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 * any other hashed alias over that one unless @want_discon is set,
 * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
 */

static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
{
	struct list_head *head, *next, *tmp;
	struct dentry *alias, *discon_alias = NULL;

	head = &inode->i_dentry;
	next = inode->i_dentry.next;
	while (next != head) {
		tmp = next;
		next = tmp->next;
		prefetch(next);
		alias = list_entry(tmp, struct dentry, d_alias);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED))
				discon_alias = alias;
			else if (!want_discon) {
				__dget_locked(alias);
				return alias;
			}
		}
	}
	if (discon_alias)
		__dget_locked(discon_alias);
	return discon_alias;
}

struct dentry *d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!list_empty(&inode->i_dentry)) {
		spin_lock(&dcache_lock);
		de = __d_find_alias(inode, 0);
		spin_unlock(&dcache_lock);
	}
	return de;
}
/*
 * Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&dcache_lock);
	list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
		spin_lock(&dentry->d_lock);
		if (!atomic_read(&dentry->d_count)) {
			__dget_locked(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dcache_lock);
			dput(dentry);
			goto restart;
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&dcache_lock);
}
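/*
 * Illustrative sketch (not part of the original file): a network
 * filesystem that learns its server-side object has gone stale might
 * drop all unused cached dentries for the inode via d_prune_aliases().
 * Per the WARNING above, the caller must already hold an inode
 * reference. example_inode_went_stale() is a hypothetical callback.
 */
#if 0	/* example only, not compiled */
static void example_inode_went_stale(struct inode *inode)
{
	/* we hold a reference to inode, so this is safe */
	d_prune_aliases(inode);
	/* dentries still pinned by users remain; ->d_revalidate()
	 * is expected to catch those on their next lookup */
}
#endif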
/*
 * Throw away a dentry - free the inode, dput the parent. This requires that
 * the LRU list has already been removed.
 *
 * Try to prune ancestors as well. This is necessary to prevent
 * quadratic behavior of shrink_dcache_parent(), but is also expected
 * to be beneficial in reducing dentry cache fragmentation.
 *
 * Called with dcache_lock, drops it and then regains.
 * Called with dentry->d_lock held, drops it.
 */
static void prune_one_dentry(struct dentry *dentry)
{
	__d_drop(dentry);
	dentry = d_kill(dentry);

	/*
	 * Prune ancestors. Locking is simpler than in dput(),
	 * because dcache_lock needs to be taken anyway.
	 */
	spin_lock(&dcache_lock);
	while (dentry) {
		if (!atomic_dec_and_lock(&dentry->d_count, &dentry->d_lock))
			return;

		if (dentry->d_op && dentry->d_op->d_delete)
			dentry->d_op->d_delete(dentry);
		if (!list_empty(&dentry->d_lru)) {
			list_del(&dentry->d_lru);
			dentry_stat.nr_unused--;
		}
		__d_drop(dentry);
		dentry = d_kill(dentry);
		spin_lock(&dcache_lock);
	}
}

/**
 * prune_dcache - shrink the dcache
 * @count: number of entries to try and free
 * @sb: if given, ignore dentries for other superblocks
 *	which are being unmounted.
 *
 * Shrink the dcache. This is done when we need
 * more memory, or simply when we need to unmount
 * something (at which point we need to unuse
 * all dentries).
 *
 * This function may fail to free any resources if
 * all the dentries are in use.
 */

static void prune_dcache(int count, struct super_block *sb)
{
	spin_lock(&dcache_lock);
	for (; count ; count--) {
		struct dentry *dentry;
		struct list_head *tmp;
		struct rw_semaphore *s_umount;

		cond_resched_lock(&dcache_lock);

		tmp = dentry_unused.prev;
		if (sb) {
			/* Try to find a dentry for this sb, but don't try
			 * too hard, if they aren't near the tail they will
			 * be moved down again soon
			 */
			int skip = count;
			while (skip && tmp != &dentry_unused &&
			    list_entry(tmp, struct dentry, d_lru)->d_sb != sb) {
				skip--;
				tmp = tmp->prev;
			}
		}
		if (tmp == &dentry_unused)
			break;
		list_del_init(tmp);
		prefetch(dentry_unused.prev);
		dentry_stat.nr_unused--;
		dentry = list_entry(tmp, struct dentry, d_lru);

		spin_lock(&dentry->d_lock);
		/*
		 * We found an inuse dentry which was not removed from
		 * dentry_unused because of laziness during lookup. Do not free
		 * it - just keep it off the dentry_unused list.
		 */
		if (atomic_read(&dentry->d_count)) {
			spin_unlock(&dentry->d_lock);
			continue;
		}
		/* If the dentry was recently referenced, don't free it. */
		if (dentry->d_flags & DCACHE_REFERENCED) {
			dentry->d_flags &= ~DCACHE_REFERENCED;
			list_add(&dentry->d_lru, &dentry_unused);
			dentry_stat.nr_unused++;
			spin_unlock(&dentry->d_lock);
			continue;
		}
		/*
		 * If the dentry is not DCACHE_REFERENCED, it is time
		 * to remove it from the dcache, provided the super block is
		 * NULL (which means we are trying to reclaim memory)
		 * or this dentry belongs to the same super block that
		 * we want to shrink.
		 */
		/*
		 * If this dentry is for "my" filesystem, then I can prune it
		 * without taking the s_umount lock (I already hold it).
		 */
		if (sb && dentry->d_sb == sb) {
			prune_one_dentry(dentry);
			continue;
		}
		/*
		 * ...otherwise we need to be sure this filesystem isn't being
		 * unmounted, otherwise we could race with
		 * generic_shutdown_super(), and end up holding a reference to
		 * an inode while the filesystem is unmounted.
		 * So we try to get s_umount, and make sure s_root isn't NULL.
		 * (Take a local copy of s_umount to avoid a use-after-free of
		 * `dentry').
		 */
		s_umount = &dentry->d_sb->s_umount;
		if (down_read_trylock(s_umount)) {
			if (dentry->d_sb->s_root != NULL) {
				prune_one_dentry(dentry);
				up_read(s_umount);
				continue;
			}
			up_read(s_umount);
		}
		spin_unlock(&dentry->d_lock);
		/*
		 * Insert dentry at the head of the list as inserting at the
		 * tail leads to a cycle.
		 */
		list_add(&dentry->d_lru, &dentry_unused);
		dentry_stat.nr_unused++;
	}
	spin_unlock(&dcache_lock);
}
/*
 * Shrink the dcache for the specified super block.
 * This allows us to unmount a device without disturbing
 * the dcache for the other devices.
 *
 * This implementation makes just two traversals of the
 * unused list. On the first pass we move the selected
 * dentries to the most recent end, and on the second
 * pass we free them. The second pass must restart after
 * each dput(), but since the target dentries are all at
 * the end, it's really just a single traversal.
 */

/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This
 * is used to free the dcache before unmounting a file
 * system.
 */

void shrink_dcache_sb(struct super_block *sb)
{
	struct list_head *tmp, *next;
	struct dentry *dentry;

	/*
	 * Pass one ... move the dentries for the specified
	 * superblock to the most recent end of the unused list.
	 */
	spin_lock(&dcache_lock);
	list_for_each_prev_safe(tmp, next, &dentry_unused) {
		dentry = list_entry(tmp, struct dentry, d_lru);
		if (dentry->d_sb != sb)
			continue;
		list_move_tail(tmp, &dentry_unused);
	}

	/*
	 * Pass two ... free the dentries for this superblock.
	 */
repeat:
	list_for_each_prev_safe(tmp, next, &dentry_unused) {
		dentry = list_entry(tmp, struct dentry, d_lru);
		if (dentry->d_sb != sb)
			continue;
		dentry_stat.nr_unused--;
		list_del_init(tmp);
		spin_lock(&dentry->d_lock);
		if (atomic_read(&dentry->d_count)) {
			spin_unlock(&dentry->d_lock);
			continue;
		}
		prune_one_dentry(dentry);
		cond_resched_lock(&dcache_lock);
		goto repeat;
	}
	spin_unlock(&dcache_lock);
}
/*
 * destroy a single subtree of dentries for unmount
 * - see the comments on shrink_dcache_for_umount() for a description of the
 *   locking
 */
static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
{
	struct dentry *parent;
	unsigned detached = 0;

	BUG_ON(!IS_ROOT(dentry));

	/* detach this root from the system */
	spin_lock(&dcache_lock);
	if (!list_empty(&dentry->d_lru)) {
		dentry_stat.nr_unused--;
		list_del_init(&dentry->d_lru);
	}
	__d_drop(dentry);
	spin_unlock(&dcache_lock);

	for (;;) {
		/* descend to the first leaf in the current subtree */
		while (!list_empty(&dentry->d_subdirs)) {
			struct dentry *loop;

			/* this is a branch with children - detach all of them
			 * from the system in one go */
			spin_lock(&dcache_lock);
			list_for_each_entry(loop, &dentry->d_subdirs,
					    d_u.d_child) {
				if (!list_empty(&loop->d_lru)) {
					dentry_stat.nr_unused--;
					list_del_init(&loop->d_lru);
				}

				__d_drop(loop);
				cond_resched_lock(&dcache_lock);
			}
			spin_unlock(&dcache_lock);

			/* move to the first child */
			dentry = list_entry(dentry->d_subdirs.next,
					    struct dentry, d_u.d_child);
		}

		/* consume the dentries from this leaf up through its parents
		 * until we find one with children or run out altogether */
		do {
			struct inode *inode;

			if (atomic_read(&dentry->d_count) != 0) {
				printk(KERN_ERR
				       "BUG: Dentry %p{i=%lx,n=%s}"
				       " still in use (%d)"
				       " [unmount of %s %s]\n",
				       dentry,
				       dentry->d_inode ?
				       dentry->d_inode->i_ino : 0UL,
				       dentry->d_name.name,
				       atomic_read(&dentry->d_count),
				       dentry->d_sb->s_type->name,
				       dentry->d_sb->s_id);
				BUG();
			}

			parent = dentry->d_parent;
			if (parent == dentry)
				parent = NULL;
			else
				atomic_dec(&parent->d_count);

			list_del(&dentry->d_u.d_child);
			detached++;

			inode = dentry->d_inode;
			if (inode) {
				dentry->d_inode = NULL;
				list_del_init(&dentry->d_alias);
				if (dentry->d_op && dentry->d_op->d_iput)
					dentry->d_op->d_iput(dentry, inode);
				else
					iput(inode);
			}

			d_free(dentry);

			/* finished when we fall off the top of the tree,
			 * otherwise we ascend to the parent and move to the
			 * next sibling if there is one */
			if (!parent)
				goto out;

			dentry = parent;

		} while (list_empty(&dentry->d_subdirs));

		dentry = list_entry(dentry->d_subdirs.next,
				    struct dentry, d_u.d_child);
	}
out:
	/* several dentries were freed, need to correct nr_dentry */
	spin_lock(&dcache_lock);
	dentry_stat.nr_dentry -= detached;
	spin_unlock(&dcache_lock);
}

/*
 * destroy the dentries attached to a superblock on unmounting
 * - we don't need to use dentry->d_lock, and only need dcache_lock when
 *   removing the dentry from the system lists and hashes because:
 *   - the superblock is detached from all mountings and open files, so the
 *     dentry trees will not be rearranged by the VFS
 *   - s_umount is write-locked, so the memory pressure shrinker will ignore
 *     any dentries belonging to this superblock that it comes across
 *   - the filesystem itself is no longer permitted to rearrange the dentries
 *     in this superblock
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	if (down_read_trylock(&sb->s_umount))
		BUG();

	dentry = sb->s_root;
	sb->s_root = NULL;
	atomic_dec(&dentry->d_count);
	shrink_dcache_for_umount_subtree(dentry);

	while (!hlist_empty(&sb->s_anon)) {
		dentry = hlist_entry(sb->s_anon.first, struct dentry, d_hash);
		shrink_dcache_for_umount_subtree(dentry);
	}
}
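/*
 * Illustrative sketch (not part of the original file): how unmount
 * reaches shrink_dcache_for_umount(). In the real kernel,
 * deactivate_super() write-locks s_umount before ->kill_sb() ends up
 * in generic_shutdown_super(); the hypothetical function below only
 * restates that ordering, which the down_read_trylock() BUG check
 * above depends on.
 */
#if 0	/* example only, not compiled */
static void example_kill_sb(struct super_block *sb)
{
	down_write(&sb->s_umount);	/* the shrinker now skips this sb */
	if (sb->s_root)
		shrink_dcache_for_umount(sb);	/* every dentry must be unused */
	/* ... invalidate inodes, call ->put_super(), etc. ... */
}
#endif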
/*
 * Search for at least 1 mount point in the dentry's subdirs.
 * We descend to the next level whenever the d_subdirs
 * list is non-empty and continue searching.
 */

/**
 * have_submounts - check for mounts over a dentry
 * @parent: dentry to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point
 */

int have_submounts(struct dentry *parent)
{
	struct dentry *this_parent = parent;
	struct list_head *next;

	spin_lock(&dcache_lock);
	if (d_mountpoint(parent))
		goto positive;
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;
		/* Have we found a mount point ? */
		if (d_mountpoint(dentry))
			goto positive;
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		next = this_parent->d_u.d_child.next;
		this_parent = this_parent->d_parent;
		goto resume;
	}
	spin_unlock(&dcache_lock);
	return 0; /* No mount points found in tree */
positive:
	spin_unlock(&dcache_lock);
	return 1;
}

/*
 * Search the dentry child list for the specified parent,
 * and move any unused dentries to the end of the unused
 * list for prune_dcache(). We descend to the next level
 * whenever the d_subdirs list is non-empty and continue
 * searching.
 *
 * It returns zero iff there are no unused children,
 * otherwise it returns the number of children moved to
 * the end of the unused list. This may not be the total
 * number of unused children, because select_parent can
 * drop the lock and return early due to latency
 * constraints.
 */
static int select_parent(struct dentry *parent)
{
	struct dentry *this_parent = parent;
	struct list_head *next;
	int found = 0;

	spin_lock(&dcache_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		if (!list_empty(&dentry->d_lru)) {
			dentry_stat.nr_unused--;
			list_del_init(&dentry->d_lru);
		}
		/*
		 * move only zero ref count dentries to the end
		 * of the unused list for prune_dcache
		 */
		if (!atomic_read(&dentry->d_count)) {
			list_add_tail(&dentry->d_lru, &dentry_unused);
			dentry_stat.nr_unused++;
			found++;
		}

		/*
		 * We can return to the caller if we have found some (this
		 * ensures forward progress). We'll be coming back to find
		 * the rest.
		 */
		if (found && need_resched())
			goto out;

		/*
		 * Descend a level if the d_subdirs list is non-empty.
		 */
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		next = this_parent->d_u.d_child.next;
		this_parent = this_parent->d_parent;
		goto resume;
	}
out:
	spin_unlock(&dcache_lock);
	return found;
}

/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */

void shrink_dcache_parent(struct dentry *parent)
{
	int found;

	while ((found = select_parent(parent)) != 0)
		prune_dcache(found, parent->d_sb);
}
/*
 * Scan `nr' dentries and return the number which remain.
 *
 * We need to avoid reentering the filesystem if the caller is performing a
 * GFP_NOFS allocation attempt. One example deadlock is:
 *
 * ext2_new_block->getblk->GFP->shrink_dcache_memory->prune_dcache->
 * prune_one_dentry->dput->dentry_iput->iput->inode->i_sb->s_op->put_inode->
 * ext2_discard_prealloc->ext2_free_blocks->lock_super->DEADLOCK.
 *
 * In this case we return -1 to tell the caller that we bailed.
 */
static int shrink_dcache_memory(int nr, gfp_t gfp_mask)
{
	if (nr) {
		if (!(gfp_mask & __GFP_FS))
			return -1;
		prune_dcache(nr, NULL);
	}
	return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker dcache_shrinker = {
	.shrink = shrink_dcache_memory,
	.seeks = DEFAULT_SEEKS,
};

/**
 * d_alloc - allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */

struct dentry *d_alloc(struct dentry *parent, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	if (name->len > DNAME_INLINE_LEN-1) {
		dname = kmalloc(name->len + 1, GFP_KERNEL);
		if (!dname) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	} else {
		dname = dentry->d_iname;
	}
	dentry->d_name.name = dname;

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	atomic_set(&dentry->d_count, 1);
	dentry->d_flags = DCACHE_UNHASHED;
	spin_lock_init(&dentry->d_lock);
	dentry->d_inode = NULL;
	dentry->d_parent = NULL;
	dentry->d_sb = NULL;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	dentry->d_mounted = 0;
#ifdef CONFIG_PROFILING
	dentry->d_cookie = NULL;
#endif
	INIT_HLIST_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_LIST_HEAD(&dentry->d_alias);

	if (parent) {
		dentry->d_parent = dget(parent);
		dentry->d_sb = parent->d_sb;
	} else {
		INIT_LIST_HEAD(&dentry->d_u.d_child);
	}

	spin_lock(&dcache_lock);
	if (parent)
		list_add(&dentry->d_u.d_child, &parent->d_subdirs);
	dentry_stat.nr_dentry++;
	spin_unlock(&dcache_lock);

	return dentry;
}

struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
	struct qstr q;

	q.name = name;
	q.len = strlen(name);
	q.hash = full_name_hash(q.name, q.len);
	return d_alloc(parent, &q);
}

/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */

void d_instantiate(struct dentry *entry, struct inode *inode)
{
	BUG_ON(!list_empty(&entry->d_alias));
	spin_lock(&dcache_lock);
	if (inode)
		list_add(&entry->d_alias, &inode->i_dentry);
	entry->d_inode = inode;
	fsnotify_d_instantiate(entry, inode);
	spin_unlock(&dcache_lock);
	security_d_instantiate(entry, inode);
}
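/*
 * Illustrative sketch (not part of the original file): pairing
 * d_alloc_name() with d_instantiate() to grow an in-kernel tree, the
 * way pseudo filesystems build their namespace. example_mkentry() is
 * hypothetical; `inode' must carry a reference, which d_instantiate()
 * hands over to the dcache.
 */
#if 0	/* example only, not compiled */
static struct dentry *example_mkentry(struct dentry *parent,
				      const char *name,
				      struct inode *inode)
{
	struct dentry *dentry = d_alloc_name(parent, name);

	if (!dentry) {
		iput(inode);
		return NULL;
	}
	d_instantiate(dentry, inode);	/* dentry now owns our inode ref */
	d_rehash(dentry);		/* make it findable by d_lookup() */
	return dentry;
}
#endif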
/**
 * d_instantiate_unique - instantiate a non-aliased dentry
 * @entry: dentry to instantiate
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry. On success, it returns NULL.
 * If an unhashed alias of "entry" already exists, then we return the
 * aliased dentry instead and drop one reference to inode.
 *
 * Note that in order to avoid conflicts with rename() etc, the caller
 * had better be holding the parent directory semaphore.
 *
 * This also assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
static struct dentry *__d_instantiate_unique(struct dentry *entry,
					     struct inode *inode)
{
	struct dentry *alias;
	int len = entry->d_name.len;
	const char *name = entry->d_name.name;
	unsigned int hash = entry->d_name.hash;

	if (!inode) {
		entry->d_inode = NULL;
		return NULL;
	}

	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		struct qstr *qstr = &alias->d_name;

		if (qstr->hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
		if (qstr->len != len)
			continue;
		if (memcmp(qstr->name, name, len))
			continue;
		dget_locked(alias);
		return alias;
	}

	list_add(&entry->d_alias, &inode->i_dentry);
	entry->d_inode = inode;
	fsnotify_d_instantiate(entry, inode);
	return NULL;
}

struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
{
	struct dentry *result;

	BUG_ON(!list_empty(&entry->d_alias));

	spin_lock(&dcache_lock);
	result = __d_instantiate_unique(entry, inode);
	spin_unlock(&dcache_lock);

	if (!result) {
		security_d_instantiate(entry, inode);
		return NULL;
	}

	BUG_ON(!d_unhashed(result));
	iput(inode);
	return result;
}

EXPORT_SYMBOL(d_instantiate_unique);

/**
 * d_alloc_root - allocate root dentry
 * @root_inode: inode to allocate the root for
 *
 * Allocate a root ("/") dentry for the inode given. The inode is
 * instantiated and returned. %NULL is returned if there is insufficient
 * memory or the inode passed is %NULL.
 */

struct dentry *d_alloc_root(struct inode *root_inode)
{
	struct dentry *res = NULL;

	if (root_inode) {
		static const struct qstr name = { .name = "/", .len = 1 };

		res = d_alloc(NULL, &name);
		if (res) {
			res->d_sb = root_inode->i_sb;
			res->d_parent = res;
			d_instantiate(res, root_inode);
		}
	}
	return res;
}

static inline struct hlist_head *d_hash(struct dentry *parent,
					unsigned long hash)
{
	hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
	hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
	return dentry_hashtable + (hash & D_HASHMASK);
}
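/*
 * Illustrative sketch (not part of the original file): d_alloc_root()
 * above is what a fill_super() implementation uses to hang the dentry
 * tree off its root inode. example_fill_super() and
 * example_make_root_inode() are hypothetical.
 */
#if 0	/* example only, not compiled */
static int example_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *root = example_make_root_inode(sb);

	if (!root)
		return -ENOMEM;
	sb->s_root = d_alloc_root(root);
	if (!sb->s_root) {
		iput(root);	/* d_alloc_root() did not consume our ref */
		return -ENOMEM;
	}
	return 0;
}
#endif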
/**
 * d_alloc_anon - allocate an anonymous dentry
 * @inode: inode to allocate the dentry for
 *
 * This is similar to d_alloc_root. It is used by filesystems when
 * creating a dentry for a given inode, often in the process of
 * mapping a filehandle to a dentry. The returned dentry may be
 * anonymous, or may have a full name (if the inode was already
 * in the cache). The file system may need to make further
 * efforts to connect this dentry into the dcache properly.
 *
 * When called on a directory inode, we must ensure that
 * the inode only ever has one dentry. If a dentry is
 * found, that is returned instead of allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry. If %NULL is returned (indicating kmalloc failure),
 * the reference on the inode has not been released.
 */

struct dentry *d_alloc_anon(struct inode *inode)
{
	static const struct qstr anonstring = { .name = "" };
	struct dentry *tmp;
	struct dentry *res;

	if ((res = d_find_alias(inode))) {
		iput(inode);
		return res;
	}

	tmp = d_alloc(NULL, &anonstring);
	if (!tmp)
		return NULL;

	tmp->d_parent = tmp; /* make sure dput doesn't croak */

	spin_lock(&dcache_lock);
	res = __d_find_alias(inode, 0);
	if (!res) {
		/* attach a disconnected dentry */
		res = tmp;
		tmp = NULL;
		spin_lock(&res->d_lock);
		res->d_sb = inode->i_sb;
		res->d_parent = res;
		res->d_inode = inode;
		res->d_flags |= DCACHE_DISCONNECTED;
		res->d_flags &= ~DCACHE_UNHASHED;
		list_add(&res->d_alias, &inode->i_dentry);
		hlist_add_head(&res->d_hash, &inode->i_sb->s_anon);
		spin_unlock(&res->d_lock);

		inode = NULL; /* don't drop reference */
	}
	spin_unlock(&dcache_lock);

	if (inode)
		iput(inode);
	if (tmp)
		dput(tmp);
	return res;
}


/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and
 * DCACHE_DISCONNECTED), then d_move that in place of the given dentry
 * and return it, else simply d_add the inode to the dentry and return NULL.
 *
 * This is needed in the lookup routine of any filesystem that is exportable
 * (via knfsd) so that we can build dcache paths to directories effectively.
 *
 * If a dentry was found and moved, then it is returned. Otherwise NULL
 * is returned. This matches the expected return value of ->lookup.
 *
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	struct dentry *new = NULL;

	if (inode && S_ISDIR(inode->i_mode)) {
		spin_lock(&dcache_lock);
		new = __d_find_alias(inode, 1);
		if (new) {
			BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
			fsnotify_d_instantiate(new, inode);
			spin_unlock(&dcache_lock);
			security_d_instantiate(new, inode);
			d_rehash(dentry);
			d_move(new, dentry);
			iput(inode);
		} else {
			/* d_instantiate takes dcache_lock, so we do it by hand */
			list_add(&dentry->d_alias, &inode->i_dentry);
			dentry->d_inode = inode;
			fsnotify_d_instantiate(dentry, inode);
			spin_unlock(&dcache_lock);
			security_d_instantiate(dentry, inode);
			d_rehash(dentry);
		}
	} else
		d_add(dentry, inode);
	return new;
}
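/*
 * Illustrative sketch (not part of the original file): the canonical
 * ->lookup() shape for an exportable filesystem, returning whatever
 * d_splice_alias() hands back so a disconnected alias can stand in for
 * the dentry the VFS passed down. example_lookup() and example_iget()
 * are hypothetical.
 */
#if 0	/* example only, not compiled */
static struct dentry *example_lookup(struct inode *dir,
				     struct dentry *dentry,
				     struct nameidata *nd)
{
	struct inode *inode = example_iget(dir, &dentry->d_name);

	if (IS_ERR(inode))
		return ERR_PTR(PTR_ERR(inode));
	/* NULL inode is fine: d_splice_alias() d_add()s a negative dentry */
	return d_splice_alias(inode, dentry);
}
#endif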
/**
 * d_lookup - search for a dentry
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 *
 * Searches the children of the parent dentry for the name in question. If
 * the dentry is found its reference count is incremented and the dentry
 * is returned. The caller must use dput to free the entry when it has
 * finished using it. %NULL is returned on failure.
 *
 * __d_lookup is dcache_lock free. The hash list is protected using RCU.
 * Memory barriers are used while updating and doing lockless traversal.
 * To avoid races with d_move while rename is happening, d_lock is used.
 *
 * Overflows in memcmp(), while d_move, are avoided by keeping the length
 * and name pointer in one structure pointed by d_qstr.
 *
 * rcu_read_lock() and rcu_read_unlock() are used to disable preemption while
 * lookup is going on.
 *
 * dentry_unused list is not updated even if lookup finds the required dentry
 * in there. It is updated in places such as prune_dcache, shrink_dcache_sb,
 * select_parent and __dget_locked. This laziness saves lookup from dcache_lock
 * acquisition.
 *
 * d_lookup() is protected against the concurrent renames in some unrelated
 * directory using the seqlock_t rename_lock.
 */

struct dentry *d_lookup(struct dentry *parent, struct qstr *name)
{
	struct dentry *dentry = NULL;
	unsigned long seq;

	do {
		seq = read_seqbegin(&rename_lock);
		dentry = __d_lookup(parent, name);
		if (dentry)
			break;
	} while (read_seqretry(&rename_lock, seq));
	return dentry;
}

struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
{
	unsigned int len = name->len;
	unsigned int hash = name->hash;
	const unsigned char *str = name->name;
	struct hlist_head *head = d_hash(parent, hash);
	struct dentry *found = NULL;
	struct hlist_node *node;
	struct dentry *dentry;

	rcu_read_lock();

	hlist_for_each_entry_rcu(dentry, node, head, d_hash) {
		struct qstr *qstr;

		if (dentry->d_name.hash != hash)
			continue;
		if (dentry->d_parent != parent)
			continue;

		spin_lock(&dentry->d_lock);

		/*
		 * Recheck the dentry after taking the lock - d_move may have
		 * changed things. Don't bother checking the hash because we're
		 * about to compare the whole name anyway.
		 */
		if (dentry->d_parent != parent)
			goto next;

		/*
		 * It is safe to compare names since d_move() cannot
		 * change the qstr (protected by d_lock).
		 */
		qstr = &dentry->d_name;
		if (parent->d_op && parent->d_op->d_compare) {
			if (parent->d_op->d_compare(parent, qstr, name))
				goto next;
		} else {
			if (qstr->len != len)
				goto next;
			if (memcmp(qstr->name, str, len))
				goto next;
		}

		if (!d_unhashed(dentry)) {
			atomic_inc(&dentry->d_count);
			found = dentry;
		}
		spin_unlock(&dentry->d_lock);
		break;
next:
		spin_unlock(&dentry->d_lock);
	}
	rcu_read_unlock();

	return found;
}
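/*
 * Illustrative sketch (not part of the original file): calling
 * d_lookup() by hand. The qstr must carry a precomputed hash, exactly
 * as d_alloc_name() above and d_hash_and_lookup() below do;
 * example_find_child() is hypothetical.
 */
#if 0	/* example only, not compiled */
static struct dentry *example_find_child(struct dentry *parent,
					 const char *name)
{
	struct qstr q;

	q.name = name;
	q.len = strlen(name);
	q.hash = full_name_hash(q.name, q.len);
	return d_lookup(parent, &q);	/* caller must dput() the result */
}
#endif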
/**
 * d_hash_and_lookup - hash the qstr then search for a dentry
 * @dir: Directory to search in
 * @name: qstr of name we wish to find
 *
 * On hash failure or on lookup failure NULL is returned.
 */
struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
{
	struct dentry *dentry = NULL;

	/*
	 * Check for a fs-specific hash function. Note that we must
	 * calculate the standard hash first, as the d_op->d_hash()
	 * routine may choose to leave the hash value unchanged.
	 */
	name->hash = full_name_hash(name->name, name->len);
	if (dir->d_op && dir->d_op->d_hash) {
		if (dir->d_op->d_hash(dir, name) < 0)
			goto out;
	}
	dentry = d_lookup(dir, name);
out:
	return dentry;
}

/**
 * d_validate - verify dentry provided from insecure source
 * @dentry: The dentry alleged to be valid child of @dparent
 * @dparent: The parent dentry (known to be valid)
 *
 * An insecure source has sent us a dentry, here we verify it and dget() it.
 * This is used by ncpfs in its readdir implementation.
 * Zero is returned if the dentry is invalid.
 */

int d_validate(struct dentry *dentry, struct dentry *dparent)
{
	struct hlist_head *base;
	struct hlist_node *lhp;

	/* Check whether the ptr might be valid at all.. */
	if (!kmem_ptr_validate(dentry_cache, dentry))
		goto out;

	if (dentry->d_parent != dparent)
		goto out;

	spin_lock(&dcache_lock);
	base = d_hash(dparent, dentry->d_name.hash);
	hlist_for_each(lhp, base) {
		/* hlist_for_each_entry_rcu() not required for d_hash list
		 * as it is parsed under dcache_lock
		 */
		if (dentry == hlist_entry(lhp, struct dentry, d_hash)) {
			__dget_locked(dentry);
			spin_unlock(&dcache_lock);
			return 1;
		}
	}
	spin_unlock(&dcache_lock);
out:
	return 0;
}

/*
 * When a file is deleted, we have two options:
 * - turn this dentry into a negative dentry
 * - unhash this dentry and free it.
 *
 * Usually, we want to just turn this into
 * a negative dentry, but if anybody else is
 * currently using the dentry or the inode
 * we can't do that and we fall back on removing
 * it from the hash queues and waiting for
 * it to be deleted later when it has no users.
 */

/**
 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * Turn the dentry into a negative dentry if possible, otherwise
 * remove it from the hash queues so it can be deleted later.
 */

void d_delete(struct dentry *dentry)
{
	int isdir = 0;
	/*
	 * Are we the only user?
	 */
	spin_lock(&dcache_lock);
	spin_lock(&dentry->d_lock);
	isdir = S_ISDIR(dentry->d_inode->i_mode);
	if (atomic_read(&dentry->d_count) == 1) {
		dentry_iput(dentry);
		fsnotify_nameremove(dentry, isdir);
		return;
	}

	if (!d_unhashed(dentry))
		__d_drop(dentry);

	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_lock);

	fsnotify_nameremove(dentry, isdir);
}
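/*
 * Illustrative sketch (not part of the original file): where d_delete()
 * sits in the unlink path. This is loosely modelled on vfs_unlink() in
 * fs/namei.c, heavily simplified (no permission checks, no mountpoint
 * check, no fsnotify); example_vfs_unlink() is hypothetical.
 */
#if 0	/* example only, not compiled */
static int example_vfs_unlink(struct inode *dir, struct dentry *dentry)
{
	int err;

	mutex_lock(&dentry->d_inode->i_mutex);
	err = dir->i_op->unlink(dir, dentry);
	mutex_unlock(&dentry->d_inode->i_mutex);
	if (!err)
		d_delete(dentry);	/* go negative, or unhash if busy */
	return err;
}
#endif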
static void __d_rehash(struct dentry *entry, struct hlist_head *list)
{
	entry->d_flags &= ~DCACHE_UNHASHED;
	hlist_add_head_rcu(&entry->d_hash, list);
}

static void _d_rehash(struct dentry *entry)
{
	__d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
}

/**
 * d_rehash - add an entry back to the hash
 * @entry: dentry to add to the hash
 *
 * Adds a dentry to the hash according to its name.
 */

void d_rehash(struct dentry *entry)
{
	spin_lock(&dcache_lock);
	spin_lock(&entry->d_lock);
	_d_rehash(entry);
	spin_unlock(&entry->d_lock);
	spin_unlock(&dcache_lock);
}

#define do_switch(x,y) do { \
	__typeof__ (x) __tmp = x; \
	x = y; y = __tmp; } while (0)

/*
 * When switching names, the actual string doesn't strictly have to
 * be preserved in the target - because we're dropping the target
 * anyway. As such, we can just do a simple memcpy() to copy over
 * the new name before we switch.
 *
 * Note that we have to be a lot more careful about getting the hash
 * switched - we have to switch the hash value properly even if it
 * then no longer matches the actual (corrupted) string of the target.
 * The hash value has to match the hash queue that the dentry is on..
 */
static void switch_names(struct dentry *dentry, struct dentry *target)
{
	if (dname_external(target)) {
		if (dname_external(dentry)) {
			/*
			 * Both external: swap the pointers
			 */
			do_switch(target->d_name.name, dentry->d_name.name);
		} else {
			/*
			 * dentry:internal, target:external. Steal target's
			 * storage and make target internal.
			 */
			memcpy(target->d_iname, dentry->d_name.name,
					dentry->d_name.len + 1);
			dentry->d_name.name = target->d_name.name;
			target->d_name.name = target->d_iname;
		}
	} else {
		if (dname_external(dentry)) {
			/*
			 * dentry:external, target:internal. Give dentry's
			 * storage to target and make dentry internal
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			target->d_name.name = dentry->d_name.name;
			dentry->d_name.name = dentry->d_iname;
		} else {
			/*
			 * Both are internal. Just copy target to dentry
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
		}
	}
}

/*
 * We cannibalize "target" when moving dentry on top of it,
 * because it's going to be thrown away anyway. We could be more
 * polite about it, though.
 *
 * This forceful removal will result in ugly /proc output if
 * somebody holds a file open that got deleted due to a rename.
 * We could be nicer about the deleted file, and let it show
 * up under the name it had before it was deleted rather than
 * under the original name of the file that was moved on top of it.
 */
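/*
 * Illustrative sketch (not part of the original file): d_move_locked()
 * below avoids an ABBA deadlock on the two d_locks by always taking
 * the lock with the lower object address first. The same trick in
 * isolation, with a hypothetical name:
 */
#if 0	/* example only, not compiled */
static void example_lock_pair(struct dentry *a, struct dentry *b)
{
	if (a < b) {			/* order by object address */
		spin_lock(&a->d_lock);
		spin_lock_nested(&b->d_lock, DENTRY_D_LOCK_NESTED);
	} else {
		spin_lock(&b->d_lock);
		spin_lock_nested(&a->d_lock, DENTRY_D_LOCK_NESTED);
	}
}
#endif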
/*
 * d_move_locked - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way.
 */
static void d_move_locked(struct dentry *dentry, struct dentry *target)
{
	struct hlist_head *list;

	if (!dentry->d_inode)
		printk(KERN_WARNING "VFS: moving negative dcache entry\n");

	write_seqlock(&rename_lock);
	/*
	 * XXXX: do we really need to take target->d_lock?
	 */
	if (target < dentry) {
		spin_lock(&target->d_lock);
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	} else {
		spin_lock(&dentry->d_lock);
		spin_lock_nested(&target->d_lock, DENTRY_D_LOCK_NESTED);
	}

	/* Move the dentry to the target hash queue, if on different bucket */
	if (d_unhashed(dentry))
		goto already_unhashed;

	hlist_del_rcu(&dentry->d_hash);

already_unhashed:
	list = d_hash(target->d_parent, target->d_name.hash);
	__d_rehash(dentry, list);

	/* Unhash the target: dput() will then get rid of it */
	__d_drop(target);

	list_del(&dentry->d_u.d_child);
	list_del(&target->d_u.d_child);

	/* Switch the names.. */
	switch_names(dentry, target);
	do_switch(dentry->d_name.len, target->d_name.len);
	do_switch(dentry->d_name.hash, target->d_name.hash);

	/* ... and switch the parents */
	if (IS_ROOT(dentry)) {
		dentry->d_parent = target->d_parent;
		target->d_parent = target;
		INIT_LIST_HEAD(&target->d_u.d_child);
	} else {
		do_switch(dentry->d_parent, target->d_parent);

		/* And add them back to the (new) parent lists */
		list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
	}

	list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
	spin_unlock(&target->d_lock);
	fsnotify_d_move(dentry);
	spin_unlock(&dentry->d_lock);
	write_sequnlock(&rename_lock);
}

/**
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way.
 */

void d_move(struct dentry *dentry, struct dentry *target)
{
	spin_lock(&dcache_lock);
	d_move_locked(dentry, target);
	spin_unlock(&dcache_lock);
}

/*
 * Helper that returns 1 if p1 is a parent of p2, else 0
 */
static int d_isparent(struct dentry *p1, struct dentry *p2)
{
	struct dentry *p;

	for (p = p2; p->d_parent != p; p = p->d_parent) {
		if (p->d_parent == p1)
			return 1;
	}
	return 0;
}

/*
 * This helper attempts to cope with remotely renamed directories
 *
 * It assumes that the caller is already holding
 * dentry->d_parent->d_inode->i_mutex and the dcache_lock
 *
 * Note: If ever the locking in lock_rename() changes, then please
 * remember to update this too...
 *
 * On return, dcache_lock will have been unlocked.
 */
static struct dentry *__d_unalias(struct dentry *dentry, struct dentry *alias)
{
	struct mutex *m1 = NULL, *m2 = NULL;
	struct dentry *ret;

	/* If alias and dentry share a parent, then no extra locks required */
	if (alias->d_parent == dentry->d_parent)
		goto out_unalias;

	/* Check for loops */
	ret = ERR_PTR(-ELOOP);
	if (d_isparent(alias, dentry))
		goto out_err;

	/* See lock_rename() */
	ret = ERR_PTR(-EBUSY);
	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
		goto out_err;
	m1 = &dentry->d_sb->s_vfs_rename_mutex;
	if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
		goto out_err;
	m2 = &alias->d_parent->d_inode->i_mutex;
out_unalias:
	d_move_locked(alias, dentry);
	ret = alias;
out_err:
	spin_unlock(&dcache_lock);
	if (m2)
		mutex_unlock(m2);
	if (m1)
		mutex_unlock(m1);
	return ret;
}

/*
 * Prepare an anonymous dentry for life in the superblock's dentry tree as a
 * named dentry in place of the dentry to be replaced.
 */
static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
{
	struct dentry *dparent, *aparent;

	switch_names(dentry, anon);
	do_switch(dentry->d_name.len, anon->d_name.len);
	do_switch(dentry->d_name.hash, anon->d_name.hash);

	dparent = dentry->d_parent;
	aparent = anon->d_parent;

	dentry->d_parent = (aparent == anon) ? dentry : aparent;
	list_del(&dentry->d_u.d_child);
	if (!IS_ROOT(dentry))
		list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
	else
		INIT_LIST_HEAD(&dentry->d_u.d_child);

	anon->d_parent = (dparent == dentry) ? anon : dparent;
	list_del(&anon->d_u.d_child);
	if (!IS_ROOT(anon))
		list_add(&anon->d_u.d_child, &anon->d_parent->d_subdirs);
	else
		INIT_LIST_HEAD(&anon->d_u.d_child);

	anon->d_flags &= ~DCACHE_DISCONNECTED;
}

/**
 * d_materialise_unique - introduce an inode into the tree
 * @dentry: candidate dentry
 * @inode: inode to bind to the dentry, to which aliases may be attached
 *
 * Introduces a dentry into the tree, substituting an extant disconnected
 * root directory alias in its place if there is one.
 */
struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
{
	struct dentry *actual;

	BUG_ON(!d_unhashed(dentry));

	spin_lock(&dcache_lock);

	if (!inode) {
		actual = dentry;
		dentry->d_inode = NULL;
		goto found_lock;
	}

	if (S_ISDIR(inode->i_mode)) {
		struct dentry *alias;

		/* Does an aliased dentry already exist? */
		alias = __d_find_alias(inode, 0);
		if (alias) {
			actual = alias;
			/* Is this an anonymous mountpoint that we could splice
			 * into our tree? */
			if (IS_ROOT(alias)) {
				spin_lock(&alias->d_lock);
				__d_materialise_dentry(dentry, alias);
				__d_drop(alias);
				goto found;
			}
			/* Nope, but we must(!) avoid directory aliasing */
			actual = __d_unalias(dentry, alias);
			if (IS_ERR(actual))
				dput(alias);
			goto out_nolock;
		}
	}

	/* Add a unique reference */
	actual = __d_instantiate_unique(dentry, inode);
	if (!actual)
		actual = dentry;
	else if (unlikely(!d_unhashed(actual)))
		goto shouldnt_be_hashed;

found_lock:
	spin_lock(&actual->d_lock);
found:
	_d_rehash(actual);
	spin_unlock(&actual->d_lock);
	spin_unlock(&dcache_lock);
out_nolock:
	if (actual == dentry) {
		security_d_instantiate(dentry, inode);
		return NULL;
	}

	iput(inode);
	return actual;

shouldnt_be_hashed:
	spin_unlock(&dcache_lock);
	BUG();
	goto shouldnt_be_hashed;
}
/**
 * d_path - return the path of a dentry
 * @dentry: dentry to report
 * @vfsmnt: vfsmnt to which the dentry belongs
 * @root: root dentry
 * @rootmnt: vfsmnt to which the root dentry belongs
 * @buffer: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name. If the entry has been deleted
 * the string " (deleted)" is appended. Note that this is ambiguous.
 *
 * Returns the buffer or an error code if the path was too long.
 *
 * "buflen" should be positive. Caller holds the dcache_lock.
 */
static char *__d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
		      struct dentry *root, struct vfsmount *rootmnt,
		      char *buffer, int buflen)
{
	char *end = buffer + buflen;
	char *retval;
	int namelen;

	*--end = '\0';
	buflen--;
	if (!IS_ROOT(dentry) && d_unhashed(dentry)) {
		buflen -= 10;
		end -= 10;
		if (buflen < 0)
			goto Elong;
		memcpy(end, " (deleted)", 10);
	}

	if (buflen < 1)
		goto Elong;
	/* Get '/' right */
	retval = end-1;
	*retval = '/';

	for (;;) {
		struct dentry *parent;

		if (dentry == root && vfsmnt == rootmnt)
			break;
		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
			/* Global root? */
			spin_lock(&vfsmount_lock);
			if (vfsmnt->mnt_parent == vfsmnt) {
				spin_unlock(&vfsmount_lock);
				goto global_root;
			}
			dentry = vfsmnt->mnt_mountpoint;
			vfsmnt = vfsmnt->mnt_parent;
			spin_unlock(&vfsmount_lock);
			continue;
		}
		parent = dentry->d_parent;
		prefetch(parent);
		namelen = dentry->d_name.len;
		buflen -= namelen + 1;
		if (buflen < 0)
			goto Elong;
		end -= namelen;
		memcpy(end, dentry->d_name.name, namelen);
		*--end = '/';
		retval = end;
		dentry = parent;
	}

	return retval;

global_root:
	namelen = dentry->d_name.len;
	buflen -= namelen;
	if (buflen < 0)
		goto Elong;
	retval -= namelen-1;	/* hit the slash */
	memcpy(retval, dentry->d_name.name, namelen);
	return retval;
Elong:
	return ERR_PTR(-ENAMETOOLONG);
}

/* write full pathname into buffer and return start of pathname */
char *d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
	     char *buf, int buflen)
{
	char *res;
	struct vfsmount *rootmnt;
	struct dentry *root;

	/*
	 * We have various synthetic filesystems that never get mounted. On
	 * these filesystems dentries are never used for lookup purposes, and
	 * thus don't need to be hashed. They also don't need a name until a
	 * user wants to identify the object in /proc/pid/fd/. The little hack
	 * below allows us to generate a name for these objects on demand:
	 */
	if (dentry->d_op && dentry->d_op->d_dname)
		return dentry->d_op->d_dname(dentry, buf, buflen);

	read_lock(&current->fs->lock);
	rootmnt = mntget(current->fs->rootmnt);
	root = dget(current->fs->root);
	read_unlock(&current->fs->lock);
	spin_lock(&dcache_lock);
	res = __d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
	spin_unlock(&dcache_lock);
	dput(root);
	mntput(rootmnt);
	return res;
}
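/*
 * Illustrative sketch (not part of the original file): the usual
 * calling convention for d_path() - give it a whole page and print
 * from the returned pointer, which sits near the end of the buffer.
 * example_print_path() is hypothetical.
 */
#if 0	/* example only, not compiled */
static void example_print_path(struct dentry *dentry, struct vfsmount *mnt)
{
	char *page = (char *) __get_free_page(GFP_KERNEL);
	char *p;

	if (!page)
		return;
	p = d_path(dentry, mnt, page, PAGE_SIZE);
	if (!IS_ERR(p))
		printk(KERN_DEBUG "path: %s\n", p);
	free_page((unsigned long) page);
}
#endif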
/*
 * Helper function for dentry_operations.d_dname() members
 */
char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
			const char *fmt, ...)
{
	va_list args;
	char temp[64];
	int sz;

	va_start(args, fmt);
	sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
	va_end(args);

	if (sz > sizeof(temp) || sz > buflen)
		return ERR_PTR(-ENAMETOOLONG);

	buffer += buflen - sz;
	return memcpy(buffer, temp, sz);
}

/*
 * NOTE! The user-level library version returns a
 * character pointer. The kernel system call just
 * returns the length of the buffer filled (which
 * includes the ending '\0' character), or a negative
 * error value. So libc would do something like
 *
 *	char *getcwd(char * buf, size_t size)
 *	{
 *		int retval;
 *
 *		retval = sys_getcwd(buf, size);
 *		if (retval >= 0)
 *			return buf;
 *		errno = -retval;
 *		return NULL;
 *	}
 */
asmlinkage long sys_getcwd(char __user *buf, unsigned long size)
{
	int error;
	struct vfsmount *pwdmnt, *rootmnt;
	struct dentry *pwd, *root;
	char *page = (char *) __get_free_page(GFP_USER);

	if (!page)
		return -ENOMEM;

	read_lock(&current->fs->lock);
	pwdmnt = mntget(current->fs->pwdmnt);
	pwd = dget(current->fs->pwd);
	rootmnt = mntget(current->fs->rootmnt);
	root = dget(current->fs->root);
	read_unlock(&current->fs->lock);

	error = -ENOENT;
	/* Has the current directory been unlinked? */
	spin_lock(&dcache_lock);
	if (pwd->d_parent == pwd || !d_unhashed(pwd)) {
		unsigned long len;
		char *cwd;

		cwd = __d_path(pwd, pwdmnt, root, rootmnt, page, PAGE_SIZE);
		spin_unlock(&dcache_lock);

		error = PTR_ERR(cwd);
		if (IS_ERR(cwd))
			goto out;

		error = -ERANGE;
		len = PAGE_SIZE + page - cwd;
		if (len <= size) {
			error = len;
			if (copy_to_user(buf, cwd, len))
				error = -EFAULT;
		}
	} else
		spin_unlock(&dcache_lock);

out:
	dput(pwd);
	mntput(pwdmnt);
	dput(root);
	mntput(rootmnt);
	free_page((unsigned long) page);
	return error;
}
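/*
 * Illustrative sketch (not part of the original file): a d_dname
 * implementation built on dynamic_dname(), in the style of pipefs's
 * "pipe:[ino]" names. example_dname and example_dentry_ops are
 * hypothetical.
 */
#if 0	/* example only, not compiled */
static char *example_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "example:[%lu]",
				dentry->d_inode->i_ino);
}

static struct dentry_operations example_dentry_ops = {
	.d_dname	= example_dname,
};
#endif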
/*
 * Test whether new_dentry is a subdirectory of old_dentry.
 *
 * Trivially implemented using the dcache structure
 */

/**
 * is_subdir - is new dentry a subdirectory of old_dentry
 * @new_dentry: new dentry
 * @old_dentry: old dentry
 *
 * Returns 1 if new_dentry is a subdirectory of the parent (at any depth).
 * Returns 0 otherwise.
 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
 */

int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
{
	int result;
	struct dentry *saved = new_dentry;
	unsigned long seq;

	/* need rcu_read_lock to protect against the d_parent trashing
	 * due to d_move
	 */
	rcu_read_lock();
	do {
		/* for restarting inner loop in case of seq retry */
		new_dentry = saved;
		result = 0;
		seq = read_seqbegin(&rename_lock);
		for (;;) {
			if (new_dentry != old_dentry) {
				struct dentry *parent = new_dentry->d_parent;
				if (parent == new_dentry)
					break;
				new_dentry = parent;
				continue;
			}
			result = 1;
			break;
		}
	} while (read_seqretry(&rename_lock, seq));
	rcu_read_unlock();

	return result;
}

void d_genocide(struct dentry *root)
{
	struct dentry *this_parent = root;
	struct list_head *next;

	spin_lock(&dcache_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;
		if (d_unhashed(dentry) || !dentry->d_inode)
			continue;
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
		atomic_dec(&dentry->d_count);
	}
	if (this_parent != root) {
		next = this_parent->d_u.d_child.next;
		atomic_dec(&this_parent->d_count);
		this_parent = this_parent->d_parent;
		goto resume;
	}
	spin_unlock(&dcache_lock);
}

/**
 * find_inode_number - check for dentry with name
 * @dir: directory to check
 * @name: Name to find.
 *
 * Check whether a dentry already exists for the given name,
 * and return the inode number if it has an inode. Otherwise
 * 0 is returned.
 *
 * This routine is used to post-process directory listings for
 * filesystems using synthetic inode numbers, and is necessary
 * to keep getcwd() working.
 */

ino_t find_inode_number(struct dentry *dir, struct qstr *name)
{
	struct dentry *dentry;
	ino_t ino = 0;

	dentry = d_hash_and_lookup(dir, name);
	if (dentry) {
		if (dentry->d_inode)
			ino = dentry->d_inode->i_ino;
		dput(dentry);
	}
	return ino;
}
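/*
 * Illustrative sketch (not part of the original file): how a readdir
 * implementation over synthetic inode numbers might use
 * find_inode_number(), so a name already in the dcache reports the
 * inode number getcwd() will later see. example_emit() is a
 * hypothetical filldir-style helper, and the fallback number is
 * made up.
 */
#if 0	/* example only, not compiled */
static int example_emit(struct dentry *dir, const char *name, int len)
{
	struct qstr q = { .name = name, .len = len };
	ino_t ino = find_inode_number(dir, &q);

	if (!ino)
		ino = 1;	/* fall back to a synthetic inode number */
	/* ... pass `name' and `ino' to the filldir callback ... */
	return 0;
}
#endif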
static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)
{
	if (!str)
		return 0;
	dhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("dhash_entries=", set_dhash_entries);

static void __init dcache_init_early(void)
{
	int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_head),
					dhash_entries,
					13,
					HASH_EARLY,
					&d_hash_shift,
					&d_hash_mask,
					0);

	for (loop = 0; loop < (1 << d_hash_shift); loop++)
		INIT_HLIST_HEAD(&dentry_hashtable[loop]);
}

static void __init dcache_init(void)
{
	int loop;

	/*
	 * A constructor could be added for stable state like the lists,
	 * but it is probably not worth it because of the cache nature
	 * of the dcache.
	 */
	dentry_cache = KMEM_CACHE(dentry,
		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);

	register_shrinker(&dcache_shrinker);

	/* Hash may have been set up in dcache_init_early */
	if (!hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_head),
					dhash_entries,
					13,
					0,
					&d_hash_shift,
					&d_hash_mask,
					0);

	for (loop = 0; loop < (1 << d_hash_shift); loop++)
		INIT_HLIST_HEAD(&dentry_hashtable[loop]);
}

/* SLAB cache for __getname() consumers */
struct kmem_cache *names_cachep __read_mostly;

/* SLAB cache for file structures */
struct kmem_cache *filp_cachep __read_mostly;

EXPORT_SYMBOL(d_genocide);

void __init vfs_caches_init_early(void)
{
	dcache_init_early();
	inode_init_early();
}

void __init vfs_caches_init(unsigned long mempages)
{
	unsigned long reserve;

	/* Base hash sizes on available memory, with a reserve equal to
	   150% of current kernel size */

	reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
	mempages -= reserve;

	names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	dcache_init();
	inode_init();
	files_init(mempages);
	mnt_init();
	bdev_cache_init();
	chrdev_init();
}

EXPORT_SYMBOL(d_alloc);
EXPORT_SYMBOL(d_alloc_anon);
EXPORT_SYMBOL(d_alloc_root);
EXPORT_SYMBOL(d_delete);
EXPORT_SYMBOL(d_find_alias);
EXPORT_SYMBOL(d_instantiate);
EXPORT_SYMBOL(d_invalidate);
EXPORT_SYMBOL(d_lookup);
EXPORT_SYMBOL(d_move);
EXPORT_SYMBOL_GPL(d_materialise_unique);
EXPORT_SYMBOL(d_path);
EXPORT_SYMBOL(d_prune_aliases);
EXPORT_SYMBOL(d_rehash);
EXPORT_SYMBOL(d_splice_alias);
EXPORT_SYMBOL(d_validate);
EXPORT_SYMBOL(dget_locked);
EXPORT_SYMBOL(dput);
EXPORT_SYMBOL(find_inode_number);
EXPORT_SYMBOL(have_submounts);
EXPORT_SYMBOL(names_cachep);
EXPORT_SYMBOL(shrink_dcache_parent);
EXPORT_SYMBOL(shrink_dcache_sb);