/*
 * fs/dcache.c
 *
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 */

/*
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */

#include <linux/syscalls.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <asm/uaccess.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include "internal.h"


int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(dcache_lock);

static struct kmem_cache *dentry_cache __read_mostly;

#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))

/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.
 */
#define D_HASHBITS	d_hash_shift
#define D_HASHMASK	d_hash_mask

static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;
static struct hlist_head *dentry_hashtable __read_mostly;

/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

static void __d_free(struct dentry *dentry)
{
	if (dname_external(dentry))
		kfree(dentry->d_name.name);
	kmem_cache_free(dentry_cache, dentry);
}

static void d_callback(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
	__d_free(dentry);
}

/*
 * no dcache_lock, please. The caller must decrement dentry_stat.nr_dentry
 * inside dcache_lock.
 */
static void d_free(struct dentry *dentry)
{
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);
	/* if dentry was never inserted into hash, immediate free is OK */
	if (hlist_unhashed(&dentry->d_hash))
		__d_free(dentry);
	else
		call_rcu(&dentry->d_u.d_rcu, d_callback);
}

/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined.
 */
static void dentry_iput(struct dentry *dentry)
	__releases(dentry->d_lock)
	__releases(dcache_lock)
{
	struct inode *inode = dentry->d_inode;
	if (inode) {
		dentry->d_inode = NULL;
		list_del_init(&dentry->d_alias);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_lock);
		if (!inode->i_nlink)
			fsnotify_inoderemove(inode);
		if (dentry->d_op && dentry->d_op->d_iput)
			dentry->d_op->d_iput(dentry, inode);
		else
			iput(inode);
	} else {
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_lock);
	}
}

/*
 * dentry_lru_(add|add_tail|del|del_init) must be called with dcache_lock held.
 */
static void dentry_lru_add(struct dentry *dentry)
{
	list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
	dentry->d_sb->s_nr_dentry_unused++;
	dentry_stat.nr_unused++;
}

static void dentry_lru_add_tail(struct dentry *dentry)
{
	list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
	dentry->d_sb->s_nr_dentry_unused++;
	dentry_stat.nr_unused++;
}

static void dentry_lru_del(struct dentry *dentry)
{
	if (!list_empty(&dentry->d_lru)) {
		list_del(&dentry->d_lru);
		dentry->d_sb->s_nr_dentry_unused--;
		dentry_stat.nr_unused--;
	}
}

static void dentry_lru_del_init(struct dentry *dentry)
{
	if (likely(!list_empty(&dentry->d_lru))) {
		list_del_init(&dentry->d_lru);
		dentry->d_sb->s_nr_dentry_unused--;
		dentry_stat.nr_unused--;
	}
}

/**
 * d_kill - kill dentry and return parent
 * @dentry: dentry to kill
 *
 * The dentry must already be unhashed and removed from the LRU.
 *
 * If this is the root of the dentry tree, return NULL.
 */
static struct dentry *d_kill(struct dentry *dentry)
	__releases(dentry->d_lock)
	__releases(dcache_lock)
{
	struct dentry *parent;

	list_del(&dentry->d_u.d_child);
	dentry_stat.nr_dentry--;	/* For d_free, below */
	/* drops the locks, at that point nobody can reach this dentry */
	dentry_iput(dentry);
	parent = dentry->d_parent;
	d_free(dentry);
	return dentry == parent ? NULL : parent;
}

/*
 * This is dput
 *
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */

/*
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 *
 * no dcache lock, please.
 */

void dput(struct dentry *dentry)
{
	if (!dentry)
		return;

repeat:
	if (atomic_read(&dentry->d_count) == 1)
		might_sleep();
	if (!atomic_dec_and_lock(&dentry->d_count, &dcache_lock))
		return;

	spin_lock(&dentry->d_lock);
	if (atomic_read(&dentry->d_count)) {
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_lock);
		return;
	}

	/*
	 * AV: ->d_delete() is _NOT_ allowed to block now.
	 */
	if (dentry->d_op && dentry->d_op->d_delete) {
		if (dentry->d_op->d_delete(dentry))
			goto unhash_it;
	}
	/* Unreachable? Get rid of it */
	if (d_unhashed(dentry))
		goto kill_it;
	if (list_empty(&dentry->d_lru)) {
		dentry->d_flags |= DCACHE_REFERENCED;
		dentry_lru_add(dentry);
	}
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_lock);
	return;

unhash_it:
	__d_drop(dentry);
kill_it:
	/* if dentry was on the d_lru list delete it from there */
	dentry_lru_del(dentry);
	dentry = d_kill(dentry);
	if (dentry)
		goto repeat;
}
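/*
 * Illustrative only: the usual calling pattern around the refcounting
 * above, with a hypothetical do_something() helper.  d_lookup() (see
 * below) takes a reference on success, which must be balanced by
 * dput():
 *
 *	struct dentry *child = d_lookup(parent, &name);
 *	if (child) {
 *		do_something(child);
 *		dput(child);		// may cascade up through parents
 *	}
 */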
/**
 * d_invalidate - invalidate a dentry
 * @dentry: dentry to invalidate
 *
 * Try to invalidate the dentry if it turns out to be
 * possible. If there are other dentries that can be
 * reached through this one we can't delete it and we
 * return -EBUSY. On success we return 0.
 *
 * no dcache lock.
 */

int d_invalidate(struct dentry * dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dcache_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dcache_lock);
		return 0;
	}
	/*
	 * Check whether to do a partial shrink_dcache
	 * to get rid of unused child entries.
	 */
	if (!list_empty(&dentry->d_subdirs)) {
		spin_unlock(&dcache_lock);
		shrink_dcache_parent(dentry);
		spin_lock(&dcache_lock);
	}

	/*
	 * Somebody else still using it?
	 *
	 * If it's a directory, we can't drop it
	 * for fear of somebody re-populating it
	 * with children (even though dropping it
	 * would make it unreachable from the root,
	 * we might still populate it if it was a
	 * working directory or similar).
	 */
	spin_lock(&dentry->d_lock);
	if (atomic_read(&dentry->d_count) > 1) {
		if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dcache_lock);
			return -EBUSY;
		}
	}

	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_lock);
	return 0;
}

/* This should be called _only_ with dcache_lock held */

static inline struct dentry * __dget_locked(struct dentry *dentry)
{
	atomic_inc(&dentry->d_count);
	dentry_lru_del_init(dentry);
	return dentry;
}

struct dentry * dget_locked(struct dentry *dentry)
{
	return __dget_locked(dentry);
}

/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 * @want_discon: flag, used by d_splice_alias, to request
 *		that only a DISCONNECTED alias be returned.
 *
 * If inode has a hashed alias, or is a directory and has any alias,
 * acquire the reference to alias and return it. Otherwise return NULL.
 * Notice that if inode is a directory there can be only one alias and
 * it can be unhashed only if it has no children, or if it is the root
 * of a filesystem.
 *
 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 * any other hashed alias over that one unless @want_discon is set,
 * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
 */

static struct dentry * __d_find_alias(struct inode *inode, int want_discon)
{
	struct list_head *head, *next, *tmp;
	struct dentry *alias, *discon_alias=NULL;

	head = &inode->i_dentry;
	next = inode->i_dentry.next;
	while (next != head) {
		tmp = next;
		next = tmp->next;
		prefetch(next);
		alias = list_entry(tmp, struct dentry, d_alias);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED))
				discon_alias = alias;
			else if (!want_discon) {
				__dget_locked(alias);
				return alias;
			}
		}
	}
	if (discon_alias)
		__dget_locked(discon_alias);
	return discon_alias;
}

struct dentry * d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!list_empty(&inode->i_dentry)) {
		spin_lock(&dcache_lock);
		de = __d_find_alias(inode, 0);
		spin_unlock(&dcache_lock);
	}
	return de;
}

/*
 * Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&dcache_lock);
	list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
		spin_lock(&dentry->d_lock);
		if (!atomic_read(&dentry->d_count)) {
			__dget_locked(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dcache_lock);
			dput(dentry);
			goto restart;
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&dcache_lock);
}

/*
 * Throw away a dentry - free the inode, dput the parent.  This requires
 * that the dentry has already been removed from the LRU list.
 *
 * Try to prune ancestors as well.  This is necessary to prevent
 * quadratic behavior of shrink_dcache_parent(), but is also expected
 * to be beneficial in reducing dentry cache fragmentation.
 */
static void prune_one_dentry(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dcache_lock)
	__acquires(dcache_lock)
{
	__d_drop(dentry);
	dentry = d_kill(dentry);

	/*
	 * Prune ancestors.  Locking is simpler than in dput(),
	 * because dcache_lock needs to be taken anyway.
	 */
	spin_lock(&dcache_lock);
	while (dentry) {
		if (!atomic_dec_and_lock(&dentry->d_count, &dentry->d_lock))
			return;

		if (dentry->d_op && dentry->d_op->d_delete)
			dentry->d_op->d_delete(dentry);
		dentry_lru_del_init(dentry);
		__d_drop(dentry);
		dentry = d_kill(dentry);
		spin_lock(&dcache_lock);
	}
}

/*
 * Shrink the dentry LRU on a given superblock.
 * @sb   : superblock to shrink dentry LRU.
 * @count: If count is NULL, we prune all dentries on superblock.
 * @flags: If flags is non-zero, we need to do special processing based on
 * which flags are set. This means we don't need to maintain multiple
 * similar copies of this loop.
 */
static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags)
{
	LIST_HEAD(referenced);
	LIST_HEAD(tmp);
	struct dentry *dentry;
	int cnt = 0;

	BUG_ON(!sb);
	BUG_ON((flags & DCACHE_REFERENCED) && count == NULL);
	spin_lock(&dcache_lock);
	if (count != NULL)
		/* called from prune_dcache() and shrink_dcache_parent() */
		cnt = *count;
restart:
	if (count == NULL)
		list_splice_init(&sb->s_dentry_lru, &tmp);
	else {
		while (!list_empty(&sb->s_dentry_lru)) {
			dentry = list_entry(sb->s_dentry_lru.prev,
					struct dentry, d_lru);
			BUG_ON(dentry->d_sb != sb);

			spin_lock(&dentry->d_lock);
			/*
			 * If we are honouring the DCACHE_REFERENCED flag and
			 * the dentry has this flag set, don't free it. Clear
			 * the flag and put it back on the LRU.
			 */
			if ((flags & DCACHE_REFERENCED)
				&& (dentry->d_flags & DCACHE_REFERENCED)) {
				dentry->d_flags &= ~DCACHE_REFERENCED;
				list_move_tail(&dentry->d_lru, &referenced);
				spin_unlock(&dentry->d_lock);
			} else {
				list_move_tail(&dentry->d_lru, &tmp);
				spin_unlock(&dentry->d_lock);
				cnt--;
				if (!cnt)
					break;
			}
		}
	}
	while (!list_empty(&tmp)) {
		dentry = list_entry(tmp.prev, struct dentry, d_lru);
		dentry_lru_del_init(dentry);
		spin_lock(&dentry->d_lock);
		/*
		 * We found an inuse dentry which was not removed from
		 * the LRU because of laziness during lookup.  Do not free
		 * it - just keep it off the LRU list.
		 */
		if (atomic_read(&dentry->d_count)) {
			spin_unlock(&dentry->d_lock);
			continue;
		}
		prune_one_dentry(dentry);
		/* dentry->d_lock was dropped in prune_one_dentry() */
		cond_resched_lock(&dcache_lock);
	}
	if (count == NULL && !list_empty(&sb->s_dentry_lru))
		goto restart;
	if (count != NULL)
		*count = cnt;
	if (!list_empty(&referenced))
		list_splice(&referenced, &sb->s_dentry_lru);
	spin_unlock(&dcache_lock);
}

/**
 * prune_dcache - shrink the dcache
 * @count: number of entries to try to free
 *
 * Shrink the dcache. This is done when we need more memory, or simply when we
 * need to unmount something (at which point we need to unuse all dentries).
 *
 * This function may fail to free any resources if all the dentries are in use.
 */
static void prune_dcache(int count)
{
	struct super_block *sb;
	int w_count;
	int unused = dentry_stat.nr_unused;
	int prune_ratio;
	int pruned;

	if (unused == 0 || count == 0)
		return;
	spin_lock(&dcache_lock);
restart:
	if (count >= unused)
		prune_ratio = 1;
	else
		prune_ratio = unused / count;
	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_nr_dentry_unused == 0)
			continue;
		sb->s_count++;
		/* Now, we reclaim unused dentries with fairness.
		 * We reclaim the same percentage from each superblock.
		 * We calculate the number of dentries to scan on this sb
		 * as follows, but the implementation is arranged to avoid
		 * overflows:
		 * number of dentries to scan on this sb =
		 * count * (number of dentries on this sb /
		 * number of dentries in the machine)
		 */
		spin_unlock(&sb_lock);
		if (prune_ratio != 1)
			w_count = (sb->s_nr_dentry_unused / prune_ratio) + 1;
		else
			w_count = sb->s_nr_dentry_unused;
		pruned = w_count;
		/*
		 * We need to be sure this filesystem isn't being unmounted,
		 * otherwise we could race with generic_shutdown_super(), and
		 * end up holding a reference to an inode while the filesystem
		 * is unmounted.  So we try to get s_umount, and make sure
		 * s_root isn't NULL.
		 */
		if (down_read_trylock(&sb->s_umount)) {
			if ((sb->s_root != NULL) &&
			    (!list_empty(&sb->s_dentry_lru))) {
				spin_unlock(&dcache_lock);
				__shrink_dcache_sb(sb, &w_count,
						DCACHE_REFERENCED);
				pruned -= w_count;
				spin_lock(&dcache_lock);
			}
			up_read(&sb->s_umount);
		}
		spin_lock(&sb_lock);
		count -= pruned;
		/*
		 * restart only when sb is no longer on the list and
		 * we have more work to do.
		 */
		if (__put_super_and_need_restart(sb) && count > 0) {
			spin_unlock(&sb_lock);
			goto restart;
		}
	}
	spin_unlock(&sb_lock);
	spin_unlock(&dcache_lock);
}
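/*
 * Worked example for the fairness calculation above (illustrative
 * numbers only): with count = 100 dentries to prune and unused = 1000
 * on the machine, prune_ratio is 10; a superblock holding 300 unused
 * dentries is then asked to shrink w_count = 300/10 + 1 = 31 of them,
 * i.e. roughly its proportional share of the global goal.
 */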
/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This
 * is used to free the dcache before unmounting a file
 * system
 */
void shrink_dcache_sb(struct super_block * sb)
{
	__shrink_dcache_sb(sb, NULL, 0);
}

/*
 * destroy a single subtree of dentries for unmount
 * - see the comments on shrink_dcache_for_umount() for a description of the
 *   locking
 */
static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
{
	struct dentry *parent;
	unsigned detached = 0;

	BUG_ON(!IS_ROOT(dentry));

	/* detach this root from the system */
	spin_lock(&dcache_lock);
	dentry_lru_del_init(dentry);
	__d_drop(dentry);
	spin_unlock(&dcache_lock);

	for (;;) {
		/* descend to the first leaf in the current subtree */
		while (!list_empty(&dentry->d_subdirs)) {
			struct dentry *loop;

			/* this is a branch with children - detach all of them
			 * from the system in one go */
			spin_lock(&dcache_lock);
			list_for_each_entry(loop, &dentry->d_subdirs,
					    d_u.d_child) {
				dentry_lru_del_init(loop);
				__d_drop(loop);
				cond_resched_lock(&dcache_lock);
			}
			spin_unlock(&dcache_lock);

			/* move to the first child */
			dentry = list_entry(dentry->d_subdirs.next,
					    struct dentry, d_u.d_child);
		}

		/* consume the dentries from this leaf up through its parents
		 * until we find one with children or run out altogether */
		do {
			struct inode *inode;

			if (atomic_read(&dentry->d_count) != 0) {
				printk(KERN_ERR
				       "BUG: Dentry %p{i=%lx,n=%s}"
				       " still in use (%d)"
				       " [unmount of %s %s]\n",
				       dentry,
				       dentry->d_inode ?
				       dentry->d_inode->i_ino : 0UL,
				       dentry->d_name.name,
				       atomic_read(&dentry->d_count),
				       dentry->d_sb->s_type->name,
				       dentry->d_sb->s_id);
				BUG();
			}

			parent = dentry->d_parent;
			if (parent == dentry)
				parent = NULL;
			else
				atomic_dec(&parent->d_count);

			list_del(&dentry->d_u.d_child);
			detached++;

			inode = dentry->d_inode;
			if (inode) {
				dentry->d_inode = NULL;
				list_del_init(&dentry->d_alias);
				if (dentry->d_op && dentry->d_op->d_iput)
					dentry->d_op->d_iput(dentry, inode);
				else
					iput(inode);
			}

			d_free(dentry);

			/* finished when we fall off the top of the tree,
			 * otherwise we ascend to the parent and move to the
			 * next sibling if there is one */
			if (!parent)
				goto out;

			dentry = parent;

		} while (list_empty(&dentry->d_subdirs));

		dentry = list_entry(dentry->d_subdirs.next,
				    struct dentry, d_u.d_child);
	}
out:
	/* several dentries were freed, need to correct nr_dentry */
	spin_lock(&dcache_lock);
	dentry_stat.nr_dentry -= detached;
	spin_unlock(&dcache_lock);
}

/*
 * destroy the dentries attached to a superblock on unmounting
 * - we don't need to use dentry->d_lock, and only need dcache_lock when
 *   removing the dentry from the system lists and hashes because:
 *   - the superblock is detached from all mountings and open files, so the
 *     dentry trees will not be rearranged by the VFS
 *   - s_umount is write-locked, so the memory pressure shrinker will ignore
 *     any dentries belonging to this superblock that it comes across
 *   - the filesystem itself is no longer permitted to rearrange the dentries
 *     in this superblock
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	if (down_read_trylock(&sb->s_umount))
		BUG();

	dentry = sb->s_root;
	sb->s_root = NULL;
	atomic_dec(&dentry->d_count);
	shrink_dcache_for_umount_subtree(dentry);

	while (!hlist_empty(&sb->s_anon)) {
		dentry = hlist_entry(sb->s_anon.first, struct dentry, d_hash);
		shrink_dcache_for_umount_subtree(dentry);
	}
}

/*
 * Search for at least 1 mount point in the dentry's subdirs.
 * We descend to the next level whenever the d_subdirs
 * list is non-empty and continue searching.
 */

/**
 * have_submounts - check for mounts over a dentry
 * @parent: dentry to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point
 */

int have_submounts(struct dentry *parent)
{
	struct dentry *this_parent = parent;
	struct list_head *next;

	spin_lock(&dcache_lock);
	if (d_mountpoint(parent))
		goto positive;
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;
		/* Have we found a mount point ? */
		if (d_mountpoint(dentry))
			goto positive;
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		next = this_parent->d_u.d_child.next;
		this_parent = this_parent->d_parent;
		goto resume;
	}
	spin_unlock(&dcache_lock);
	return 0; /* No mount points found in tree */
positive:
	spin_unlock(&dcache_lock);
	return 1;
}

/*
 * Search the dentry child list for the specified parent,
 * and move any unused dentries to the end of the unused
 * list for prune_dcache(). We descend to the next level
 * whenever the d_subdirs list is non-empty and continue
 * searching.
 *
 * It returns zero iff there are no unused children,
 * otherwise it returns the number of children moved to
 * the end of the unused list. This may not be the total
 * number of unused children, because select_parent can
 * drop the lock and return early due to latency
 * constraints.
 */
static int select_parent(struct dentry * parent)
{
	struct dentry *this_parent = parent;
	struct list_head *next;
	int found = 0;

	spin_lock(&dcache_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		dentry_lru_del_init(dentry);
		/*
		 * move only zero ref count dentries to the end
		 * of the unused list for prune_dcache
		 */
		if (!atomic_read(&dentry->d_count)) {
			dentry_lru_add_tail(dentry);
			found++;
		}

		/*
		 * We can return to the caller if we have found some (this
		 * ensures forward progress). We'll be coming back to find
		 * the rest.
		 */
		if (found && need_resched())
			goto out;

		/*
		 * Descend a level if the d_subdirs list is non-empty.
		 */
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		next = this_parent->d_u.d_child.next;
		this_parent = this_parent->d_parent;
		goto resume;
	}
out:
	spin_unlock(&dcache_lock);
	return found;
}

/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */

void shrink_dcache_parent(struct dentry * parent)
{
	struct super_block *sb = parent->d_sb;
	int found;

	while ((found = select_parent(parent)) != 0)
		__shrink_dcache_sb(sb, &found, 0);
}

/*
 * Scan `nr' dentries and return the number which remain.
 *
 * We need to avoid reentering the filesystem if the caller is performing a
 * GFP_NOFS allocation attempt.  One example deadlock is:
 *
 * ext2_new_block->getblk->GFP->shrink_dcache_memory->prune_dcache->
 * prune_one_dentry->dput->dentry_iput->iput->inode->i_sb->s_op->put_inode->
 * ext2_discard_prealloc->ext2_free_blocks->lock_super->DEADLOCK.
 *
 * In this case we return -1 to tell the caller that we bailed.
 */
static int shrink_dcache_memory(int nr, gfp_t gfp_mask)
{
	if (nr) {
		if (!(gfp_mask & __GFP_FS))
			return -1;
		prune_dcache(nr);
	}
	return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker dcache_shrinker = {
	.shrink = shrink_dcache_memory,
	.seeks = DEFAULT_SEEKS,
};

/**
 * d_alloc	-	allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On success the dentry is returned. The name passed in is
 * copied and may be reused after this call.
 */

struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	if (name->len > DNAME_INLINE_LEN-1) {
		dname = kmalloc(name->len + 1, GFP_KERNEL);
		if (!dname) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	} else {
		dname = dentry->d_iname;
	}
	dentry->d_name.name = dname;

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	atomic_set(&dentry->d_count, 1);
	dentry->d_flags = DCACHE_UNHASHED;
	spin_lock_init(&dentry->d_lock);
	dentry->d_inode = NULL;
	dentry->d_parent = NULL;
	dentry->d_sb = NULL;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	dentry->d_mounted = 0;
#ifdef CONFIG_PROFILING
	dentry->d_cookie = NULL;
#endif
	INIT_HLIST_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_LIST_HEAD(&dentry->d_alias);

	if (parent) {
		dentry->d_parent = dget(parent);
		dentry->d_sb = parent->d_sb;
	} else {
		INIT_LIST_HEAD(&dentry->d_u.d_child);
	}

	spin_lock(&dcache_lock);
	if (parent)
		list_add(&dentry->d_u.d_child, &parent->d_subdirs);
	dentry_stat.nr_dentry++;
	spin_unlock(&dcache_lock);

	return dentry;
}

struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
	struct qstr q;

	q.name = name;
	q.len = strlen(name);
	q.hash = full_name_hash(q.name, q.len);
	return d_alloc(parent, &q);
}

/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */

void d_instantiate(struct dentry *entry, struct inode * inode)
{
	BUG_ON(!list_empty(&entry->d_alias));
	spin_lock(&dcache_lock);
	if (inode)
		list_add(&entry->d_alias, &inode->i_dentry);
	entry->d_inode = inode;
	fsnotify_d_instantiate(entry, inode);
	spin_unlock(&dcache_lock);
	security_d_instantiate(entry, inode);
}
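/*
 * Illustrative only: the usual pairing of d_alloc_name() with
 * d_instantiate(), as used by internal filesystems that build their
 * own dentries.  somefs_new_inode() is a hypothetical helper that
 * returns an inode whose refcount is handed over to the dcache, as
 * the NOTE above requires:
 *
 *	dentry = d_alloc_name(parent, "example");
 *	if (!dentry)
 *		return -ENOMEM;
 *	inode = somefs_new_inode(parent->d_sb);
 *	d_instantiate(dentry, inode);
 */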
/**
 * d_instantiate_unique - instantiate a non-aliased dentry
 * @entry: dentry to instantiate
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry. On success, it returns NULL.
 * If an unhashed alias of "entry" already exists, then we return the
 * aliased dentry instead and drop one reference to inode.
 *
 * Note that in order to avoid conflicts with rename() etc, the caller
 * had better be holding the parent directory semaphore.
 *
 * This also assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
static struct dentry *__d_instantiate_unique(struct dentry *entry,
					     struct inode *inode)
{
	struct dentry *alias;
	int len = entry->d_name.len;
	const char *name = entry->d_name.name;
	unsigned int hash = entry->d_name.hash;

	if (!inode) {
		entry->d_inode = NULL;
		return NULL;
	}

	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		struct qstr *qstr = &alias->d_name;

		if (qstr->hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
		if (qstr->len != len)
			continue;
		if (memcmp(qstr->name, name, len))
			continue;
		dget_locked(alias);
		return alias;
	}

	list_add(&entry->d_alias, &inode->i_dentry);
	entry->d_inode = inode;
	fsnotify_d_instantiate(entry, inode);
	return NULL;
}

struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
{
	struct dentry *result;

	BUG_ON(!list_empty(&entry->d_alias));

	spin_lock(&dcache_lock);
	result = __d_instantiate_unique(entry, inode);
	spin_unlock(&dcache_lock);

	if (!result) {
		security_d_instantiate(entry, inode);
		return NULL;
	}

	BUG_ON(!d_unhashed(result));
	iput(inode);
	return result;
}

EXPORT_SYMBOL(d_instantiate_unique);

/**
 * d_alloc_root - allocate root dentry
 * @root_inode: inode to allocate the root for
 *
 * Allocate a root ("/") dentry for the inode given. The inode is
 * instantiated and returned. %NULL is returned if there is insufficient
 * memory or the inode passed is %NULL.
 */

struct dentry * d_alloc_root(struct inode * root_inode)
{
	struct dentry *res = NULL;

	if (root_inode) {
		static const struct qstr name = { .name = "/", .len = 1 };

		res = d_alloc(NULL, &name);
		if (res) {
			res->d_sb = root_inode->i_sb;
			res->d_parent = res;
			d_instantiate(res, root_inode);
		}
	}
	return res;
}
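/*
 * Illustrative only: the usual fill_super-time use of d_alloc_root(),
 * with a hypothetical somefs_get_root_inode() helper.  On failure the
 * inode reference was not consumed, so it must be dropped by hand:
 *
 *	root = somefs_get_root_inode(sb);
 *	sb->s_root = d_alloc_root(root);
 *	if (!sb->s_root) {
 *		iput(root);
 *		return -ENOMEM;
 *	}
 */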
static inline struct hlist_head *d_hash(struct dentry *parent,
					unsigned long hash)
{
	hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
	hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
	return dentry_hashtable + (hash & D_HASHMASK);
}

/**
 * d_alloc_anon - allocate an anonymous dentry
 * @inode: inode to allocate the dentry for
 *
 * This is similar to d_alloc_root.  It is used by filesystems when
 * creating a dentry for a given inode, often in the process of
 * mapping a filehandle to a dentry.  The returned dentry may be
 * anonymous, or may have a full name (if the inode was already
 * in the cache).  The file system may need to make further
 * efforts to connect this dentry into the dcache properly.
 *
 * When called on a directory inode, we must ensure that
 * the inode only ever has one dentry.  If a dentry is
 * found, that is returned instead of allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry.  If %NULL is returned (indicating kmalloc failure),
 * the reference on the inode has not been released.
 */

struct dentry * d_alloc_anon(struct inode *inode)
{
	static const struct qstr anonstring = { .name = "" };
	struct dentry *tmp;
	struct dentry *res;

	if ((res = d_find_alias(inode))) {
		iput(inode);
		return res;
	}

	tmp = d_alloc(NULL, &anonstring);
	if (!tmp)
		return NULL;

	tmp->d_parent = tmp; /* make sure dput doesn't croak */

	spin_lock(&dcache_lock);
	res = __d_find_alias(inode, 0);
	if (!res) {
		/* attach a disconnected dentry */
		res = tmp;
		tmp = NULL;
		spin_lock(&res->d_lock);
		res->d_sb = inode->i_sb;
		res->d_parent = res;
		res->d_inode = inode;
		res->d_flags |= DCACHE_DISCONNECTED;
		res->d_flags &= ~DCACHE_UNHASHED;
		list_add(&res->d_alias, &inode->i_dentry);
		hlist_add_head(&res->d_hash, &inode->i_sb->s_anon);
		spin_unlock(&res->d_lock);

		inode = NULL; /* don't drop reference */
	}
	spin_unlock(&dcache_lock);

	if (inode)
		iput(inode);
	if (tmp)
		dput(tmp);
	return res;
}


/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and
 * DCACHE_DISCONNECTED), then d_move that in place of the given dentry
 * and return it, else simply d_add the inode to the dentry and return NULL.
 *
 * This is needed in the lookup routine of any filesystem that is exportable
 * (via knfsd) so that we can build dcache paths to directories effectively.
 *
 * If a dentry was found and moved, then it is returned.  Otherwise NULL
 * is returned.  This matches the expected return value of ->lookup.
 *
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	struct dentry *new = NULL;

	if (inode && S_ISDIR(inode->i_mode)) {
		spin_lock(&dcache_lock);
		new = __d_find_alias(inode, 1);
		if (new) {
			BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
			fsnotify_d_instantiate(new, inode);
			spin_unlock(&dcache_lock);
			security_d_instantiate(new, inode);
			d_rehash(dentry);
			d_move(new, dentry);
			iput(inode);
		} else {
			/* d_instantiate takes dcache_lock, so we do it by hand */
			list_add(&dentry->d_alias, &inode->i_dentry);
			dentry->d_inode = inode;
			fsnotify_d_instantiate(dentry, inode);
			spin_unlock(&dcache_lock);
			security_d_instantiate(dentry, inode);
			d_rehash(dentry);
		}
	} else
		d_add(dentry, inode);
	return new;
}
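/*
 * Illustrative only: how an exportable filesystem's ->lookup() would
 * typically use d_splice_alias().  The somefs_* names are hypothetical:
 *
 *	static struct dentry *somefs_lookup(struct inode *dir,
 *			struct dentry *dentry, struct nameidata *nd)
 *	{
 *		struct inode *inode = somefs_get_inode(dir, &dentry->d_name);
 *		if (IS_ERR(inode))
 *			return ERR_CAST(inode);
 *		return d_splice_alias(inode, dentry);
 *	}
 */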
/**
 * d_lookup - search for a dentry
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 *
 * Searches the children of the parent dentry for the name in question. If
 * the dentry is found its reference count is incremented and the dentry
 * is returned. The caller must use dput to free the entry when it has
 * finished using it. %NULL is returned on failure.
 *
 * __d_lookup is dcache_lock free. The hash list is protected using RCU.
 * Memory barriers are used while updating and doing lockless traversal.
 * To avoid races with d_move while rename is happening, d_lock is used.
 *
 * Overflows in memcmp() while d_move is in progress are avoided by
 * keeping the length and name pointer in one structure pointed to by
 * d_qstr.
 *
 * rcu_read_lock() and rcu_read_unlock() are used to disable preemption while
 * lookup is going on.
 *
 * The dentry unused LRU is not updated even if lookup finds the required dentry
 * in there. It is updated in places such as prune_dcache, shrink_dcache_sb,
 * select_parent and __dget_locked. This laziness saves lookup from dcache_lock
 * acquisition.
 *
 * d_lookup() is protected against concurrent renames in some unrelated
 * directory using the seqlock_t rename_lock.
 */

struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
{
	struct dentry * dentry = NULL;
	unsigned long seq;

	do {
		seq = read_seqbegin(&rename_lock);
		dentry = __d_lookup(parent, name);
		if (dentry)
			break;
	} while (read_seqretry(&rename_lock, seq));
	return dentry;
}

struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
{
	unsigned int len = name->len;
	unsigned int hash = name->hash;
	const unsigned char *str = name->name;
	struct hlist_head *head = d_hash(parent,hash);
	struct dentry *found = NULL;
	struct hlist_node *node;
	struct dentry *dentry;

	rcu_read_lock();

	hlist_for_each_entry_rcu(dentry, node, head, d_hash) {
		struct qstr *qstr;

		if (dentry->d_name.hash != hash)
			continue;
		if (dentry->d_parent != parent)
			continue;

		spin_lock(&dentry->d_lock);

		/*
		 * Recheck the dentry after taking the lock - d_move may have
		 * changed things.  Don't bother checking the hash because we're
		 * about to compare the whole name anyway.
		 */
		if (dentry->d_parent != parent)
			goto next;

		/*
		 * It is safe to compare names since d_move() cannot
		 * change the qstr (protected by d_lock).
		 */
		qstr = &dentry->d_name;
		if (parent->d_op && parent->d_op->d_compare) {
			if (parent->d_op->d_compare(parent, qstr, name))
				goto next;
		} else {
			if (qstr->len != len)
				goto next;
			if (memcmp(qstr->name, str, len))
				goto next;
		}

		if (!d_unhashed(dentry)) {
			atomic_inc(&dentry->d_count);
			found = dentry;
		}
		spin_unlock(&dentry->d_lock);
		break;
next:
		spin_unlock(&dentry->d_lock);
	}
	rcu_read_unlock();

	return found;
}
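/*
 * Illustrative only: a d_compare() method as consumed by the loop
 * above, where a nonzero return means "no match".  A hypothetical
 * case-insensitive filesystem might use:
 *
 *	static int somefs_compare(struct dentry *parent, struct qstr *a,
 *				  struct qstr *b)
 *	{
 *		if (a->len != b->len)
 *			return 1;
 *		return strnicmp(a->name, b->name, a->len) != 0;
 *	}
 */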
/**
 * d_hash_and_lookup - hash the qstr then search for a dentry
 * @dir: Directory to search in
 * @name: qstr of name we wish to find
 *
 * On hash failure or on lookup failure NULL is returned.
 */
struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
{
	struct dentry *dentry = NULL;

	/*
	 * Check for a fs-specific hash function. Note that we must
	 * calculate the standard hash first, as the d_op->d_hash()
	 * routine may choose to leave the hash value unchanged.
	 */
	name->hash = full_name_hash(name->name, name->len);
	if (dir->d_op && dir->d_op->d_hash) {
		if (dir->d_op->d_hash(dir, name) < 0)
			goto out;
	}
	dentry = d_lookup(dir, name);
out:
	return dentry;
}

/**
 * d_validate - verify dentry provided from insecure source
 * @dentry: The dentry alleged to be valid child of @dparent
 * @dparent: The parent dentry (known to be valid)
 *
 * An insecure source has sent us a dentry, here we verify it and dget() it.
 * This is used by ncpfs in its readdir implementation.
 * Zero is returned if the dentry is invalid.
 */

int d_validate(struct dentry *dentry, struct dentry *dparent)
{
	struct hlist_head *base;
	struct hlist_node *lhp;

	/* Check whether the ptr might be valid at all.. */
	if (!kmem_ptr_validate(dentry_cache, dentry))
		goto out;

	if (dentry->d_parent != dparent)
		goto out;

	spin_lock(&dcache_lock);
	base = d_hash(dparent, dentry->d_name.hash);
	hlist_for_each(lhp, base) {
		/* hlist_for_each_entry_rcu() not required for d_hash list
		 * as it is parsed under dcache_lock
		 */
		if (dentry == hlist_entry(lhp, struct dentry, d_hash)) {
			__dget_locked(dentry);
			spin_unlock(&dcache_lock);
			return 1;
		}
	}
	spin_unlock(&dcache_lock);
out:
	return 0;
}

/*
 * When a file is deleted, we have two options:
 * - turn this dentry into a negative dentry
 * - unhash this dentry and free it.
 *
 * Usually, we want to just turn this into
 * a negative dentry, but if anybody else is
 * currently using the dentry or the inode
 * we can't do that and we fall back on removing
 * it from the hash queues and waiting for
 * it to be deleted later when it has no users
 */

/**
 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * Turn the dentry into a negative dentry if possible, otherwise
 * remove it from the hash queues so it can be deleted later
 */

void d_delete(struct dentry * dentry)
{
	int isdir = 0;
	/*
	 * Are we the only user?
	 */
	spin_lock(&dcache_lock);
	spin_lock(&dentry->d_lock);
	isdir = S_ISDIR(dentry->d_inode->i_mode);
	if (atomic_read(&dentry->d_count) == 1) {
		dentry_iput(dentry);
		fsnotify_nameremove(dentry, isdir);
		return;
	}

	if (!d_unhashed(dentry))
		__d_drop(dentry);

	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_lock);

	fsnotify_nameremove(dentry, isdir);
}

static void __d_rehash(struct dentry * entry, struct hlist_head *list)
{
	entry->d_flags &= ~DCACHE_UNHASHED;
	hlist_add_head_rcu(&entry->d_hash, list);
}

static void _d_rehash(struct dentry * entry)
{
	__d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
}

/**
 * d_rehash	- add an entry back to the hash
 * @entry: dentry to add to the hash
 *
 * Adds a dentry to the hash according to its name.
 */

void d_rehash(struct dentry * entry)
{
	spin_lock(&dcache_lock);
	spin_lock(&entry->d_lock);
	_d_rehash(entry);
	spin_unlock(&entry->d_lock);
	spin_unlock(&dcache_lock);
}

#define do_switch(x,y) do { \
	__typeof__ (x) __tmp = x; \
	x = y; y = __tmp; } while (0)

/*
 * When switching names, the actual string doesn't strictly have to
 * be preserved in the target - because we're dropping the target
 * anyway. As such, we can just do a simple memcpy() to copy over
 * the new name before we switch.
 *
 * Note that we have to be a lot more careful about getting the hash
 * switched - we have to switch the hash value properly even if it
 * then no longer matches the actual (corrupted) string of the target.
 * The hash value has to match the hash queue that the dentry is on..
 */
static void switch_names(struct dentry *dentry, struct dentry *target)
{
	if (dname_external(target)) {
		if (dname_external(dentry)) {
			/*
			 * Both external: swap the pointers
			 */
			do_switch(target->d_name.name, dentry->d_name.name);
		} else {
			/*
			 * dentry:internal, target:external.  Steal target's
			 * storage and make target internal.
			 */
			memcpy(target->d_iname, dentry->d_name.name,
					dentry->d_name.len + 1);
			dentry->d_name.name = target->d_name.name;
			target->d_name.name = target->d_iname;
		}
	} else {
		if (dname_external(dentry)) {
			/*
			 * dentry:external, target:internal.  Give dentry's
			 * storage to target and make dentry internal
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			target->d_name.name = dentry->d_name.name;
			dentry->d_name.name = dentry->d_iname;
		} else {
			/*
			 * Both are internal.  Just copy target to dentry
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
		}
	}
}

/*
 * We cannibalize "target" when moving dentry on top of it,
 * because it's going to be thrown away anyway. We could be more
 * polite about it, though.
 *
 * This forceful removal will result in ugly /proc output if
 * somebody holds a file open that got deleted due to a rename.
 * We could be nicer about the deleted file, and let it show
 * up under the name it had before it was deleted rather than
 * under the original name of the file that was moved on top of it.
 */

/*
 * d_move_locked - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way.
 */
static void d_move_locked(struct dentry * dentry, struct dentry * target)
{
	struct hlist_head *list;

	if (!dentry->d_inode)
		printk(KERN_WARNING "VFS: moving negative dcache entry\n");

	write_seqlock(&rename_lock);
	/*
	 * XXXX: do we really need to take target->d_lock?
	 */
	if (target < dentry) {
		spin_lock(&target->d_lock);
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	} else {
		spin_lock(&dentry->d_lock);
		spin_lock_nested(&target->d_lock, DENTRY_D_LOCK_NESTED);
	}

	/* Move the dentry to the target hash queue, if on different bucket */
	if (d_unhashed(dentry))
		goto already_unhashed;

	hlist_del_rcu(&dentry->d_hash);

already_unhashed:
	list = d_hash(target->d_parent, target->d_name.hash);
	__d_rehash(dentry, list);

	/* Unhash the target: dput() will then get rid of it */
	__d_drop(target);

	list_del(&dentry->d_u.d_child);
	list_del(&target->d_u.d_child);

	/* Switch the names.. */
	switch_names(dentry, target);
	do_switch(dentry->d_name.len, target->d_name.len);
	do_switch(dentry->d_name.hash, target->d_name.hash);

	/* ... and switch the parents */
	if (IS_ROOT(dentry)) {
		dentry->d_parent = target->d_parent;
		target->d_parent = target;
		INIT_LIST_HEAD(&target->d_u.d_child);
	} else {
		do_switch(dentry->d_parent, target->d_parent);

		/* And add them back to the (new) parent lists */
		list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
	}

	list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
	spin_unlock(&target->d_lock);
	fsnotify_d_move(dentry);
	spin_unlock(&dentry->d_lock);
	write_sequnlock(&rename_lock);
}

/**
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way.
 */

void d_move(struct dentry * dentry, struct dentry * target)
{
	spin_lock(&dcache_lock);
	d_move_locked(dentry, target);
	spin_unlock(&dcache_lock);
}

/*
 * Helper that returns 1 if p1 is a parent of p2, else 0
 */
static int d_isparent(struct dentry *p1, struct dentry *p2)
{
	struct dentry *p;

	for (p = p2; p->d_parent != p; p = p->d_parent) {
		if (p->d_parent == p1)
			return 1;
	}
	return 0;
}

/*
 * This helper attempts to cope with remotely renamed directories
 *
 * It assumes that the caller is already holding
 * dentry->d_parent->d_inode->i_mutex and the dcache_lock
 *
 * Note: If ever the locking in lock_rename() changes, then please
 * remember to update this too...
 */
static struct dentry *__d_unalias(struct dentry *dentry, struct dentry *alias)
	__releases(dcache_lock)
{
	struct mutex *m1 = NULL, *m2 = NULL;
	struct dentry *ret;

	/* If alias and dentry share a parent, then no extra locks required */
	if (alias->d_parent == dentry->d_parent)
		goto out_unalias;

	/* Check for loops */
	ret = ERR_PTR(-ELOOP);
	if (d_isparent(alias, dentry))
		goto out_err;

	/* See lock_rename() */
	ret = ERR_PTR(-EBUSY);
	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
		goto out_err;
	m1 = &dentry->d_sb->s_vfs_rename_mutex;
	if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
		goto out_err;
	m2 = &alias->d_parent->d_inode->i_mutex;
out_unalias:
	d_move_locked(alias, dentry);
	ret = alias;
out_err:
	spin_unlock(&dcache_lock);
	if (m2)
		mutex_unlock(m2);
	if (m1)
		mutex_unlock(m1);
	return ret;
}

/*
 * Prepare an anonymous dentry for life in the superblock's dentry tree as a
 * named dentry in place of the dentry to be replaced.
 */
static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
{
	struct dentry *dparent, *aparent;

	switch_names(dentry, anon);
	do_switch(dentry->d_name.len, anon->d_name.len);
	do_switch(dentry->d_name.hash, anon->d_name.hash);

	dparent = dentry->d_parent;
	aparent = anon->d_parent;

	dentry->d_parent = (aparent == anon) ? dentry : aparent;
	list_del(&dentry->d_u.d_child);
	if (!IS_ROOT(dentry))
		list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
	else
		INIT_LIST_HEAD(&dentry->d_u.d_child);

	anon->d_parent = (dparent == dentry) ? anon : dparent;
	list_del(&anon->d_u.d_child);
	if (!IS_ROOT(anon))
		list_add(&anon->d_u.d_child, &anon->d_parent->d_subdirs);
	else
		INIT_LIST_HEAD(&anon->d_u.d_child);

	anon->d_flags &= ~DCACHE_DISCONNECTED;
}

/**
 * d_materialise_unique - introduce an inode into the tree
 * @dentry: candidate dentry
 * @inode: inode to bind to the dentry, to which aliases may be attached
 *
 * Introduces a dentry into the tree, substituting an extant disconnected
 * root directory alias in its place if there is one
 */
struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
{
	struct dentry *actual;

	BUG_ON(!d_unhashed(dentry));

	spin_lock(&dcache_lock);

	if (!inode) {
		actual = dentry;
		dentry->d_inode = NULL;
		goto found_lock;
	}

	if (S_ISDIR(inode->i_mode)) {
		struct dentry *alias;

		/* Does an aliased dentry already exist? */
		alias = __d_find_alias(inode, 0);
		if (alias) {
			actual = alias;
			/* Is this an anonymous mountpoint that we could splice
			 * into our tree? */
			if (IS_ROOT(alias)) {
				spin_lock(&alias->d_lock);
				__d_materialise_dentry(dentry, alias);
				__d_drop(alias);
				goto found;
			}
			/* Nope, but we must(!)
			 * avoid directory aliasing */
			actual = __d_unalias(dentry, alias);
			if (IS_ERR(actual))
				dput(alias);
			goto out_nolock;
		}
	}

	/* Add a unique reference */
	actual = __d_instantiate_unique(dentry, inode);
	if (!actual)
		actual = dentry;
	else if (unlikely(!d_unhashed(actual)))
		goto shouldnt_be_hashed;

found_lock:
	spin_lock(&actual->d_lock);
found:
	_d_rehash(actual);
	spin_unlock(&actual->d_lock);
	spin_unlock(&dcache_lock);
out_nolock:
	if (actual == dentry) {
		security_d_instantiate(dentry, inode);
		return NULL;
	}

	iput(inode);
	return actual;

shouldnt_be_hashed:
	spin_unlock(&dcache_lock);
	BUG();
}

static int prepend(char **buffer, int *buflen, const char *str, int namelen)
{
	*buflen -= namelen;
	if (*buflen < 0)
		return -ENAMETOOLONG;
	*buffer -= namelen;
	memcpy(*buffer, str, namelen);
	return 0;
}

static int prepend_name(char **buffer, int *buflen, struct qstr *name)
{
	return prepend(buffer, buflen, name->name, name->len);
}

/**
 * __d_path - return the path of a dentry
 * @path: the dentry/vfsmount to report
 * @root: root vfsmnt/dentry (may be modified by this function)
 * @buffer: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name. If the entry has been deleted
 * the string " (deleted)" is appended. Note that this is ambiguous.
 *
 * Returns the buffer or an error code if the path was too long.
 *
 * "buflen" should be positive. Caller holds the dcache_lock.
 *
 * If path is not reachable from the supplied root, then the value of
 * root is changed (without modifying refcounts).
 */
char *__d_path(const struct path *path, struct path *root,
	       char *buffer, int buflen)
{
	struct dentry *dentry = path->dentry;
	struct vfsmount *vfsmnt = path->mnt;
	char *end = buffer + buflen;
	char *retval;

	spin_lock(&vfsmount_lock);
	prepend(&end, &buflen, "\0", 1);
	if (!IS_ROOT(dentry) && d_unhashed(dentry) &&
		(prepend(&end, &buflen, " (deleted)", 10) != 0))
		goto Elong;

	if (buflen < 1)
		goto Elong;
	/* Get '/' right */
	retval = end-1;
	*retval = '/';

	for (;;) {
		struct dentry * parent;

		if (dentry == root->dentry && vfsmnt == root->mnt)
			break;
		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
			/* Global root? */
			if (vfsmnt->mnt_parent == vfsmnt) {
				goto global_root;
			}
			dentry = vfsmnt->mnt_mountpoint;
			vfsmnt = vfsmnt->mnt_parent;
			continue;
		}
		parent = dentry->d_parent;
		prefetch(parent);
		if ((prepend_name(&end, &buflen, &dentry->d_name) != 0) ||
		    (prepend(&end, &buflen, "/", 1) != 0))
			goto Elong;
		retval = end;
		dentry = parent;
	}

out:
	spin_unlock(&vfsmount_lock);
	return retval;

global_root:
	retval += 1;	/* hit the slash */
	if (prepend_name(&retval, &buflen, &dentry->d_name) != 0)
		goto Elong;
	root->mnt = vfsmnt;
	root->dentry = dentry;
	goto out;

Elong:
	retval = ERR_PTR(-ENAMETOOLONG);
	goto out;
}

/**
 * d_path - return the path of a dentry
 * @path: path to report
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name. If the entry has been deleted
 * the string " (deleted)" is appended. Note that this is ambiguous.
 *
 * Returns the buffer or an error code if the path was too long.
 *
 * "buflen" should be positive.
 */
char *d_path(const struct path *path, char *buf, int buflen)
{
	char *res;
	struct path root;
	struct path tmp;

	/*
	 * We have various synthetic filesystems that never get mounted.  On
	 * these filesystems dentries are never used for lookup purposes, and
	 * thus don't need to be hashed.  They also don't need a name until a
	 * user wants to identify the object in /proc/pid/fd/.  The little hack
	 * below allows us to generate a name for these objects on demand:
	 */
	if (path->dentry->d_op && path->dentry->d_op->d_dname)
		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);

	read_lock(&current->fs->lock);
	root = current->fs->root;
	path_get(&root);
	read_unlock(&current->fs->lock);
	spin_lock(&dcache_lock);
	tmp = root;
	res = __d_path(path, &tmp, buf, buflen);
	spin_unlock(&dcache_lock);
	path_put(&root);
	return res;
}

/*
 * Helper function for dentry_operations.d_dname() members
 */
char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
			const char *fmt, ...)
{
	va_list args;
	char temp[64];
	int sz;

	va_start(args, fmt);
	sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
	va_end(args);

	if (sz > sizeof(temp) || sz > buflen)
		return ERR_PTR(-ENAMETOOLONG);

	buffer += buflen - sz;
	return memcpy(buffer, temp, sz);
}
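/*
 * Illustrative only: a typical d_dname() method built on
 * dynamic_dname().  "somefs" is a hypothetical filesystem name:
 *
 *	static char *somefs_dname(struct dentry *dentry, char *buffer,
 *				  int buflen)
 *	{
 *		return dynamic_dname(dentry, buffer, buflen, "somefs:[%lu]",
 *				dentry->d_inode->i_ino);
 *	}
 */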
/*
 * Write full pathname from the root of the filesystem into the buffer.
 */
char *dentry_path(struct dentry *dentry, char *buf, int buflen)
{
	char *end = buf + buflen;
	char *retval;

	spin_lock(&dcache_lock);
	prepend(&end, &buflen, "\0", 1);
	if (!IS_ROOT(dentry) && d_unhashed(dentry) &&
		(prepend(&end, &buflen, "//deleted", 9) != 0))
		goto Elong;
	if (buflen < 1)
		goto Elong;
	/* Get '/' right */
	retval = end-1;
	*retval = '/';

	while (!IS_ROOT(dentry)) {
		struct dentry *parent = dentry->d_parent;

		prefetch(parent);
		if ((prepend_name(&end, &buflen, &dentry->d_name) != 0) ||
		    (prepend(&end, &buflen, "/", 1) != 0))
			goto Elong;

		retval = end;
		dentry = parent;
	}
	spin_unlock(&dcache_lock);
	return retval;
Elong:
	spin_unlock(&dcache_lock);
	return ERR_PTR(-ENAMETOOLONG);
}

/*
 * NOTE! The user-level library version returns a
 * character pointer. The kernel system call just
 * returns the length of the buffer filled (which
 * includes the ending '\0' character), or a negative
 * error value. So libc would do something like
 *
 *	char *getcwd(char * buf, size_t size)
 *	{
 *		int retval;
 *
 *		retval = sys_getcwd(buf, size);
 *		if (retval >= 0)
 *			return buf;
 *		errno = -retval;
 *		return NULL;
 *	}
 */
asmlinkage long sys_getcwd(char __user *buf, unsigned long size)
{
	int error;
	struct path pwd, root;
	char *page = (char *) __get_free_page(GFP_USER);

	if (!page)
		return -ENOMEM;

	read_lock(&current->fs->lock);
	pwd = current->fs->pwd;
	path_get(&pwd);
	root = current->fs->root;
	path_get(&root);
	read_unlock(&current->fs->lock);

	error = -ENOENT;
	/* Has the current directory been unlinked? */
	spin_lock(&dcache_lock);
	if (IS_ROOT(pwd.dentry) || !d_unhashed(pwd.dentry)) {
		unsigned long len;
		struct path tmp = root;
		char * cwd;

		cwd = __d_path(&pwd, &tmp, page, PAGE_SIZE);
		spin_unlock(&dcache_lock);

		error = PTR_ERR(cwd);
		if (IS_ERR(cwd))
			goto out;

		error = -ERANGE;
		len = PAGE_SIZE + page - cwd;
		if (len <= size) {
			error = len;
			if (copy_to_user(buf, cwd, len))
				error = -EFAULT;
		}
	} else
		spin_unlock(&dcache_lock);

out:
	path_put(&pwd);
	path_put(&root);
	free_page((unsigned long) page);
	return error;
}

/*
 * Test whether new_dentry is a subdirectory of old_dentry.
 *
 * Trivially implemented using the dcache structure
 */

/**
 * is_subdir - is new dentry a subdirectory of old_dentry
 * @new_dentry: new dentry
 * @old_dentry: old dentry
 *
 * Returns 1 if new_dentry is a subdirectory of the parent (at any depth).
 * Returns 0 otherwise.

/*
 * Test whether new_dentry is a subdirectory of old_dentry.
 *
 * Trivially implemented using the dcache structure
 */

/**
 * is_subdir - is new dentry a subdirectory of old_dentry
 * @new_dentry: new dentry
 * @old_dentry: old dentry
 *
 * Returns 1 if new_dentry is a subdirectory of old_dentry (at any depth).
 * Returns 0 otherwise.
 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
 */

int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
{
	int result;
	struct dentry *saved = new_dentry;
	unsigned long seq;

	/*
	 * Need rcu_read_lock() to protect against d_parent changing
	 * under us due to a concurrent d_move().
	 */
	rcu_read_lock();
	do {
		/* for restarting inner loop in case of seq retry */
		new_dentry = saved;
		result = 0;
		seq = read_seqbegin(&rename_lock);
		for (;;) {
			if (new_dentry != old_dentry) {
				struct dentry *parent = new_dentry->d_parent;
				if (parent == new_dentry)
					break;
				new_dentry = parent;
				continue;
			}
			result = 1;
			break;
		}
	} while (read_seqretry(&rename_lock, seq));
	rcu_read_unlock();

	return result;
}

void d_genocide(struct dentry *root)
{
	struct dentry *this_parent = root;
	struct list_head *next;

	spin_lock(&dcache_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;
		if (d_unhashed(dentry) || !dentry->d_inode)
			continue;
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
		atomic_dec(&dentry->d_count);
	}
	if (this_parent != root) {
		next = this_parent->d_u.d_child.next;
		atomic_dec(&this_parent->d_count);
		this_parent = this_parent->d_parent;
		goto resume;
	}
	spin_unlock(&dcache_lock);
}

/**
 * find_inode_number - check for dentry with name
 * @dir: directory to check
 * @name: Name to find.
 *
 * Check whether a dentry already exists for the given name,
 * and return the inode number if it has an inode. Otherwise
 * 0 is returned.
 *
 * This routine is used to post-process directory listings for
 * filesystems using synthetic inode numbers, and is necessary
 * to keep getcwd() working.
 */

ino_t find_inode_number(struct dentry *dir, struct qstr *name)
{
	struct dentry *dentry;
	ino_t ino = 0;

	dentry = d_hash_and_lookup(dir, name);
	if (dentry) {
		if (dentry->d_inode)
			ino = dentry->d_inode->i_ino;
		dput(dentry);
	}
	return ino;
}

static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)
{
	if (!str)
		return 0;
	dhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("dhash_entries=", set_dhash_entries);

static void __init dcache_init_early(void)
{
	int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_head),
					dhash_entries,
					13,
					HASH_EARLY,
					&d_hash_shift,
					&d_hash_mask,
					0);

	for (loop = 0; loop < (1 << d_hash_shift); loop++)
		INIT_HLIST_HEAD(&dentry_hashtable[loop]);
}
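
/*
 * Illustrative note: the table size computed above can be forced from
 * the kernel command line, e.g.
 *
 *	dhash_entries=65536
 *
 * which set_dhash_entries() parses into dhash_entries. When the
 * parameter is absent, alloc_large_system_hash() derives the size from
 * total memory; the scale argument of 13 requests roughly one hash
 * bucket per 2^13 bytes of memory.
 */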

static void __init dcache_init(void)
{
	int loop;

	/*
	 * A constructor could be added for stable state like the lists,
	 * but it is probably not worth it because of the cache nature
	 * of the dcache.
	 */
	dentry_cache = KMEM_CACHE(dentry,
		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);

	register_shrinker(&dcache_shrinker);

	/* Hash may have been set up in dcache_init_early */
	if (!hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_head),
					dhash_entries,
					13,
					0,
					&d_hash_shift,
					&d_hash_mask,
					0);

	for (loop = 0; loop < (1 << d_hash_shift); loop++)
		INIT_HLIST_HEAD(&dentry_hashtable[loop]);
}

/* SLAB cache for __getname() consumers */
struct kmem_cache *names_cachep __read_mostly;

/* SLAB cache for file structures */
struct kmem_cache *filp_cachep __read_mostly;

EXPORT_SYMBOL(d_genocide);

void __init vfs_caches_init_early(void)
{
	dcache_init_early();
	inode_init_early();
}

void __init vfs_caches_init(unsigned long mempages)
{
	unsigned long reserve;

	/* Base hash sizes on available memory, with a reserve equal to
	   150% of current kernel size */

	reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
	mempages -= reserve;

	names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	dcache_init();
	inode_init();
	files_init(mempages);
	mnt_init();
	bdev_cache_init();
	chrdev_init();
}

EXPORT_SYMBOL(d_alloc);
EXPORT_SYMBOL(d_alloc_anon);
EXPORT_SYMBOL(d_alloc_root);
EXPORT_SYMBOL(d_delete);
EXPORT_SYMBOL(d_find_alias);
EXPORT_SYMBOL(d_instantiate);
EXPORT_SYMBOL(d_invalidate);
EXPORT_SYMBOL(d_lookup);
EXPORT_SYMBOL(d_move);
EXPORT_SYMBOL_GPL(d_materialise_unique);
EXPORT_SYMBOL(d_path);
EXPORT_SYMBOL(d_prune_aliases);
EXPORT_SYMBOL(d_rehash);
EXPORT_SYMBOL(d_splice_alias);
EXPORT_SYMBOL(d_validate);
EXPORT_SYMBOL(dget_locked);
EXPORT_SYMBOL(dput);
EXPORT_SYMBOL(find_inode_number);
EXPORT_SYMBOL(have_submounts);
EXPORT_SYMBOL(names_cachep);
EXPORT_SYMBOL(shrink_dcache_parent);
EXPORT_SYMBOL(shrink_dcache_sb);
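
/*
 * Illustrative sketch (not part of this file; example_lookup() and
 * example_iget() are hypothetical): a typical filesystem ->lookup()
 * method uses the exported d_splice_alias() above to attach the inode
 * it read from disk to the dentry, reusing an existing alias if the
 * dcache already has one:
 *
 *	static struct dentry *example_lookup(struct inode *dir,
 *			struct dentry *dentry, struct nameidata *nd)
 *	{
 *		struct inode *inode = example_iget(dir, &dentry->d_name);
 *
 *		if (IS_ERR(inode))
 *			return ERR_CAST(inode);
 *		return d_splice_alias(inode, dentry);
 *	}
 */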