/*
 * fs/dcache.c
 *
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 */

/*
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */

#include <linux/syscalls.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <asm/uaccess.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/fs_struct.h>
#include <linux/hardirq.h>
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include <linux/prefetch.h>
#include "internal.h"

/*
 * Usage:
 * dentry->d_inode->i_lock protects:
 *   - i_dentry, d_alias, d_inode of aliases
 * dcache_hash_bucket lock protects:
 *   - the dcache hash table
 * s_anon bl list spinlock protects:
 *   - the s_anon list (see __d_drop)
 * dcache_lru_lock protects:
 *   - the dcache lru lists and counters
 * d_lock protects:
 *   - d_flags
 *   - d_name
 *   - d_lru
 *   - d_count
 *   - d_unhashed()
 *   - d_parent and d_subdirs
 *   - children's d_child and d_parent
 *   - d_alias, d_inode
 *
 * Ordering:
 * dentry->d_inode->i_lock
 *   dentry->d_lock
 *     dcache_lru_lock
 *     dcache_hash_bucket lock
 *     s_anon lock
 *
 * If there is an ancestor relationship:
 * dentry->d_parent->...->d_parent->d_lock
 *   ...
 *     dentry->d_parent->d_lock
 *       dentry->d_lock
 *
 * If no ancestor relationship:
 * if (dentry1 < dentry2)
 *   dentry1->d_lock
 *     dentry2->d_lock
 */
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock);
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(rename_lock);

static struct kmem_cache *dentry_cache __read_mostly;

/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.
 */
#define D_HASHBITS	d_hash_shift
#define D_HASHMASK	d_hash_mask

static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;

static struct hlist_bl_head *dentry_hashtable __read_mostly;

static inline struct hlist_bl_head *d_hash(struct dentry *parent,
					unsigned long hash)
{
	hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
	hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
	return dentry_hashtable + (hash & D_HASHMASK);
}
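/*
 * Example (sketch): a name lookup computes the qstr hash first and then
 * picks the bucket with d_hash(), which folds the parent pointer in so
 * that equal names under different directories land in different chains:
 *
 *	unsigned int hash = full_name_hash(name, len);
 *	struct hlist_bl_head *b = d_hash(parent, hash);
 *
 * This is the pattern used by __d_lookup() and d_hash_and_lookup() below.
 */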
/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

static DEFINE_PER_CPU(unsigned int, nr_dentry);

#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
static int get_nr_dentry(void)
{
	int i;
	int sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry, i);
	return sum < 0 ? 0 : sum;
}

int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
		   size_t *lenp, loff_t *ppos)
{
	dentry_stat.nr_dentry = get_nr_dentry();
	return proc_dointvec(table, write, buffer, lenp, ppos);
}
#endif

static void __d_free(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	WARN_ON(!list_empty(&dentry->d_alias));
	if (dname_external(dentry))
		kfree(dentry->d_name.name);
	kmem_cache_free(dentry_cache, dentry);
}

/*
 * no locks, please.
 */
static void d_free(struct dentry *dentry)
{
	BUG_ON(dentry->d_count);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	/* if dentry was never visible to RCU, immediate free is OK */
	if (!(dentry->d_flags & DCACHE_RCUACCESS))
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}

/**
 * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
 * @dentry: the target dentry
 * After this call, in-progress rcu-walk path lookup will fail. This
 * should be called after unhashing, and after changing d_inode (if
 * the dentry has not already been unhashed).
 */
static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
{
	assert_spin_locked(&dentry->d_lock);
	/* Go through a barrier */
	write_seqcount_barrier(&dentry->d_seq);
}

/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined. Dentry has no refcount
 * and is unhashed.
 */
static void dentry_iput(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	if (inode) {
		dentry->d_inode = NULL;
		list_del_init(&dentry->d_alias);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&inode->i_lock);
		if (!inode->i_nlink)
			fsnotify_inoderemove(inode);
		if (dentry->d_op && dentry->d_op->d_iput)
			dentry->d_op->d_iput(dentry, inode);
		else
			iput(inode);
	} else {
		spin_unlock(&dentry->d_lock);
	}
}

/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined. dentry remains in-use.
 */
static void dentry_unlink_inode(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	dentry->d_inode = NULL;
	list_del_init(&dentry->d_alias);
	dentry_rcuwalk_barrier(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);
	if (!inode->i_nlink)
		fsnotify_inoderemove(inode);
	if (dentry->d_op && dentry->d_op->d_iput)
		dentry->d_op->d_iput(dentry, inode);
	else
		iput(inode);
}
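/*
 * Note the split above: dentry_iput() is only used on dentries that are
 * already unhashed and have no refcount (the d_kill() path below), so the
 * rcu-walk barrier was already issued when the dentry was unhashed.
 * dentry_unlink_inode() is used on a still-live dentry (see d_delete()),
 * so it must issue dentry_rcuwalk_barrier() itself before unlocking.
 */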
/*
 * dentry_lru_(add|del|prune|move_tail) must be called with d_lock held.
 */
static void dentry_lru_add(struct dentry *dentry)
{
	if (list_empty(&dentry->d_lru)) {
		spin_lock(&dcache_lru_lock);
		list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
		dentry->d_sb->s_nr_dentry_unused++;
		dentry_stat.nr_unused++;
		spin_unlock(&dcache_lru_lock);
	}
}

static void __dentry_lru_del(struct dentry *dentry)
{
	list_del_init(&dentry->d_lru);
	dentry->d_sb->s_nr_dentry_unused--;
	dentry_stat.nr_unused--;
}

/*
 * Remove a dentry with references from the LRU.
 */
static void dentry_lru_del(struct dentry *dentry)
{
	if (!list_empty(&dentry->d_lru)) {
		spin_lock(&dcache_lru_lock);
		__dentry_lru_del(dentry);
		spin_unlock(&dcache_lru_lock);
	}
}

/*
 * Remove a dentry that is unreferenced and about to be pruned
 * (unhashed and destroyed) from the LRU, and inform the file system.
 * This wrapper should be called _prior_ to unhashing a victim dentry.
 */
static void dentry_lru_prune(struct dentry *dentry)
{
	if (!list_empty(&dentry->d_lru)) {
		if (dentry->d_flags & DCACHE_OP_PRUNE)
			dentry->d_op->d_prune(dentry);

		spin_lock(&dcache_lru_lock);
		__dentry_lru_del(dentry);
		spin_unlock(&dcache_lru_lock);
	}
}

static void dentry_lru_move_tail(struct dentry *dentry)
{
	spin_lock(&dcache_lru_lock);
	if (list_empty(&dentry->d_lru)) {
		list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
		dentry->d_sb->s_nr_dentry_unused++;
		dentry_stat.nr_unused++;
	} else {
		list_move_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
	}
	spin_unlock(&dcache_lru_lock);
}

/**
 * d_kill - kill dentry and return parent
 * @dentry: dentry to kill
 * @parent: parent dentry
 *
 * The dentry must already be unhashed and removed from the LRU.
 *
 * If this is the root of the dentry tree, return NULL.
 *
 * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by
 * d_kill.
 */
static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
	__releases(dentry->d_lock)
	__releases(parent->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	list_del(&dentry->d_u.d_child);
	/*
	 * Inform try_to_ascend() that we are no longer attached to the
	 * dentry tree
	 */
	dentry->d_flags |= DCACHE_DISCONNECTED;
	if (parent)
		spin_unlock(&parent->d_lock);
	dentry_iput(dentry);
	/*
	 * dentry_iput drops the locks, at which point nobody (except
	 * transient RCU lookups) can reach this dentry.
	 */
	d_free(dentry);
	return parent;
}

/*
 * Unhash a dentry without inserting an RCU walk barrier or checking that
 * dentry->d_lock is locked. The caller must take care of that, if
 * appropriate.
 */
static void __d_shrink(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		struct hlist_bl_head *b;
		if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
			b = &dentry->d_sb->s_anon;
		else
			b = d_hash(dentry->d_parent, dentry->d_name.hash);

		hlist_bl_lock(b);
		__hlist_bl_del(&dentry->d_hash);
		dentry->d_hash.pprev = NULL;
		hlist_bl_unlock(b);
	}
}
/**
 * d_drop - drop a dentry
 * @dentry: dentry to drop
 *
 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 * be found through a VFS lookup any more. Note that this is different from
 * deleting the dentry - d_delete will try to mark the dentry negative if
 * possible, giving a successful _negative_ lookup, while d_drop will
 * just make the cache lookup fail.
 *
 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 * reason (NFS timeouts or autofs deletes).
 *
 * __d_drop requires dentry->d_lock.
 */
void __d_drop(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		__d_shrink(dentry);
		dentry_rcuwalk_barrier(dentry);
	}
}
EXPORT_SYMBOL(__d_drop);

void d_drop(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);

/*
 * d_clear_need_lookup - drop a dentry from cache and clear the need lookup flag
 * @dentry: dentry to drop
 *
 * This is called when we do a lookup on a placeholder dentry that needed to be
 * looked up. The dentry should have been hashed in order for it to be found by
 * the lookup code, but now needs to be unhashed while we do the actual lookup
 * and clear the DCACHE_NEED_LOOKUP flag.
 */
void d_clear_need_lookup(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_clear_need_lookup);

/*
 * Finish off a dentry we've decided to kill.
 * dentry->d_lock must be held, returns with it unlocked.
 * If ref is non-zero, then decrement the refcount too.
 * Returns dentry requiring refcount drop, or NULL if we're done.
 */
static inline struct dentry *dentry_kill(struct dentry *dentry, int ref)
	__releases(dentry->d_lock)
{
	struct inode *inode;
	struct dentry *parent;

	inode = dentry->d_inode;
	if (inode && !spin_trylock(&inode->i_lock)) {
relock:
		spin_unlock(&dentry->d_lock);
		cpu_relax();
		return dentry; /* try again with same dentry */
	}
	if (IS_ROOT(dentry))
		parent = NULL;
	else
		parent = dentry->d_parent;
	if (parent && !spin_trylock(&parent->d_lock)) {
		if (inode)
			spin_unlock(&inode->i_lock);
		goto relock;
	}

	if (ref)
		dentry->d_count--;
	/*
	 * if dentry was on the d_lru list delete it from there.
	 * inform the fs via d_prune that this dentry is about to be
	 * unhashed and destroyed.
	 */
	dentry_lru_prune(dentry);
	/* if it was on the hash then remove it */
	__d_drop(dentry);
	return d_kill(dentry, parent);
}
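/*
 * The retry contract of dentry_kill() from the caller's side: a return
 * of the dentry it was handed means a trylock failed and everything was
 * unlocked again, so the caller simply calls it once more (see the
 * kill_it/repeat loop in dput() below). A different non-NULL return is
 * the parent, which has lost a child and must now be dropped in turn.
 */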
/*
 * This is dput
 *
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */

/*
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 */
void dput(struct dentry *dentry)
{
	if (!dentry)
		return;

repeat:
	if (dentry->d_count == 1)
		might_sleep();
	spin_lock(&dentry->d_lock);
	BUG_ON(!dentry->d_count);
	if (dentry->d_count > 1) {
		dentry->d_count--;
		spin_unlock(&dentry->d_lock);
		return;
	}

	if (dentry->d_flags & DCACHE_OP_DELETE) {
		if (dentry->d_op->d_delete(dentry))
			goto kill_it;
	}

	/* Unreachable? Get rid of it */
	if (d_unhashed(dentry))
		goto kill_it;

	/*
	 * If this dentry needs lookup, don't set the referenced flag so that it
	 * is more likely to be cleaned up by the dcache shrinker in case of
	 * memory pressure.
	 */
	if (!d_need_lookup(dentry))
		dentry->d_flags |= DCACHE_REFERENCED;
	dentry_lru_add(dentry);

	dentry->d_count--;
	spin_unlock(&dentry->d_lock);
	return;

kill_it:
	dentry = dentry_kill(dentry, 1);
	if (dentry)
		goto repeat;
}
EXPORT_SYMBOL(dput);

/**
 * d_invalidate - invalidate a dentry
 * @dentry: dentry to invalidate
 *
 * Try to invalidate the dentry if it turns out to be
 * possible. If there are other dentries that can be
 * reached through this one we can't delete it and we
 * return -EBUSY. On success we return 0.
 *
 * no dcache lock.
 */
int d_invalidate(struct dentry * dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dentry->d_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dentry->d_lock);
		return 0;
	}
	/*
	 * Check whether to do a partial shrink_dcache
	 * to get rid of unused child entries.
	 */
	if (!list_empty(&dentry->d_subdirs)) {
		spin_unlock(&dentry->d_lock);
		shrink_dcache_parent(dentry);
		spin_lock(&dentry->d_lock);
	}

	/*
	 * Somebody else still using it?
	 *
	 * If it's a directory, we can't drop it
	 * for fear of somebody re-populating it
	 * with children (even though dropping it
	 * would make it unreachable from the root,
	 * we might still populate it if it was a
	 * working directory or similar).
	 */
	if (dentry->d_count > 1) {
		if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
			spin_unlock(&dentry->d_lock);
			return -EBUSY;
		}
	}

	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
	return 0;
}
EXPORT_SYMBOL(d_invalidate);

/* This must be called with d_lock held */
static inline void __dget_dlock(struct dentry *dentry)
{
	dentry->d_count++;
}

static inline void __dget(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__dget_dlock(dentry);
	spin_unlock(&dentry->d_lock);
}
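/*
 * A minimal sketch of how the two helpers above divide the work:
 *
 *	spin_lock(&dentry->d_lock);
 *	__dget_dlock(dentry);		// caller already holds d_lock
 *	spin_unlock(&dentry->d_lock);
 *
 * is exactly what __dget() expands to; __dget_dlock() alone is for paths
 * such as __d_find_alias() below that already hold d_lock while walking
 * the i_dentry alias list.
 */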
struct dentry *dget_parent(struct dentry *dentry)
{
	struct dentry *ret;

repeat:
	/*
	 * Don't need rcu_dereference because we re-check it was correct under
	 * the lock.
	 */
	rcu_read_lock();
	ret = dentry->d_parent;
	spin_lock(&ret->d_lock);
	if (unlikely(ret != dentry->d_parent)) {
		spin_unlock(&ret->d_lock);
		rcu_read_unlock();
		goto repeat;
	}
	rcu_read_unlock();
	BUG_ON(!ret->d_count);
	ret->d_count++;
	spin_unlock(&ret->d_lock);
	return ret;
}
EXPORT_SYMBOL(dget_parent);

/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 * @want_discon:  flag, used by d_splice_alias, to request
 *          that only a DISCONNECTED alias be returned.
 *
 * If inode has a hashed alias, or is a directory and has any alias,
 * acquire the reference to alias and return it. Otherwise return NULL.
 * Notice that if inode is a directory there can be only one alias and
 * it can be unhashed only if it has no children, or if it is the root
 * of a filesystem.
 *
 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 * any other hashed alias over that one unless @want_discon is set,
 * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
 */
static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
{
	struct dentry *alias, *discon_alias;

again:
	discon_alias = NULL;
	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				discon_alias = alias;
			} else if (!want_discon) {
				__dget_dlock(alias);
				spin_unlock(&alias->d_lock);
				return alias;
			}
		}
		spin_unlock(&alias->d_lock);
	}
	if (discon_alias) {
		alias = discon_alias;
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				__dget_dlock(alias);
				spin_unlock(&alias->d_lock);
				return alias;
			}
		}
		spin_unlock(&alias->d_lock);
		goto again;
	}
	return NULL;
}

struct dentry *d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!list_empty(&inode->i_dentry)) {
		spin_lock(&inode->i_lock);
		de = __d_find_alias(inode, 0);
		spin_unlock(&inode->i_lock);
	}
	return de;
}
EXPORT_SYMBOL(d_find_alias);

/*
 * Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&inode->i_lock);
	list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
		spin_lock(&dentry->d_lock);
		if (!dentry->d_count) {
			__dget_dlock(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&inode->i_lock);
			dput(dentry);
			goto restart;
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_prune_aliases);
/*
 * Try to throw away a dentry - free the inode, dput the parent.
 * Requires dentry->d_lock is held, and dentry->d_count == 0.
 * Releases dentry->d_lock.
 *
 * This may fail if locks cannot be acquired; no problem, just try again.
 */
static void try_prune_one_dentry(struct dentry *dentry)
	__releases(dentry->d_lock)
{
	struct dentry *parent;

	parent = dentry_kill(dentry, 0);
	/*
	 * If dentry_kill returns NULL, we have nothing more to do.
	 * If it returns the same dentry, trylocks failed. In either
	 * case, just loop again.
	 *
	 * Otherwise, we need to prune ancestors too. This is necessary
	 * to prevent quadratic behavior of shrink_dcache_parent(), but
	 * is also expected to be beneficial in reducing dentry cache
	 * fragmentation.
	 */
	if (!parent)
		return;
	if (parent == dentry)
		return;

	/* Prune ancestors. */
	dentry = parent;
	while (dentry) {
		spin_lock(&dentry->d_lock);
		if (dentry->d_count > 1) {
			dentry->d_count--;
			spin_unlock(&dentry->d_lock);
			return;
		}
		dentry = dentry_kill(dentry, 1);
	}
}

static void shrink_dentry_list(struct list_head *list)
{
	struct dentry *dentry;

	rcu_read_lock();
	for (;;) {
		dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
		if (&dentry->d_lru == list)
			break; /* empty */
		spin_lock(&dentry->d_lock);
		if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
			spin_unlock(&dentry->d_lock);
			continue;
		}

		/*
		 * We found an inuse dentry which was not removed from
		 * the LRU because of laziness during lookup. Do not free
		 * it - just keep it off the LRU list.
		 */
		if (dentry->d_count) {
			dentry_lru_del(dentry);
			spin_unlock(&dentry->d_lock);
			continue;
		}

		rcu_read_unlock();

		try_prune_one_dentry(dentry);

		rcu_read_lock();
	}
	rcu_read_unlock();
}

/**
 * __shrink_dcache_sb - shrink the dentry LRU on a given superblock
 * @sb:		superblock to shrink dentry LRU.
 * @count:	number of entries to prune
 * @flags:	flags to control the dentry processing
 *
 * If flags contains DCACHE_REFERENCED reference dentries will not be pruned.
 */
static void __shrink_dcache_sb(struct super_block *sb, int count, int flags)
{
	struct dentry *dentry;
	LIST_HEAD(referenced);
	LIST_HEAD(tmp);

relock:
	spin_lock(&dcache_lru_lock);
	while (!list_empty(&sb->s_dentry_lru)) {
		dentry = list_entry(sb->s_dentry_lru.prev,
				struct dentry, d_lru);
		BUG_ON(dentry->d_sb != sb);

		if (!spin_trylock(&dentry->d_lock)) {
			spin_unlock(&dcache_lru_lock);
			cpu_relax();
			goto relock;
		}

		/*
		 * If we are honouring the DCACHE_REFERENCED flag and the
		 * dentry has this flag set, don't free it. Clear the flag
		 * and put it back on the LRU.
		 */
		if (flags & DCACHE_REFERENCED &&
		    dentry->d_flags & DCACHE_REFERENCED) {
			dentry->d_flags &= ~DCACHE_REFERENCED;
			list_move(&dentry->d_lru, &referenced);
			spin_unlock(&dentry->d_lock);
		} else {
			list_move_tail(&dentry->d_lru, &tmp);
			spin_unlock(&dentry->d_lock);
			if (!--count)
				break;
		}
		cond_resched_lock(&dcache_lru_lock);
	}
	if (!list_empty(&referenced))
		list_splice(&referenced, &sb->s_dentry_lru);
	spin_unlock(&dcache_lru_lock);

	shrink_dentry_list(&tmp);
}
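/*
 * The DCACHE_REFERENCED handling above is a simple second-chance policy:
 * a dentry that was reused since it went on the LRU has the flag cleared
 * and gets one more trip around the list before it can be reclaimed;
 * only dentries found without the flag are moved to the dispose list and
 * passed to shrink_dentry_list().
 */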
/**
 * prune_dcache_sb - shrink the dcache
 * @sb: superblock
 * @nr_to_scan: number of entries to try to free
 *
 * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
 * done when we need more memory and called from the superblock shrinker
 * function.
 *
 * This function may fail to free any resources if all the dentries are in
 * use.
 */
void prune_dcache_sb(struct super_block *sb, int nr_to_scan)
{
	__shrink_dcache_sb(sb, nr_to_scan, DCACHE_REFERENCED);
}

/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This is used to free
 * the dcache before unmounting a file system.
 */
void shrink_dcache_sb(struct super_block *sb)
{
	LIST_HEAD(tmp);

	spin_lock(&dcache_lru_lock);
	while (!list_empty(&sb->s_dentry_lru)) {
		list_splice_init(&sb->s_dentry_lru, &tmp);
		spin_unlock(&dcache_lru_lock);
		shrink_dentry_list(&tmp);
		spin_lock(&dcache_lru_lock);
	}
	spin_unlock(&dcache_lru_lock);
}
EXPORT_SYMBOL(shrink_dcache_sb);
/*
 * destroy a single subtree of dentries for unmount
 * - see the comments on shrink_dcache_for_umount() for a description of the
 *   locking
 */
static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
{
	struct dentry *parent;

	BUG_ON(!IS_ROOT(dentry));

	for (;;) {
		/* descend to the first leaf in the current subtree */
		while (!list_empty(&dentry->d_subdirs))
			dentry = list_entry(dentry->d_subdirs.next,
					    struct dentry, d_u.d_child);

		/* consume the dentries from this leaf up through its parents
		 * until we find one with children or run out altogether */
		do {
			struct inode *inode;

			/*
			 * remove the dentry from the lru, and inform
			 * the fs that this dentry is about to be
			 * unhashed and destroyed.
			 */
			dentry_lru_prune(dentry);
			__d_shrink(dentry);

			if (dentry->d_count != 0) {
				printk(KERN_ERR
				       "BUG: Dentry %p{i=%lx,n=%s}"
				       " still in use (%d)"
				       " [unmount of %s %s]\n",
				       dentry,
				       dentry->d_inode ?
				       dentry->d_inode->i_ino : 0UL,
				       dentry->d_name.name,
				       dentry->d_count,
				       dentry->d_sb->s_type->name,
				       dentry->d_sb->s_id);
				BUG();
			}

			if (IS_ROOT(dentry)) {
				parent = NULL;
				list_del(&dentry->d_u.d_child);
			} else {
				parent = dentry->d_parent;
				parent->d_count--;
				list_del(&dentry->d_u.d_child);
			}

			inode = dentry->d_inode;
			if (inode) {
				dentry->d_inode = NULL;
				list_del_init(&dentry->d_alias);
				if (dentry->d_op && dentry->d_op->d_iput)
					dentry->d_op->d_iput(dentry, inode);
				else
					iput(inode);
			}

			d_free(dentry);

			/* finished when we fall off the top of the tree,
			 * otherwise we ascend to the parent and move to the
			 * next sibling if there is one */
			if (!parent)
				return;
			dentry = parent;
		} while (list_empty(&dentry->d_subdirs));

		dentry = list_entry(dentry->d_subdirs.next,
				    struct dentry, d_u.d_child);
	}
}

/*
 * destroy the dentries attached to a superblock on unmounting
 * - we don't need to use dentry->d_lock because:
 *   - the superblock is detached from all mountings and open files, so the
 *     dentry trees will not be rearranged by the VFS
 *   - s_umount is write-locked, so the memory pressure shrinker will ignore
 *     any dentries belonging to this superblock that it comes across
 *   - the filesystem itself is no longer permitted to rearrange the dentries
 *     in this superblock
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	if (down_read_trylock(&sb->s_umount))
		BUG();

	dentry = sb->s_root;
	sb->s_root = NULL;
	dentry->d_count--;
	shrink_dcache_for_umount_subtree(dentry);

	while (!hlist_bl_empty(&sb->s_anon)) {
		dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash);
		shrink_dcache_for_umount_subtree(dentry);
	}
}

/*
 * This tries to ascend one level of parenthood, but
 * we can race with renaming, so we need to re-check
 * the parenthood after dropping the lock and check
 * that the sequence number still matches.
 */
static struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq)
{
	struct dentry *new = old->d_parent;

	rcu_read_lock();
	spin_unlock(&old->d_lock);
	spin_lock(&new->d_lock);

	/*
	 * might go back up the wrong parent if we have had a rename
	 * or deletion
	 */
	if (new != old->d_parent ||
	    (old->d_flags & DCACHE_DISCONNECTED) ||
	    (!locked && read_seqretry(&rename_lock, seq))) {
		spin_unlock(&new->d_lock);
		new = NULL;
	}
	rcu_read_unlock();
	return new;
}
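/*
 * try_to_ascend() is the building block of the tree walkers below
 * (have_submounts() and select_parent()): both do an optimistic pass
 * under read_seqbegin(&rename_lock), and when ascending fails because a
 * rename or delete moved the subtree, they restart the whole walk
 * holding rename_lock for write, which keeps further renames out.
 */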
/*
 * Search for at least 1 mount point in the dentry's subdirs.
 * We descend to the next level whenever the d_subdirs
 * list is non-empty and continue searching.
 */

/**
 * have_submounts - check for mounts over a dentry
 * @parent: dentry to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point
 */
int have_submounts(struct dentry *parent)
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq;
	int locked = 0;

	seq = read_seqbegin(&rename_lock);
again:
	this_parent = parent;

	if (d_mountpoint(parent))
		goto positive;
	spin_lock(&this_parent->d_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		/* Have we found a mount point ? */
		if (d_mountpoint(dentry)) {
			spin_unlock(&dentry->d_lock);
			spin_unlock(&this_parent->d_lock);
			goto positive;
		}
		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = try_to_ascend(this_parent, locked, seq);
		if (!this_parent)
			goto rename_retry;
		next = child->d_u.d_child.next;
		goto resume;
	}
	spin_unlock(&this_parent->d_lock);
	if (!locked && read_seqretry(&rename_lock, seq))
		goto rename_retry;
	if (locked)
		write_sequnlock(&rename_lock);
	return 0; /* No mount points found in tree */
positive:
	if (!locked && read_seqretry(&rename_lock, seq))
		goto rename_retry;
	if (locked)
		write_sequnlock(&rename_lock);
	return 1;

rename_retry:
	locked = 1;
	write_seqlock(&rename_lock);
	goto again;
}
EXPORT_SYMBOL(have_submounts);
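/*
 * Sketch of a typical (hypothetical) use in a filesystem's
 * ->d_revalidate method: refuse to invalidate a subtree that still has
 * something mounted in it:
 *
 *	if (have_submounts(dentry))
 *		return 1;	// keep the dentry, a mount depends on it
 */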
/*
 * Search the dentry child list for the specified parent,
 * and move any unused dentries to the end of the unused
 * list for prune_dcache(). We descend to the next level
 * whenever the d_subdirs list is non-empty and continue
 * searching.
 *
 * It returns zero iff there are no unused children,
 * otherwise it returns the number of children moved to
 * the end of the unused list. This may not be the total
 * number of unused children, because select_parent can
 * drop the lock and return early due to latency
 * constraints.
 */
static int select_parent(struct dentry * parent)
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq;
	int found = 0;
	int locked = 0;

	seq = read_seqbegin(&rename_lock);
again:
	this_parent = parent;
	spin_lock(&this_parent->d_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

		/*
		 * move only zero ref count dentries to the end
		 * of the unused list for prune_dcache
		 */
		if (!dentry->d_count) {
			dentry_lru_move_tail(dentry);
			found++;
		} else {
			dentry_lru_del(dentry);
		}

		/*
		 * We can return to the caller if we have found some (this
		 * ensures forward progress). We'll be coming back to find
		 * the rest.
		 */
		if (found && need_resched()) {
			spin_unlock(&dentry->d_lock);
			goto out;
		}

		/*
		 * Descend a level if the d_subdirs list is non-empty.
		 */
		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}

		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = try_to_ascend(this_parent, locked, seq);
		if (!this_parent)
			goto rename_retry;
		next = child->d_u.d_child.next;
		goto resume;
	}
out:
	spin_unlock(&this_parent->d_lock);
	if (!locked && read_seqretry(&rename_lock, seq))
		goto rename_retry;
	if (locked)
		write_sequnlock(&rename_lock);
	return found;

rename_retry:
	if (found)
		return found;
	locked = 1;
	write_seqlock(&rename_lock);
	goto again;
}

/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */
void shrink_dcache_parent(struct dentry * parent)
{
	struct super_block *sb = parent->d_sb;
	int found;

	while ((found = select_parent(parent)) != 0)
		__shrink_dcache_sb(sb, found, 0);
}
EXPORT_SYMBOL(shrink_dcache_parent);
/**
 * __d_alloc	-	allocate a dcache entry
 * @sb: filesystem it will belong to
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	if (name->len > DNAME_INLINE_LEN-1) {
		dname = kmalloc(name->len + 1, GFP_KERNEL);
		if (!dname) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	} else {
		dname = dentry->d_iname;
	}
	dentry->d_name.name = dname;

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	dentry->d_count = 1;
	dentry->d_flags = 0;
	spin_lock_init(&dentry->d_lock);
	seqcount_init(&dentry->d_seq);
	dentry->d_inode = NULL;
	dentry->d_parent = dentry;
	dentry->d_sb = sb;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	INIT_HLIST_BL_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_LIST_HEAD(&dentry->d_alias);
	INIT_LIST_HEAD(&dentry->d_u.d_child);
	d_set_d_op(dentry, dentry->d_sb->s_d_op);

	this_cpu_inc(nr_dentry);

	return dentry;
}

/**
 * d_alloc	-	allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(parent->d_sb, name);
	if (!dentry)
		return NULL;

	spin_lock(&parent->d_lock);
	/*
	 * don't need child lock because it is not subject
	 * to concurrency here
	 */
	__dget_dlock(parent);
	dentry->d_parent = parent;
	list_add(&dentry->d_u.d_child, &parent->d_subdirs);
	spin_unlock(&parent->d_lock);

	return dentry;
}
EXPORT_SYMBOL(d_alloc);

struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(sb, name);
	if (dentry)
		dentry->d_flags |= DCACHE_DISCONNECTED;
	return dentry;
}
EXPORT_SYMBOL(d_alloc_pseudo);

struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
	struct qstr q;

	q.name = name;
	q.len = strlen(name);
	q.hash = full_name_hash(q.name, q.len);
	return d_alloc(parent, &q);
}
EXPORT_SYMBOL(d_alloc_name);

void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
{
	WARN_ON_ONCE(dentry->d_op);
	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
				DCACHE_OP_COMPARE	|
				DCACHE_OP_REVALIDATE	|
				DCACHE_OP_DELETE ));
	dentry->d_op = op;
	if (!op)
		return;
	if (op->d_hash)
		dentry->d_flags |= DCACHE_OP_HASH;
	if (op->d_compare)
		dentry->d_flags |= DCACHE_OP_COMPARE;
	if (op->d_revalidate)
		dentry->d_flags |= DCACHE_OP_REVALIDATE;
	if (op->d_delete)
		dentry->d_flags |= DCACHE_OP_DELETE;
	if (op->d_prune)
		dentry->d_flags |= DCACHE_OP_PRUNE;

}
EXPORT_SYMBOL(d_set_d_op);
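/*
 * A minimal sketch of how a filesystem feeds d_set_d_op(); the names
 * prefixed example_ are hypothetical, the struct and fields are real:
 *
 *	static const struct dentry_operations example_dentry_ops = {
 *		.d_revalidate	= example_d_revalidate,
 *		.d_delete	= example_d_delete,
 *	};
 *
 * Setting sb->s_d_op at mount time makes __d_alloc() apply it to every
 * dentry of that superblock (see the d_set_d_op() call above); otherwise
 * d_set_d_op() must be called on each dentry before it becomes visible.
 */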
static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
	spin_lock(&dentry->d_lock);
	if (inode) {
		if (unlikely(IS_AUTOMOUNT(inode)))
			dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
		list_add(&dentry->d_alias, &inode->i_dentry);
	}
	dentry->d_inode = inode;
	dentry_rcuwalk_barrier(dentry);
	spin_unlock(&dentry->d_lock);
	fsnotify_d_instantiate(dentry, inode);
}

/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
void d_instantiate(struct dentry *entry, struct inode * inode)
{
	BUG_ON(!list_empty(&entry->d_alias));
	if (inode)
		spin_lock(&inode->i_lock);
	__d_instantiate(entry, inode);
	if (inode)
		spin_unlock(&inode->i_lock);
	security_d_instantiate(entry, inode);
}
EXPORT_SYMBOL(d_instantiate);

/**
 * d_instantiate_unique - instantiate a non-aliased dentry
 * @entry: dentry to instantiate
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry. On success, it returns NULL.
 * If an unhashed alias of "entry" already exists, then we return the
 * aliased dentry instead and drop one reference to inode.
 *
 * Note that in order to avoid conflicts with rename() etc, the caller
 * had better be holding the parent directory semaphore.
 *
 * This also assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
static struct dentry *__d_instantiate_unique(struct dentry *entry,
					     struct inode *inode)
{
	struct dentry *alias;
	int len = entry->d_name.len;
	const char *name = entry->d_name.name;
	unsigned int hash = entry->d_name.hash;

	if (!inode) {
		__d_instantiate(entry, NULL);
		return NULL;
	}

	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		struct qstr *qstr = &alias->d_name;

		/*
		 * Don't need alias->d_lock here, because aliases with
		 * d_parent == entry->d_parent are not subject to name or
		 * parent changes, because the parent inode i_mutex is held.
		 */
		if (qstr->hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
		if (dentry_cmp(qstr->name, qstr->len, name, len))
			continue;
		__dget(alias);
		return alias;
	}

	__d_instantiate(entry, inode);
	return NULL;
}

struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
{
	struct dentry *result;

	BUG_ON(!list_empty(&entry->d_alias));

	if (inode)
		spin_lock(&inode->i_lock);
	result = __d_instantiate_unique(entry, inode);
	if (inode)
		spin_unlock(&inode->i_lock);

	if (!result) {
		security_d_instantiate(entry, inode);
		return NULL;
	}

	BUG_ON(!d_unhashed(result));
	iput(inode);
	return result;
}

EXPORT_SYMBOL(d_instantiate_unique);
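/*
 * A minimal sketch of the usual create-path usage of d_instantiate(),
 * with a hypothetical example_new_inode() that returns an inode whose
 * reference count is already raised:
 *
 *	inode = example_new_inode(dir->i_sb);
 *	if (!inode)
 *		return -ENOMEM;
 *	d_instantiate(dentry, inode);	// dcache now owns that reference
 *	return 0;
 *
 * This is the ownership transfer the NOTE above insists on.
 */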
/**
 * d_alloc_root - allocate root dentry
 * @root_inode: inode to allocate the root for
 *
 * Allocate a root ("/") dentry for the inode given. The inode is
 * instantiated and returned. %NULL is returned if there is insufficient
 * memory or the inode passed is %NULL.
 */
struct dentry * d_alloc_root(struct inode * root_inode)
{
	struct dentry *res = NULL;

	if (root_inode) {
		static const struct qstr name = { .name = "/", .len = 1 };

		res = __d_alloc(root_inode->i_sb, &name);
		if (res)
			d_instantiate(res, root_inode);
	}
	return res;
}
EXPORT_SYMBOL(d_alloc_root);

static struct dentry * __d_find_any_alias(struct inode *inode)
{
	struct dentry *alias;

	if (list_empty(&inode->i_dentry))
		return NULL;
	alias = list_first_entry(&inode->i_dentry, struct dentry, d_alias);
	__dget(alias);
	return alias;
}

static struct dentry * d_find_any_alias(struct inode *inode)
{
	struct dentry *de;

	spin_lock(&inode->i_lock);
	de = __d_find_any_alias(inode);
	spin_unlock(&inode->i_lock);
	return de;
}

/**
 * d_obtain_alias - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
 * similar open by handle operations. The returned dentry may be anonymous,
 * or may have a full name (if the inode was already in the cache).
 *
 * When called on a directory inode, we must ensure that the inode only ever
 * has one dentry. If a dentry is found, that is returned instead of
 * allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry. In case of an error the reference on the inode is released.
 * To make it easier to use in export operations a %NULL or IS_ERR inode may
 * be passed in and the error will be propagated to the return value,
 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_alias(struct inode *inode)
{
	static const struct qstr anonstring = { .name = "" };
	struct dentry *tmp;
	struct dentry *res;

	if (!inode)
		return ERR_PTR(-ESTALE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	res = d_find_any_alias(inode);
	if (res)
		goto out_iput;

	tmp = __d_alloc(inode->i_sb, &anonstring);
	if (!tmp) {
		res = ERR_PTR(-ENOMEM);
		goto out_iput;
	}

	spin_lock(&inode->i_lock);
	res = __d_find_any_alias(inode);
	if (res) {
		spin_unlock(&inode->i_lock);
		dput(tmp);
		goto out_iput;
	}

	/* attach a disconnected dentry */
	spin_lock(&tmp->d_lock);
	tmp->d_inode = inode;
	tmp->d_flags |= DCACHE_DISCONNECTED;
	list_add(&tmp->d_alias, &inode->i_dentry);
	hlist_bl_lock(&tmp->d_sb->s_anon);
	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
	hlist_bl_unlock(&tmp->d_sb->s_anon);
	spin_unlock(&tmp->d_lock);
	spin_unlock(&inode->i_lock);
	security_d_instantiate(tmp, inode);

	return tmp;

out_iput:
	if (res && !IS_ERR(res))
		security_d_instantiate(res, inode);
	iput(inode);
	return res;
}
EXPORT_SYMBOL(d_obtain_alias);
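/*
 * A minimal sketch of the intended use of d_obtain_alias() in an
 * export_operations ->fh_to_dentry() method, with a hypothetical
 * example_ilookup_from_fh() helper:
 *
 *	inode = example_ilookup_from_fh(sb, fid);  // NULL or IS_ERR on failure
 *	return d_obtain_alias(inode);
 *
 * The %NULL/IS_ERR pass-through documented above is what makes this a
 * one-liner: the error handling happens inside d_obtain_alias().
 */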
/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and
 * DCACHE_DISCONNECTED), then d_move that in place of the given dentry
 * and return it, else simply d_add the inode to the dentry and return NULL.
 *
 * This is needed in the lookup routine of any filesystem that is exportable
 * (via knfsd) so that we can build dcache paths to directories effectively.
 *
 * If a dentry was found and moved, then it is returned. Otherwise NULL
 * is returned. This matches the expected return value of ->lookup.
 *
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	struct dentry *new = NULL;

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	if (inode && S_ISDIR(inode->i_mode)) {
		spin_lock(&inode->i_lock);
		new = __d_find_alias(inode, 1);
		if (new) {
			BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
			spin_unlock(&inode->i_lock);
			security_d_instantiate(new, inode);
			d_move(new, dentry);
			iput(inode);
		} else {
			/* already taking inode->i_lock, so d_add() by hand */
			__d_instantiate(dentry, inode);
			spin_unlock(&inode->i_lock);
			security_d_instantiate(dentry, inode);
			d_rehash(dentry);
		}
	} else
		d_add(dentry, inode);
	return new;
}
EXPORT_SYMBOL(d_splice_alias);
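/*
 * A minimal sketch of d_splice_alias() in an exportable filesystem's
 * ->lookup() method (example_inode_by_name() is hypothetical):
 *
 *	static struct dentry *example_lookup(struct inode *dir,
 *			struct dentry *dentry, struct nameidata *nd)
 *	{
 *		struct inode *inode = example_inode_by_name(dir, &dentry->d_name);
 *		return d_splice_alias(inode, dentry);
 *	}
 *
 * A NULL inode simply d_add()s a negative dentry, and an IS_ERR inode is
 * propagated, so the lookup result can be passed straight through.
 */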
/**
 * d_add_ci - lookup or allocate new dentry with case-exact name
 * @inode:  the inode case-insensitive lookup has found
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @name:   the case-exact name to be associated with the returned dentry
 *
 * This is to avoid filling the dcache with case-insensitive names to the
 * same inode, only the actual correct case is stored in the dcache for
 * case-insensitive filesystems.
 *
 * For a case-insensitive lookup match and if the case-exact dentry
 * already exists in the dcache, use it and return it.
 *
 * If no entry exists with the exact case name, allocate new dentry with
 * the exact case, and return the spliced entry.
 */
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
			struct qstr *name)
{
	int error;
	struct dentry *found;
	struct dentry *new;

	/*
	 * First check if a dentry matching the name already exists,
	 * if not go ahead and create it now.
	 */
	found = d_hash_and_lookup(dentry->d_parent, name);
	if (!found) {
		new = d_alloc(dentry->d_parent, name);
		if (!new) {
			error = -ENOMEM;
			goto err_out;
		}

		found = d_splice_alias(inode, new);
		if (found) {
			dput(new);
			return found;
		}
		return new;
	}

	/*
	 * If a matching dentry exists, and it's not negative use it.
	 *
	 * Decrement the reference count to balance the iget() done
	 * earlier on.
	 */
	if (found->d_inode) {
		if (unlikely(found->d_inode != inode)) {
			/* This can't happen because bad inodes are unhashed. */
			BUG_ON(!is_bad_inode(inode));
			BUG_ON(!is_bad_inode(found->d_inode));
		}
		iput(inode);
		return found;
	}

	/*
	 * We are going to instantiate this dentry, unhash it and clear the
	 * lookup flag so we can do that.
	 */
	if (unlikely(d_need_lookup(found)))
		d_clear_need_lookup(found);

	/*
	 * Negative dentry: instantiate it unless the inode is a directory and
	 * already has a dentry.
	 */
	new = d_splice_alias(inode, found);
	if (new) {
		dput(found);
		found = new;
	}
	return found;

err_out:
	iput(inode);
	return ERR_PTR(error);
}
EXPORT_SYMBOL(d_add_ci);
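/*
 * A minimal sketch of d_add_ci() in a case-insensitive ->lookup(), once
 * the on-disk search has produced the stored (case-exact) spelling;
 * disk_name/disk_len are hypothetical:
 *
 *	struct qstr ci_name = { .name = disk_name, .len = disk_len };
 *	return d_add_ci(dentry, inode, &ci_name);
 *
 * There is no need to prime ci_name.hash: d_add_ci() hashes it via
 * d_hash_and_lookup(). Only the case-exact spelling ends up in the
 * dcache, which is the point made in the comment above.
 */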
/**
 * __d_lookup_rcu - search for a dentry (racy, store-free)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * @seq: returns d_seq value at the point where the dentry was found
 * @inode: returns dentry->d_inode when the inode was found valid.
 * Returns: dentry, or NULL
 *
 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
 * resolution (store-free path walking) design described in
 * Documentation/filesystems/path-lookup.txt.
 *
 * This is not to be used outside core vfs.
 *
 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
 * held, and rcu_read_lock held. The returned dentry must not be stored into
 * without taking d_lock and checking d_seq sequence count against @seq
 * returned here.
 *
 * A refcount may be taken on the found dentry with the __d_rcu_to_refcount
 * function.
 *
 * Alternatively, __d_lookup_rcu may be called again to look up the child of
 * the returned dentry, so long as its parent's seqlock is checked after the
 * child is looked up. Thus, an interlocking stepping of sequence lock checks
 * is formed, giving integrity down the path walk.
 */
struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
				unsigned *seq, struct inode **inode)
{
	unsigned int len = name->len;
	unsigned int hash = name->hash;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(parent, hash);
	struct hlist_bl_node *node;
	struct dentry *dentry;

	/*
	 * Note: There is significant duplication with __d_lookup which is
	 * required to prevent single threaded performance regressions
	 * especially on architectures where smp_rmb (in seqcounts) are costly.
	 * Keep the two functions in sync.
	 */

	/*
	 * The hash list is protected using RCU.
	 *
	 * Carefully use d_seq when comparing a candidate dentry, to avoid
	 * races with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/path-lookup.txt for more details.
	 */
	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
		struct inode *i;
		const char *tname;
		int tlen;

		if (dentry->d_name.hash != hash)
			continue;

seqretry:
		*seq = read_seqcount_begin(&dentry->d_seq);
		if (dentry->d_parent != parent)
			continue;
		if (d_unhashed(dentry))
			continue;
		tlen = dentry->d_name.len;
		tname = dentry->d_name.name;
		i = dentry->d_inode;
		prefetch(tname);
		/*
		 * This seqcount check is required to ensure name and
		 * len are loaded atomically, so as not to walk off the
		 * edge of memory when walking. If we could load this
		 * atomically some other way, we could drop this check.
		 */
		if (read_seqcount_retry(&dentry->d_seq, *seq))
			goto seqretry;
		if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
			if (parent->d_op->d_compare(parent, *inode,
						dentry, i,
						tlen, tname, name))
				continue;
		} else {
			if (dentry_cmp(tname, tlen, str, len))
				continue;
		}
		/*
		 * No extra seqcount check is required after the name
		 * compare. The caller must perform a seqcount check in
		 * order to do anything useful with the returned dentry
		 * anyway.
		 */
		*inode = i;
		return dentry;
	}
	return NULL;
}

/**
 * d_lookup - search for a dentry
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * d_lookup searches the children of the parent dentry for the name in
 * question. If the dentry is found its reference count is incremented and the
 * dentry is returned. The caller must use dput to free the entry when it has
 * finished using it. %NULL is returned if the dentry does not exist.
 */
struct dentry *d_lookup(struct dentry *parent, struct qstr *name)
{
	struct dentry *dentry;
	unsigned seq;

	do {
		seq = read_seqbegin(&rename_lock);
		dentry = __d_lookup(parent, name);
		if (dentry)
			break;
	} while (read_seqretry(&rename_lock, seq));
	return dentry;
}
EXPORT_SYMBOL(d_lookup);
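/*
 * Why d_lookup() loops: __d_lookup() can miss an existing dentry when a
 * concurrent rename moves it between hash chains mid-walk (a false
 * negative). read_seqretry(&rename_lock, seq) detects that a rename ran,
 * so a NULL result is only trusted if it came from a rename-free
 * interval; otherwise the lookup is retried.
 */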
/**
 * __d_lookup - search for a dentry (racy)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * __d_lookup is like d_lookup, however it may (rarely) return a
 * false-negative result due to unrelated rename activity.
 *
 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
 * however it must be used carefully, eg. with a following d_lookup in
 * the case of failure.
 *
 * __d_lookup callers must be commented.
 */
struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
{
	unsigned int len = name->len;
	unsigned int hash = name->hash;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(parent, hash);
	struct hlist_bl_node *node;
	struct dentry *found = NULL;
	struct dentry *dentry;

	/*
	 * Note: There is significant duplication with __d_lookup_rcu which is
	 * required to prevent single threaded performance regressions
	 * especially on architectures where smp_rmb (in seqcounts) are costly.
	 * Keep the two functions in sync.
	 */

	/*
	 * The hash list is protected using RCU.
	 *
	 * Take d_lock when comparing a candidate dentry, to avoid races
	 * with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/path-lookup.txt for more details.
	 */
	rcu_read_lock();

	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
		const char *tname;
		int tlen;

		if (dentry->d_name.hash != hash)
			continue;

		spin_lock(&dentry->d_lock);
		if (dentry->d_parent != parent)
			goto next;
		if (d_unhashed(dentry))
			goto next;

		/*
		 * It is safe to compare names since d_move() cannot
		 * change the qstr (protected by d_lock).
		 */
		tlen = dentry->d_name.len;
		tname = dentry->d_name.name;
		if (parent->d_flags & DCACHE_OP_COMPARE) {
			if (parent->d_op->d_compare(parent, parent->d_inode,
						dentry, dentry->d_inode,
						tlen, tname, name))
				goto next;
		} else {
			if (dentry_cmp(tname, tlen, str, len))
				goto next;
		}

		dentry->d_count++;
		found = dentry;
		spin_unlock(&dentry->d_lock);
		break;
next:
		spin_unlock(&dentry->d_lock);
	}
	rcu_read_unlock();

	return found;
}

/**
 * d_hash_and_lookup - hash the qstr then search for a dentry
 * @dir: Directory to search in
 * @name: qstr of name we wish to find
 *
 * On hash failure or on lookup failure NULL is returned.
 */
struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
{
	struct dentry *dentry = NULL;

	/*
	 * Check for a fs-specific hash function. Note that we must
	 * calculate the standard hash first, as the d_op->d_hash()
	 * routine may choose to leave the hash value unchanged.
	 */
	name->hash = full_name_hash(name->name, name->len);
	if (dir->d_flags & DCACHE_OP_HASH) {
		if (dir->d_op->d_hash(dir, dir->d_inode, name) < 0)
			goto out;
	}
	dentry = d_lookup(dir, name);
out:
	return dentry;
}

/**
 * d_validate - verify dentry provided from insecure source (deprecated)
 * @dentry: The dentry alleged to be valid child of @dparent
 * @dparent: The parent dentry (known to be valid)
 *
 * An insecure source has sent us a dentry, here we verify it and dget() it.
 * This is used by ncpfs in its readdir implementation.
 * Zero is returned if the dentry is invalid.
 *
 * This function is slow for big directories, and deprecated, do not use it.
 */
int d_validate(struct dentry *dentry, struct dentry *dparent)
{
	struct dentry *child;

	spin_lock(&dparent->d_lock);
	list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
		if (dentry == child) {
			spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
			__dget_dlock(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dparent->d_lock);
			return 1;
		}
	}
	spin_unlock(&dparent->d_lock);

	return 0;
}
EXPORT_SYMBOL(d_validate);
/**
 * d_hash_and_lookup - hash the qstr then search for a dentry
 * @dir: Directory to search in
 * @name: qstr of name we wish to find
 *
 * On hash failure or on lookup failure NULL is returned.
 */
struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
{
	struct dentry *dentry = NULL;

	/*
	 * Check for a fs-specific hash function. Note that we must
	 * calculate the standard hash first, as the d_op->d_hash()
	 * routine may choose to leave the hash value unchanged.
	 */
	name->hash = full_name_hash(name->name, name->len);
	if (dir->d_flags & DCACHE_OP_HASH) {
		if (dir->d_op->d_hash(dir, dir->d_inode, name) < 0)
			goto out;
	}
	dentry = d_lookup(dir, name);
out:
	return dentry;
}

/**
 * d_validate - verify dentry provided from insecure source (deprecated)
 * @dentry: The dentry alleged to be valid child of @dparent
 * @dparent: The parent dentry (known to be valid)
 *
 * An insecure source has sent us a dentry, here we verify it and dget() it.
 * This is used by ncpfs in its readdir implementation.
 * Zero is returned if the dentry is invalid.
 *
 * This function is slow for big directories, and deprecated, do not use it.
 */
int d_validate(struct dentry *dentry, struct dentry *dparent)
{
	struct dentry *child;

	spin_lock(&dparent->d_lock);
	list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
		if (dentry == child) {
			spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
			__dget_dlock(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dparent->d_lock);
			return 1;
		}
	}
	spin_unlock(&dparent->d_lock);

	return 0;
}
EXPORT_SYMBOL(d_validate);

/*
 * When a file is deleted, we have two options:
 * - turn this dentry into a negative dentry
 * - unhash this dentry and free it.
 *
 * Usually, we want to just turn this into
 * a negative dentry, but if anybody else is
 * currently using the dentry or the inode
 * we can't do that and we fall back on removing
 * it from the hash queues and waiting for
 * it to be deleted later when it has no users
 */

/**
 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * Turn the dentry into a negative dentry if possible, otherwise
 * remove it from the hash queues so it can be deleted later
 */

void d_delete(struct dentry * dentry)
{
	struct inode *inode;
	int isdir = 0;
	/*
	 * Are we the only user?
	 */
again:
	spin_lock(&dentry->d_lock);
	inode = dentry->d_inode;
	isdir = S_ISDIR(inode->i_mode);
	if (dentry->d_count == 1) {
		if (inode && !spin_trylock(&inode->i_lock)) {
			spin_unlock(&dentry->d_lock);
			cpu_relax();
			goto again;
		}
		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
		dentry_unlink_inode(dentry);
		fsnotify_nameremove(dentry, isdir);
		return;
	}

	if (!d_unhashed(dentry))
		__d_drop(dentry);

	spin_unlock(&dentry->d_lock);

	fsnotify_nameremove(dentry, isdir);
}
EXPORT_SYMBOL(d_delete);
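/*
 * Typical caller pattern (a simplified sketch of the VFS unlink path;
 * see vfs_unlink() in fs/namei.c for the real sequence, which also
 * handles NFS silly-rename and error cases):
 *
 *	error = dir->i_op->unlink(dir, dentry);
 *	if (!error)
 *		d_delete(dentry);
 */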
static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
{
	BUG_ON(!d_unhashed(entry));
	hlist_bl_lock(b);
	entry->d_flags |= DCACHE_RCUACCESS;
	hlist_bl_add_head_rcu(&entry->d_hash, b);
	hlist_bl_unlock(b);
}

static void _d_rehash(struct dentry * entry)
{
	__d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
}

/**
 * d_rehash - add an entry back to the hash
 * @entry: dentry to add to the hash
 *
 * Adds a dentry to the hash according to its name.
 */

void d_rehash(struct dentry * entry)
{
	spin_lock(&entry->d_lock);
	_d_rehash(entry);
	spin_unlock(&entry->d_lock);
}
EXPORT_SYMBOL(d_rehash);

/**
 * dentry_update_name_case - update case insensitive dentry with a new name
 * @dentry: dentry to be updated
 * @name: new name
 *
 * Update a case insensitive dentry with new case of name.
 *
 * dentry must have been returned by d_lookup with name @name. Old and new
 * name lengths must match (ie. no d_compare which allows mismatched name
 * lengths).
 *
 * Parent inode i_mutex must be held over d_lookup and into this call (to
 * keep renames and concurrent inserts, and readdir(2) away).
 */
void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
{
	BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
	BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */

	spin_lock(&dentry->d_lock);
	write_seqcount_begin(&dentry->d_seq);
	memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
	write_seqcount_end(&dentry->d_seq);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(dentry_update_name_case);

static void switch_names(struct dentry *dentry, struct dentry *target)
{
	if (dname_external(target)) {
		if (dname_external(dentry)) {
			/*
			 * Both external: swap the pointers
			 */
			swap(target->d_name.name, dentry->d_name.name);
		} else {
			/*
			 * dentry:internal, target:external. Steal target's
			 * storage and make target internal.
			 */
			memcpy(target->d_iname, dentry->d_name.name,
					dentry->d_name.len + 1);
			dentry->d_name.name = target->d_name.name;
			target->d_name.name = target->d_iname;
		}
	} else {
		if (dname_external(dentry)) {
			/*
			 * dentry:external, target:internal. Give dentry's
			 * storage to target and make dentry internal
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			target->d_name.name = dentry->d_name.name;
			dentry->d_name.name = dentry->d_iname;
		} else {
			/*
			 * Both are internal. Just copy target to dentry
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			dentry->d_name.len = target->d_name.len;
			return;
		}
	}
	swap(dentry->d_name.len, target->d_name.len);
}

static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
{
	/*
	 * XXXX: do we really need to take target->d_lock?
	 */
	if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
		spin_lock(&target->d_parent->d_lock);
	else {
		if (d_ancestor(dentry->d_parent, target->d_parent)) {
			spin_lock(&dentry->d_parent->d_lock);
			spin_lock_nested(&target->d_parent->d_lock,
						DENTRY_D_LOCK_NESTED);
		} else {
			spin_lock(&target->d_parent->d_lock);
			spin_lock_nested(&dentry->d_parent->d_lock,
						DENTRY_D_LOCK_NESTED);
		}
	}
	if (target < dentry) {
		spin_lock_nested(&target->d_lock, 2);
		spin_lock_nested(&dentry->d_lock, 3);
	} else {
		spin_lock_nested(&dentry->d_lock, 2);
		spin_lock_nested(&target->d_lock, 3);
	}
}

static void dentry_unlock_parents_for_move(struct dentry *dentry,
					struct dentry *target)
{
	if (target->d_parent != dentry->d_parent)
		spin_unlock(&dentry->d_parent->d_lock);
	if (target->d_parent != target)
		spin_unlock(&target->d_parent->d_lock);
}

/*
 * When switching names, the actual string doesn't strictly have to
 * be preserved in the target - because we're dropping the target
 * anyway. As such, we can just do a simple memcpy() to copy over
 * the new name before we switch.
 *
 * Note that we have to be a lot more careful about getting the hash
 * switched - we have to switch the hash value properly even if it
 * then no longer matches the actual (corrupted) string of the target.
 * The hash value has to match the hash queue that the dentry is on..
 */
/*
 * __d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way. Caller must hold
 * rename_lock, the i_mutex of the source and target directories,
 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
 */
static void __d_move(struct dentry * dentry, struct dentry * target)
{
	if (!dentry->d_inode)
		printk(KERN_WARNING "VFS: moving negative dcache entry\n");

	BUG_ON(d_ancestor(dentry, target));
	BUG_ON(d_ancestor(target, dentry));

	dentry_lock_for_move(dentry, target);

	write_seqcount_begin(&dentry->d_seq);
	write_seqcount_begin(&target->d_seq);

	/* __d_drop does write_seqcount_barrier, but they're OK to nest. */

	/*
	 * Move the dentry to the target hash queue. Don't bother checking
	 * for the same hash queue because of how unlikely it is.
	 */
	__d_drop(dentry);
	__d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));

	/* Unhash the target: dput() will then get rid of it */
	__d_drop(target);

	list_del(&dentry->d_u.d_child);
	list_del(&target->d_u.d_child);

	/* Switch the names.. */
	switch_names(dentry, target);
	swap(dentry->d_name.hash, target->d_name.hash);

	/* ... and switch the parents */
	if (IS_ROOT(dentry)) {
		dentry->d_parent = target->d_parent;
		target->d_parent = target;
		INIT_LIST_HEAD(&target->d_u.d_child);
	} else {
		swap(dentry->d_parent, target->d_parent);

		/* And add them back to the (new) parent lists */
		list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
	}

	list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);

	write_seqcount_end(&target->d_seq);
	write_seqcount_end(&dentry->d_seq);

	dentry_unlock_parents_for_move(dentry, target);
	spin_unlock(&target->d_lock);
	fsnotify_d_move(dentry);
	spin_unlock(&dentry->d_lock);
}

/*
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way. See the locking
 * requirements for __d_move.
 */
void d_move(struct dentry *dentry, struct dentry *target)
{
	write_seqlock(&rename_lock);
	__d_move(dentry, target);
	write_sequnlock(&rename_lock);
}
EXPORT_SYMBOL(d_move);
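/*
 * Sketch of the usual caller (simplified from vfs_rename() in
 * fs/namei.c): once the filesystem has moved the object, d_move()
 * brings the dcache back in sync with the new name and parent:
 *
 *	error = old_dir->i_op->rename(old_dir, old_dentry,
 *				      new_dir, new_dentry);
 *	if (!error)
 *		d_move(old_dentry, new_dentry);
 */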
/**
 * d_ancestor - search for an ancestor
 * @p1: ancestor dentry
 * @p2: child dentry
 *
 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
 * an ancestor of p2, else NULL.
 */
struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
{
	struct dentry *p;

	for (p = p2; !IS_ROOT(p); p = p->d_parent) {
		if (p->d_parent == p1)
			return p;
	}
	return NULL;
}
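/*
 * Worked example: for a hierarchy /a/b/c, d_ancestor(a, c) returns b
 * (the child of "a" on the path down to "c"), d_ancestor(a, b) returns
 * b itself, and d_ancestor(c, a) returns NULL since "a" does not lie
 * below "c".
 */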
/*
 * This helper attempts to cope with remotely renamed directories
 *
 * It assumes that the caller is already holding
 * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock
 *
 * Note: If ever the locking in lock_rename() changes, then please
 * remember to update this too...
 */
static struct dentry *__d_unalias(struct inode *inode,
		struct dentry *dentry, struct dentry *alias)
{
	struct mutex *m1 = NULL, *m2 = NULL;
	struct dentry *ret;

	/* If alias and dentry share a parent, then no extra locks required */
	if (alias->d_parent == dentry->d_parent)
		goto out_unalias;

	/* See lock_rename() */
	ret = ERR_PTR(-EBUSY);
	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
		goto out_err;
	m1 = &dentry->d_sb->s_vfs_rename_mutex;
	if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
		goto out_err;
	m2 = &alias->d_parent->d_inode->i_mutex;
out_unalias:
	__d_move(alias, dentry);
	ret = alias;
out_err:
	spin_unlock(&inode->i_lock);
	if (m2)
		mutex_unlock(m2);
	if (m1)
		mutex_unlock(m1);
	return ret;
}

/*
 * Prepare an anonymous dentry for life in the superblock's dentry tree as a
 * named dentry in place of the dentry to be replaced.
 * returns with anon->d_lock held!
 */
static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
{
	struct dentry *dparent, *aparent;

	dentry_lock_for_move(anon, dentry);

	write_seqcount_begin(&dentry->d_seq);
	write_seqcount_begin(&anon->d_seq);

	dparent = dentry->d_parent;
	aparent = anon->d_parent;

	switch_names(dentry, anon);
	swap(dentry->d_name.hash, anon->d_name.hash);

	dentry->d_parent = (aparent == anon) ? dentry : aparent;
	list_del(&dentry->d_u.d_child);
	if (!IS_ROOT(dentry))
		list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
	else
		INIT_LIST_HEAD(&dentry->d_u.d_child);

	anon->d_parent = (dparent == dentry) ? anon : dparent;
	list_del(&anon->d_u.d_child);
	if (!IS_ROOT(anon))
		list_add(&anon->d_u.d_child, &anon->d_parent->d_subdirs);
	else
		INIT_LIST_HEAD(&anon->d_u.d_child);

	write_seqcount_end(&dentry->d_seq);
	write_seqcount_end(&anon->d_seq);

	dentry_unlock_parents_for_move(anon, dentry);
	spin_unlock(&dentry->d_lock);

	/* anon->d_lock still locked, returns locked */
	anon->d_flags &= ~DCACHE_DISCONNECTED;
}

/**
 * d_materialise_unique - introduce an inode into the tree
 * @dentry: candidate dentry
 * @inode: inode to bind to the dentry, to which aliases may be attached
 *
 * Introduces a dentry into the tree, substituting an extant disconnected
 * root directory alias in its place if there is one. Caller must hold the
 * i_mutex of the parent directory.
 */
struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
{
	struct dentry *actual;

	BUG_ON(!d_unhashed(dentry));

	if (!inode) {
		actual = dentry;
		__d_instantiate(dentry, NULL);
		d_rehash(actual);
		goto out_nolock;
	}

	spin_lock(&inode->i_lock);

	if (S_ISDIR(inode->i_mode)) {
		struct dentry *alias;

		/* Does an aliased dentry already exist? */
		alias = __d_find_alias(inode, 0);
		if (alias) {
			actual = alias;
			write_seqlock(&rename_lock);

			if (d_ancestor(alias, dentry)) {
				/* Check for loops */
				actual = ERR_PTR(-ELOOP);
			} else if (IS_ROOT(alias)) {
				/* Is this an anonymous mountpoint that we
				 * could splice into our tree? */
				__d_materialise_dentry(dentry, alias);
				write_sequnlock(&rename_lock);
				__d_drop(alias);
				goto found;
			} else {
				/* Nope, but we must(!) avoid directory
				 * aliasing */
				actual = __d_unalias(inode, dentry, alias);
			}
			write_sequnlock(&rename_lock);
			if (IS_ERR(actual))
				dput(alias);
			goto out_nolock;
		}
	}

	/* Add a unique reference */
	actual = __d_instantiate_unique(dentry, inode);
	if (!actual)
		actual = dentry;
	else
		BUG_ON(!d_unhashed(actual));

	spin_lock(&actual->d_lock);
found:
	_d_rehash(actual);
	spin_unlock(&actual->d_lock);
	spin_unlock(&inode->i_lock);
out_nolock:
	if (actual == dentry) {
		security_d_instantiate(dentry, inode);
		return NULL;
	}

	iput(inode);
	return actual;
}
EXPORT_SYMBOL_GPL(d_materialise_unique);

static int prepend(char **buffer, int *buflen, const char *str, int namelen)
{
	*buflen -= namelen;
	if (*buflen < 0)
		return -ENAMETOOLONG;
	*buffer -= namelen;
	memcpy(*buffer, str, namelen);
	return 0;
}

static int prepend_name(char **buffer, int *buflen, struct qstr *name)
{
	return prepend(buffer, buflen, name->name, name->len);
}
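/*
 * Worked example of the prepend() helpers (hypothetical values): the
 * path is built right-to-left from the end of the buffer. With
 * buflen == 8 and buffer pointing one past the end of buf:
 *
 *	prepend(&buffer, &buflen, "\0", 1);	// "       \0", buflen 7
 *	prepend(&buffer, &buflen, "bar", 3);	// "    bar\0", buflen 4
 *	prepend(&buffer, &buflen, "/", 1);	// "   /bar\0", buflen 3
 *
 * leaves buffer pointing at "/bar". Callers must therefore use the
 * updated pointer, not the start of the buffer.
 */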
/**
 * prepend_path - Prepend path string to a buffer
 * @path: the dentry/vfsmount to report
 * @root: root vfsmnt/dentry (may be modified by this function)
 * @buffer: pointer to the end of the buffer
 * @buflen: pointer to buffer length
 *
 * Caller holds the rename_lock.
 *
 * If path is not reachable from the supplied root, then the value of
 * root is changed (without modifying refcounts).
 */
static int prepend_path(const struct path *path, struct path *root,
			char **buffer, int *buflen)
{
	struct dentry *dentry = path->dentry;
	struct vfsmount *vfsmnt = path->mnt;
	bool slash = false;
	int error = 0;

	br_read_lock(vfsmount_lock);
	while (dentry != root->dentry || vfsmnt != root->mnt) {
		struct dentry * parent;

		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
			/* Global root? */
			if (vfsmnt->mnt_parent == vfsmnt) {
				goto global_root;
			}
			dentry = vfsmnt->mnt_mountpoint;
			vfsmnt = vfsmnt->mnt_parent;
			continue;
		}
		parent = dentry->d_parent;
		prefetch(parent);
		spin_lock(&dentry->d_lock);
		error = prepend_name(buffer, buflen, &dentry->d_name);
		spin_unlock(&dentry->d_lock);
		if (!error)
			error = prepend(buffer, buflen, "/", 1);
		if (error)
			break;

		slash = true;
		dentry = parent;
	}

out:
	if (!error && !slash)
		error = prepend(buffer, buflen, "/", 1);

	br_read_unlock(vfsmount_lock);
	return error;

global_root:
	/*
	 * Filesystems needing to implement special "root names"
	 * should do so with ->d_dname()
	 */
	if (IS_ROOT(dentry) &&
	    (dentry->d_name.len != 1 || dentry->d_name.name[0] != '/')) {
		WARN(1, "Root dentry has weird name <%.*s>\n",
		     (int) dentry->d_name.len, dentry->d_name.name);
	}
	root->mnt = vfsmnt;
	root->dentry = dentry;
	goto out;
}

/**
 * __d_path - return the path of a dentry
 * @path: the dentry/vfsmount to report
 * @root: root vfsmnt/dentry (may be modified by this function)
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name.
 *
 * Returns a pointer into the buffer or an error code if the
 * path was too long.
 *
 * "buflen" should be positive.
 *
 * If path is not reachable from the supplied root, then the value of
 * root is changed (without modifying refcounts).
 */
char *__d_path(const struct path *path, struct path *root,
	       char *buf, int buflen)
{
	char *res = buf + buflen;
	int error;

	prepend(&res, &buflen, "\0", 1);
	write_seqlock(&rename_lock);
	error = prepend_path(path, root, &res, &buflen);
	write_sequnlock(&rename_lock);

	if (error)
		return ERR_PTR(error);
	return res;
}

/*
 * same as __d_path but appends "(deleted)" for unlinked files.
 */
static int path_with_deleted(const struct path *path, struct path *root,
			     char **buf, int *buflen)
{
	prepend(buf, buflen, "\0", 1);
	if (d_unlinked(path->dentry)) {
		int error = prepend(buf, buflen, " (deleted)", 10);
		if (error)
			return error;
	}

	return prepend_path(path, root, buf, buflen);
}

static int prepend_unreachable(char **buffer, int *buflen)
{
	return prepend(buffer, buflen, "(unreachable)", 13);
}

/**
 * d_path - return the path of a dentry
 * @path: path to report
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name. If the entry has been deleted
 * the string " (deleted)" is appended. Note that this is ambiguous.
 *
 * Returns a pointer into the buffer or an error code if the path was
 * too long. Note: Callers should use the returned pointer, not the passed
 * in buffer, to use the name! The implementation often starts at an offset
 * into the buffer, and may leave 0 bytes at the start.
 *
 * "buflen" should be positive.
 */
char *d_path(const struct path *path, char *buf, int buflen)
{
	char *res = buf + buflen;
	struct path root;
	struct path tmp;
	int error;

	/*
	 * We have various synthetic filesystems that never get mounted. On
	 * these filesystems dentries are never used for lookup purposes, and
	 * thus don't need to be hashed. They also don't need a name until a
	 * user wants to identify the object in /proc/pid/fd/. The little hack
	 * below allows us to generate a name for these objects on demand:
	 */
	if (path->dentry->d_op && path->dentry->d_op->d_dname)
		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);

	get_fs_root(current->fs, &root);
	write_seqlock(&rename_lock);
	tmp = root;
	error = path_with_deleted(path, &tmp, &res, &buflen);
	if (error)
		res = ERR_PTR(error);
	write_sequnlock(&rename_lock);
	path_put(&root);
	return res;
}
EXPORT_SYMBOL(d_path);
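/*
 * Example usage of d_path() (a minimal sketch; "filp" is an assumed
 * struct file pointer). Note that the returned pointer, not the page
 * itself, is what names the object:
 *
 *	char *page = (char *)__get_free_page(GFP_KERNEL);
 *	char *p;
 *	if (!page)
 *		return -ENOMEM;
 *	p = d_path(&filp->f_path, page, PAGE_SIZE);
 *	if (!IS_ERR(p))
 *		printk(KERN_DEBUG "path: %s\n", p);
 *	free_page((unsigned long)page);
 */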
/**
 * d_path_with_unreachable - return the path of a dentry
 * @path: path to report
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * The difference from d_path() is that this prepends "(unreachable)"
 * to paths which are unreachable from the current process' root.
 */
char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
{
	char *res = buf + buflen;
	struct path root;
	struct path tmp;
	int error;

	if (path->dentry->d_op && path->dentry->d_op->d_dname)
		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);

	get_fs_root(current->fs, &root);
	write_seqlock(&rename_lock);
	tmp = root;
	error = path_with_deleted(path, &tmp, &res, &buflen);
	if (!error && !path_equal(&tmp, &root))
		error = prepend_unreachable(&res, &buflen);
	write_sequnlock(&rename_lock);
	path_put(&root);
	if (error)
		res = ERR_PTR(error);

	return res;
}

/*
 * Helper function for dentry_operations.d_dname() members
 */
char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
			const char *fmt, ...)
{
	va_list args;
	char temp[64];
	int sz;

	va_start(args, fmt);
	sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
	va_end(args);

	if (sz > sizeof(temp) || sz > buflen)
		return ERR_PTR(-ENAMETOOLONG);

	buffer += buflen - sz;
	return memcpy(buffer, temp, sz);
}
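/*
 * Example d_dname implementation using this helper (modelled on what
 * pipefs does; the exact name format is illustrative):
 *
 *	static char *pipefs_dname(struct dentry *dentry, char *buffer,
 *				  int buflen)
 *	{
 *		return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
 *				dentry->d_inode->i_ino);
 *	}
 */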
/*
 * Write full pathname from the root of the filesystem into the buffer.
 */
static char *__dentry_path(struct dentry *dentry, char *buf, int buflen)
{
	char *end = buf + buflen;
	char *retval;

	prepend(&end, &buflen, "\0", 1);
	if (buflen < 1)
		goto Elong;
	/* Get '/' right */
	retval = end-1;
	*retval = '/';

	while (!IS_ROOT(dentry)) {
		struct dentry *parent = dentry->d_parent;
		int error;

		prefetch(parent);
		spin_lock(&dentry->d_lock);
		error = prepend_name(&end, &buflen, &dentry->d_name);
		spin_unlock(&dentry->d_lock);
		if (error != 0 || prepend(&end, &buflen, "/", 1) != 0)
			goto Elong;

		retval = end;
		dentry = parent;
	}
	return retval;
Elong:
	return ERR_PTR(-ENAMETOOLONG);
}

char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
{
	char *retval;

	write_seqlock(&rename_lock);
	retval = __dentry_path(dentry, buf, buflen);
	write_sequnlock(&rename_lock);

	return retval;
}
EXPORT_SYMBOL(dentry_path_raw);

char *dentry_path(struct dentry *dentry, char *buf, int buflen)
{
	char *p = NULL;
	char *retval;

	write_seqlock(&rename_lock);
	if (d_unlinked(dentry)) {
		p = buf + buflen;
		if (prepend(&p, &buflen, "//deleted", 10) != 0)
			goto Elong;
		buflen++;
	}
	retval = __dentry_path(dentry, buf, buflen);
	write_sequnlock(&rename_lock);
	if (!IS_ERR(retval) && p)
		*p = '/';	/* restore '/' overridden with '\0' */
	return retval;
Elong:
	return ERR_PTR(-ENAMETOOLONG);
}

/*
 * NOTE! The user-level library version returns a
 * character pointer. The kernel system call just
 * returns the length of the buffer filled (which
 * includes the ending '\0' character), or a negative
 * error value. So libc would do something like
 *
 *	char *getcwd(char * buf, size_t size)
 *	{
 *		int retval;
 *
 *		retval = sys_getcwd(buf, size);
 *		if (retval >= 0)
 *			return buf;
 *		errno = -retval;
 *		return NULL;
 *	}
 */
SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
{
	int error;
	struct path pwd, root;
	char *page = (char *) __get_free_page(GFP_USER);

	if (!page)
		return -ENOMEM;

	get_fs_root_and_pwd(current->fs, &root, &pwd);

	error = -ENOENT;
	write_seqlock(&rename_lock);
	if (!d_unlinked(pwd.dentry)) {
		unsigned long len;
		struct path tmp = root;
		char *cwd = page + PAGE_SIZE;
		int buflen = PAGE_SIZE;

		prepend(&cwd, &buflen, "\0", 1);
		error = prepend_path(&pwd, &tmp, &cwd, &buflen);
		write_sequnlock(&rename_lock);

		if (error)
			goto out;

		/* Unreachable from current root */
		if (!path_equal(&tmp, &root)) {
			error = prepend_unreachable(&cwd, &buflen);
			if (error)
				goto out;
		}

		error = -ERANGE;
		len = PAGE_SIZE + page - cwd;
		if (len <= size) {
			error = len;
			if (copy_to_user(buf, cwd, len))
				error = -EFAULT;
		}
	} else {
		write_sequnlock(&rename_lock);
	}

out:
	path_put(&pwd);
	path_put(&root);
	free_page((unsigned long) page);
	return error;
}
/*
 * Test whether new_dentry is a subdirectory of old_dentry.
 *
 * Trivially implemented using the dcache structure
 */

/**
 * is_subdir - is new dentry a subdirectory of old_dentry
 * @new_dentry: new dentry
 * @old_dentry: old dentry
 *
 * Returns 1 if new_dentry is a subdirectory of the parent (at any depth).
 * Returns 0 otherwise.
 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
 */

int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
{
	int result;
	unsigned seq;

	if (new_dentry == old_dentry)
		return 1;

	do {
		/* for restarting inner loop in case of seq retry */
		seq = read_seqbegin(&rename_lock);
		/*
		 * Need rcu_read_lock() to protect against the d_parent
		 * trashing due to d_move().
		 */
		rcu_read_lock();
		if (d_ancestor(old_dentry, new_dentry))
			result = 1;
		else
			result = 0;
		rcu_read_unlock();
	} while (read_seqretry(&rename_lock, seq));

	return result;
}
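/*
 * Worked example: for /a/b/c, is_subdir(c, a) returns 1 (c lies below a)
 * and is_subdir(a, c) returns 0; a dentry also counts as a subdirectory
 * of itself, so is_subdir(a, a) returns 1.
 */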
int path_is_under(struct path *path1, struct path *path2)
{
	struct vfsmount *mnt = path1->mnt;
	struct dentry *dentry = path1->dentry;
	int res;

	br_read_lock(vfsmount_lock);
	if (mnt != path2->mnt) {
		for (;;) {
			if (mnt->mnt_parent == mnt) {
				br_read_unlock(vfsmount_lock);
				return 0;
			}
			if (mnt->mnt_parent == path2->mnt)
				break;
			mnt = mnt->mnt_parent;
		}
		dentry = mnt->mnt_mountpoint;
	}
	res = is_subdir(dentry, path2->dentry);
	br_read_unlock(vfsmount_lock);
	return res;
}
EXPORT_SYMBOL(path_is_under);

void d_genocide(struct dentry *root)
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq;
	int locked = 0;

	seq = read_seqbegin(&rename_lock);
again:
	this_parent = root;
	spin_lock(&this_parent->d_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		if (d_unhashed(dentry) || !dentry->d_inode) {
			spin_unlock(&dentry->d_lock);
			continue;
		}
		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
			dentry->d_flags |= DCACHE_GENOCIDE;
			dentry->d_count--;
		}
		spin_unlock(&dentry->d_lock);
	}
	if (this_parent != root) {
		struct dentry *child = this_parent;
		if (!(this_parent->d_flags & DCACHE_GENOCIDE)) {
			this_parent->d_flags |= DCACHE_GENOCIDE;
			this_parent->d_count--;
		}
		this_parent = try_to_ascend(this_parent, locked, seq);
		if (!this_parent)
			goto rename_retry;
		next = child->d_u.d_child.next;
		goto resume;
	}
	spin_unlock(&this_parent->d_lock);
	if (!locked && read_seqretry(&rename_lock, seq))
		goto rename_retry;
	if (locked)
		write_sequnlock(&rename_lock);
	return;

rename_retry:
	locked = 1;
	write_seqlock(&rename_lock);
	goto again;
}

/**
 * find_inode_number - check for dentry with name
 * @dir: directory to check
 * @name: Name to find.
 *
 * Check whether a dentry already exists for the given name,
 * and return the inode number if it has an inode. Otherwise
 * 0 is returned.
 *
 * This routine is used to post-process directory listings for
 * filesystems using synthetic inode numbers, and is necessary
 * to keep getcwd() working.
 */

ino_t find_inode_number(struct dentry *dir, struct qstr *name)
{
	struct dentry * dentry;
	ino_t ino = 0;

	dentry = d_hash_and_lookup(dir, name);
	if (dentry) {
		if (dentry->d_inode)
			ino = dentry->d_inode->i_ino;
		dput(dentry);
	}
	return ino;
}
EXPORT_SYMBOL(find_inode_number);

static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)
{
	if (!str)
		return 0;
	dhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("dhash_entries=", set_dhash_entries);

static void __init dcache_init_early(void)
{
	int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					HASH_EARLY,
					&d_hash_shift,
					&d_hash_mask,
					0);

	for (loop = 0; loop < (1 << d_hash_shift); loop++)
		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
}

static void __init dcache_init(void)
{
	int loop;

	/*
	 * A constructor could be added for stable state like the lists,
	 * but it is probably not worth it because of the cache nature
	 * of the dcache.
	 */
	dentry_cache = KMEM_CACHE(dentry,
		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);

	/* Hash may have been set up in dcache_init_early */
	if (!hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					0,
					&d_hash_shift,
					&d_hash_mask,
					0);

	for (loop = 0; loop < (1 << d_hash_shift); loop++)
		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
}

/* SLAB cache for __getname() consumers */
struct kmem_cache *names_cachep __read_mostly;
EXPORT_SYMBOL(names_cachep);

EXPORT_SYMBOL(d_genocide);

void __init vfs_caches_init_early(void)
{
	dcache_init_early();
	inode_init_early();
}

void __init vfs_caches_init(unsigned long mempages)
{
	unsigned long reserve;

	/*
	 * Base hash sizes on available memory, with a reserve equal to
	 * 150% of current kernel size.
	 */
	reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
	mempages -= reserve;

	names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	dcache_init();
	inode_init();
	files_init(mempages);
	mnt_init();
	bdev_cache_init();
	chrdev_init();
}