/*
 * fs/dcache.c
 *
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 */

/*
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */

#include <linux/syscalls.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <asm/uaccess.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/fs_struct.h>
#include <linux/hardirq.h>
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include "internal.h"
#include "mount.h"

/*
 * Usage:
 * dentry->d_inode->i_lock protects:
 *   - i_dentry, d_alias, d_inode of aliases
 * dcache_hash_bucket lock protects:
 *   - the dcache hash table
 * s_anon bl list spinlock protects:
 *   - the s_anon list (see __d_drop)
 * dcache_lru_lock protects:
 *   - the dcache lru lists and counters
 * d_lock protects:
 *   - d_flags
 *   - d_name
 *   - d_lru
 *   - d_count
 *   - d_unhashed()
 *   - d_parent and d_subdirs
 *   - children's d_child and d_parent
 *   - d_alias, d_inode
 *
 * Ordering:
 * dentry->d_inode->i_lock
 *   dentry->d_lock
 *     dcache_lru_lock
 *     dcache_hash_bucket lock
 *     s_anon lock
 *
 * If there is an ancestor relationship:
 * dentry->d_parent->...->d_parent->d_lock
 *   ...
 *     dentry->d_parent->d_lock
 *       dentry->d_lock
 *
 * If no ancestor relationship:
 * if (dentry1 < dentry2)
 *   dentry1->d_lock
 *     dentry2->d_lock
 */
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock);
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(rename_lock);

static struct kmem_cache *dentry_cache __read_mostly;

/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.
 */
#define D_HASHBITS	d_hash_shift
#define D_HASHMASK	d_hash_mask

static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;

static struct hlist_bl_head *dentry_hashtable __read_mostly;

static inline struct hlist_bl_head *d_hash(struct dentry *parent,
					unsigned long hash)
{
	hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
	hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
	return dentry_hashtable + (hash & D_HASHMASK);
}

/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

static DEFINE_PER_CPU(unsigned int, nr_dentry);

#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
static int get_nr_dentry(void)
{
	int i;
	int sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry, i);
	return sum < 0 ? 0 : sum;
}

int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
		   size_t *lenp, loff_t *ppos)
{
	dentry_stat.nr_dentry = get_nr_dentry();
	return proc_dointvec(table, write, buffer, lenp, ppos);
}
#endif

static void __d_free(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	WARN_ON(!list_empty(&dentry->d_alias));
	if (dname_external(dentry))
		kfree(dentry->d_name.name);
	kmem_cache_free(dentry_cache, dentry);
}

/*
 * no locks, please.
 */
static void d_free(struct dentry *dentry)
{
	BUG_ON(dentry->d_count);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	/* if dentry was never visible to RCU, immediate free is OK */
	if (!(dentry->d_flags & DCACHE_RCUACCESS))
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}

/**
 * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
 * @dentry: the target dentry
 *
 * After this call, in-progress rcu-walk path lookup will fail. This
 * should be called after unhashing, and after changing d_inode (if
 * the dentry has not already been unhashed).
 */
static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
{
	assert_spin_locked(&dentry->d_lock);
	/* Go through a barrier */
	write_seqcount_barrier(&dentry->d_seq);
}

/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined. Dentry has no refcount
 * and is unhashed.
 */
static void dentry_iput(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	if (inode) {
		dentry->d_inode = NULL;
		list_del_init(&dentry->d_alias);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&inode->i_lock);
		if (!inode->i_nlink)
			fsnotify_inoderemove(inode);
		if (dentry->d_op && dentry->d_op->d_iput)
			dentry->d_op->d_iput(dentry, inode);
		else
			iput(inode);
	} else {
		spin_unlock(&dentry->d_lock);
	}
}

/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined. dentry remains in-use.
 */
static void dentry_unlink_inode(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	dentry->d_inode = NULL;
	list_del_init(&dentry->d_alias);
	dentry_rcuwalk_barrier(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);
	if (!inode->i_nlink)
		fsnotify_inoderemove(inode);
	if (dentry->d_op && dentry->d_op->d_iput)
		dentry->d_op->d_iput(dentry, inode);
	else
		iput(inode);
}

/*
 * dentry_lru_(add|del|prune|move_tail) must be called with d_lock held.
 */
static void dentry_lru_add(struct dentry *dentry)
{
	if (list_empty(&dentry->d_lru)) {
		spin_lock(&dcache_lru_lock);
		list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
		dentry->d_sb->s_nr_dentry_unused++;
		dentry_stat.nr_unused++;
		spin_unlock(&dcache_lru_lock);
	}
}

static void __dentry_lru_del(struct dentry *dentry)
{
	list_del_init(&dentry->d_lru);
	dentry->d_flags &= ~DCACHE_SHRINK_LIST;
	dentry->d_sb->s_nr_dentry_unused--;
	dentry_stat.nr_unused--;
}

/*
 * Remove a dentry with references from the LRU.
 */
static void dentry_lru_del(struct dentry *dentry)
{
	if (!list_empty(&dentry->d_lru)) {
		spin_lock(&dcache_lru_lock);
		__dentry_lru_del(dentry);
		spin_unlock(&dcache_lru_lock);
	}
}

/*
 * Remove a dentry that is unreferenced and about to be pruned
 * (unhashed and destroyed) from the LRU, and inform the file system.
 * This wrapper should be called _prior_ to unhashing a victim dentry.
 */
static void dentry_lru_prune(struct dentry *dentry)
{
	if (!list_empty(&dentry->d_lru)) {
		if (dentry->d_flags & DCACHE_OP_PRUNE)
			dentry->d_op->d_prune(dentry);

		spin_lock(&dcache_lru_lock);
		__dentry_lru_del(dentry);
		spin_unlock(&dcache_lru_lock);
	}
}

static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list)
{
	spin_lock(&dcache_lru_lock);
	if (list_empty(&dentry->d_lru)) {
		list_add_tail(&dentry->d_lru, list);
		dentry->d_sb->s_nr_dentry_unused++;
		dentry_stat.nr_unused++;
	} else {
		list_move_tail(&dentry->d_lru, list);
	}
	spin_unlock(&dcache_lru_lock);
}
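
/*
 * Example (an illustrative sketch, not part of the original file; the
 * helper name is hypothetical): how a dput()-style caller is expected
 * to use the LRU helpers above. The caller holds d_lock; dentry_lru_add()
 * takes dcache_lru_lock nested inside it, matching the lock ordering
 * documented at the top of this file.
 */
static void example_retain_unused(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_flags |= DCACHE_REFERENCED;	/* bias LRU scans in our favour */
	dentry_lru_add(dentry);			/* no-op if already on the LRU */
	spin_unlock(&dentry->d_lock);
}
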
/**
 * d_kill - kill dentry and return parent
 * @dentry: dentry to kill
 * @parent: parent dentry
 *
 * The dentry must already be unhashed and removed from the LRU.
 *
 * If this is the root of the dentry tree, return NULL.
 *
 * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by
 * d_kill.
 */
static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
	__releases(dentry->d_lock)
	__releases(parent->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	list_del(&dentry->d_u.d_child);
	/*
	 * Inform try_to_ascend() that we are no longer attached to the
	 * dentry tree
	 */
	dentry->d_flags |= DCACHE_DISCONNECTED;
	if (parent)
		spin_unlock(&parent->d_lock);
	dentry_iput(dentry);
	/*
	 * dentry_iput drops the locks, at which point nobody (except
	 * transient RCU lookups) can reach this dentry.
	 */
	d_free(dentry);
	return parent;
}

/*
 * Unhash a dentry without inserting an RCU walk barrier or checking that
 * dentry->d_lock is locked. The caller must take care of that, if
 * appropriate.
 */
static void __d_shrink(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		struct hlist_bl_head *b;
		if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
			b = &dentry->d_sb->s_anon;
		else
			b = d_hash(dentry->d_parent, dentry->d_name.hash);

		hlist_bl_lock(b);
		__hlist_bl_del(&dentry->d_hash);
		dentry->d_hash.pprev = NULL;
		hlist_bl_unlock(b);
	}
}

/**
 * d_drop - drop a dentry
 * @dentry: dentry to drop
 *
 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 * be found through a VFS lookup any more. Note that this is different from
 * deleting the dentry - d_delete will try to mark the dentry negative if
 * possible, giving a successful _negative_ lookup, while d_drop will
 * just make the cache lookup fail.
 *
 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 * reason (NFS timeouts or autofs deletes).
 *
 * __d_drop requires dentry->d_lock.
 */
void __d_drop(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		__d_shrink(dentry);
		dentry_rcuwalk_barrier(dentry);
	}
}
EXPORT_SYMBOL(__d_drop);

void d_drop(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);

/*
 * d_clear_need_lookup - drop a dentry from cache and clear the need lookup flag
 * @dentry: dentry to drop
 *
 * This is called when we do a lookup on a placeholder dentry that needed to be
 * looked up. The dentry should have been hashed in order for it to be found by
 * the lookup code, but now needs to be unhashed while we do the actual lookup
 * and clear the DCACHE_NEED_LOOKUP flag.
 */
void d_clear_need_lookup(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_clear_need_lookup);

/*
 * Finish off a dentry we've decided to kill.
 * dentry->d_lock must be held, returns with it unlocked.
 * If ref is non-zero, then decrement the refcount too.
 * Returns dentry requiring refcount drop, or NULL if we're done.
 */
static inline struct dentry *dentry_kill(struct dentry *dentry, int ref)
	__releases(dentry->d_lock)
{
	struct inode *inode;
	struct dentry *parent;

	inode = dentry->d_inode;
	if (inode && !spin_trylock(&inode->i_lock)) {
relock:
		spin_unlock(&dentry->d_lock);
		cpu_relax();
		return dentry; /* try again with same dentry */
	}
	if (IS_ROOT(dentry))
		parent = NULL;
	else
		parent = dentry->d_parent;
	if (parent && !spin_trylock(&parent->d_lock)) {
		if (inode)
			spin_unlock(&inode->i_lock);
		goto relock;
	}

	if (ref)
		dentry->d_count--;
	/*
	 * if dentry was on the d_lru list delete it from there.
	 * inform the fs via d_prune that this dentry is about to be
	 * unhashed and destroyed.
	 */
	dentry_lru_prune(dentry);
	/* if it was on the hash then remove it */
	__d_drop(dentry);
	return d_kill(dentry, parent);
}
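
/*
 * Example (illustrative, hypothetical helper; the real ->d_revalidate()
 * signature takes more arguments): what a network filesystem does when it
 * finds a cached name stale on the server - drop the dentry so the next
 * lookup misses the cache and goes back to the wire. Compare d_delete(),
 * which tries to keep the dentry around as a negative entry instead.
 */
static int example_revalidate_miss(struct dentry *dentry)
{
	d_drop(dentry);		/* unhash: VFS lookups no longer find it */
	return 0;		/* report the dentry as invalid */
}
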
/*
 * This is dput
 *
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */

/*
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 */
void dput(struct dentry *dentry)
{
	if (!dentry)
		return;

repeat:
	if (dentry->d_count == 1)
		might_sleep();
	spin_lock(&dentry->d_lock);
	BUG_ON(!dentry->d_count);
	if (dentry->d_count > 1) {
		dentry->d_count--;
		spin_unlock(&dentry->d_lock);
		return;
	}

	if (dentry->d_flags & DCACHE_OP_DELETE) {
		if (dentry->d_op->d_delete(dentry))
			goto kill_it;
	}

	/* Unreachable? Get rid of it */
	if (d_unhashed(dentry))
		goto kill_it;

	/*
	 * If this dentry needs lookup, don't set the referenced flag so that it
	 * is more likely to be cleaned up by the dcache shrinker in case of
	 * memory pressure.
	 */
	if (!d_need_lookup(dentry))
		dentry->d_flags |= DCACHE_REFERENCED;
	dentry_lru_add(dentry);

	dentry->d_count--;
	spin_unlock(&dentry->d_lock);
	return;

kill_it:
	dentry = dentry_kill(dentry, 1);
	if (dentry)
		goto repeat;
}
EXPORT_SYMBOL(dput);

/**
 * d_invalidate - invalidate a dentry
 * @dentry: dentry to invalidate
 *
 * Try to invalidate the dentry if it turns out to be
 * possible. If there are other dentries that can be
 * reached through this one we can't delete it and we
 * return -EBUSY. On success we return 0.
 *
 * no dcache lock.
 */

int d_invalidate(struct dentry * dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dentry->d_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dentry->d_lock);
		return 0;
	}
	/*
	 * Check whether to do a partial shrink_dcache
	 * to get rid of unused child entries.
	 */
	if (!list_empty(&dentry->d_subdirs)) {
		spin_unlock(&dentry->d_lock);
		shrink_dcache_parent(dentry);
		spin_lock(&dentry->d_lock);
	}

	/*
	 * Somebody else still using it?
	 *
	 * If it's a directory, we can't drop it
	 * for fear of somebody re-populating it
	 * with children (even though dropping it
	 * would make it unreachable from the root,
	 * we might still populate it if it was a
	 * working directory or similar).
	 * We also need to leave mountpoints alone,
	 * directory or not.
	 */
	if (dentry->d_count > 1 && dentry->d_inode) {
		if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
			spin_unlock(&dentry->d_lock);
			return -EBUSY;
		}
	}

	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
	return 0;
}
EXPORT_SYMBOL(d_invalidate);

/* This must be called with d_lock held */
static inline void __dget_dlock(struct dentry *dentry)
{
	dentry->d_count++;
}

static inline void __dget(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__dget_dlock(dentry);
	spin_unlock(&dentry->d_lock);
}

struct dentry *dget_parent(struct dentry *dentry)
{
	struct dentry *ret;

repeat:
	/*
	 * Don't need rcu_dereference because we re-check it was correct under
	 * the lock.
	 */
	rcu_read_lock();
	ret = dentry->d_parent;
	spin_lock(&ret->d_lock);
	if (unlikely(ret != dentry->d_parent)) {
		spin_unlock(&ret->d_lock);
		rcu_read_unlock();
		goto repeat;
	}
	rcu_read_unlock();
	BUG_ON(!ret->d_count);
	ret->d_count++;
	spin_unlock(&ret->d_lock);
	return ret;
}
EXPORT_SYMBOL(dget_parent);

/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 * @want_discon: flag, used by d_splice_alias, to request
 *               that only a DISCONNECTED alias be returned.
 *
 * If inode has a hashed alias, or is a directory and has any alias,
 * acquire the reference to alias and return it. Otherwise return NULL.
 * Notice that if inode is a directory there can be only one alias and
 * it can be unhashed only if it has no children, or if it is the root
 * of a filesystem.
 *
 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 * any other hashed alias over that one unless @want_discon is set,
 * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
 */
static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
{
	struct dentry *alias, *discon_alias;

again:
	discon_alias = NULL;
	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				discon_alias = alias;
			} else if (!want_discon) {
				__dget_dlock(alias);
				spin_unlock(&alias->d_lock);
				return alias;
			}
		}
		spin_unlock(&alias->d_lock);
	}
	if (discon_alias) {
		alias = discon_alias;
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				__dget_dlock(alias);
				spin_unlock(&alias->d_lock);
				return alias;
			}
		}
		spin_unlock(&alias->d_lock);
		goto again;
	}
	return NULL;
}

struct dentry *d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!list_empty(&inode->i_dentry)) {
		spin_lock(&inode->i_lock);
		de = __d_find_alias(inode, 0);
		spin_unlock(&inode->i_lock);
	}
	return de;
}
EXPORT_SYMBOL(d_find_alias);

/*
 * Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&inode->i_lock);
	list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
		spin_lock(&dentry->d_lock);
		if (!dentry->d_count) {
			__dget_dlock(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&inode->i_lock);
			dput(dentry);
			goto restart;
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_prune_aliases);
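
/*
 * Example (illustrative sketch, hypothetical helper): naming a dentry's
 * parent from a context that holds no locks. dget_parent() above returns
 * a stable reference that must be balanced with dput().
 */
static void example_report_parent(struct dentry *dentry)
{
	struct dentry *parent = dget_parent(dentry);

	pr_info("dcache: %s has parent %s\n",
		dentry->d_name.name, parent->d_name.name);
	dput(parent);	/* balance the reference taken by dget_parent() */
}
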
/*
 * Try to throw away a dentry - free the inode, dput the parent.
 * Requires dentry->d_lock is held, and dentry->d_count == 0.
 * Releases dentry->d_lock.
 *
 * This may fail if locks cannot be acquired - no problem, just try again.
 */
static void try_prune_one_dentry(struct dentry *dentry)
	__releases(dentry->d_lock)
{
	struct dentry *parent;

	parent = dentry_kill(dentry, 0);
	/*
	 * If dentry_kill returns NULL, we have nothing more to do.
	 * if it returns the same dentry, trylocks failed. In either
	 * case, just loop again.
	 *
	 * Otherwise, we need to prune ancestors too. This is necessary
	 * to prevent quadratic behavior of shrink_dcache_parent(), but
	 * is also expected to be beneficial in reducing dentry cache
	 * fragmentation.
	 */
	if (!parent)
		return;
	if (parent == dentry)
		return;

	/* Prune ancestors. */
	dentry = parent;
	while (dentry) {
		spin_lock(&dentry->d_lock);
		if (dentry->d_count > 1) {
			dentry->d_count--;
			spin_unlock(&dentry->d_lock);
			return;
		}
		dentry = dentry_kill(dentry, 1);
	}
}

static void shrink_dentry_list(struct list_head *list)
{
	struct dentry *dentry;

	rcu_read_lock();
	for (;;) {
		dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
		if (&dentry->d_lru == list)
			break; /* empty */
		spin_lock(&dentry->d_lock);
		if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
			spin_unlock(&dentry->d_lock);
			continue;
		}

		/*
		 * We found an inuse dentry which was not removed from
		 * the LRU because of laziness during lookup. Do not free
		 * it - just keep it off the LRU list.
		 */
		if (dentry->d_count) {
			dentry_lru_del(dentry);
			spin_unlock(&dentry->d_lock);
			continue;
		}

		rcu_read_unlock();

		try_prune_one_dentry(dentry);

		rcu_read_lock();
	}
	rcu_read_unlock();
}

/**
 * prune_dcache_sb - shrink the dcache
 * @sb: superblock
 * @count: number of entries to try to free
 *
 * Attempt to shrink the superblock dcache LRU by @count entries. This is
 * done when we need more memory and is called from the superblock shrinker
 * function.
 *
 * This function may fail to free any resources if all the dentries are in
 * use.
 */
void prune_dcache_sb(struct super_block *sb, int count)
{
	struct dentry *dentry;
	LIST_HEAD(referenced);
	LIST_HEAD(tmp);

relock:
	spin_lock(&dcache_lru_lock);
	while (!list_empty(&sb->s_dentry_lru)) {
		dentry = list_entry(sb->s_dentry_lru.prev,
				struct dentry, d_lru);
		BUG_ON(dentry->d_sb != sb);

		if (!spin_trylock(&dentry->d_lock)) {
			spin_unlock(&dcache_lru_lock);
			cpu_relax();
			goto relock;
		}

		if (dentry->d_flags & DCACHE_REFERENCED) {
			dentry->d_flags &= ~DCACHE_REFERENCED;
			list_move(&dentry->d_lru, &referenced);
			spin_unlock(&dentry->d_lock);
		} else {
			list_move_tail(&dentry->d_lru, &tmp);
			dentry->d_flags |= DCACHE_SHRINK_LIST;
			spin_unlock(&dentry->d_lock);
			if (!--count)
				break;
		}
		cond_resched_lock(&dcache_lru_lock);
	}
	if (!list_empty(&referenced))
		list_splice(&referenced, &sb->s_dentry_lru);
	spin_unlock(&dcache_lru_lock);

	shrink_dentry_list(&tmp);
}

/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This is used to free
 * the dcache before unmounting a file system.
 */
void shrink_dcache_sb(struct super_block *sb)
{
	LIST_HEAD(tmp);

	spin_lock(&dcache_lru_lock);
	while (!list_empty(&sb->s_dentry_lru)) {
		list_splice_init(&sb->s_dentry_lru, &tmp);
		spin_unlock(&dcache_lru_lock);
		shrink_dentry_list(&tmp);
		spin_lock(&dcache_lru_lock);
	}
	spin_unlock(&dcache_lru_lock);
}
EXPORT_SYMBOL(shrink_dcache_sb);

/*
 * destroy a single subtree of dentries for unmount
 * - see the comments on shrink_dcache_for_umount() for a description of the
 *   locking
 */
static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
{
	struct dentry *parent;

	BUG_ON(!IS_ROOT(dentry));

	for (;;) {
		/* descend to the first leaf in the current subtree */
		while (!list_empty(&dentry->d_subdirs))
			dentry = list_entry(dentry->d_subdirs.next,
					    struct dentry, d_u.d_child);

		/* consume the dentries from this leaf up through its parents
		 * until we find one with children or run out altogether */
		do {
			struct inode *inode;

			/*
			 * remove the dentry from the lru, and inform
			 * the fs that this dentry is about to be
			 * unhashed and destroyed.
			 */
			dentry_lru_prune(dentry);
			__d_shrink(dentry);

			if (dentry->d_count != 0) {
				printk(KERN_ERR
				       "BUG: Dentry %p{i=%lx,n=%s}"
				       " still in use (%d)"
				       " [unmount of %s %s]\n",
				       dentry,
				       dentry->d_inode ?
				       dentry->d_inode->i_ino : 0UL,
				       dentry->d_name.name,
				       dentry->d_count,
				       dentry->d_sb->s_type->name,
				       dentry->d_sb->s_id);
				BUG();
			}

			if (IS_ROOT(dentry)) {
				parent = NULL;
				list_del(&dentry->d_u.d_child);
			} else {
				parent = dentry->d_parent;
				parent->d_count--;
				list_del(&dentry->d_u.d_child);
			}

			inode = dentry->d_inode;
			if (inode) {
				dentry->d_inode = NULL;
				list_del_init(&dentry->d_alias);
				if (dentry->d_op && dentry->d_op->d_iput)
					dentry->d_op->d_iput(dentry, inode);
				else
					iput(inode);
			}

			d_free(dentry);

			/* finished when we fall off the top of the tree,
			 * otherwise we ascend to the parent and move to the
			 * next sibling if there is one */
			if (!parent)
				return;
			dentry = parent;
		} while (list_empty(&dentry->d_subdirs));

		dentry = list_entry(dentry->d_subdirs.next,
				    struct dentry, d_u.d_child);
	}
}

/*
 * destroy the dentries attached to a superblock on unmounting
 * - we don't need to use dentry->d_lock because:
 *   - the superblock is detached from all mountings and open files, so the
 *     dentry trees will not be rearranged by the VFS
 *   - s_umount is write-locked, so the memory pressure shrinker will ignore
 *     any dentries belonging to this superblock that it comes across
 *   - the filesystem itself is no longer permitted to rearrange the dentries
 *     in this superblock
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	if (down_read_trylock(&sb->s_umount))
		BUG();

	dentry = sb->s_root;
	sb->s_root = NULL;
	dentry->d_count--;
	shrink_dcache_for_umount_subtree(dentry);

	while (!hlist_bl_empty(&sb->s_anon)) {
		dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash);
		shrink_dcache_for_umount_subtree(dentry);
	}
}
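
/*
 * Example (illustrative, hypothetical helper and batch size): the
 * superblock shrinker is the intended caller of prune_dcache_sb();
 * a nudge from memory reclaim boils down to something like this.
 */
static void example_trim_dcache(struct super_block *sb)
{
	/* try to reclaim up to 128 unused dentries from this sb's LRU */
	prune_dcache_sb(sb, 128);
}
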
/*
 * This tries to ascend one level of parenthood, but
 * we can race with renaming, so we need to re-check
 * the parenthood after dropping the lock and check
 * that the sequence number still matches.
 */
static struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq)
{
	struct dentry *new = old->d_parent;

	rcu_read_lock();
	spin_unlock(&old->d_lock);
	spin_lock(&new->d_lock);

	/*
	 * might go back up the wrong parent if we have had a rename
	 * or deletion
	 */
	if (new != old->d_parent ||
	    (old->d_flags & DCACHE_DISCONNECTED) ||
	    (!locked && read_seqretry(&rename_lock, seq))) {
		spin_unlock(&new->d_lock);
		new = NULL;
	}
	rcu_read_unlock();
	return new;
}


/*
 * Search for at least 1 mount point in the dentry's subdirs.
 * We descend to the next level whenever the d_subdirs
 * list is non-empty and continue searching.
 */

/**
 * have_submounts - check for mounts over a dentry
 * @parent: dentry to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point
 */
int have_submounts(struct dentry *parent)
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq;
	int locked = 0;

	seq = read_seqbegin(&rename_lock);
again:
	this_parent = parent;

	if (d_mountpoint(parent))
		goto positive;
	spin_lock(&this_parent->d_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		/* Have we found a mount point ? */
		if (d_mountpoint(dentry)) {
			spin_unlock(&dentry->d_lock);
			spin_unlock(&this_parent->d_lock);
			goto positive;
		}
		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = try_to_ascend(this_parent, locked, seq);
		if (!this_parent)
			goto rename_retry;
		next = child->d_u.d_child.next;
		goto resume;
	}
	spin_unlock(&this_parent->d_lock);
	if (!locked && read_seqretry(&rename_lock, seq))
		goto rename_retry;
	if (locked)
		write_sequnlock(&rename_lock);
	return 0; /* No mount points found in tree */
positive:
	if (!locked && read_seqretry(&rename_lock, seq))
		goto rename_retry;
	if (locked)
		write_sequnlock(&rename_lock);
	return 1;

rename_retry:
	locked = 1;
	write_seqlock(&rename_lock);
	goto again;
}
EXPORT_SYMBOL(have_submounts);

/*
 * Search the dentry child list for the specified parent,
 * and move any unused dentries to the end of the unused
 * list for prune_dcache(). We descend to the next level
 * whenever the d_subdirs list is non-empty and continue
 * searching.
 *
 * It returns zero iff there are no unused children,
 * otherwise it returns the number of children moved to
 * the end of the unused list. This may not be the total
 * number of unused children, because select_parent can
 * drop the lock and return early due to latency
 * constraints.
 */
static int select_parent(struct dentry *parent, struct list_head *dispose)
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq;
	int found = 0;
	int locked = 0;

	seq = read_seqbegin(&rename_lock);
again:
	this_parent = parent;
	spin_lock(&this_parent->d_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

		/*
		 * move only zero ref count dentries to the dispose list.
		 *
		 * Those which are presently on the shrink list, being processed
		 * by shrink_dentry_list(), shouldn't be moved. Otherwise the
		 * loop in shrink_dcache_parent() might not make any progress
		 * and loop forever.
		 */
		if (dentry->d_count) {
			dentry_lru_del(dentry);
		} else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
			dentry_lru_move_list(dentry, dispose);
			dentry->d_flags |= DCACHE_SHRINK_LIST;
			found++;
		}
		/*
		 * We can return to the caller if we have found some (this
		 * ensures forward progress). We'll be coming back to find
		 * the rest.
		 */
		if (found && need_resched()) {
			spin_unlock(&dentry->d_lock);
			goto out;
		}

		/*
		 * Descend a level if the d_subdirs list is non-empty.
		 */
		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}

		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = try_to_ascend(this_parent, locked, seq);
		if (!this_parent)
			goto rename_retry;
		next = child->d_u.d_child.next;
		goto resume;
	}
out:
	spin_unlock(&this_parent->d_lock);
	if (!locked && read_seqretry(&rename_lock, seq))
		goto rename_retry;
	if (locked)
		write_sequnlock(&rename_lock);
	return found;

rename_retry:
	if (found)
		return found;
	locked = 1;
	write_seqlock(&rename_lock);
	goto again;
}

/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */
void shrink_dcache_parent(struct dentry * parent)
{
	LIST_HEAD(dispose);
	int found;

	while ((found = select_parent(parent, &dispose)) != 0)
		shrink_dentry_list(&dispose);
}
EXPORT_SYMBOL(shrink_dcache_parent);

/**
 * __d_alloc - allocate a dcache entry
 * @sb: filesystem it will belong to
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */

struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	if (name->len > DNAME_INLINE_LEN-1) {
		dname = kmalloc(name->len + 1, GFP_KERNEL);
		if (!dname) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	} else {
		dname = dentry->d_iname;
	}
	dentry->d_name.name = dname;

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	dentry->d_count = 1;
	dentry->d_flags = 0;
	spin_lock_init(&dentry->d_lock);
	seqcount_init(&dentry->d_seq);
	dentry->d_inode = NULL;
	dentry->d_parent = dentry;
	dentry->d_sb = sb;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	INIT_HLIST_BL_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_LIST_HEAD(&dentry->d_alias);
	INIT_LIST_HEAD(&dentry->d_u.d_child);
	d_set_d_op(dentry, dentry->d_sb->s_d_op);

	this_cpu_inc(nr_dentry);

	return dentry;
}

/**
 * d_alloc - allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(parent->d_sb, name);
	if (!dentry)
		return NULL;

	spin_lock(&parent->d_lock);
	/*
	 * don't need child lock because it is not subject
	 * to concurrency here
	 */
	__dget_dlock(parent);
	dentry->d_parent = parent;
	list_add(&dentry->d_u.d_child, &parent->d_subdirs);
	spin_unlock(&parent->d_lock);

	return dentry;
}
EXPORT_SYMBOL(d_alloc);

struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(sb, name);
	if (dentry)
		dentry->d_flags |= DCACHE_DISCONNECTED;
	return dentry;
}
EXPORT_SYMBOL(d_alloc_pseudo);

struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
	struct qstr q;

	q.name = name;
	q.len = strlen(name);
	q.hash = full_name_hash(q.name, q.len);
	return d_alloc(parent, &q);
}
EXPORT_SYMBOL(d_alloc_name);

void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
{
	WARN_ON_ONCE(dentry->d_op);
	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
				DCACHE_OP_COMPARE	|
				DCACHE_OP_REVALIDATE	|
				DCACHE_OP_DELETE));
	dentry->d_op = op;
	if (!op)
		return;
	if (op->d_hash)
		dentry->d_flags |= DCACHE_OP_HASH;
	if (op->d_compare)
		dentry->d_flags |= DCACHE_OP_COMPARE;
	if (op->d_revalidate)
		dentry->d_flags |= DCACHE_OP_REVALIDATE;
	if (op->d_delete)
		dentry->d_flags |= DCACHE_OP_DELETE;
	if (op->d_prune)
		dentry->d_flags |= DCACHE_OP_PRUNE;
}
EXPORT_SYMBOL(d_set_d_op);
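
/*
 * Example (illustrative; the ops table and callback are hypothetical): a
 * filesystem that never wants to cache unused dentries supplies a d_delete
 * hook and attaches it with d_set_d_op(), or sets sb->s_d_op once so that
 * __d_alloc() above applies it to every new dentry automatically.
 */
static int example_d_delete(const struct dentry *dentry)
{
	return 1;	/* always release instead of keeping on the LRU */
}

static const struct dentry_operations example_dops = {
	.d_delete = example_d_delete,
};
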
static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
	spin_lock(&dentry->d_lock);
	if (inode) {
		if (unlikely(IS_AUTOMOUNT(inode)))
			dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
		list_add(&dentry->d_alias, &inode->i_dentry);
	}
	dentry->d_inode = inode;
	dentry_rcuwalk_barrier(dentry);
	spin_unlock(&dentry->d_lock);
	fsnotify_d_instantiate(dentry, inode);
}

/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */

void d_instantiate(struct dentry *entry, struct inode * inode)
{
	BUG_ON(!list_empty(&entry->d_alias));
	if (inode)
		spin_lock(&inode->i_lock);
	__d_instantiate(entry, inode);
	if (inode)
		spin_unlock(&inode->i_lock);
	security_d_instantiate(entry, inode);
}
EXPORT_SYMBOL(d_instantiate);

/**
 * d_instantiate_unique - instantiate a non-aliased dentry
 * @entry: dentry to instantiate
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry. On success, it returns NULL.
 * If an unhashed alias of "entry" already exists, then we return the
 * aliased dentry instead and drop one reference to inode.
 *
 * Note that in order to avoid conflicts with rename() etc, the caller
 * had better be holding the parent directory semaphore.
 *
 * This also assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
static struct dentry *__d_instantiate_unique(struct dentry *entry,
					     struct inode *inode)
{
	struct dentry *alias;
	int len = entry->d_name.len;
	const char *name = entry->d_name.name;
	unsigned int hash = entry->d_name.hash;

	if (!inode) {
		__d_instantiate(entry, NULL);
		return NULL;
	}

	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		struct qstr *qstr = &alias->d_name;

		/*
		 * Don't need alias->d_lock here, because aliases with
		 * d_parent == entry->d_parent are not subject to name or
		 * parent changes, because the parent inode i_mutex is held.
		 */
		if (qstr->hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
		if (dentry_cmp(qstr->name, qstr->len, name, len))
			continue;
		__dget(alias);
		return alias;
	}

	__d_instantiate(entry, inode);
	return NULL;
}

struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
{
	struct dentry *result;

	BUG_ON(!list_empty(&entry->d_alias));

	if (inode)
		spin_lock(&inode->i_lock);
	result = __d_instantiate_unique(entry, inode);
	if (inode)
		spin_unlock(&inode->i_lock);

	if (!result) {
		security_d_instantiate(entry, inode);
		return NULL;
	}

	BUG_ON(!d_unhashed(result));
	iput(inode);
	return result;
}
EXPORT_SYMBOL(d_instantiate_unique);
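
/*
 * Example (illustrative, hypothetical helper showing the core of a
 * filesystem's ->create() path): the canonical caller of d_instantiate().
 * The freshly allocated inode's reference is handed over to the dcache,
 * as the NOTE above requires.
 */
static int example_bind_new_inode(struct inode *dir, struct dentry *dentry,
				  umode_t mode)
{
	struct inode *inode = new_inode(dir->i_sb);

	if (!inode)
		return -ENOMEM;
	inode->i_mode = mode;
	d_instantiate(dentry, inode);	/* dentry goes from negative to positive */
	return 0;
}
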
/**
 * d_alloc_root - allocate root dentry
 * @root_inode: inode to allocate the root for
 *
 * Allocate a root ("/") dentry for the inode given. The inode is
 * instantiated and returned. %NULL is returned if there is insufficient
 * memory or the inode passed is %NULL.
 */

struct dentry * d_alloc_root(struct inode * root_inode)
{
	struct dentry *res = NULL;

	if (root_inode) {
		static const struct qstr name = { .name = "/", .len = 1 };

		res = __d_alloc(root_inode->i_sb, &name);
		if (res)
			d_instantiate(res, root_inode);
	}
	return res;
}
EXPORT_SYMBOL(d_alloc_root);

struct dentry *d_make_root(struct inode *root_inode)
{
	struct dentry *res = NULL;

	if (root_inode) {
		static const struct qstr name = { .name = "/", .len = 1 };

		res = __d_alloc(root_inode->i_sb, &name);
		if (res)
			d_instantiate(res, root_inode);
		else
			iput(root_inode);
	}
	return res;
}
EXPORT_SYMBOL(d_make_root);

static struct dentry * __d_find_any_alias(struct inode *inode)
{
	struct dentry *alias;

	if (list_empty(&inode->i_dentry))
		return NULL;
	alias = list_first_entry(&inode->i_dentry, struct dentry, d_alias);
	__dget(alias);
	return alias;
}

/**
 * d_find_any_alias - find any alias for a given inode
 * @inode: inode to find an alias for
 *
 * If any aliases exist for the given inode, take and return a
 * reference for one of them. If no aliases exist, return %NULL.
 */
struct dentry *d_find_any_alias(struct inode *inode)
{
	struct dentry *de;

	spin_lock(&inode->i_lock);
	de = __d_find_any_alias(inode);
	spin_unlock(&inode->i_lock);
	return de;
}
EXPORT_SYMBOL(d_find_any_alias);

/**
 * d_obtain_alias - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
 * similar open by handle operations. The returned dentry may be anonymous,
 * or may have a full name (if the inode was already in the cache).
 *
 * When called on a directory inode, we must ensure that the inode only ever
 * has one dentry. If a dentry is found, that is returned instead of
 * allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry. In case of an error the reference on the inode is released.
 * To make it easier to use in export operations a %NULL or IS_ERR inode may
 * be passed in, and the error will be propagated to the return value,
 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_alias(struct inode *inode)
{
	static const struct qstr anonstring = { .name = "" };
	struct dentry *tmp;
	struct dentry *res;

	if (!inode)
		return ERR_PTR(-ESTALE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	res = d_find_any_alias(inode);
	if (res)
		goto out_iput;

	tmp = __d_alloc(inode->i_sb, &anonstring);
	if (!tmp) {
		res = ERR_PTR(-ENOMEM);
		goto out_iput;
	}

	spin_lock(&inode->i_lock);
	res = __d_find_any_alias(inode);
	if (res) {
		spin_unlock(&inode->i_lock);
		dput(tmp);
		goto out_iput;
	}

	/* attach a disconnected dentry */
	spin_lock(&tmp->d_lock);
	tmp->d_inode = inode;
	tmp->d_flags |= DCACHE_DISCONNECTED;
	list_add(&tmp->d_alias, &inode->i_dentry);
	hlist_bl_lock(&tmp->d_sb->s_anon);
	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
	hlist_bl_unlock(&tmp->d_sb->s_anon);
	spin_unlock(&tmp->d_lock);
	spin_unlock(&inode->i_lock);
	security_d_instantiate(tmp, inode);

	return tmp;

out_iput:
	if (res && !IS_ERR(res))
		security_d_instantiate(res, inode);
	iput(inode);
	return res;
}
EXPORT_SYMBOL(d_obtain_alias);

/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and
 * DCACHE_DISCONNECTED), then d_move that in place of the given dentry
 * and return it, else simply d_add the inode to the dentry and return NULL.
 *
 * This is needed in the lookup routine of any filesystem that is exportable
 * (via knfsd) so that we can build dcache paths to directories effectively.
 *
 * If a dentry was found and moved, then it is returned. Otherwise NULL
 * is returned. This matches the expected return value of ->lookup.
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	struct dentry *new = NULL;

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	if (inode && S_ISDIR(inode->i_mode)) {
		spin_lock(&inode->i_lock);
		new = __d_find_alias(inode, 1);
		if (new) {
			BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
			spin_unlock(&inode->i_lock);
			security_d_instantiate(new, inode);
			d_move(new, dentry);
			iput(inode);
		} else {
			/* already taking inode->i_lock, so d_add() by hand */
			__d_instantiate(dentry, inode);
			spin_unlock(&inode->i_lock);
			security_d_instantiate(dentry, inode);
			d_rehash(dentry);
		}
	} else
		d_add(dentry, inode);
	return new;
}
EXPORT_SYMBOL(d_splice_alias);

/**
 * d_add_ci - lookup or allocate new dentry with case-exact name
 * @inode:  the inode case-insensitive lookup has found
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @name:   the case-exact name to be associated with the returned dentry
 *
 * This is to avoid filling the dcache with case-insensitive names for the
 * same inode; only the actual correct case is stored in the dcache for
 * case-insensitive filesystems.
 *
 * For a case-insensitive lookup match, if the case-exact dentry
 * already exists in the dcache, use it and return it.
 *
 * If no entry exists with the exact case name, allocate new dentry with
 * the exact case, and return the spliced entry.
 */
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
			struct qstr *name)
{
	int error;
	struct dentry *found;
	struct dentry *new;

	/*
	 * First check if a dentry matching the name already exists,
	 * if not go ahead and create it now.
	 */
	found = d_hash_and_lookup(dentry->d_parent, name);
	if (!found) {
		new = d_alloc(dentry->d_parent, name);
		if (!new) {
			error = -ENOMEM;
			goto err_out;
		}

		found = d_splice_alias(inode, new);
		if (found) {
			dput(new);
			return found;
		}
		return new;
	}

	/*
	 * If a matching dentry exists, and it's not negative use it.
	 *
	 * Decrement the reference count to balance the iget() done
	 * earlier on.
	 */
	if (found->d_inode) {
		if (unlikely(found->d_inode != inode)) {
			/* This can't happen because bad inodes are unhashed. */
			BUG_ON(!is_bad_inode(inode));
			BUG_ON(!is_bad_inode(found->d_inode));
		}
		iput(inode);
		return found;
	}

	/*
	 * We are going to instantiate this dentry, unhash it and clear the
	 * lookup flag so we can do that.
	 */
	if (unlikely(d_need_lookup(found)))
		d_clear_need_lookup(found);

	/*
	 * Negative dentry: instantiate it unless the inode is a directory and
	 * already has a dentry.
	 */
	new = d_splice_alias(inode, found);
	if (new) {
		dput(found);
		found = new;
	}
	return found;

err_out:
	iput(inode);
	return ERR_PTR(error);
}
EXPORT_SYMBOL(d_add_ci);

/**
 * __d_lookup_rcu - search for a dentry (racy, store-free)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * @seq: returns d_seq value at the point where the dentry was found
 * @inode: returns dentry->d_inode when the inode was found valid.
 * Returns: dentry, or NULL
 *
 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
 * resolution (store-free path walking) design described in
 * Documentation/filesystems/path-lookup.txt.
 *
 * This is not to be used outside core vfs.
 *
 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
 * held, and rcu_read_lock held. The returned dentry must not be stored
 * without taking d_lock and checking the d_seq sequence count against @seq
 * returned here.
 *
 * A refcount may be taken on the found dentry with the __d_rcu_to_refcount
 * function.
 *
 * Alternatively, __d_lookup_rcu may be called again to look up the child of
 * the returned dentry, so long as its parent's seqlock is checked after the
 * child is looked up. Thus, an interlocking stepping of sequence lock checks
 * is formed, giving integrity down the path walk.
 */
struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
				unsigned *seq, struct inode **inode)
{
	unsigned int len = name->len;
	unsigned int hash = name->hash;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(parent, hash);
	struct hlist_bl_node *node;
	struct dentry *dentry;

	/*
	 * Note: There is significant duplication with __d_lookup which is
	 * required to prevent single threaded performance regressions
	 * especially on architectures where smp_rmb (in seqcounts) are costly.
	 * Keep the two functions in sync.
	 */

	/*
	 * The hash list is protected using RCU.
	 *
	 * Carefully use d_seq when comparing a candidate dentry, to avoid
	 * races with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/path-lookup.txt for more details.
	 */
	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
		struct inode *i;
		const char *tname;
		int tlen;

		if (dentry->d_name.hash != hash)
			continue;

seqretry:
		*seq = read_seqcount_begin(&dentry->d_seq);
		if (dentry->d_parent != parent)
			continue;
		if (d_unhashed(dentry))
			continue;
		tlen = dentry->d_name.len;
		tname = dentry->d_name.name;
		i = dentry->d_inode;
		prefetch(tname);
		/*
		 * This seqcount check is required to ensure name and
		 * len are loaded atomically, so as not to walk off the
		 * edge of memory when walking. If we could load this
		 * atomically some other way, we could drop this check.
		 */
		if (read_seqcount_retry(&dentry->d_seq, *seq))
			goto seqretry;
		if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
			if (parent->d_op->d_compare(parent, *inode,
						dentry, i,
						tlen, tname, name))
				continue;
		} else {
			if (dentry_cmp(tname, tlen, str, len))
				continue;
		}
		/*
		 * No extra seqcount check is required after the name
		 * compare. The caller must perform a seqcount check in
		 * order to do anything useful with the returned dentry
		 * anyway.
		 */
		*inode = i;
		return dentry;
	}
	return NULL;
}

/**
 * d_lookup - search for a dentry
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * d_lookup searches the children of the parent dentry for the name in
 * question. If the dentry is found its reference count is incremented and the
 * dentry is returned. The caller must use dput to free the entry when it has
 * finished using it. %NULL is returned if the dentry does not exist.
 */
struct dentry *d_lookup(struct dentry *parent, struct qstr *name)
{
	struct dentry *dentry;
	unsigned seq;

	do {
		seq = read_seqbegin(&rename_lock);
		dentry = __d_lookup(parent, name);
		if (dentry)
			break;
	} while (read_seqretry(&rename_lock, seq));
	return dentry;
}
EXPORT_SYMBOL(d_lookup);

/**
 * __d_lookup - search for a dentry (racy)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * __d_lookup is like d_lookup, however it may (rarely) return a
 * false-negative result due to unrelated rename activity.
 *
 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
 * however it must be used carefully, eg. with a following d_lookup in
 * the case of failure.
 *
 * __d_lookup callers must be commented.
 */
struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
{
	unsigned int len = name->len;
	unsigned int hash = name->hash;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(parent, hash);
	struct hlist_bl_node *node;
	struct dentry *found = NULL;
	struct dentry *dentry;

	/*
	 * Note: There is significant duplication with __d_lookup_rcu which is
	 * required to prevent single threaded performance regressions
	 * especially on architectures where smp_rmb (in seqcounts) are costly.
	 * Keep the two functions in sync.
	 */

	/*
	 * The hash list is protected using RCU.
	 *
	 * Take d_lock when comparing a candidate dentry, to avoid races
	 * with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/path-lookup.txt for more details.
	 */
	rcu_read_lock();

	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
		const char *tname;
		int tlen;

		if (dentry->d_name.hash != hash)
			continue;

		spin_lock(&dentry->d_lock);
		if (dentry->d_parent != parent)
			goto next;
		if (d_unhashed(dentry))
			goto next;

		/*
		 * It is safe to compare names since d_move() cannot
		 * change the qstr (protected by d_lock).
		 */
		tlen = dentry->d_name.len;
		tname = dentry->d_name.name;
		if (parent->d_flags & DCACHE_OP_COMPARE) {
			if (parent->d_op->d_compare(parent, parent->d_inode,
						dentry, dentry->d_inode,
						tlen, tname, name))
				goto next;
		} else {
			if (dentry_cmp(tname, tlen, str, len))
				goto next;
		}

		dentry->d_count++;
		found = dentry;
		spin_unlock(&dentry->d_lock);
		break;
next:
		spin_unlock(&dentry->d_lock);
	}
	rcu_read_unlock();

	return found;
}
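
/*
 * Example (illustrative, hypothetical helper): a cache-only lookup of one
 * child name. d_lookup() above retries under rename_lock itself, so the
 * caller just hashes the name and balances the returned reference with
 * dput(). This mirrors the qstr setup used by d_alloc_name().
 */
static struct dentry *example_lookup_child(struct dentry *dir, const char *name)
{
	struct qstr q;

	q.name = name;
	q.len = strlen(name);
	q.hash = full_name_hash(q.name, q.len);
	return d_lookup(dir, &q);	/* NULL on a dcache miss */
}
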
Note that we must 1924 * calculate the standard hash first, as the d_op->d_hash() 1925 * routine may choose to leave the hash value unchanged. 1926 */ 1927 name->hash = full_name_hash(name->name, name->len); 1928 if (dir->d_flags & DCACHE_OP_HASH) { 1929 if (dir->d_op->d_hash(dir, dir->d_inode, name) < 0) 1930 goto out; 1931 } 1932 dentry = d_lookup(dir, name); 1933 out: 1934 return dentry; 1935 } 1936 1937 /** 1938 * d_validate - verify dentry provided from insecure source (deprecated) 1939 * @dentry: The dentry alleged to be valid child of @dparent 1940 * @dparent: The parent dentry (known to be valid) 1941 * 1942 * An insecure source has sent us a dentry, here we verify it and dget() it. 1943 * This is used by ncpfs in its readdir implementation. 1944 * Zero is returned in the dentry is invalid. 1945 * 1946 * This function is slow for big directories, and deprecated, do not use it. 1947 */ 1948 int d_validate(struct dentry *dentry, struct dentry *dparent) 1949 { 1950 struct dentry *child; 1951 1952 spin_lock(&dparent->d_lock); 1953 list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) { 1954 if (dentry == child) { 1955 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); 1956 __dget_dlock(dentry); 1957 spin_unlock(&dentry->d_lock); 1958 spin_unlock(&dparent->d_lock); 1959 return 1; 1960 } 1961 } 1962 spin_unlock(&dparent->d_lock); 1963 1964 return 0; 1965 } 1966 EXPORT_SYMBOL(d_validate); 1967 1968 /* 1969 * When a file is deleted, we have two options: 1970 * - turn this dentry into a negative dentry 1971 * - unhash this dentry and free it. 1972 * 1973 * Usually, we want to just turn this into 1974 * a negative dentry, but if anybody else is 1975 * currently using the dentry or the inode 1976 * we can't do that and we fall back on removing 1977 * it from the hash queues and waiting for 1978 * it to be deleted later when it has no users 1979 */ 1980 1981 /** 1982 * d_delete - delete a dentry 1983 * @dentry: The dentry to delete 1984 * 1985 * Turn the dentry into a negative dentry if possible, otherwise 1986 * remove it from the hash queues so it can be deleted later 1987 */ 1988 1989 void d_delete(struct dentry * dentry) 1990 { 1991 struct inode *inode; 1992 int isdir = 0; 1993 /* 1994 * Are we the only user? 1995 */ 1996 again: 1997 spin_lock(&dentry->d_lock); 1998 inode = dentry->d_inode; 1999 isdir = S_ISDIR(inode->i_mode); 2000 if (dentry->d_count == 1) { 2001 if (inode && !spin_trylock(&inode->i_lock)) { 2002 spin_unlock(&dentry->d_lock); 2003 cpu_relax(); 2004 goto again; 2005 } 2006 dentry->d_flags &= ~DCACHE_CANT_MOUNT; 2007 dentry_unlink_inode(dentry); 2008 fsnotify_nameremove(dentry, isdir); 2009 return; 2010 } 2011 2012 if (!d_unhashed(dentry)) 2013 __d_drop(dentry); 2014 2015 spin_unlock(&dentry->d_lock); 2016 2017 fsnotify_nameremove(dentry, isdir); 2018 } 2019 EXPORT_SYMBOL(d_delete); 2020 2021 static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b) 2022 { 2023 BUG_ON(!d_unhashed(entry)); 2024 hlist_bl_lock(b); 2025 entry->d_flags |= DCACHE_RCUACCESS; 2026 hlist_bl_add_head_rcu(&entry->d_hash, b); 2027 hlist_bl_unlock(b); 2028 } 2029 2030 static void _d_rehash(struct dentry * entry) 2031 { 2032 __d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash)); 2033 } 2034 2035 /** 2036 * d_rehash - add an entry back to the hash 2037 * @entry: dentry to add to the hash 2038 * 2039 * Adds a dentry to the hash according to its name. 
/**
 * d_validate - verify dentry provided from insecure source (deprecated)
 * @dentry: The dentry alleged to be valid child of @dparent
 * @dparent: The parent dentry (known to be valid)
 *
 * An insecure source has sent us a dentry, here we verify it and dget() it.
 * This is used by ncpfs in its readdir implementation.
 * Zero is returned if the dentry is invalid.
 *
 * This function is slow for big directories, and deprecated, do not use it.
 */
int d_validate(struct dentry *dentry, struct dentry *dparent)
{
        struct dentry *child;

        spin_lock(&dparent->d_lock);
        list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
                if (dentry == child) {
                        spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
                        __dget_dlock(dentry);
                        spin_unlock(&dentry->d_lock);
                        spin_unlock(&dparent->d_lock);
                        return 1;
                }
        }
        spin_unlock(&dparent->d_lock);

        return 0;
}
EXPORT_SYMBOL(d_validate);

/*
 * When a file is deleted, we have two options:
 * - turn this dentry into a negative dentry
 * - unhash this dentry and free it.
 *
 * Usually, we want to just turn this into
 * a negative dentry, but if anybody else is
 * currently using the dentry or the inode
 * we can't do that and we fall back on removing
 * it from the hash queues and waiting for
 * it to be deleted later when it has no users
 */

/**
 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * Turn the dentry into a negative dentry if possible, otherwise
 * remove it from the hash queues so it can be deleted later
 */

void d_delete(struct dentry * dentry)
{
        struct inode *inode;
        int isdir = 0;
        /*
         * Are we the only user?
         */
again:
        spin_lock(&dentry->d_lock);
        inode = dentry->d_inode;
        isdir = S_ISDIR(inode->i_mode);
        if (dentry->d_count == 1) {
                if (inode && !spin_trylock(&inode->i_lock)) {
                        spin_unlock(&dentry->d_lock);
                        cpu_relax();
                        goto again;
                }
                dentry->d_flags &= ~DCACHE_CANT_MOUNT;
                dentry_unlink_inode(dentry);
                fsnotify_nameremove(dentry, isdir);
                return;
        }

        if (!d_unhashed(dentry))
                __d_drop(dentry);

        spin_unlock(&dentry->d_lock);

        fsnotify_nameremove(dentry, isdir);
}
EXPORT_SYMBOL(d_delete);
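/*
 * Illustrative sketch (not part of the original file), simplified from the
 * vfs_unlink() pattern: once the filesystem's ->unlink() has succeeded,
 * the VFS tells the dcache, and d_delete() decides between turning the
 * dentry negative and unhashing it.
 */
static inline void example_unlink_done(struct dentry *dentry, int error)
{
        if (!error)
                d_delete(dentry);
}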
static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
{
        BUG_ON(!d_unhashed(entry));
        hlist_bl_lock(b);
        entry->d_flags |= DCACHE_RCUACCESS;
        hlist_bl_add_head_rcu(&entry->d_hash, b);
        hlist_bl_unlock(b);
}

static void _d_rehash(struct dentry * entry)
{
        __d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
}

/**
 * d_rehash - add an entry back to the hash
 * @entry: dentry to add to the hash
 *
 * Adds a dentry to the hash according to its name.
 */

void d_rehash(struct dentry * entry)
{
        spin_lock(&entry->d_lock);
        _d_rehash(entry);
        spin_unlock(&entry->d_lock);
}
EXPORT_SYMBOL(d_rehash);

/**
 * dentry_update_name_case - update case insensitive dentry with a new name
 * @dentry: dentry to be updated
 * @name: new name
 *
 * Update a case insensitive dentry with new case of name.
 *
 * dentry must have been returned by d_lookup with name @name. Old and new
 * name lengths must match (i.e. no d_compare which allows mismatched name
 * lengths).
 *
 * Parent inode i_mutex must be held over d_lookup and into this call (to
 * keep renames and concurrent inserts, and readdir(2) away).
 */
void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
{
        BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
        BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */

        spin_lock(&dentry->d_lock);
        write_seqcount_begin(&dentry->d_seq);
        memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
        write_seqcount_end(&dentry->d_seq);
        spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(dentry_update_name_case);
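/*
 * Illustrative sketch (not part of the original file): how a
 * case-preserving, case-insensitive filesystem might fold the on-disk
 * case into the dcache. The parent's i_mutex is held across the lookup
 * and the update, as required above; this assumes the filesystem's
 * d_compare() only matches names of equal length.
 */
static inline void example_fold_name_case(struct dentry *dir,
                                          struct qstr *disk_name)
{
        struct dentry *found;

        mutex_lock(&dir->d_inode->i_mutex);
        found = d_hash_and_lookup(dir, disk_name);
        if (found) {
                dentry_update_name_case(found, disk_name);
                dput(found);
        }
        mutex_unlock(&dir->d_inode->i_mutex);
}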
static void switch_names(struct dentry *dentry, struct dentry *target)
{
        if (dname_external(target)) {
                if (dname_external(dentry)) {
                        /*
                         * Both external: swap the pointers
                         */
                        swap(target->d_name.name, dentry->d_name.name);
                } else {
                        /*
                         * dentry:internal, target:external. Steal target's
                         * storage and make target internal.
                         */
                        memcpy(target->d_iname, dentry->d_name.name,
                                        dentry->d_name.len + 1);
                        dentry->d_name.name = target->d_name.name;
                        target->d_name.name = target->d_iname;
                }
        } else {
                if (dname_external(dentry)) {
                        /*
                         * dentry:external, target:internal. Give dentry's
                         * storage to target and make dentry internal
                         */
                        memcpy(dentry->d_iname, target->d_name.name,
                                        target->d_name.len + 1);
                        target->d_name.name = dentry->d_name.name;
                        dentry->d_name.name = dentry->d_iname;
                } else {
                        /*
                         * Both are internal. Just copy target to dentry
                         */
                        memcpy(dentry->d_iname, target->d_name.name,
                                        target->d_name.len + 1);
                        dentry->d_name.len = target->d_name.len;
                        return;
                }
        }
        swap(dentry->d_name.len, target->d_name.len);
}

static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
{
        /*
         * XXXX: do we really need to take target->d_lock?
         */
        if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
                spin_lock(&target->d_parent->d_lock);
        else {
                if (d_ancestor(dentry->d_parent, target->d_parent)) {
                        spin_lock(&dentry->d_parent->d_lock);
                        spin_lock_nested(&target->d_parent->d_lock,
                                                DENTRY_D_LOCK_NESTED);
                } else {
                        spin_lock(&target->d_parent->d_lock);
                        spin_lock_nested(&dentry->d_parent->d_lock,
                                                DENTRY_D_LOCK_NESTED);
                }
        }
        if (target < dentry) {
                spin_lock_nested(&target->d_lock, 2);
                spin_lock_nested(&dentry->d_lock, 3);
        } else {
                spin_lock_nested(&dentry->d_lock, 2);
                spin_lock_nested(&target->d_lock, 3);
        }
}

static void dentry_unlock_parents_for_move(struct dentry *dentry,
                                        struct dentry *target)
{
        if (target->d_parent != dentry->d_parent)
                spin_unlock(&dentry->d_parent->d_lock);
        if (target->d_parent != target)
                spin_unlock(&target->d_parent->d_lock);
}
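/*
 * Illustrative sketch (not part of the original file): the pointer
 * comparison in dentry_lock_for_move() generalizes to any two dentries
 * with no ancestor relationship -- lock the lower address first, so that
 * concurrent lockers agree on the order and cannot deadlock.
 */
static inline void example_lock_dentry_pair(struct dentry *d1,
                                            struct dentry *d2)
{
        if (d1 < d2) {
                spin_lock(&d1->d_lock);
                spin_lock_nested(&d2->d_lock, DENTRY_D_LOCK_NESTED);
        } else {
                spin_lock(&d2->d_lock);
                spin_lock_nested(&d1->d_lock, DENTRY_D_LOCK_NESTED);
        }
}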
/*
 * When switching names, the actual string doesn't strictly have to
 * be preserved in the target - because we're dropping the target
 * anyway. As such, we can just do a simple memcpy() to copy over
 * the new name before we switch.
 *
 * Note that we have to be a lot more careful about getting the hash
 * switched - we have to switch the hash value properly even if it
 * then no longer matches the actual (corrupted) string of the target.
 * The hash value has to match the hash queue that the dentry is on..
 */
/*
 * __d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way. Caller must hold
 * rename_lock, the i_mutex of the source and target directories,
 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
 */
static void __d_move(struct dentry * dentry, struct dentry * target)
{
        if (!dentry->d_inode)
                printk(KERN_WARNING "VFS: moving negative dcache entry\n");

        BUG_ON(d_ancestor(dentry, target));
        BUG_ON(d_ancestor(target, dentry));

        dentry_lock_for_move(dentry, target);

        write_seqcount_begin(&dentry->d_seq);
        write_seqcount_begin(&target->d_seq);

        /* __d_drop does write_seqcount_barrier, but they're OK to nest. */

        /*
         * Move the dentry to the target hash queue. Don't bother checking
         * for the same hash queue because of how unlikely it is.
         */
        __d_drop(dentry);
        __d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));

        /* Unhash the target: dput() will then get rid of it */
        __d_drop(target);

        list_del(&dentry->d_u.d_child);
        list_del(&target->d_u.d_child);

        /* Switch the names.. */
        switch_names(dentry, target);
        swap(dentry->d_name.hash, target->d_name.hash);

        /* ... and switch the parents */
        if (IS_ROOT(dentry)) {
                dentry->d_parent = target->d_parent;
                target->d_parent = target;
                INIT_LIST_HEAD(&target->d_u.d_child);
        } else {
                swap(dentry->d_parent, target->d_parent);

                /* And add them back to the (new) parent lists */
                list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
        }

        list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);

        write_seqcount_end(&target->d_seq);
        write_seqcount_end(&dentry->d_seq);

        dentry_unlock_parents_for_move(dentry, target);
        spin_unlock(&target->d_lock);
        fsnotify_d_move(dentry);
        spin_unlock(&dentry->d_lock);
}

/*
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way. See the locking
 * requirements for __d_move.
 */
void d_move(struct dentry *dentry, struct dentry *target)
{
        write_seqlock(&rename_lock);
        __d_move(dentry, target);
        write_sequnlock(&rename_lock);
}
EXPORT_SYMBOL(d_move);
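/*
 * Illustrative sketch (not part of the original file), simplified from the
 * vfs_rename() pattern: after the filesystem's ->rename() succeeds, the
 * source dentry is moved over the target, which dput() then disposes of.
 */
static inline void example_rename_done(struct dentry *old_dentry,
                                       struct dentry *new_dentry, int error)
{
        if (!error)
                d_move(old_dentry, new_dentry);
}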
/**
 * d_ancestor - search for an ancestor
 * @p1: ancestor dentry
 * @p2: child dentry
 *
 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
 * an ancestor of p2, else NULL.
 */
struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
{
        struct dentry *p;

        for (p = p2; !IS_ROOT(p); p = p->d_parent) {
                if (p->d_parent == p1)
                        return p;
        }
        return NULL;
}

/*
 * This helper attempts to cope with remotely renamed directories
 *
 * It assumes that the caller is already holding
 * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock
 *
 * Note: If ever the locking in lock_rename() changes, then please
 * remember to update this too...
 */
static struct dentry *__d_unalias(struct inode *inode,
                struct dentry *dentry, struct dentry *alias)
{
        struct mutex *m1 = NULL, *m2 = NULL;
        struct dentry *ret;

        /* If alias and dentry share a parent, then no extra locks required */
        if (alias->d_parent == dentry->d_parent)
                goto out_unalias;

        /* See lock_rename() */
        ret = ERR_PTR(-EBUSY);
        if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
                goto out_err;
        m1 = &dentry->d_sb->s_vfs_rename_mutex;
        if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
                goto out_err;
        m2 = &alias->d_parent->d_inode->i_mutex;
out_unalias:
        __d_move(alias, dentry);
        ret = alias;
out_err:
        spin_unlock(&inode->i_lock);
        if (m2)
                mutex_unlock(m2);
        if (m1)
                mutex_unlock(m1);
        return ret;
}

/*
 * Prepare an anonymous dentry for life in the superblock's dentry tree as a
 * named dentry in place of the dentry to be replaced.
 * returns with anon->d_lock held!
 */
static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
{
        struct dentry *dparent, *aparent;

        dentry_lock_for_move(anon, dentry);

        write_seqcount_begin(&dentry->d_seq);
        write_seqcount_begin(&anon->d_seq);

        dparent = dentry->d_parent;
        aparent = anon->d_parent;

        switch_names(dentry, anon);
        swap(dentry->d_name.hash, anon->d_name.hash);

        dentry->d_parent = (aparent == anon) ? dentry : aparent;
        list_del(&dentry->d_u.d_child);
        if (!IS_ROOT(dentry))
                list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
        else
                INIT_LIST_HEAD(&dentry->d_u.d_child);

        anon->d_parent = (dparent == dentry) ? anon : dparent;
        list_del(&anon->d_u.d_child);
        if (!IS_ROOT(anon))
                list_add(&anon->d_u.d_child, &anon->d_parent->d_subdirs);
        else
                INIT_LIST_HEAD(&anon->d_u.d_child);

        write_seqcount_end(&dentry->d_seq);
        write_seqcount_end(&anon->d_seq);

        dentry_unlock_parents_for_move(anon, dentry);
        spin_unlock(&dentry->d_lock);

        /* anon->d_lock still locked, returns locked */
        anon->d_flags &= ~DCACHE_DISCONNECTED;
}
/**
 * d_materialise_unique - introduce an inode into the tree
 * @dentry: candidate dentry
 * @inode: inode to bind to the dentry, to which aliases may be attached
 *
 * Introduces a dentry into the tree, substituting an extant disconnected
 * root directory alias in its place if there is one. Caller must hold the
 * i_mutex of the parent directory.
 */
struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
{
        struct dentry *actual;

        BUG_ON(!d_unhashed(dentry));

        if (!inode) {
                actual = dentry;
                __d_instantiate(dentry, NULL);
                d_rehash(actual);
                goto out_nolock;
        }

        spin_lock(&inode->i_lock);

        if (S_ISDIR(inode->i_mode)) {
                struct dentry *alias;

                /* Does an aliased dentry already exist? */
                alias = __d_find_alias(inode, 0);
                if (alias) {
                        actual = alias;
                        write_seqlock(&rename_lock);

                        if (d_ancestor(alias, dentry)) {
                                /* Check for loops */
                                actual = ERR_PTR(-ELOOP);
                        } else if (IS_ROOT(alias)) {
                                /* Is this an anonymous mountpoint that we
                                 * could splice into our tree? */
                                __d_materialise_dentry(dentry, alias);
                                write_sequnlock(&rename_lock);
                                __d_drop(alias);
                                goto found;
                        } else {
                                /* Nope, but we must(!) avoid directory
                                 * aliasing */
                                actual = __d_unalias(inode, dentry, alias);
                        }
                        write_sequnlock(&rename_lock);
                        if (IS_ERR(actual)) {
                                if (PTR_ERR(actual) == -ELOOP)
                                        pr_warn_ratelimited(
                                                "VFS: Lookup of '%s' in %s %s"
                                                " would have caused loop\n",
                                                dentry->d_name.name,
                                                inode->i_sb->s_type->name,
                                                inode->i_sb->s_id);
                                dput(alias);
                        }
                        goto out_nolock;
                }
        }

        /* Add a unique reference */
        actual = __d_instantiate_unique(dentry, inode);
        if (!actual)
                actual = dentry;
        else
                BUG_ON(!d_unhashed(actual));

        spin_lock(&actual->d_lock);
found:
        _d_rehash(actual);
        spin_unlock(&actual->d_lock);
        spin_unlock(&inode->i_lock);
out_nolock:
        if (actual == dentry) {
                security_d_instantiate(dentry, inode);
                return NULL;
        }

        iput(inode);
        return actual;
}
EXPORT_SYMBOL_GPL(d_materialise_unique);
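/*
 * Illustrative sketch (not part of the original file): the typical call
 * site in a network or exporting filesystem's ->lookup(), where a
 * directory inode may already have a disconnected alias left over from an
 * earlier filehandle lookup. example_fs_iget() is hypothetical.
 */
static inline struct inode *example_fs_iget(struct super_block *sb,
                                            const struct qstr *name)
{
        return ERR_PTR(-EOPNOTSUPP);    /* stands in for real fs code */
}

static inline struct dentry *example_fs_lookup(struct inode *dir,
                                               struct dentry *dentry,
                                               struct nameidata *nd)
{
        struct inode *inode;

        inode = example_fs_iget(dir->i_sb, &dentry->d_name);
        if (IS_ERR(inode))
                return ERR_CAST(inode);

        /* Splice a preexisting alias in place of @dentry if one exists */
        return d_materialise_unique(dentry, inode);
}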
static int prepend(char **buffer, int *buflen, const char *str, int namelen)
{
        *buflen -= namelen;
        if (*buflen < 0)
                return -ENAMETOOLONG;
        *buffer -= namelen;
        memcpy(*buffer, str, namelen);
        return 0;
}

static int prepend_name(char **buffer, int *buflen, struct qstr *name)
{
        return prepend(buffer, buflen, name->name, name->len);
}
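/*
 * Illustrative sketch (not part of the original file): prepend() fills the
 * buffer from the end, so a caller starts one past the end and works
 * backwards; prepending "b", "/", "a" after the terminator yields "a/b".
 */
static inline char *example_prepend_usage(char *buf, int buflen)
{
        char *end = buf + buflen;

        if (prepend(&end, &buflen, "\0", 1) ||
            prepend(&end, &buflen, "b", 1) ||
            prepend(&end, &buflen, "/", 1) ||
            prepend(&end, &buflen, "a", 1))
                return ERR_PTR(-ENAMETOOLONG);
        return end;     /* now points at "a/b" inside buf */
}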
/**
 * prepend_path - Prepend path string to a buffer
 * @path: the dentry/vfsmount to report
 * @root: root vfsmnt/dentry
 * @buffer: pointer to the end of the buffer
 * @buflen: pointer to buffer length
 *
 * Caller holds the rename_lock.
 */
static int prepend_path(const struct path *path,
                        const struct path *root,
                        char **buffer, int *buflen)
{
        struct dentry *dentry = path->dentry;
        struct vfsmount *vfsmnt = path->mnt;
        struct mount *mnt = real_mount(vfsmnt);
        bool slash = false;
        int error = 0;

        br_read_lock(vfsmount_lock);
        while (dentry != root->dentry || vfsmnt != root->mnt) {
                struct dentry * parent;

                if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
                        /* Global root? */
                        if (!mnt_has_parent(mnt))
                                goto global_root;
                        dentry = mnt->mnt_mountpoint;
                        mnt = mnt->mnt_parent;
                        vfsmnt = &mnt->mnt;
                        continue;
                }
                parent = dentry->d_parent;
                prefetch(parent);
                spin_lock(&dentry->d_lock);
                error = prepend_name(buffer, buflen, &dentry->d_name);
                spin_unlock(&dentry->d_lock);
                if (!error)
                        error = prepend(buffer, buflen, "/", 1);
                if (error)
                        break;

                slash = true;
                dentry = parent;
        }

        if (!error && !slash)
                error = prepend(buffer, buflen, "/", 1);

out:
        br_read_unlock(vfsmount_lock);
        return error;

global_root:
        /*
         * Filesystems needing to implement special "root names"
         * should do so with ->d_dname()
         */
        if (IS_ROOT(dentry) &&
            (dentry->d_name.len != 1 || dentry->d_name.name[0] != '/')) {
                WARN(1, "Root dentry has weird name <%.*s>\n",
                     (int) dentry->d_name.len, dentry->d_name.name);
        }
        if (!slash)
                error = prepend(buffer, buflen, "/", 1);
        if (!error)
                error = real_mount(vfsmnt)->mnt_ns ? 1 : 2;
        goto out;
}

/**
 * __d_path - return the path of a dentry
 * @path: the dentry/vfsmount to report
 * @root: root vfsmnt/dentry
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name.
 *
 * Returns a pointer into the buffer or an error code if the
 * path was too long.
 *
 * "buflen" should be positive.
 *
 * If the path is not reachable from the supplied root, return %NULL.
 */
char *__d_path(const struct path *path,
               const struct path *root,
               char *buf, int buflen)
{
        char *res = buf + buflen;
        int error;

        prepend(&res, &buflen, "\0", 1);
        write_seqlock(&rename_lock);
        error = prepend_path(path, root, &res, &buflen);
        write_sequnlock(&rename_lock);

        if (error < 0)
                return ERR_PTR(error);
        if (error > 0)
                return NULL;
        return res;
}

char *d_absolute_path(const struct path *path,
                      char *buf, int buflen)
{
        struct path root = {};
        char *res = buf + buflen;
        int error;

        prepend(&res, &buflen, "\0", 1);
        write_seqlock(&rename_lock);
        error = prepend_path(path, &root, &res, &buflen);
        write_sequnlock(&rename_lock);

        if (error > 1)
                error = -EINVAL;
        if (error < 0)
                return ERR_PTR(error);
        return res;
}

/*
 * same as __d_path but appends "(deleted)" for unlinked files.
 */
static int path_with_deleted(const struct path *path,
                             const struct path *root,
                             char **buf, int *buflen)
{
        prepend(buf, buflen, "\0", 1);
        if (d_unlinked(path->dentry)) {
                int error = prepend(buf, buflen, " (deleted)", 10);
                if (error)
                        return error;
        }

        return prepend_path(path, root, buf, buflen);
}

static int prepend_unreachable(char **buffer, int *buflen)
{
        return prepend(buffer, buflen, "(unreachable)", 13);
}

/**
 * d_path - return the path of a dentry
 * @path: path to report
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name. If the entry has been deleted
 * the string " (deleted)" is appended. Note that this is ambiguous.
 *
 * Returns a pointer into the buffer or an error code if the path was
 * too long. Note: Callers should use the returned pointer, not the passed
 * in buffer, to use the name! The implementation often starts at an offset
 * into the buffer, and may leave 0 bytes at the start.
 *
 * "buflen" should be positive.
 */
char *d_path(const struct path *path, char *buf, int buflen)
{
        char *res = buf + buflen;
        struct path root;
        int error;

        /*
         * We have various synthetic filesystems that never get mounted. On
         * these filesystems dentries are never used for lookup purposes, and
         * thus don't need to be hashed. They also don't need a name until a
         * user wants to identify the object in /proc/pid/fd/. The little hack
         * below allows us to generate a name for these objects on demand:
         */
        if (path->dentry->d_op && path->dentry->d_op->d_dname)
                return path->dentry->d_op->d_dname(path->dentry, buf, buflen);

        get_fs_root(current->fs, &root);
        write_seqlock(&rename_lock);
        error = path_with_deleted(path, &root, &res, &buflen);
        if (error < 0)
                res = ERR_PTR(error);
        write_sequnlock(&rename_lock);
        path_put(&root);
        return res;
}
EXPORT_SYMBOL(d_path);
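/*
 * Illustrative sketch (not part of the original file): the canonical
 * d_path() calling pattern. The name is assembled at the end of the
 * buffer, so the returned pointer -- not the buffer start -- must be used.
 */
static inline void example_print_path(const struct path *path)
{
        char *page = (char *)__get_free_page(GFP_KERNEL);
        char *p;

        if (!page)
                return;
        p = d_path(path, page, PAGE_SIZE);
        if (!IS_ERR(p))
                printk(KERN_INFO "path: %s\n", p);
        free_page((unsigned long)page);
}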
/**
 * d_path_with_unreachable - return the path of a dentry
 * @path: path to report
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * The difference from d_path() is that this prepends "(unreachable)"
 * to paths which are unreachable from the current process' root.
 */
char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
{
        char *res = buf + buflen;
        struct path root;
        int error;

        if (path->dentry->d_op && path->dentry->d_op->d_dname)
                return path->dentry->d_op->d_dname(path->dentry, buf, buflen);

        get_fs_root(current->fs, &root);
        write_seqlock(&rename_lock);
        error = path_with_deleted(path, &root, &res, &buflen);
        if (error > 0)
                error = prepend_unreachable(&res, &buflen);
        write_sequnlock(&rename_lock);
        path_put(&root);
        if (error)
                res = ERR_PTR(error);

        return res;
}

/*
 * Helper function for dentry_operations.d_dname() members
 */
char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
                    const char *fmt, ...)
{
        va_list args;
        char temp[64];
        int sz;

        va_start(args, fmt);
        sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
        va_end(args);

        if (sz > sizeof(temp) || sz > buflen)
                return ERR_PTR(-ENAMETOOLONG);

        buffer += buflen - sz;
        return memcpy(buffer, temp, sz);
}
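/*
 * Illustrative sketch (not part of the original file): a typical
 * ->d_dname() implementation for a synthetic filesystem, in the style of
 * pipefs' "pipe:[ino]" names, built on dynamic_dname().
 */
static inline char *example_dname(struct dentry *dentry, char *buffer,
                                  int buflen)
{
        return dynamic_dname(dentry, buffer, buflen, "example:[%lu]",
                        dentry->d_inode->i_ino);
}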
/*
 * Write full pathname from the root of the filesystem into the buffer.
 */
static char *__dentry_path(struct dentry *dentry, char *buf, int buflen)
{
        char *end = buf + buflen;
        char *retval;

        prepend(&end, &buflen, "\0", 1);
        if (buflen < 1)
                goto Elong;
        /* Get '/' right */
        retval = end-1;
        *retval = '/';

        while (!IS_ROOT(dentry)) {
                struct dentry *parent = dentry->d_parent;
                int error;

                prefetch(parent);
                spin_lock(&dentry->d_lock);
                error = prepend_name(&end, &buflen, &dentry->d_name);
                spin_unlock(&dentry->d_lock);
                if (error != 0 || prepend(&end, &buflen, "/", 1) != 0)
                        goto Elong;

                retval = end;
                dentry = parent;
        }
        return retval;
Elong:
        return ERR_PTR(-ENAMETOOLONG);
}

char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
{
        char *retval;

        write_seqlock(&rename_lock);
        retval = __dentry_path(dentry, buf, buflen);
        write_sequnlock(&rename_lock);

        return retval;
}
EXPORT_SYMBOL(dentry_path_raw);

char *dentry_path(struct dentry *dentry, char *buf, int buflen)
{
        char *p = NULL;
        char *retval;

        write_seqlock(&rename_lock);
        if (d_unlinked(dentry)) {
                p = buf + buflen;
                if (prepend(&p, &buflen, "//deleted", 10) != 0)
                        goto Elong;
                buflen++;
        }
        retval = __dentry_path(dentry, buf, buflen);
        write_sequnlock(&rename_lock);
        if (!IS_ERR(retval) && p)
                *p = '/';       /* restore '/' overridden with '\0' */
        return retval;
Elong:
        write_sequnlock(&rename_lock);
        return ERR_PTR(-ENAMETOOLONG);
}

/*
 * NOTE! The user-level library version returns a
 * character pointer. The kernel system call just
 * returns the length of the buffer filled (which
 * includes the ending '\0' character), or a negative
 * error value. So libc would do something like
 *
 *      char *getcwd(char * buf, size_t size)
 *      {
 *              int retval;
 *
 *              retval = sys_getcwd(buf, size);
 *              if (retval >= 0)
 *                      return buf;
 *              errno = -retval;
 *              return NULL;
 *      }
 */
SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
{
        int error;
        struct path pwd, root;
        char *page = (char *) __get_free_page(GFP_USER);

        if (!page)
                return -ENOMEM;

        get_fs_root_and_pwd(current->fs, &root, &pwd);

        error = -ENOENT;
        write_seqlock(&rename_lock);
        if (!d_unlinked(pwd.dentry)) {
                unsigned long len;
                char *cwd = page + PAGE_SIZE;
                int buflen = PAGE_SIZE;

                prepend(&cwd, &buflen, "\0", 1);
                error = prepend_path(&pwd, &root, &cwd, &buflen);
                write_sequnlock(&rename_lock);

                if (error < 0)
                        goto out;

                /* Unreachable from current root */
                if (error > 0) {
                        error = prepend_unreachable(&cwd, &buflen);
                        if (error)
                                goto out;
                }

                error = -ERANGE;
                len = PAGE_SIZE + page - cwd;
                if (len <= size) {
                        error = len;
                        if (copy_to_user(buf, cwd, len))
                                error = -EFAULT;
                }
        } else {
                write_sequnlock(&rename_lock);
        }

out:
        path_put(&pwd);
        path_put(&root);
        free_page((unsigned long) page);
        return error;
}

/*
 * Test whether new_dentry is a subdirectory of old_dentry.
 *
 * Trivially implemented using the dcache structure
 */

/**
 * is_subdir - is new dentry a subdirectory of old_dentry
 * @new_dentry: new dentry
 * @old_dentry: old dentry
 *
 * Returns 1 if new_dentry is a subdirectory of the parent (at any depth).
 * Returns 0 otherwise.
 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
 */

int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
{
        int result;
        unsigned seq;

        if (new_dentry == old_dentry)
                return 1;

        do {
                /* for restarting inner loop in case of seq retry */
                seq = read_seqbegin(&rename_lock);
                /*
                 * Need rcu_read_lock() to protect against d_parent
                 * changing under us due to d_move()
                 */
                rcu_read_lock();
                if (d_ancestor(old_dentry, new_dentry))
                        result = 1;
                else
                        result = 0;
                rcu_read_unlock();
        } while (read_seqretry(&rename_lock, seq));

        return result;
}

void d_genocide(struct dentry *root)
{
        struct dentry *this_parent;
        struct list_head *next;
        unsigned seq;
        int locked = 0;

        seq = read_seqbegin(&rename_lock);
again:
        this_parent = root;
        spin_lock(&this_parent->d_lock);
repeat:
        next = this_parent->d_subdirs.next;
resume:
        while (next != &this_parent->d_subdirs) {
                struct list_head *tmp = next;
                struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
                next = tmp->next;

                spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
                if (d_unhashed(dentry) || !dentry->d_inode) {
                        spin_unlock(&dentry->d_lock);
                        continue;
                }
                if (!list_empty(&dentry->d_subdirs)) {
                        spin_unlock(&this_parent->d_lock);
                        spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
                        this_parent = dentry;
                        spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
                        goto repeat;
                }
                if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
                        dentry->d_flags |= DCACHE_GENOCIDE;
                        dentry->d_count--;
                }
                spin_unlock(&dentry->d_lock);
        }
        if (this_parent != root) {
                struct dentry *child = this_parent;
                if (!(this_parent->d_flags & DCACHE_GENOCIDE)) {
                        this_parent->d_flags |= DCACHE_GENOCIDE;
                        this_parent->d_count--;
                }
                this_parent = try_to_ascend(this_parent, locked, seq);
                if (!this_parent)
                        goto rename_retry;
                next = child->d_u.d_child.next;
                goto resume;
        }
        spin_unlock(&this_parent->d_lock);
        if (!locked && read_seqretry(&rename_lock, seq))
                goto rename_retry;
        if (locked)
                write_sequnlock(&rename_lock);
        return;

rename_retry:
        locked = 1;
        write_seqlock(&rename_lock);
        goto again;
}

/**
 * find_inode_number - check for dentry with name
 * @dir: directory to check
 * @name: Name to find.
 *
 * Check whether a dentry already exists for the given name,
 * and return the inode number if it has an inode. Otherwise
 * 0 is returned.
 *
 * This routine is used to post-process directory listings for
 * filesystems using synthetic inode numbers, and is necessary
 * to keep getcwd() working.
 */

ino_t find_inode_number(struct dentry *dir, struct qstr *name)
{
        struct dentry * dentry;
        ino_t ino = 0;

        dentry = d_hash_and_lookup(dir, name);
        if (dentry) {
                if (dentry->d_inode)
                        ino = dentry->d_inode->i_ino;
                dput(dentry);
        }
        return ino;
}
EXPORT_SYMBOL(find_inode_number);

static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)
{
        if (!str)
                return 0;
        dhash_entries = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("dhash_entries=", set_dhash_entries);

static void __init dcache_init_early(void)
{
        int loop;

        /* If hashes are distributed across NUMA nodes, defer
         * hash allocation until vmalloc space is available.
         */
        if (hashdist)
                return;

        dentry_hashtable =
                alloc_large_system_hash("Dentry cache",
                                        sizeof(struct hlist_bl_head),
                                        dhash_entries,
                                        13,
                                        HASH_EARLY,
                                        &d_hash_shift,
                                        &d_hash_mask,
                                        0);

        for (loop = 0; loop < (1 << d_hash_shift); loop++)
                INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
}

static void __init dcache_init(void)
{
        int loop;

        /*
         * A constructor could be added for stable state like the lists,
         * but it is probably not worth it because of the cache nature
         * of the dcache.
         */
        dentry_cache = KMEM_CACHE(dentry,
                SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);

        /* Hash may have been set up in dcache_init_early */
        if (!hashdist)
                return;

        dentry_hashtable =
                alloc_large_system_hash("Dentry cache",
                                        sizeof(struct hlist_bl_head),
                                        dhash_entries,
                                        13,
                                        0,
                                        &d_hash_shift,
                                        &d_hash_mask,
                                        0);

        for (loop = 0; loop < (1 << d_hash_shift); loop++)
                INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
}

/* SLAB cache for __getname() consumers */
struct kmem_cache *names_cachep __read_mostly;
EXPORT_SYMBOL(names_cachep);

EXPORT_SYMBOL(d_genocide);

void __init vfs_caches_init_early(void)
{
        dcache_init_early();
        inode_init_early();
}

void __init vfs_caches_init(unsigned long mempages)
{
        unsigned long reserve;

        /* Base hash sizes on available memory, with a reserve equal to
           150% of current kernel size */

        reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
        mempages -= reserve;

        names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

        dcache_init();
        inode_init();
        files_init(mempages);
        mnt_init();
        bdev_cache_init();
        chrdev_init();
}
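/*
 * Illustrative note (not part of the original file): the automatic sizing
 * done by alloc_large_system_hash() above can be overridden from the boot
 * command line via the "dhash_entries=" parameter parsed earlier, e.g.
 *
 *      dhash_entries=65536
 *
 * which requests a dentry hash table of 65536 buckets (the allocator
 * rounds the figure to a power of two).
 */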