/*
 * fs/dcache.c
 *
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 */

/*
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */

#include <linux/syscalls.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <asm/uaccess.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/fs_struct.h>
#include <linux/hardirq.h>
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include <linux/prefetch.h>
#include "internal.h"

/*
 * Usage:
 * dcache->d_inode->i_lock protects:
 *   - i_dentry, d_alias, d_inode of aliases
 * dcache_hash_bucket lock protects:
 *   - the dcache hash table
 * s_anon bl list spinlock protects:
 *   - the s_anon list (see __d_drop)
 * dcache_lru_lock protects:
 *   - the dcache lru lists and counters
 * d_lock protects:
 *   - d_flags
 *   - d_name
 *   - d_lru
 *   - d_count
 *   - d_unhashed()
 *   - d_parent and d_subdirs
 *   - children's d_child and d_parent
 *   - d_alias, d_inode
 *
 * Ordering:
 * dentry->d_inode->i_lock
 *   dentry->d_lock
 *     dcache_lru_lock
 *     dcache_hash_bucket lock
 *     s_anon lock
 *
 * If there is an ancestor relationship:
 * dentry->d_parent->...->d_parent->d_lock
 *   ...
 *     dentry->d_parent->d_lock
 *       dentry->d_lock
 *
 * If no ancestor relationship:
 * if (dentry1 < dentry2)
 *   dentry1->d_lock
 *     dentry2->d_lock
 */
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock);
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(rename_lock);

static struct kmem_cache *dentry_cache __read_mostly;

/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.
 */
#define D_HASHBITS	d_hash_shift
#define D_HASHMASK	d_hash_mask

static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;

static struct hlist_bl_head *dentry_hashtable __read_mostly;

static inline struct hlist_bl_head *d_hash(struct dentry *parent,
					unsigned long hash)
{
	hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
	hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
	return dentry_hashtable + (hash & D_HASHMASK);
}

/*
 * Statistics gathering.
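 *
 * Dentry counts live in a per-cpu counter (nr_dentry below); they are
 * only summed up, via get_nr_dentry(), when userspace reads
 * /proc/sys/fs/dentry-state.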
 */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

static DEFINE_PER_CPU(unsigned int, nr_dentry);

#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
static int get_nr_dentry(void)
{
	int i;
	int sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry, i);
	return sum < 0 ? 0 : sum;
}

int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
		   size_t *lenp, loff_t *ppos)
{
	dentry_stat.nr_dentry = get_nr_dentry();
	return proc_dointvec(table, write, buffer, lenp, ppos);
}
#endif

static void __d_free(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	WARN_ON(!list_empty(&dentry->d_alias));
	if (dname_external(dentry))
		kfree(dentry->d_name.name);
	kmem_cache_free(dentry_cache, dentry);
}

/*
 * no locks, please.
 */
static void d_free(struct dentry *dentry)
{
	BUG_ON(dentry->d_count);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	/* if dentry was never visible to RCU, immediate free is OK */
	if (!(dentry->d_flags & DCACHE_RCUACCESS))
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}

/**
 * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
 * @dentry: the target dentry
 * After this call, in-progress rcu-walk path lookup will fail. This
 * should be called after unhashing, and after changing d_inode (if
 * the dentry has not already been unhashed).
 */
static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
{
	assert_spin_locked(&dentry->d_lock);
	/* Go through a barrier */
	write_seqcount_barrier(&dentry->d_seq);
}

/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined. Dentry has no refcount
 * and is unhashed.
 */
static void dentry_iput(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	if (inode) {
		dentry->d_inode = NULL;
		list_del_init(&dentry->d_alias);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&inode->i_lock);
		if (!inode->i_nlink)
			fsnotify_inoderemove(inode);
		if (dentry->d_op && dentry->d_op->d_iput)
			dentry->d_op->d_iput(dentry, inode);
		else
			iput(inode);
	} else {
		spin_unlock(&dentry->d_lock);
	}
}

/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined. dentry remains in-use.
 */
static void dentry_unlink_inode(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	dentry->d_inode = NULL;
	list_del_init(&dentry->d_alias);
	dentry_rcuwalk_barrier(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);
	if (!inode->i_nlink)
		fsnotify_inoderemove(inode);
	if (dentry->d_op && dentry->d_op->d_iput)
		dentry->d_op->d_iput(dentry, inode);
	else
		iput(inode);
}

/*
 * dentry_lru_(add|del|prune|move_tail) must be called with d_lock held.
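 *
 * They take dcache_lru_lock internally and keep the per-sb
 * s_nr_dentry_unused count and the global dentry_stat.nr_unused
 * statistic in step with the LRU list itself.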
 */
static void dentry_lru_add(struct dentry *dentry)
{
	if (list_empty(&dentry->d_lru)) {
		spin_lock(&dcache_lru_lock);
		list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
		dentry->d_sb->s_nr_dentry_unused++;
		dentry_stat.nr_unused++;
		spin_unlock(&dcache_lru_lock);
	}
}

static void __dentry_lru_del(struct dentry *dentry)
{
	list_del_init(&dentry->d_lru);
	dentry->d_sb->s_nr_dentry_unused--;
	dentry_stat.nr_unused--;
}

/*
 * Remove a dentry with references from the LRU.
 */
static void dentry_lru_del(struct dentry *dentry)
{
	if (!list_empty(&dentry->d_lru)) {
		spin_lock(&dcache_lru_lock);
		__dentry_lru_del(dentry);
		spin_unlock(&dcache_lru_lock);
	}
}

/*
 * Remove a dentry that is unreferenced and about to be pruned
 * (unhashed and destroyed) from the LRU, and inform the file system.
 * This wrapper should be called _prior_ to unhashing a victim dentry.
 */
static void dentry_lru_prune(struct dentry *dentry)
{
	if (!list_empty(&dentry->d_lru)) {
		if (dentry->d_flags & DCACHE_OP_PRUNE)
			dentry->d_op->d_prune(dentry);

		spin_lock(&dcache_lru_lock);
		__dentry_lru_del(dentry);
		spin_unlock(&dcache_lru_lock);
	}
}

static void dentry_lru_move_tail(struct dentry *dentry)
{
	spin_lock(&dcache_lru_lock);
	if (list_empty(&dentry->d_lru)) {
		list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
		dentry->d_sb->s_nr_dentry_unused++;
		dentry_stat.nr_unused++;
	} else {
		list_move_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
	}
	spin_unlock(&dcache_lru_lock);
}

/**
 * d_kill - kill dentry and return parent
 * @dentry: dentry to kill
 * @parent: parent dentry
 *
 * The dentry must already be unhashed and removed from the LRU.
 *
 * If this is the root of the dentry tree, return NULL.
 *
 * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by
 * d_kill.
 */
static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
	__releases(dentry->d_lock)
	__releases(parent->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	list_del(&dentry->d_u.d_child);
	/*
	 * Inform try_to_ascend() that we are no longer attached to the
	 * dentry tree
	 */
	dentry->d_flags |= DCACHE_DISCONNECTED;
	if (parent)
		spin_unlock(&parent->d_lock);
	dentry_iput(dentry);
	/*
	 * dentry_iput drops the locks, at which point nobody (except
	 * transient RCU lookups) can reach this dentry.
	 */
	d_free(dentry);
	return parent;
}

/*
 * Unhash a dentry without inserting an RCU walk barrier or checking that
 * dentry->d_lock is locked. The caller must take care of that, if
 * appropriate.
 */
static void __d_shrink(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		struct hlist_bl_head *b;
		if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
			b = &dentry->d_sb->s_anon;
		else
			b = d_hash(dentry->d_parent, dentry->d_name.hash);

		hlist_bl_lock(b);
		__hlist_bl_del(&dentry->d_hash);
		dentry->d_hash.pprev = NULL;
		hlist_bl_unlock(b);
	}
}

/**
 * d_drop - drop a dentry
 * @dentry: dentry to drop
 *
 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 * be found through a VFS lookup any more.
 * Note that this is different from
 * deleting the dentry - d_delete will try to mark the dentry negative if
 * possible, giving a successful _negative_ lookup, while d_drop will
 * just make the cache lookup fail.
 *
 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 * reason (NFS timeouts or autofs deletes).
 *
 * __d_drop requires dentry->d_lock.
 */
void __d_drop(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		__d_shrink(dentry);
		dentry_rcuwalk_barrier(dentry);
	}
}
EXPORT_SYMBOL(__d_drop);

void d_drop(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);

/*
 * d_clear_need_lookup - drop a dentry from cache and clear the need lookup flag
 * @dentry: dentry to drop
 *
 * This is called when we do a lookup on a placeholder dentry that needed to be
 * looked up. The dentry should have been hashed in order for it to be found by
 * the lookup code, but now needs to be unhashed while we do the actual lookup
 * and clear the DCACHE_NEED_LOOKUP flag.
 */
void d_clear_need_lookup(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_clear_need_lookup);

/*
 * Finish off a dentry we've decided to kill.
 * dentry->d_lock must be held, returns with it unlocked.
 * If ref is non-zero, then decrement the refcount too.
 * Returns dentry requiring refcount drop, or NULL if we're done.
 */
static inline struct dentry *dentry_kill(struct dentry *dentry, int ref)
	__releases(dentry->d_lock)
{
	struct inode *inode;
	struct dentry *parent;

	inode = dentry->d_inode;
	if (inode && !spin_trylock(&inode->i_lock)) {
relock:
		spin_unlock(&dentry->d_lock);
		cpu_relax();
		return dentry; /* try again with same dentry */
	}
	if (IS_ROOT(dentry))
		parent = NULL;
	else
		parent = dentry->d_parent;
	if (parent && !spin_trylock(&parent->d_lock)) {
		if (inode)
			spin_unlock(&inode->i_lock);
		goto relock;
	}

	if (ref)
		dentry->d_count--;
	/*
	 * if dentry was on the d_lru list delete it from there.
	 * inform the fs via d_prune that this dentry is about to be
	 * unhashed and destroyed.
	 */
	dentry_lru_prune(dentry);
	/* if it was on the hash then remove it */
	__d_drop(dentry);
	return d_kill(dentry, parent);
}

/*
 * This is dput
 *
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */

/*
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources.
 * If the parent dentries were scheduled for release
 * they too may now get deleted.
 */
void dput(struct dentry *dentry)
{
	if (!dentry)
		return;

repeat:
	if (dentry->d_count == 1)
		might_sleep();
	spin_lock(&dentry->d_lock);
	BUG_ON(!dentry->d_count);
	if (dentry->d_count > 1) {
		dentry->d_count--;
		spin_unlock(&dentry->d_lock);
		return;
	}

	if (dentry->d_flags & DCACHE_OP_DELETE) {
		if (dentry->d_op->d_delete(dentry))
			goto kill_it;
	}

	/* Unreachable? Get rid of it */
	if (d_unhashed(dentry))
		goto kill_it;

	/*
	 * If this dentry needs lookup, don't set the referenced flag so that it
	 * is more likely to be cleaned up by the dcache shrinker in case of
	 * memory pressure.
	 */
	if (!d_need_lookup(dentry))
		dentry->d_flags |= DCACHE_REFERENCED;
	dentry_lru_add(dentry);

	dentry->d_count--;
	spin_unlock(&dentry->d_lock);
	return;

kill_it:
	dentry = dentry_kill(dentry, 1);
	if (dentry)
		goto repeat;
}
EXPORT_SYMBOL(dput);

/**
 * d_invalidate - invalidate a dentry
 * @dentry: dentry to invalidate
 *
 * Try to invalidate the dentry if it turns out to be
 * possible. If there are other dentries that can be
 * reached through this one we can't delete it and we
 * return -EBUSY. On success we return 0.
 *
 * no dcache lock.
 */
int d_invalidate(struct dentry * dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dentry->d_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dentry->d_lock);
		return 0;
	}
	/*
	 * Check whether to do a partial shrink_dcache
	 * to get rid of unused child entries.
	 */
	if (!list_empty(&dentry->d_subdirs)) {
		spin_unlock(&dentry->d_lock);
		shrink_dcache_parent(dentry);
		spin_lock(&dentry->d_lock);
	}

	/*
	 * Somebody else still using it?
	 *
	 * If it's a directory, we can't drop it
	 * for fear of somebody re-populating it
	 * with children (even though dropping it
	 * would make it unreachable from the root,
	 * we might still populate it if it was a
	 * working directory or similar).
	 * We also need to leave mountpoints alone,
	 * directory or not.
	 */
	if (dentry->d_count > 1 && dentry->d_inode) {
		if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
			spin_unlock(&dentry->d_lock);
			return -EBUSY;
		}
	}

	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
	return 0;
}
EXPORT_SYMBOL(d_invalidate);

/* This must be called with d_lock held */
static inline void __dget_dlock(struct dentry *dentry)
{
	dentry->d_count++;
}

static inline void __dget(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__dget_dlock(dentry);
	spin_unlock(&dentry->d_lock);
}

struct dentry *dget_parent(struct dentry *dentry)
{
	struct dentry *ret;

repeat:
	/*
	 * Don't need rcu_dereference because we re-check it was correct under
	 * the lock.
	 */
	rcu_read_lock();
	ret = dentry->d_parent;
	spin_lock(&ret->d_lock);
	if (unlikely(ret != dentry->d_parent)) {
		spin_unlock(&ret->d_lock);
		rcu_read_unlock();
		goto repeat;
	}
	rcu_read_unlock();
	BUG_ON(!ret->d_count);
	ret->d_count++;
	spin_unlock(&ret->d_lock);
	return ret;
}
EXPORT_SYMBOL(dget_parent);

/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 * @want_discon: flag, used by d_splice_alias, to request
 *          that only a DISCONNECTED alias be returned.
 *
 * If inode has a hashed alias, or is a directory and has any alias,
 * acquire the reference to alias and return it. Otherwise return NULL.
 * Notice that if inode is a directory there can be only one alias and
 * it can be unhashed only if it has no children, or if it is the root
 * of a filesystem.
 *
 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 * any other hashed alias over that one unless @want_discon is set,
 * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
 */
static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
{
	struct dentry *alias, *discon_alias;

again:
	discon_alias = NULL;
	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				discon_alias = alias;
			} else if (!want_discon) {
				__dget_dlock(alias);
				spin_unlock(&alias->d_lock);
				return alias;
			}
		}
		spin_unlock(&alias->d_lock);
	}
	if (discon_alias) {
		alias = discon_alias;
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				__dget_dlock(alias);
				spin_unlock(&alias->d_lock);
				return alias;
			}
		}
		spin_unlock(&alias->d_lock);
		goto again;
	}
	return NULL;
}

struct dentry *d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!list_empty(&inode->i_dentry)) {
		spin_lock(&inode->i_lock);
		de = __d_find_alias(inode, 0);
		spin_unlock(&inode->i_lock);
	}
	return de;
}
EXPORT_SYMBOL(d_find_alias);

/*
 * Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&inode->i_lock);
	list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
		spin_lock(&dentry->d_lock);
		if (!dentry->d_count) {
			__dget_dlock(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&inode->i_lock);
			dput(dentry);
			goto restart;
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_prune_aliases);

/*
 * Try to throw away a dentry - free the inode, dput the parent.
 * Requires dentry->d_lock is held, and dentry->d_count == 0.
 * Releases dentry->d_lock.
 *
 * This may fail if locks cannot be acquired - no problem, just try again.
 */
static void try_prune_one_dentry(struct dentry *dentry)
	__releases(dentry->d_lock)
{
	struct dentry *parent;

	parent = dentry_kill(dentry, 0);
	/*
	 * If dentry_kill returns NULL, we have nothing more to do.
	 * if it returns the same dentry, trylocks failed.
	 * In either
	 * case, just loop again.
	 *
	 * Otherwise, we need to prune ancestors too. This is necessary
	 * to prevent quadratic behavior of shrink_dcache_parent(), but
	 * is also expected to be beneficial in reducing dentry cache
	 * fragmentation.
	 */
	if (!parent)
		return;
	if (parent == dentry)
		return;

	/* Prune ancestors. */
	dentry = parent;
	while (dentry) {
		spin_lock(&dentry->d_lock);
		if (dentry->d_count > 1) {
			dentry->d_count--;
			spin_unlock(&dentry->d_lock);
			return;
		}
		dentry = dentry_kill(dentry, 1);
	}
}

static void shrink_dentry_list(struct list_head *list)
{
	struct dentry *dentry;

	rcu_read_lock();
	for (;;) {
		dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
		if (&dentry->d_lru == list)
			break; /* empty */
		spin_lock(&dentry->d_lock);
		if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
			spin_unlock(&dentry->d_lock);
			continue;
		}

		/*
		 * We found an inuse dentry which was not removed from
		 * the LRU because of laziness during lookup. Do not free
		 * it - just keep it off the LRU list.
		 */
		if (dentry->d_count) {
			dentry_lru_del(dentry);
			spin_unlock(&dentry->d_lock);
			continue;
		}

		rcu_read_unlock();

		try_prune_one_dentry(dentry);

		rcu_read_lock();
	}
	rcu_read_unlock();
}

/**
 * __shrink_dcache_sb - shrink the dentry LRU on a given superblock
 * @sb:		superblock to shrink dentry LRU.
 * @count:	number of entries to prune
 * @flags:	flags to control the dentry processing
 *
 * If flags contains DCACHE_REFERENCED reference dentries will not be pruned.
 */
static void __shrink_dcache_sb(struct super_block *sb, int count, int flags)
{
	struct dentry *dentry;
	LIST_HEAD(referenced);
	LIST_HEAD(tmp);

relock:
	spin_lock(&dcache_lru_lock);
	while (!list_empty(&sb->s_dentry_lru)) {
		dentry = list_entry(sb->s_dentry_lru.prev,
				struct dentry, d_lru);
		BUG_ON(dentry->d_sb != sb);

		if (!spin_trylock(&dentry->d_lock)) {
			spin_unlock(&dcache_lru_lock);
			cpu_relax();
			goto relock;
		}

		/*
		 * If we are honouring the DCACHE_REFERENCED flag and the
		 * dentry has this flag set, don't free it. Clear the flag
		 * and put it back on the LRU.
		 */
		if (flags & DCACHE_REFERENCED &&
				dentry->d_flags & DCACHE_REFERENCED) {
			dentry->d_flags &= ~DCACHE_REFERENCED;
			list_move(&dentry->d_lru, &referenced);
			spin_unlock(&dentry->d_lock);
		} else {
			list_move_tail(&dentry->d_lru, &tmp);
			spin_unlock(&dentry->d_lock);
			if (!--count)
				break;
		}
		cond_resched_lock(&dcache_lru_lock);
	}
	if (!list_empty(&referenced))
		list_splice(&referenced, &sb->s_dentry_lru);
	spin_unlock(&dcache_lru_lock);

	shrink_dentry_list(&tmp);
}

/**
 * prune_dcache_sb - shrink the dcache
 * @sb: superblock
 * @nr_to_scan: number of entries to try to free
 *
 * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
 * done when we need more memory and is called from the superblock shrinker
 * function.
 *
 * This function may fail to free any resources if all the dentries are in
 * use.
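 *
 * Dentries that have DCACHE_REFERENCED set get one extra pass:
 * __shrink_dcache_sb() clears the flag and keeps them on the LRU
 * instead of freeing them on this scan.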
 */
void prune_dcache_sb(struct super_block *sb, int nr_to_scan)
{
	__shrink_dcache_sb(sb, nr_to_scan, DCACHE_REFERENCED);
}

/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This is used to free
 * the dcache before unmounting a file system.
 */
void shrink_dcache_sb(struct super_block *sb)
{
	LIST_HEAD(tmp);

	spin_lock(&dcache_lru_lock);
	while (!list_empty(&sb->s_dentry_lru)) {
		list_splice_init(&sb->s_dentry_lru, &tmp);
		spin_unlock(&dcache_lru_lock);
		shrink_dentry_list(&tmp);
		spin_lock(&dcache_lru_lock);
	}
	spin_unlock(&dcache_lru_lock);
}
EXPORT_SYMBOL(shrink_dcache_sb);

/*
 * destroy a single subtree of dentries for unmount
 * - see the comments on shrink_dcache_for_umount() for a description of the
 *   locking
 */
static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
{
	struct dentry *parent;

	BUG_ON(!IS_ROOT(dentry));

	for (;;) {
		/* descend to the first leaf in the current subtree */
		while (!list_empty(&dentry->d_subdirs))
			dentry = list_entry(dentry->d_subdirs.next,
					    struct dentry, d_u.d_child);

		/* consume the dentries from this leaf up through its parents
		 * until we find one with children or run out altogether */
		do {
			struct inode *inode;

			/*
			 * remove the dentry from the lru, and inform
			 * the fs that this dentry is about to be
			 * unhashed and destroyed.
			 */
			dentry_lru_prune(dentry);
			__d_shrink(dentry);

			if (dentry->d_count != 0) {
				printk(KERN_ERR
				       "BUG: Dentry %p{i=%lx,n=%s}"
				       " still in use (%d)"
				       " [unmount of %s %s]\n",
				       dentry,
				       dentry->d_inode ?
				       dentry->d_inode->i_ino : 0UL,
				       dentry->d_name.name,
				       dentry->d_count,
				       dentry->d_sb->s_type->name,
				       dentry->d_sb->s_id);
				BUG();
			}

			if (IS_ROOT(dentry)) {
				parent = NULL;
				list_del(&dentry->d_u.d_child);
			} else {
				parent = dentry->d_parent;
				parent->d_count--;
				list_del(&dentry->d_u.d_child);
			}

			inode = dentry->d_inode;
			if (inode) {
				dentry->d_inode = NULL;
				list_del_init(&dentry->d_alias);
				if (dentry->d_op && dentry->d_op->d_iput)
					dentry->d_op->d_iput(dentry, inode);
				else
					iput(inode);
			}

			d_free(dentry);

			/* finished when we fall off the top of the tree,
			 * otherwise we ascend to the parent and move to the
			 * next sibling if there is one */
			if (!parent)
				return;
			dentry = parent;
		} while (list_empty(&dentry->d_subdirs));

		dentry = list_entry(dentry->d_subdirs.next,
				    struct dentry, d_u.d_child);
	}
}

/*
 * destroy the dentries attached to a superblock on unmounting
 * - we don't need to use dentry->d_lock because:
 *   - the superblock is detached from all mountings and open files, so the
 *     dentry trees will not be rearranged by the VFS
 *   - s_umount is write-locked, so the memory pressure shrinker will ignore
 *     any dentries belonging to this superblock that it comes across
 *   - the filesystem itself is no longer permitted to rearrange the dentries
 *     in this superblock
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	if (down_read_trylock(&sb->s_umount))
		BUG();

	dentry = sb->s_root;
	sb->s_root = NULL;
	dentry->d_count--;
	shrink_dcache_for_umount_subtree(dentry);

	while (!hlist_bl_empty(&sb->s_anon)) {
		dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash);
		shrink_dcache_for_umount_subtree(dentry);
	}
}

/*
 * This tries to ascend one level of parenthood, but
 * we can race with renaming, so we need to re-check
 * the parenthood after dropping the lock and check
 * that the sequence number still matches.
 */
static struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq)
{
	struct dentry *new = old->d_parent;

	rcu_read_lock();
	spin_unlock(&old->d_lock);
	spin_lock(&new->d_lock);

	/*
	 * might go back up the wrong parent if we have had a rename
	 * or deletion
	 */
	if (new != old->d_parent ||
		 (old->d_flags & DCACHE_DISCONNECTED) ||
		 (!locked && read_seqretry(&rename_lock, seq))) {
		spin_unlock(&new->d_lock);
		new = NULL;
	}
	rcu_read_unlock();
	return new;
}


/*
 * Search for at least 1 mount point in the dentry's subdirs.
 * We descend to the next level whenever the d_subdirs
 * list is non-empty and continue searching.
 */

/**
 * have_submounts - check for mounts over a dentry
 * @parent: dentry to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point
 */
int have_submounts(struct dentry *parent)
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq;
	int locked = 0;

	seq = read_seqbegin(&rename_lock);
again:
	this_parent = parent;

	if (d_mountpoint(parent))
		goto positive;
	spin_lock(&this_parent->d_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		/* Have we found a mount point ? */
		if (d_mountpoint(dentry)) {
			spin_unlock(&dentry->d_lock);
			spin_unlock(&this_parent->d_lock);
			goto positive;
		}
		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = try_to_ascend(this_parent, locked, seq);
		if (!this_parent)
			goto rename_retry;
		next = child->d_u.d_child.next;
		goto resume;
	}
	spin_unlock(&this_parent->d_lock);
	if (!locked && read_seqretry(&rename_lock, seq))
		goto rename_retry;
	if (locked)
		write_sequnlock(&rename_lock);
	return 0; /* No mount points found in tree */
positive:
	if (!locked && read_seqretry(&rename_lock, seq))
		goto rename_retry;
	if (locked)
		write_sequnlock(&rename_lock);
	return 1;

rename_retry:
	locked = 1;
	write_seqlock(&rename_lock);
	goto again;
}
EXPORT_SYMBOL(have_submounts);

/*
 * Search the dentry child list for the specified parent,
 * and move any unused dentries to the end of the unused
 * list for prune_dcache(). We descend to the next level
 * whenever the d_subdirs list is non-empty and continue
 * searching.
 *
 * It returns zero iff there are no unused children,
 * otherwise it returns the number of children moved to
 * the end of the unused list. This may not be the total
 * number of unused children, because select_parent can
 * drop the lock and return early due to latency
 * constraints.
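 *
 * Like the other tree walkers it detects concurrent renames via the
 * rename_lock sequence count and, if nothing has been found yet,
 * restarts the walk holding the rename_lock write side.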
 */
static int select_parent(struct dentry * parent)
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq;
	int found = 0;
	int locked = 0;

	seq = read_seqbegin(&rename_lock);
again:
	this_parent = parent;
	spin_lock(&this_parent->d_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

		/*
		 * move only zero ref count dentries to the end
		 * of the unused list for prune_dcache
		 */
		if (!dentry->d_count) {
			dentry_lru_move_tail(dentry);
			found++;
		} else {
			dentry_lru_del(dentry);
		}

		/*
		 * We can return to the caller if we have found some (this
		 * ensures forward progress). We'll be coming back to find
		 * the rest.
		 */
		if (found && need_resched()) {
			spin_unlock(&dentry->d_lock);
			goto out;
		}

		/*
		 * Descend a level if the d_subdirs list is non-empty.
		 */
		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}

		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = try_to_ascend(this_parent, locked, seq);
		if (!this_parent)
			goto rename_retry;
		next = child->d_u.d_child.next;
		goto resume;
	}
out:
	spin_unlock(&this_parent->d_lock);
	if (!locked && read_seqretry(&rename_lock, seq))
		goto rename_retry;
	if (locked)
		write_sequnlock(&rename_lock);
	return found;

rename_retry:
	if (found)
		return found;
	locked = 1;
	write_seqlock(&rename_lock);
	goto again;
}

/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */
void shrink_dcache_parent(struct dentry * parent)
{
	struct super_block *sb = parent->d_sb;
	int found;

	while ((found = select_parent(parent)) != 0)
		__shrink_dcache_sb(sb, found, 0);
}
EXPORT_SYMBOL(shrink_dcache_parent);

/**
 * __d_alloc	-	allocate a dcache entry
 * @sb: filesystem it will belong to
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
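 *
 * Short names are stored inline in d_iname; names longer than
 * DNAME_INLINE_LEN-1 get a separate kmalloc()ed copy, which is freed
 * again in __d_free().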
 */
struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	if (name->len > DNAME_INLINE_LEN-1) {
		dname = kmalloc(name->len + 1, GFP_KERNEL);
		if (!dname) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	} else {
		dname = dentry->d_iname;
	}
	dentry->d_name.name = dname;

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	dentry->d_count = 1;
	dentry->d_flags = 0;
	spin_lock_init(&dentry->d_lock);
	seqcount_init(&dentry->d_seq);
	dentry->d_inode = NULL;
	dentry->d_parent = dentry;
	dentry->d_sb = sb;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	INIT_HLIST_BL_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_LIST_HEAD(&dentry->d_alias);
	INIT_LIST_HEAD(&dentry->d_u.d_child);
	d_set_d_op(dentry, dentry->d_sb->s_d_op);

	this_cpu_inc(nr_dentry);

	return dentry;
}

/**
 * d_alloc	-	allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(parent->d_sb, name);
	if (!dentry)
		return NULL;

	spin_lock(&parent->d_lock);
	/*
	 * don't need child lock because it is not subject
	 * to concurrency here
	 */
	__dget_dlock(parent);
	dentry->d_parent = parent;
	list_add(&dentry->d_u.d_child, &parent->d_subdirs);
	spin_unlock(&parent->d_lock);

	return dentry;
}
EXPORT_SYMBOL(d_alloc);

struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(sb, name);
	if (dentry)
		dentry->d_flags |= DCACHE_DISCONNECTED;
	return dentry;
}
EXPORT_SYMBOL(d_alloc_pseudo);

struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
	struct qstr q;

	q.name = name;
	q.len = strlen(name);
	q.hash = full_name_hash(q.name, q.len);
	return d_alloc(parent, &q);
}
EXPORT_SYMBOL(d_alloc_name);

void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
{
	WARN_ON_ONCE(dentry->d_op);
	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
				DCACHE_OP_COMPARE	|
				DCACHE_OP_REVALIDATE	|
				DCACHE_OP_DELETE ));
	dentry->d_op = op;
	if (!op)
		return;
	if (op->d_hash)
		dentry->d_flags |= DCACHE_OP_HASH;
	if (op->d_compare)
		dentry->d_flags |= DCACHE_OP_COMPARE;
	if (op->d_revalidate)
		dentry->d_flags |= DCACHE_OP_REVALIDATE;
	if (op->d_delete)
		dentry->d_flags |= DCACHE_OP_DELETE;
	if (op->d_prune)
		dentry->d_flags |= DCACHE_OP_PRUNE;

}
EXPORT_SYMBOL(d_set_d_op);

static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
	spin_lock(&dentry->d_lock);
	if (inode) {
		if (unlikely(IS_AUTOMOUNT(inode)))
			dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
		list_add(&dentry->d_alias, &inode->i_dentry);
	}
	dentry->d_inode = inode;
	dentry_rcuwalk_barrier(dentry);
	spin_unlock(&dentry->d_lock);
	fsnotify_d_instantiate(dentry, inode);
}

/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
void d_instantiate(struct dentry *entry, struct inode * inode)
{
	BUG_ON(!list_empty(&entry->d_alias));
	if (inode)
		spin_lock(&inode->i_lock);
	__d_instantiate(entry, inode);
	if (inode)
		spin_unlock(&inode->i_lock);
	security_d_instantiate(entry, inode);
}
EXPORT_SYMBOL(d_instantiate);

/**
 * d_instantiate_unique - instantiate a non-aliased dentry
 * @entry: dentry to instantiate
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry. On success, it returns NULL.
 * If an unhashed alias of "entry" already exists, then we return the
 * aliased dentry instead and drop one reference to inode.
 *
 * Note that in order to avoid conflicts with rename() etc, the caller
 * had better be holding the parent directory semaphore.
 *
 * This also assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
static struct dentry *__d_instantiate_unique(struct dentry *entry,
					     struct inode *inode)
{
	struct dentry *alias;
	int len = entry->d_name.len;
	const char *name = entry->d_name.name;
	unsigned int hash = entry->d_name.hash;

	if (!inode) {
		__d_instantiate(entry, NULL);
		return NULL;
	}

	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		struct qstr *qstr = &alias->d_name;

		/*
		 * Don't need alias->d_lock here, because aliases with
		 * d_parent == entry->d_parent are not subject to name or
		 * parent changes, because the parent inode i_mutex is held.
		 */
		if (qstr->hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
		if (dentry_cmp(qstr->name, qstr->len, name, len))
			continue;
		__dget(alias);
		return alias;
	}

	__d_instantiate(entry, inode);
	return NULL;
}

struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
{
	struct dentry *result;

	BUG_ON(!list_empty(&entry->d_alias));

	if (inode)
		spin_lock(&inode->i_lock);
	result = __d_instantiate_unique(entry, inode);
	if (inode)
		spin_unlock(&inode->i_lock);

	if (!result) {
		security_d_instantiate(entry, inode);
		return NULL;
	}

	BUG_ON(!d_unhashed(result));
	iput(inode);
	return result;
}

EXPORT_SYMBOL(d_instantiate_unique);

/**
 * d_alloc_root - allocate root dentry
 * @root_inode: inode to allocate the root for
 *
 * Allocate a root ("/") dentry for the inode given. The inode is
 * instantiated and returned.
 * %NULL is returned if there is insufficient
 * memory or the inode passed is %NULL.
 */
struct dentry * d_alloc_root(struct inode * root_inode)
{
	struct dentry *res = NULL;

	if (root_inode) {
		static const struct qstr name = { .name = "/", .len = 1 };

		res = __d_alloc(root_inode->i_sb, &name);
		if (res)
			d_instantiate(res, root_inode);
	}
	return res;
}
EXPORT_SYMBOL(d_alloc_root);

static struct dentry * __d_find_any_alias(struct inode *inode)
{
	struct dentry *alias;

	if (list_empty(&inode->i_dentry))
		return NULL;
	alias = list_first_entry(&inode->i_dentry, struct dentry, d_alias);
	__dget(alias);
	return alias;
}

static struct dentry * d_find_any_alias(struct inode *inode)
{
	struct dentry *de;

	spin_lock(&inode->i_lock);
	de = __d_find_any_alias(inode);
	spin_unlock(&inode->i_lock);
	return de;
}


/**
 * d_obtain_alias - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
 * similar open by handle operations. The returned dentry may be anonymous,
 * or may have a full name (if the inode was already in the cache).
 *
 * When called on a directory inode, we must ensure that the inode only ever
 * has one dentry. If a dentry is found, that is returned instead of
 * allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry. In case of an error the reference on the inode is released.
 * To make it easier to use in export operations a %NULL or IS_ERR inode may
 * be passed in and the error will be propagated to the return value,
 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_alias(struct inode *inode)
{
	static const struct qstr anonstring = { .name = "" };
	struct dentry *tmp;
	struct dentry *res;

	if (!inode)
		return ERR_PTR(-ESTALE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	res = d_find_any_alias(inode);
	if (res)
		goto out_iput;

	tmp = __d_alloc(inode->i_sb, &anonstring);
	if (!tmp) {
		res = ERR_PTR(-ENOMEM);
		goto out_iput;
	}

	spin_lock(&inode->i_lock);
	res = __d_find_any_alias(inode);
	if (res) {
		spin_unlock(&inode->i_lock);
		dput(tmp);
		goto out_iput;
	}

	/* attach a disconnected dentry */
	spin_lock(&tmp->d_lock);
	tmp->d_inode = inode;
	tmp->d_flags |= DCACHE_DISCONNECTED;
	list_add(&tmp->d_alias, &inode->i_dentry);
	hlist_bl_lock(&tmp->d_sb->s_anon);
	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
	hlist_bl_unlock(&tmp->d_sb->s_anon);
	spin_unlock(&tmp->d_lock);
	spin_unlock(&inode->i_lock);
	security_d_instantiate(tmp, inode);

	return tmp;

out_iput:
	if (res && !IS_ERR(res))
		security_d_instantiate(res, inode);
	iput(inode);
	return res;
}
EXPORT_SYMBOL(d_obtain_alias);

/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If inode is a directory and has a 'disconnected' dentry (i.e.
 * IS_ROOT and
 * DCACHE_DISCONNECTED), then d_move that in place of the given dentry
 * and return it, else simply d_add the inode to the dentry and return NULL.
 *
 * This is needed in the lookup routine of any filesystem that is exportable
 * (via knfsd) so that we can build dcache paths to directories effectively.
 *
 * If a dentry was found and moved, then it is returned. Otherwise NULL
 * is returned. This matches the expected return value of ->lookup.
 *
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	struct dentry *new = NULL;

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	if (inode && S_ISDIR(inode->i_mode)) {
		spin_lock(&inode->i_lock);
		new = __d_find_alias(inode, 1);
		if (new) {
			BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
			spin_unlock(&inode->i_lock);
			security_d_instantiate(new, inode);
			d_move(new, dentry);
			iput(inode);
		} else {
			/* already taking inode->i_lock, so d_add() by hand */
			__d_instantiate(dentry, inode);
			spin_unlock(&inode->i_lock);
			security_d_instantiate(dentry, inode);
			d_rehash(dentry);
		}
	} else
		d_add(dentry, inode);
	return new;
}
EXPORT_SYMBOL(d_splice_alias);

/**
 * d_add_ci - lookup or allocate new dentry with case-exact name
 * @inode:  the inode case-insensitive lookup has found
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @name:   the case-exact name to be associated with the returned dentry
 *
 * This is to avoid filling the dcache with case-insensitive names to the
 * same inode, only the actual correct case is stored in the dcache for
 * case-insensitive filesystems.
 *
 * For a case-insensitive lookup match and if the case-exact dentry
 * already exists in the dcache, use it and return it.
 *
 * If no entry exists with the exact case name, allocate new dentry with
 * the exact case, and return the spliced entry.
 */
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
			struct qstr *name)
{
	int error;
	struct dentry *found;
	struct dentry *new;

	/*
	 * First check if a dentry matching the name already exists,
	 * if not go ahead and create it now.
	 */
	found = d_hash_and_lookup(dentry->d_parent, name);
	if (!found) {
		new = d_alloc(dentry->d_parent, name);
		if (!new) {
			error = -ENOMEM;
			goto err_out;
		}

		found = d_splice_alias(inode, new);
		if (found) {
			dput(new);
			return found;
		}
		return new;
	}

	/*
	 * If a matching dentry exists, and it's not negative use it.
	 *
	 * Decrement the reference count to balance the iget() done
	 * earlier on.
	 */
	if (found->d_inode) {
		if (unlikely(found->d_inode != inode)) {
			/* This can't happen because bad inodes are unhashed. */
			BUG_ON(!is_bad_inode(inode));
			BUG_ON(!is_bad_inode(found->d_inode));
		}
		iput(inode);
		return found;
	}

	/*
	 * We are going to instantiate this dentry, unhash it and clear the
	 * lookup flag so we can do that.
	 */
	if (unlikely(d_need_lookup(found)))
		d_clear_need_lookup(found);

	/*
	 * Negative dentry: instantiate it unless the inode is a directory and
	 * already has a dentry.
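	 * d_splice_alias() hands back the pre-existing directory alias in
	 * that case; we then drop 'found' and return the alias instead.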
	 */
	new = d_splice_alias(inode, found);
	if (new) {
		dput(found);
		found = new;
	}
	return found;

err_out:
	iput(inode);
	return ERR_PTR(error);
}
EXPORT_SYMBOL(d_add_ci);

/**
 * __d_lookup_rcu - search for a dentry (racy, store-free)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * @seq: returns d_seq value at the point where the dentry was found
 * @inode: returns dentry->d_inode when the inode was found valid.
 * Returns: dentry, or NULL
 *
 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
 * resolution (store-free path walking) design described in
 * Documentation/filesystems/path-lookup.txt.
 *
 * This is not to be used outside core vfs.
 *
 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
 * held, and rcu_read_lock held. The returned dentry must not be stored into
 * without taking d_lock and checking d_seq sequence count against @seq
 * returned here.
 *
 * A refcount may be taken on the found dentry with the __d_rcu_to_refcount
 * function.
 *
 * Alternatively, __d_lookup_rcu may be called again to look up the child of
 * the returned dentry, so long as its parent's seqlock is checked after the
 * child is looked up. Thus, an interlocking stepping of sequence lock checks
 * is formed, giving integrity down the path walk.
 */
struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
				unsigned *seq, struct inode **inode)
{
	unsigned int len = name->len;
	unsigned int hash = name->hash;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(parent, hash);
	struct hlist_bl_node *node;
	struct dentry *dentry;

	/*
	 * Note: There is significant duplication with __d_lookup which is
	 * required to prevent single threaded performance regressions
	 * especially on architectures where smp_rmb (in seqcounts) are costly.
	 * Keep the two functions in sync.
	 */

	/*
	 * The hash list is protected using RCU.
	 *
	 * Carefully use d_seq when comparing a candidate dentry, to avoid
	 * races with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/path-lookup.txt for more details.
	 */
	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
		struct inode *i;
		const char *tname;
		int tlen;

		if (dentry->d_name.hash != hash)
			continue;

seqretry:
		*seq = read_seqcount_begin(&dentry->d_seq);
		if (dentry->d_parent != parent)
			continue;
		if (d_unhashed(dentry))
			continue;
		tlen = dentry->d_name.len;
		tname = dentry->d_name.name;
		i = dentry->d_inode;
		prefetch(tname);
		/*
		 * This seqcount check is required to ensure name and
		 * len are loaded atomically, so as not to walk off the
		 * edge of memory when walking. If we could load this
		 * atomically some other way, we could drop this check.
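		 * On a retry we simply re-read d_seq and re-validate this
		 * dentry; the bucket walk itself stays protected by RCU.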
		 */
		if (read_seqcount_retry(&dentry->d_seq, *seq))
			goto seqretry;
		if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
			if (parent->d_op->d_compare(parent, *inode,
						dentry, i,
						tlen, tname, name))
				continue;
		} else {
			if (dentry_cmp(tname, tlen, str, len))
				continue;
		}
		/*
		 * No extra seqcount check is required after the name
		 * compare. The caller must perform a seqcount check in
		 * order to do anything useful with the returned dentry
		 * anyway.
		 */
		*inode = i;
		return dentry;
	}
	return NULL;
}

/**
 * d_lookup - search for a dentry
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * d_lookup searches the children of the parent dentry for the name in
 * question. If the dentry is found its reference count is incremented and the
 * dentry is returned. The caller must use dput to free the entry when it has
 * finished using it. %NULL is returned if the dentry does not exist.
 */
struct dentry *d_lookup(struct dentry *parent, struct qstr *name)
{
	struct dentry *dentry;
	unsigned seq;

	do {
		seq = read_seqbegin(&rename_lock);
		dentry = __d_lookup(parent, name);
		if (dentry)
			break;
	} while (read_seqretry(&rename_lock, seq));
	return dentry;
}
EXPORT_SYMBOL(d_lookup);

/**
 * __d_lookup - search for a dentry (racy)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * __d_lookup is like d_lookup, however it may (rarely) return a
 * false-negative result due to unrelated rename activity.
 *
 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
 * however it must be used carefully, eg. with a following d_lookup in
 * the case of failure.
 *
 * __d_lookup callers must be commented.
 */
struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
{
	unsigned int len = name->len;
	unsigned int hash = name->hash;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(parent, hash);
	struct hlist_bl_node *node;
	struct dentry *found = NULL;
	struct dentry *dentry;

	/*
	 * Note: There is significant duplication with __d_lookup_rcu which is
	 * required to prevent single threaded performance regressions
	 * especially on architectures where smp_rmb (in seqcounts) are costly.
	 * Keep the two functions in sync.
	 */

	/*
	 * The hash list is protected using RCU.
	 *
	 * Take d_lock when comparing a candidate dentry, to avoid races
	 * with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/path-lookup.txt for more details.
	 */
	rcu_read_lock();

	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
		const char *tname;
		int tlen;

		if (dentry->d_name.hash != hash)
			continue;

		spin_lock(&dentry->d_lock);
		if (dentry->d_parent != parent)
			goto next;
		if (d_unhashed(dentry))
			goto next;

		/*
		 * It is safe to compare names since d_move() cannot
		 * change the qstr (protected by d_lock).
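		 * Unlike the rcu-walk variant above, the reference is taken
		 * here by bumping d_count while d_lock is still held.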
		 */
		tlen = dentry->d_name.len;
		tname = dentry->d_name.name;
		if (parent->d_flags & DCACHE_OP_COMPARE) {
			if (parent->d_op->d_compare(parent, parent->d_inode,
						dentry, dentry->d_inode,
						tlen, tname, name))
				goto next;
		} else {
			if (dentry_cmp(tname, tlen, str, len))
				goto next;
		}

		dentry->d_count++;
		found = dentry;
		spin_unlock(&dentry->d_lock);
		break;
next:
		spin_unlock(&dentry->d_lock);
	}
	rcu_read_unlock();

	return found;
}

/**
 * d_hash_and_lookup - hash the qstr then search for a dentry
 * @dir: Directory to search in
 * @name: qstr of name we wish to find
 *
 * On hash failure or on lookup failure NULL is returned.
 */
struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
{
	struct dentry *dentry = NULL;

	/*
	 * Check for a fs-specific hash function. Note that we must
	 * calculate the standard hash first, as the d_op->d_hash()
	 * routine may choose to leave the hash value unchanged.
	 */
	name->hash = full_name_hash(name->name, name->len);
	if (dir->d_flags & DCACHE_OP_HASH) {
		if (dir->d_op->d_hash(dir, dir->d_inode, name) < 0)
			goto out;
	}
	dentry = d_lookup(dir, name);
out:
	return dentry;
}

/**
 * d_validate - verify dentry provided from insecure source (deprecated)
 * @dentry: The dentry alleged to be valid child of @dparent
 * @dparent: The parent dentry (known to be valid)
 *
 * An insecure source has sent us a dentry, here we verify it and dget() it.
 * This is used by ncpfs in its readdir implementation.
 * Zero is returned if the dentry is invalid.
 *
 * This function is slow for big directories, and deprecated, do not use it.
 */
int d_validate(struct dentry *dentry, struct dentry *dparent)
{
	struct dentry *child;

	spin_lock(&dparent->d_lock);
	list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
		if (dentry == child) {
			spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
			__dget_dlock(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dparent->d_lock);
			return 1;
		}
	}
	spin_unlock(&dparent->d_lock);

	return 0;
}
EXPORT_SYMBOL(d_validate);

/*
 * When a file is deleted, we have two options:
 * - turn this dentry into a negative dentry
 * - unhash this dentry and free it.
 *
 * Usually, we want to just turn this into
 * a negative dentry, but if anybody else is
 * currently using the dentry or the inode
 * we can't do that and we fall back on removing
 * it from the hash queues and waiting for
 * it to be deleted later when it has no users
 */

/**
 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * Turn the dentry into a negative dentry if possible, otherwise
 * remove it from the hash queues so it can be deleted later
 */
void d_delete(struct dentry * dentry)
{
	struct inode *inode;
	int isdir = 0;
	/*
	 * Are we the only user?
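	 * With d_count == 1 the dentry can be made negative by dropping the
	 * inode; otherwise it is only unhashed and the final dput() frees
	 * it later.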
1983 */ 1984 again: 1985 spin_lock(&dentry->d_lock); 1986 inode = dentry->d_inode; 1987 isdir = S_ISDIR(inode->i_mode); 1988 if (dentry->d_count == 1) { 1989 if (inode && !spin_trylock(&inode->i_lock)) { 1990 spin_unlock(&dentry->d_lock); 1991 cpu_relax(); 1992 goto again; 1993 } 1994 dentry->d_flags &= ~DCACHE_CANT_MOUNT; 1995 dentry_unlink_inode(dentry); 1996 fsnotify_nameremove(dentry, isdir); 1997 return; 1998 } 1999 2000 if (!d_unhashed(dentry)) 2001 __d_drop(dentry); 2002 2003 spin_unlock(&dentry->d_lock); 2004 2005 fsnotify_nameremove(dentry, isdir); 2006 } 2007 EXPORT_SYMBOL(d_delete); 2008 2009 static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b) 2010 { 2011 BUG_ON(!d_unhashed(entry)); 2012 hlist_bl_lock(b); 2013 entry->d_flags |= DCACHE_RCUACCESS; 2014 hlist_bl_add_head_rcu(&entry->d_hash, b); 2015 hlist_bl_unlock(b); 2016 } 2017 2018 static void _d_rehash(struct dentry * entry) 2019 { 2020 __d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash)); 2021 } 2022 2023 /** 2024 * d_rehash - add an entry back to the hash 2025 * @entry: dentry to add to the hash 2026 * 2027 * Adds a dentry to the hash according to its name. 2028 */ 2029 2030 void d_rehash(struct dentry * entry) 2031 { 2032 spin_lock(&entry->d_lock); 2033 _d_rehash(entry); 2034 spin_unlock(&entry->d_lock); 2035 } 2036 EXPORT_SYMBOL(d_rehash); 2037 2038 /** 2039 * dentry_update_name_case - update case insensitive dentry with a new name 2040 * @dentry: dentry to be updated 2041 * @name: new name 2042 * 2043 * Update a case insensitive dentry with new case of name. 2044 * 2045 * dentry must have been returned by d_lookup with name @name. Old and new 2046 * name lengths must match (ie. no d_compare which allows mismatched name 2047 * lengths). 2048 * 2049 * Parent inode i_mutex must be held over d_lookup and into this call (to 2050 * keep renames and concurrent inserts, and readdir(2) away). 2051 */ 2052 void dentry_update_name_case(struct dentry *dentry, struct qstr *name) 2053 { 2054 BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex)); 2055 BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */ 2056 2057 spin_lock(&dentry->d_lock); 2058 write_seqcount_begin(&dentry->d_seq); 2059 memcpy((unsigned char *)dentry->d_name.name, name->name, name->len); 2060 write_seqcount_end(&dentry->d_seq); 2061 spin_unlock(&dentry->d_lock); 2062 } 2063 EXPORT_SYMBOL(dentry_update_name_case); 2064 2065 static void switch_names(struct dentry *dentry, struct dentry *target) 2066 { 2067 if (dname_external(target)) { 2068 if (dname_external(dentry)) { 2069 /* 2070 * Both external: swap the pointers 2071 */ 2072 swap(target->d_name.name, dentry->d_name.name); 2073 } else { 2074 /* 2075 * dentry:internal, target:external. Steal target's 2076 * storage and make target internal. 2077 */ 2078 memcpy(target->d_iname, dentry->d_name.name, 2079 dentry->d_name.len + 1); 2080 dentry->d_name.name = target->d_name.name; 2081 target->d_name.name = target->d_iname; 2082 } 2083 } else { 2084 if (dname_external(dentry)) { 2085 /* 2086 * dentry:external, target:internal. Give dentry's 2087 * storage to target and make dentry internal 2088 */ 2089 memcpy(dentry->d_iname, target->d_name.name, 2090 target->d_name.len + 1); 2091 target->d_name.name = dentry->d_name.name; 2092 dentry->d_name.name = dentry->d_iname; 2093 } else { 2094 /* 2095 * Both are internal. 
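 *
 * ("Internal" means the string lives in the dentry's own small d_iname
 *  array rather than in a separately allocated buffer, which is exactly
 *  what dname_external() tests:
 *
 *	dname_external(dentry)  <=>  dentry->d_name.name != dentry->d_iname
 *
 *  An inline name cannot be handed over by swapping pointers, so this
 *  last case falls back to copying the bytes.)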
Just copy target to dentry 2096 */ 2097 memcpy(dentry->d_iname, target->d_name.name, 2098 target->d_name.len + 1); 2099 dentry->d_name.len = target->d_name.len; 2100 return; 2101 } 2102 } 2103 swap(dentry->d_name.len, target->d_name.len); 2104 } 2105 2106 static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target) 2107 { 2108 /* 2109 * XXXX: do we really need to take target->d_lock? 2110 */ 2111 if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent) 2112 spin_lock(&target->d_parent->d_lock); 2113 else { 2114 if (d_ancestor(dentry->d_parent, target->d_parent)) { 2115 spin_lock(&dentry->d_parent->d_lock); 2116 spin_lock_nested(&target->d_parent->d_lock, 2117 DENTRY_D_LOCK_NESTED); 2118 } else { 2119 spin_lock(&target->d_parent->d_lock); 2120 spin_lock_nested(&dentry->d_parent->d_lock, 2121 DENTRY_D_LOCK_NESTED); 2122 } 2123 } 2124 if (target < dentry) { 2125 spin_lock_nested(&target->d_lock, 2); 2126 spin_lock_nested(&dentry->d_lock, 3); 2127 } else { 2128 spin_lock_nested(&dentry->d_lock, 2); 2129 spin_lock_nested(&target->d_lock, 3); 2130 } 2131 } 2132 2133 static void dentry_unlock_parents_for_move(struct dentry *dentry, 2134 struct dentry *target) 2135 { 2136 if (target->d_parent != dentry->d_parent) 2137 spin_unlock(&dentry->d_parent->d_lock); 2138 if (target->d_parent != target) 2139 spin_unlock(&target->d_parent->d_lock); 2140 } 2141 2142 /* 2143 * When switching names, the actual string doesn't strictly have to 2144 * be preserved in the target - because we're dropping the target 2145 * anyway. As such, we can just do a simple memcpy() to copy over 2146 * the new name before we switch. 2147 * 2148 * Note that we have to be a lot more careful about getting the hash 2149 * switched - we have to switch the hash value properly even if it 2150 * then no longer matches the actual (corrupted) string of the target. 2151 * The hash value has to match the hash queue that the dentry is on.. 2152 */ 2153 /* 2154 * __d_move - move a dentry 2155 * @dentry: entry to move 2156 * @target: new dentry 2157 * 2158 * Update the dcache to reflect the move of a file name. Negative 2159 * dcache entries should not be moved in this way. Caller must hold 2160 * rename_lock, the i_mutex of the source and target directories, 2161 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename(). 2162 */ 2163 static void __d_move(struct dentry * dentry, struct dentry * target) 2164 { 2165 if (!dentry->d_inode) 2166 printk(KERN_WARNING "VFS: moving negative dcache entry\n"); 2167 2168 BUG_ON(d_ancestor(dentry, target)); 2169 BUG_ON(d_ancestor(target, dentry)); 2170 2171 dentry_lock_for_move(dentry, target); 2172 2173 write_seqcount_begin(&dentry->d_seq); 2174 write_seqcount_begin(&target->d_seq); 2175 2176 /* __d_drop does write_seqcount_barrier, but they're OK to nest. */ 2177 2178 /* 2179 * Move the dentry to the target hash queue. Don't bother checking 2180 * for the same hash queue because of how unlikely it is. 2181 */ 2182 __d_drop(dentry); 2183 __d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash)); 2184 2185 /* Unhash the target: dput() will then get rid of it */ 2186 __d_drop(target); 2187 2188 list_del(&dentry->d_u.d_child); 2189 list_del(&target->d_u.d_child); 2190 2191 /* Switch the names.. */ 2192 switch_names(dentry, target); 2193 swap(dentry->d_name.hash, target->d_name.hash); 2194 2195 /* ... 
and switch the parents */ 2196 if (IS_ROOT(dentry)) { 2197 dentry->d_parent = target->d_parent; 2198 target->d_parent = target; 2199 INIT_LIST_HEAD(&target->d_u.d_child); 2200 } else { 2201 swap(dentry->d_parent, target->d_parent); 2202 2203 /* And add them back to the (new) parent lists */ 2204 list_add(&target->d_u.d_child, &target->d_parent->d_subdirs); 2205 } 2206 2207 list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs); 2208 2209 write_seqcount_end(&target->d_seq); 2210 write_seqcount_end(&dentry->d_seq); 2211 2212 dentry_unlock_parents_for_move(dentry, target); 2213 spin_unlock(&target->d_lock); 2214 fsnotify_d_move(dentry); 2215 spin_unlock(&dentry->d_lock); 2216 } 2217 2218 /* 2219 * d_move - move a dentry 2220 * @dentry: entry to move 2221 * @target: new dentry 2222 * 2223 * Update the dcache to reflect the move of a file name. Negative 2224 * dcache entries should not be moved in this way. See the locking 2225 * requirements for __d_move. 2226 */ 2227 void d_move(struct dentry *dentry, struct dentry *target) 2228 { 2229 write_seqlock(&rename_lock); 2230 __d_move(dentry, target); 2231 write_sequnlock(&rename_lock); 2232 } 2233 EXPORT_SYMBOL(d_move); 2234 2235 /** 2236 * d_ancestor - search for an ancestor 2237 * @p1: ancestor dentry 2238 * @p2: child dentry 2239 * 2240 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is 2241 * an ancestor of p2, else NULL. 2242 */ 2243 struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2) 2244 { 2245 struct dentry *p; 2246 2247 for (p = p2; !IS_ROOT(p); p = p->d_parent) { 2248 if (p->d_parent == p1) 2249 return p; 2250 } 2251 return NULL; 2252 } 2253 2254 /* 2255 * This helper attempts to cope with remotely renamed directories 2256 * 2257 * It assumes that the caller is already holding 2258 * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock 2259 * 2260 * Note: If ever the locking in lock_rename() changes, then please 2261 * remember to update this too... 2262 */ 2263 static struct dentry *__d_unalias(struct inode *inode, 2264 struct dentry *dentry, struct dentry *alias) 2265 { 2266 struct mutex *m1 = NULL, *m2 = NULL; 2267 struct dentry *ret; 2268 2269 /* If alias and dentry share a parent, then no extra locks required */ 2270 if (alias->d_parent == dentry->d_parent) 2271 goto out_unalias; 2272 2273 /* See lock_rename() */ 2274 ret = ERR_PTR(-EBUSY); 2275 if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex)) 2276 goto out_err; 2277 m1 = &dentry->d_sb->s_vfs_rename_mutex; 2278 if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex)) 2279 goto out_err; 2280 m2 = &alias->d_parent->d_inode->i_mutex; 2281 out_unalias: 2282 __d_move(alias, dentry); 2283 ret = alias; 2284 out_err: 2285 spin_unlock(&inode->i_lock); 2286 if (m2) 2287 mutex_unlock(m2); 2288 if (m1) 2289 mutex_unlock(m1); 2290 return ret; 2291 } 2292 2293 /* 2294 * Prepare an anonymous dentry for life in the superblock's dentry tree as a 2295 * named dentry in place of the dentry to be replaced. 2296 * returns with anon->d_lock held! 2297 */ 2298 static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon) 2299 { 2300 struct dentry *dparent, *aparent; 2301 2302 dentry_lock_for_move(anon, dentry); 2303 2304 write_seqcount_begin(&dentry->d_seq); 2305 write_seqcount_begin(&anon->d_seq); 2306 2307 dparent = dentry->d_parent; 2308 aparent = anon->d_parent; 2309 2310 switch_names(dentry, anon); 2311 swap(dentry->d_name.hash, anon->d_name.hash); 2312 2313 dentry->d_parent = (aparent == anon) ? 
dentry : aparent; 2314 list_del(&dentry->d_u.d_child); 2315 if (!IS_ROOT(dentry)) 2316 list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs); 2317 else 2318 INIT_LIST_HEAD(&dentry->d_u.d_child); 2319 2320 anon->d_parent = (dparent == dentry) ? anon : dparent; 2321 list_del(&anon->d_u.d_child); 2322 if (!IS_ROOT(anon)) 2323 list_add(&anon->d_u.d_child, &anon->d_parent->d_subdirs); 2324 else 2325 INIT_LIST_HEAD(&anon->d_u.d_child); 2326 2327 write_seqcount_end(&dentry->d_seq); 2328 write_seqcount_end(&anon->d_seq); 2329 2330 dentry_unlock_parents_for_move(anon, dentry); 2331 spin_unlock(&dentry->d_lock); 2332 2333 /* anon->d_lock still locked, returns locked */ 2334 anon->d_flags &= ~DCACHE_DISCONNECTED; 2335 } 2336 2337 /** 2338 * d_materialise_unique - introduce an inode into the tree 2339 * @dentry: candidate dentry 2340 * @inode: inode to bind to the dentry, to which aliases may be attached 2341 * 2342 * Introduces an dentry into the tree, substituting an extant disconnected 2343 * root directory alias in its place if there is one. Caller must hold the 2344 * i_mutex of the parent directory. 2345 */ 2346 struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) 2347 { 2348 struct dentry *actual; 2349 2350 BUG_ON(!d_unhashed(dentry)); 2351 2352 if (!inode) { 2353 actual = dentry; 2354 __d_instantiate(dentry, NULL); 2355 d_rehash(actual); 2356 goto out_nolock; 2357 } 2358 2359 spin_lock(&inode->i_lock); 2360 2361 if (S_ISDIR(inode->i_mode)) { 2362 struct dentry *alias; 2363 2364 /* Does an aliased dentry already exist? */ 2365 alias = __d_find_alias(inode, 0); 2366 if (alias) { 2367 actual = alias; 2368 write_seqlock(&rename_lock); 2369 2370 if (d_ancestor(alias, dentry)) { 2371 /* Check for loops */ 2372 actual = ERR_PTR(-ELOOP); 2373 } else if (IS_ROOT(alias)) { 2374 /* Is this an anonymous mountpoint that we 2375 * could splice into our tree? */ 2376 __d_materialise_dentry(dentry, alias); 2377 write_sequnlock(&rename_lock); 2378 __d_drop(alias); 2379 goto found; 2380 } else { 2381 /* Nope, but we must(!) avoid directory 2382 * aliasing */ 2383 actual = __d_unalias(inode, dentry, alias); 2384 } 2385 write_sequnlock(&rename_lock); 2386 if (IS_ERR(actual)) 2387 dput(alias); 2388 goto out_nolock; 2389 } 2390 } 2391 2392 /* Add a unique reference */ 2393 actual = __d_instantiate_unique(dentry, inode); 2394 if (!actual) 2395 actual = dentry; 2396 else 2397 BUG_ON(!d_unhashed(actual)); 2398 2399 spin_lock(&actual->d_lock); 2400 found: 2401 _d_rehash(actual); 2402 spin_unlock(&actual->d_lock); 2403 spin_unlock(&inode->i_lock); 2404 out_nolock: 2405 if (actual == dentry) { 2406 security_d_instantiate(dentry, inode); 2407 return NULL; 2408 } 2409 2410 iput(inode); 2411 return actual; 2412 } 2413 EXPORT_SYMBOL_GPL(d_materialise_unique); 2414 2415 static int prepend(char **buffer, int *buflen, const char *str, int namelen) 2416 { 2417 *buflen -= namelen; 2418 if (*buflen < 0) 2419 return -ENAMETOOLONG; 2420 *buffer -= namelen; 2421 memcpy(*buffer, str, namelen); 2422 return 0; 2423 } 2424 2425 static int prepend_name(char **buffer, int *buflen, struct qstr *name) 2426 { 2427 return prepend(buffer, buflen, name->name, name->len); 2428 } 2429 2430 /** 2431 * prepend_path - Prepend path string to a buffer 2432 * @path: the dentry/vfsmount to report 2433 * @root: root vfsmnt/dentry (may be modified by this function) 2434 * @buffer: pointer to the end of the buffer 2435 * @buflen: pointer to buffer length 2436 * 2437 * Caller holds the rename_lock. 
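 *
 * (A worked example of the prepend() convention used here, with a
 *  hypothetical 16-byte buffer: the string is assembled right to left,
 *  so after
 *
 *	char buf[16];
 *	char *p = buf + 16;
 *	int len = 16;
 *
 *	prepend(&p, &len, "\0", 1);
 *	prepend(&p, &len, "bar", 3);
 *	prepend(&p, &len, "/", 1);
 *	prepend(&p, &len, "foo", 3);
 *	prepend(&p, &len, "/", 1);
 *
 *  p points at the string "/foo/bar" (at buf + 7) and len == 7 bytes of
 *  the buffer are still unused.  A prepend() that does not fit leaves
 *  the length negative and returns -ENAMETOOLONG.)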
2438 * 2439 * If path is not reachable from the supplied root, then the value of 2440 * root is changed (without modifying refcounts). 2441 */ 2442 static int prepend_path(const struct path *path, struct path *root, 2443 char **buffer, int *buflen) 2444 { 2445 struct dentry *dentry = path->dentry; 2446 struct vfsmount *vfsmnt = path->mnt; 2447 bool slash = false; 2448 int error = 0; 2449 2450 br_read_lock(vfsmount_lock); 2451 while (dentry != root->dentry || vfsmnt != root->mnt) { 2452 struct dentry * parent; 2453 2454 if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) { 2455 /* Global root? */ 2456 if (vfsmnt->mnt_parent == vfsmnt) { 2457 goto global_root; 2458 } 2459 dentry = vfsmnt->mnt_mountpoint; 2460 vfsmnt = vfsmnt->mnt_parent; 2461 continue; 2462 } 2463 parent = dentry->d_parent; 2464 prefetch(parent); 2465 spin_lock(&dentry->d_lock); 2466 error = prepend_name(buffer, buflen, &dentry->d_name); 2467 spin_unlock(&dentry->d_lock); 2468 if (!error) 2469 error = prepend(buffer, buflen, "/", 1); 2470 if (error) 2471 break; 2472 2473 slash = true; 2474 dentry = parent; 2475 } 2476 2477 out: 2478 if (!error && !slash) 2479 error = prepend(buffer, buflen, "/", 1); 2480 2481 br_read_unlock(vfsmount_lock); 2482 return error; 2483 2484 global_root: 2485 /* 2486 * Filesystems needing to implement special "root names" 2487 * should do so with ->d_dname() 2488 */ 2489 if (IS_ROOT(dentry) && 2490 (dentry->d_name.len != 1 || dentry->d_name.name[0] != '/')) { 2491 WARN(1, "Root dentry has weird name <%.*s>\n", 2492 (int) dentry->d_name.len, dentry->d_name.name); 2493 } 2494 root->mnt = vfsmnt; 2495 root->dentry = dentry; 2496 goto out; 2497 } 2498 2499 /** 2500 * __d_path - return the path of a dentry 2501 * @path: the dentry/vfsmount to report 2502 * @root: root vfsmnt/dentry (may be modified by this function) 2503 * @buf: buffer to return value in 2504 * @buflen: buffer length 2505 * 2506 * Convert a dentry into an ASCII path name. 2507 * 2508 * Returns a pointer into the buffer or an error code if the 2509 * path was too long. 2510 * 2511 * "buflen" should be positive. 2512 * 2513 * If path is not reachable from the supplied root, then the value of 2514 * root is changed (without modifying refcounts). 2515 */ 2516 char *__d_path(const struct path *path, struct path *root, 2517 char *buf, int buflen) 2518 { 2519 char *res = buf + buflen; 2520 int error; 2521 2522 prepend(&res, &buflen, "\0", 1); 2523 write_seqlock(&rename_lock); 2524 error = prepend_path(path, root, &res, &buflen); 2525 write_sequnlock(&rename_lock); 2526 2527 if (error) 2528 return ERR_PTR(error); 2529 return res; 2530 } 2531 2532 /* 2533 * same as __d_path but appends "(deleted)" for unlinked files. 2534 */ 2535 static int path_with_deleted(const struct path *path, struct path *root, 2536 char **buf, int *buflen) 2537 { 2538 prepend(buf, buflen, "\0", 1); 2539 if (d_unlinked(path->dentry)) { 2540 int error = prepend(buf, buflen, " (deleted)", 10); 2541 if (error) 2542 return error; 2543 } 2544 2545 return prepend_path(path, root, buf, buflen); 2546 } 2547 2548 static int prepend_unreachable(char **buffer, int *buflen) 2549 { 2550 return prepend(buffer, buflen, "(unreachable)", 13); 2551 } 2552 2553 /** 2554 * d_path - return the path of a dentry 2555 * @path: path to report 2556 * @buf: buffer to return value in 2557 * @buflen: buffer length 2558 * 2559 * Convert a dentry into an ASCII path name. If the entry has been deleted 2560 * the string " (deleted)" is appended. Note that this is ambiguous. 
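 *
 * (A rough sketch of the usual calling convention; using a whole page is
 *  only a convention, "file" stands for a struct file the caller already
 *  holds, and the pr_info() is purely illustrative:
 *
 *	char *page = (char *)__get_free_page(GFP_KERNEL);
 *	char *p;
 *
 *	if (!page)
 *		return -ENOMEM;
 *	p = d_path(&file->f_path, page, PAGE_SIZE);
 *	if (!IS_ERR(p))
 *		pr_info("open file: %s\n", p);
 *	free_page((unsigned long)page);
 *
 *  It is p, not page, that must be used, for the reason spelled out just
 *  below.)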
2561 * 2562 * Returns a pointer into the buffer or an error code if the path was 2563 * too long. Note: Callers should use the returned pointer, not the passed 2564 * in buffer, to use the name! The implementation often starts at an offset 2565 * into the buffer, and may leave 0 bytes at the start. 2566 * 2567 * "buflen" should be positive. 2568 */ 2569 char *d_path(const struct path *path, char *buf, int buflen) 2570 { 2571 char *res = buf + buflen; 2572 struct path root; 2573 struct path tmp; 2574 int error; 2575 2576 /* 2577 * We have various synthetic filesystems that never get mounted. On 2578 * these filesystems dentries are never used for lookup purposes, and 2579 * thus don't need to be hashed. They also don't need a name until a 2580 * user wants to identify the object in /proc/pid/fd/. The little hack 2581 * below allows us to generate a name for these objects on demand: 2582 */ 2583 if (path->dentry->d_op && path->dentry->d_op->d_dname) 2584 return path->dentry->d_op->d_dname(path->dentry, buf, buflen); 2585 2586 get_fs_root(current->fs, &root); 2587 write_seqlock(&rename_lock); 2588 tmp = root; 2589 error = path_with_deleted(path, &tmp, &res, &buflen); 2590 if (error) 2591 res = ERR_PTR(error); 2592 write_sequnlock(&rename_lock); 2593 path_put(&root); 2594 return res; 2595 } 2596 EXPORT_SYMBOL(d_path); 2597 2598 /** 2599 * d_path_with_unreachable - return the path of a dentry 2600 * @path: path to report 2601 * @buf: buffer to return value in 2602 * @buflen: buffer length 2603 * 2604 * The difference from d_path() is that this prepends "(unreachable)" 2605 * to paths which are unreachable from the current process' root. 2606 */ 2607 char *d_path_with_unreachable(const struct path *path, char *buf, int buflen) 2608 { 2609 char *res = buf + buflen; 2610 struct path root; 2611 struct path tmp; 2612 int error; 2613 2614 if (path->dentry->d_op && path->dentry->d_op->d_dname) 2615 return path->dentry->d_op->d_dname(path->dentry, buf, buflen); 2616 2617 get_fs_root(current->fs, &root); 2618 write_seqlock(&rename_lock); 2619 tmp = root; 2620 error = path_with_deleted(path, &tmp, &res, &buflen); 2621 if (!error && !path_equal(&tmp, &root)) 2622 error = prepend_unreachable(&res, &buflen); 2623 write_sequnlock(&rename_lock); 2624 path_put(&root); 2625 if (error) 2626 res = ERR_PTR(error); 2627 2628 return res; 2629 } 2630 2631 /* 2632 * Helper function for dentry_operations.d_dname() members 2633 */ 2634 char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen, 2635 const char *fmt, ...) 2636 { 2637 va_list args; 2638 char temp[64]; 2639 int sz; 2640 2641 va_start(args, fmt); 2642 sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1; 2643 va_end(args); 2644 2645 if (sz > sizeof(temp) || sz > buflen) 2646 return ERR_PTR(-ENAMETOOLONG); 2647 2648 buffer += buflen - sz; 2649 return memcpy(buffer, temp, sz); 2650 } 2651 2652 /* 2653 * Write full pathname from the root of the filesystem into the buffer. 
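 *
 * (An aside on dynamic_dname() above rather than on this function: a
 *  dentry_operations.d_dname() instance is normally a one-line wrapper
 *  around it.  Pipefs-style code does roughly the following, where the
 *  name example_dname and the format string are illustrative only:
 *
 *	static char *example_dname(struct dentry *dentry, char *buffer,
 *				   int buflen)
 *	{
 *		return dynamic_dname(dentry, buffer, buflen, "example:[%lu]",
 *				dentry->d_inode->i_ino);
 *	}
 *
 *  That lets d_path() synthesize an "example:[1234]" style name for
 *  objects that are never hashed.)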
2654 */ 2655 static char *__dentry_path(struct dentry *dentry, char *buf, int buflen) 2656 { 2657 char *end = buf + buflen; 2658 char *retval; 2659 2660 prepend(&end, &buflen, "\0", 1); 2661 if (buflen < 1) 2662 goto Elong; 2663 /* Get '/' right */ 2664 retval = end-1; 2665 *retval = '/'; 2666 2667 while (!IS_ROOT(dentry)) { 2668 struct dentry *parent = dentry->d_parent; 2669 int error; 2670 2671 prefetch(parent); 2672 spin_lock(&dentry->d_lock); 2673 error = prepend_name(&end, &buflen, &dentry->d_name); 2674 spin_unlock(&dentry->d_lock); 2675 if (error != 0 || prepend(&end, &buflen, "/", 1) != 0) 2676 goto Elong; 2677 2678 retval = end; 2679 dentry = parent; 2680 } 2681 return retval; 2682 Elong: 2683 return ERR_PTR(-ENAMETOOLONG); 2684 } 2685 2686 char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen) 2687 { 2688 char *retval; 2689 2690 write_seqlock(&rename_lock); 2691 retval = __dentry_path(dentry, buf, buflen); 2692 write_sequnlock(&rename_lock); 2693 2694 return retval; 2695 } 2696 EXPORT_SYMBOL(dentry_path_raw); 2697 2698 char *dentry_path(struct dentry *dentry, char *buf, int buflen) 2699 { 2700 char *p = NULL; 2701 char *retval; 2702 2703 write_seqlock(&rename_lock); 2704 if (d_unlinked(dentry)) { 2705 p = buf + buflen; 2706 if (prepend(&p, &buflen, "//deleted", 10) != 0) 2707 goto Elong; 2708 buflen++; 2709 } 2710 retval = __dentry_path(dentry, buf, buflen); 2711 write_sequnlock(&rename_lock); 2712 if (!IS_ERR(retval) && p) 2713 *p = '/'; /* restore '/' overriden with '\0' */ 2714 return retval; 2715 Elong: 2716 return ERR_PTR(-ENAMETOOLONG); 2717 } 2718 2719 /* 2720 * NOTE! The user-level library version returns a 2721 * character pointer. The kernel system call just 2722 * returns the length of the buffer filled (which 2723 * includes the ending '\0' character), or a negative 2724 * error value. So libc would do something like 2725 * 2726 * char *getcwd(char * buf, size_t size) 2727 * { 2728 * int retval; 2729 * 2730 * retval = sys_getcwd(buf, size); 2731 * if (retval >= 0) 2732 * return buf; 2733 * errno = -retval; 2734 * return NULL; 2735 * } 2736 */ 2737 SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size) 2738 { 2739 int error; 2740 struct path pwd, root; 2741 char *page = (char *) __get_free_page(GFP_USER); 2742 2743 if (!page) 2744 return -ENOMEM; 2745 2746 get_fs_root_and_pwd(current->fs, &root, &pwd); 2747 2748 error = -ENOENT; 2749 write_seqlock(&rename_lock); 2750 if (!d_unlinked(pwd.dentry)) { 2751 unsigned long len; 2752 struct path tmp = root; 2753 char *cwd = page + PAGE_SIZE; 2754 int buflen = PAGE_SIZE; 2755 2756 prepend(&cwd, &buflen, "\0", 1); 2757 error = prepend_path(&pwd, &tmp, &cwd, &buflen); 2758 write_sequnlock(&rename_lock); 2759 2760 if (error) 2761 goto out; 2762 2763 /* Unreachable from current root */ 2764 if (!path_equal(&tmp, &root)) { 2765 error = prepend_unreachable(&cwd, &buflen); 2766 if (error) 2767 goto out; 2768 } 2769 2770 error = -ERANGE; 2771 len = PAGE_SIZE + page - cwd; 2772 if (len <= size) { 2773 error = len; 2774 if (copy_to_user(buf, cwd, len)) 2775 error = -EFAULT; 2776 } 2777 } else { 2778 write_sequnlock(&rename_lock); 2779 } 2780 2781 out: 2782 path_put(&pwd); 2783 path_put(&root); 2784 free_page((unsigned long) page); 2785 return error; 2786 } 2787 2788 /* 2789 * Test whether new_dentry is a subdirectory of old_dentry. 
2790 * 2791 * Trivially implemented using the dcache structure 2792 */ 2793 2794 /** 2795 * is_subdir - is new dentry a subdirectory of old_dentry 2796 * @new_dentry: new dentry 2797 * @old_dentry: old dentry 2798 * 2799 * Returns 1 if new_dentry is a subdirectory of old_dentry (at any depth). 2800 * Returns 0 otherwise. 2801 * Caller must ensure that "new_dentry" is pinned before calling is_subdir() 2802 */ 2803 2804 int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry) 2805 { 2806 int result; 2807 unsigned seq; 2808 2809 if (new_dentry == old_dentry) 2810 return 1; 2811 2812 do { 2813 /* for restarting inner loop in case of seq retry */ 2814 seq = read_seqbegin(&rename_lock); 2815 /* 2816 * Need rcu_read_lock() to protect against the d_parent chain 2817 * being changed under us by d_move() 2818 */ 2819 rcu_read_lock(); 2820 if (d_ancestor(old_dentry, new_dentry)) 2821 result = 1; 2822 else 2823 result = 0; 2824 rcu_read_unlock(); 2825 } while (read_seqretry(&rename_lock, seq)); 2826 2827 return result; 2828 } 2829 2830 int path_is_under(struct path *path1, struct path *path2) 2831 { 2832 struct vfsmount *mnt = path1->mnt; 2833 struct dentry *dentry = path1->dentry; 2834 int res; 2835 2836 br_read_lock(vfsmount_lock); 2837 if (mnt != path2->mnt) { 2838 for (;;) { 2839 if (mnt->mnt_parent == mnt) { 2840 br_read_unlock(vfsmount_lock); 2841 return 0; 2842 } 2843 if (mnt->mnt_parent == path2->mnt) 2844 break; 2845 mnt = mnt->mnt_parent; 2846 } 2847 dentry = mnt->mnt_mountpoint; 2848 } 2849 res = is_subdir(dentry, path2->dentry); 2850 br_read_unlock(vfsmount_lock); 2851 return res; 2852 } 2853 EXPORT_SYMBOL(path_is_under); 2854 2855 void d_genocide(struct dentry *root) 2856 { 2857 struct dentry *this_parent; 2858 struct list_head *next; 2859 unsigned seq; 2860 int locked = 0; 2861 2862 seq = read_seqbegin(&rename_lock); 2863 again: 2864 this_parent = root; 2865 spin_lock(&this_parent->d_lock); 2866 repeat: 2867 next = this_parent->d_subdirs.next; 2868 resume: 2869 while (next != &this_parent->d_subdirs) { 2870 struct list_head *tmp = next; 2871 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); 2872 next = tmp->next; 2873 2874 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); 2875 if (d_unhashed(dentry) || !dentry->d_inode) { 2876 spin_unlock(&dentry->d_lock); 2877 continue; 2878 } 2879 if (!list_empty(&dentry->d_subdirs)) { 2880 spin_unlock(&this_parent->d_lock); 2881 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_); 2882 this_parent = dentry; 2883 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_); 2884 goto repeat; 2885 } 2886 if (!(dentry->d_flags & DCACHE_GENOCIDE)) { 2887 dentry->d_flags |= DCACHE_GENOCIDE; 2888 dentry->d_count--; 2889 } 2890 spin_unlock(&dentry->d_lock); 2891 } 2892 if (this_parent != root) { 2893 struct dentry *child = this_parent; 2894 if (!(this_parent->d_flags & DCACHE_GENOCIDE)) { 2895 this_parent->d_flags |= DCACHE_GENOCIDE; 2896 this_parent->d_count--; 2897 } 2898 this_parent = try_to_ascend(this_parent, locked, seq); 2899 if (!this_parent) 2900 goto rename_retry; 2901 next = child->d_u.d_child.next; 2902 goto resume; 2903 } 2904 spin_unlock(&this_parent->d_lock); 2905 if (!locked && read_seqretry(&rename_lock, seq)) 2906 goto rename_retry; 2907 if (locked) 2908 write_sequnlock(&rename_lock); 2909 return; 2910 2911 rename_retry: 2912 locked = 1; 2913 write_seqlock(&rename_lock); 2914 goto again; 2915 } 2916 2917 /** 2918 * find_inode_number - check for dentry with name 2919 * @dir: directory to check 2920 * @name: Name to find.
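 *
 * (A rough sketch of the classic readdir-side use, where de_name, de_len
 *  and fake_ino are illustrative names only:
 *
 *	struct qstr q = { .name = de_name, .len = de_len };
 *	ino_t ino = find_inode_number(filp->f_path.dentry, &q);
 *
 *	if (!ino)
 *		ino = fake_ino;
 *
 *  falling back to a synthetic number when no dentry with an inode exists.
 *  q.hash does not need to be filled in; find_inode_number() hashes the
 *  name itself via d_hash_and_lookup().)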
2921 * 2922 * Check whether a dentry already exists for the given name, 2923 * and return the inode number if it has an inode. Otherwise 2924 * 0 is returned. 2925 * 2926 * This routine is used to post-process directory listings for 2927 * filesystems using synthetic inode numbers, and is necessary 2928 * to keep getcwd() working. 2929 */ 2930 2931 ino_t find_inode_number(struct dentry *dir, struct qstr *name) 2932 { 2933 struct dentry * dentry; 2934 ino_t ino = 0; 2935 2936 dentry = d_hash_and_lookup(dir, name); 2937 if (dentry) { 2938 if (dentry->d_inode) 2939 ino = dentry->d_inode->i_ino; 2940 dput(dentry); 2941 } 2942 return ino; 2943 } 2944 EXPORT_SYMBOL(find_inode_number); 2945 2946 static __initdata unsigned long dhash_entries; 2947 static int __init set_dhash_entries(char *str) 2948 { 2949 if (!str) 2950 return 0; 2951 dhash_entries = simple_strtoul(str, &str, 0); 2952 return 1; 2953 } 2954 __setup("dhash_entries=", set_dhash_entries); 2955 2956 static void __init dcache_init_early(void) 2957 { 2958 int loop; 2959 2960 /* If hashes are distributed across NUMA nodes, defer 2961 * hash allocation until vmalloc space is available. 2962 */ 2963 if (hashdist) 2964 return; 2965 2966 dentry_hashtable = 2967 alloc_large_system_hash("Dentry cache", 2968 sizeof(struct hlist_bl_head), 2969 dhash_entries, 2970 13, 2971 HASH_EARLY, 2972 &d_hash_shift, 2973 &d_hash_mask, 2974 0); 2975 2976 for (loop = 0; loop < (1 << d_hash_shift); loop++) 2977 INIT_HLIST_BL_HEAD(dentry_hashtable + loop); 2978 } 2979 2980 static void __init dcache_init(void) 2981 { 2982 int loop; 2983 2984 /* 2985 * A constructor could be added for stable state like the lists, 2986 * but it is probably not worth it because of the cache nature 2987 * of the dcache. 2988 */ 2989 dentry_cache = KMEM_CACHE(dentry, 2990 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD); 2991 2992 /* Hash may have been set up in dcache_init_early */ 2993 if (!hashdist) 2994 return; 2995 2996 dentry_hashtable = 2997 alloc_large_system_hash("Dentry cache", 2998 sizeof(struct hlist_bl_head), 2999 dhash_entries, 3000 13, 3001 0, 3002 &d_hash_shift, 3003 &d_hash_mask, 3004 0); 3005 3006 for (loop = 0; loop < (1 << d_hash_shift); loop++) 3007 INIT_HLIST_BL_HEAD(dentry_hashtable + loop); 3008 } 3009 3010 /* SLAB cache for __getname() consumers */ 3011 struct kmem_cache *names_cachep __read_mostly; 3012 EXPORT_SYMBOL(names_cachep); 3013 3014 EXPORT_SYMBOL(d_genocide); 3015 3016 void __init vfs_caches_init_early(void) 3017 { 3018 dcache_init_early(); 3019 inode_init_early(); 3020 } 3021 3022 void __init vfs_caches_init(unsigned long mempages) 3023 { 3024 unsigned long reserve; 3025 3026 /* Base hash sizes on available memory, with a reserve equal to 3027 150% of current kernel size */ 3028 3029 reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1); 3030 mempages -= reserve; 3031 3032 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, 3033 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 3034 3035 dcache_init(); 3036 inode_init(); 3037 files_init(mempages); 3038 mnt_init(); 3039 bdev_cache_init(); 3040 chrdev_init(); 3041 } 3042
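/*
 * (A closing usage note on the boot parameter handled by set_dhash_entries()
 *  above: booting with, say,
 *
 *	dhash_entries=131072
 *
 *  on the kernel command line makes dcache_init_early()/dcache_init() pass
 *  that entry count to alloc_large_system_hash() instead of letting it size
 *  the dentry hash table from the amount of memory; the value shown is
 *  illustrative only.)
 */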