1 /* 2 * fs/dcache.c 3 * 4 * Complete reimplementation 5 * (C) 1997 Thomas Schoebel-Theuer, 6 * with heavy changes by Linus Torvalds 7 */ 8 9 /* 10 * Notes on the allocation strategy: 11 * 12 * The dcache is a master of the icache - whenever a dcache entry 13 * exists, the inode will always exist. "iput()" is done either when 14 * the dcache entry is deleted or garbage collected. 15 */ 16 17 #include <linux/syscalls.h> 18 #include <linux/string.h> 19 #include <linux/mm.h> 20 #include <linux/fs.h> 21 #include <linux/fsnotify.h> 22 #include <linux/slab.h> 23 #include <linux/init.h> 24 #include <linux/hash.h> 25 #include <linux/cache.h> 26 #include <linux/export.h> 27 #include <linux/mount.h> 28 #include <linux/file.h> 29 #include <asm/uaccess.h> 30 #include <linux/security.h> 31 #include <linux/seqlock.h> 32 #include <linux/swap.h> 33 #include <linux/bootmem.h> 34 #include <linux/fs_struct.h> 35 #include <linux/hardirq.h> 36 #include <linux/bit_spinlock.h> 37 #include <linux/rculist_bl.h> 38 #include <linux/prefetch.h> 39 #include <linux/ratelimit.h> 40 #include "internal.h" 41 #include "mount.h" 42 43 /* 44 * Usage: 45 * dcache->d_inode->i_lock protects: 46 * - i_dentry, d_alias, d_inode of aliases 47 * dcache_hash_bucket lock protects: 48 * - the dcache hash table 49 * s_anon bl list spinlock protects: 50 * - the s_anon list (see __d_drop) 51 * dcache_lru_lock protects: 52 * - the dcache lru lists and counters 53 * d_lock protects: 54 * - d_flags 55 * - d_name 56 * - d_lru 57 * - d_count 58 * - d_unhashed() 59 * - d_parent and d_subdirs 60 * - childrens' d_child and d_parent 61 * - d_alias, d_inode 62 * 63 * Ordering: 64 * dentry->d_inode->i_lock 65 * dentry->d_lock 66 * dcache_lru_lock 67 * dcache_hash_bucket lock 68 * s_anon lock 69 * 70 * If there is an ancestor relationship: 71 * dentry->d_parent->...->d_parent->d_lock 72 * ... 73 * dentry->d_parent->d_lock 74 * dentry->d_lock 75 * 76 * If no ancestor relationship: 77 * if (dentry1 < dentry2) 78 * dentry1->d_lock 79 * dentry2->d_lock 80 */ 81 int sysctl_vfs_cache_pressure __read_mostly = 100; 82 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure); 83 84 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock); 85 __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock); 86 87 EXPORT_SYMBOL(rename_lock); 88 89 static struct kmem_cache *dentry_cache __read_mostly; 90 91 /* 92 * This is the single most critical data structure when it comes 93 * to the dcache: the hashtable for lookups. Somebody should try 94 * to make this good - I've just made it work. 95 * 96 * This hash-function tries to avoid losing too many bits of hash 97 * information, yet avoid using a prime hash-size or similar. 98 */ 99 #define D_HASHBITS d_hash_shift 100 #define D_HASHMASK d_hash_mask 101 102 static unsigned int d_hash_mask __read_mostly; 103 static unsigned int d_hash_shift __read_mostly; 104 105 static struct hlist_bl_head *dentry_hashtable __read_mostly; 106 107 static inline struct hlist_bl_head *d_hash(const struct dentry *parent, 108 unsigned int hash) 109 { 110 hash += (unsigned long) parent / L1_CACHE_BYTES; 111 hash = hash + (hash >> D_HASHBITS); 112 return dentry_hashtable + (hash & D_HASHMASK); 113 } 114 115 /* Statistics gathering. */ 116 struct dentry_stat_t dentry_stat = { 117 .age_limit = 45, 118 }; 119 120 static DEFINE_PER_CPU(unsigned int, nr_dentry); 121 122 #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS) 123 static int get_nr_dentry(void) 124 { 125 int i; 126 int sum = 0; 127 for_each_possible_cpu(i) 128 sum += per_cpu(nr_dentry, i); 129 return sum < 0 ? 0 : sum; 130 } 131 132 int proc_nr_dentry(ctl_table *table, int write, void __user *buffer, 133 size_t *lenp, loff_t *ppos) 134 { 135 dentry_stat.nr_dentry = get_nr_dentry(); 136 return proc_dointvec(table, write, buffer, lenp, ppos); 137 } 138 #endif 139 140 /* 141 * Compare 2 name strings, return 0 if they match, otherwise non-zero. 142 * The strings are both count bytes long, and count is non-zero. 143 */ 144 #ifdef CONFIG_DCACHE_WORD_ACCESS 145 146 #include <asm/word-at-a-time.h> 147 /* 148 * NOTE! 'cs' and 'scount' come from a dentry, so it has a 149 * aligned allocation for this particular component. We don't 150 * strictly need the load_unaligned_zeropad() safety, but it 151 * doesn't hurt either. 152 * 153 * In contrast, 'ct' and 'tcount' can be from a pathname, and do 154 * need the careful unaligned handling. 155 */ 156 static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount) 157 { 158 unsigned long a,b,mask; 159 160 for (;;) { 161 a = *(unsigned long *)cs; 162 b = load_unaligned_zeropad(ct); 163 if (tcount < sizeof(unsigned long)) 164 break; 165 if (unlikely(a != b)) 166 return 1; 167 cs += sizeof(unsigned long); 168 ct += sizeof(unsigned long); 169 tcount -= sizeof(unsigned long); 170 if (!tcount) 171 return 0; 172 } 173 mask = ~(~0ul << tcount*8); 174 return unlikely(!!((a ^ b) & mask)); 175 } 176 177 #else 178 179 static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount) 180 { 181 do { 182 if (*cs != *ct) 183 return 1; 184 cs++; 185 ct++; 186 tcount--; 187 } while (tcount); 188 return 0; 189 } 190 191 #endif 192 193 static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount) 194 { 195 const unsigned char *cs; 196 /* 197 * Be careful about RCU walk racing with rename: 198 * use ACCESS_ONCE to fetch the name pointer. 199 * 200 * NOTE! Even if a rename will mean that the length 201 * was not loaded atomically, we don't care. The 202 * RCU walk will check the sequence count eventually, 203 * and catch it. And we won't overrun the buffer, 204 * because we're reading the name pointer atomically, 205 * and a dentry name is guaranteed to be properly 206 * terminated with a NUL byte. 207 * 208 * End result: even if 'len' is wrong, we'll exit 209 * early because the data cannot match (there can 210 * be no NUL in the ct/tcount data) 211 */ 212 cs = ACCESS_ONCE(dentry->d_name.name); 213 smp_read_barrier_depends(); 214 return dentry_string_cmp(cs, ct, tcount); 215 } 216 217 static void __d_free(struct rcu_head *head) 218 { 219 struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu); 220 221 WARN_ON(!list_empty(&dentry->d_alias)); 222 if (dname_external(dentry)) 223 kfree(dentry->d_name.name); 224 kmem_cache_free(dentry_cache, dentry); 225 } 226 227 /* 228 * no locks, please. 229 */ 230 static void d_free(struct dentry *dentry) 231 { 232 BUG_ON(dentry->d_count); 233 this_cpu_dec(nr_dentry); 234 if (dentry->d_op && dentry->d_op->d_release) 235 dentry->d_op->d_release(dentry); 236 237 /* if dentry was never visible to RCU, immediate free is OK */ 238 if (!(dentry->d_flags & DCACHE_RCUACCESS)) 239 __d_free(&dentry->d_u.d_rcu); 240 else 241 call_rcu(&dentry->d_u.d_rcu, __d_free); 242 } 243 244 /** 245 * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups 246 * @dentry: the target dentry 247 * After this call, in-progress rcu-walk path lookup will fail. This 248 * should be called after unhashing, and after changing d_inode (if 249 * the dentry has not already been unhashed). 250 */ 251 static inline void dentry_rcuwalk_barrier(struct dentry *dentry) 252 { 253 assert_spin_locked(&dentry->d_lock); 254 /* Go through a barrier */ 255 write_seqcount_barrier(&dentry->d_seq); 256 } 257 258 /* 259 * Release the dentry's inode, using the filesystem 260 * d_iput() operation if defined. Dentry has no refcount 261 * and is unhashed. 262 */ 263 static void dentry_iput(struct dentry * dentry) 264 __releases(dentry->d_lock) 265 __releases(dentry->d_inode->i_lock) 266 { 267 struct inode *inode = dentry->d_inode; 268 if (inode) { 269 dentry->d_inode = NULL; 270 list_del_init(&dentry->d_alias); 271 spin_unlock(&dentry->d_lock); 272 spin_unlock(&inode->i_lock); 273 if (!inode->i_nlink) 274 fsnotify_inoderemove(inode); 275 if (dentry->d_op && dentry->d_op->d_iput) 276 dentry->d_op->d_iput(dentry, inode); 277 else 278 iput(inode); 279 } else { 280 spin_unlock(&dentry->d_lock); 281 } 282 } 283 284 /* 285 * Release the dentry's inode, using the filesystem 286 * d_iput() operation if defined. dentry remains in-use. 287 */ 288 static void dentry_unlink_inode(struct dentry * dentry) 289 __releases(dentry->d_lock) 290 __releases(dentry->d_inode->i_lock) 291 { 292 struct inode *inode = dentry->d_inode; 293 dentry->d_inode = NULL; 294 list_del_init(&dentry->d_alias); 295 dentry_rcuwalk_barrier(dentry); 296 spin_unlock(&dentry->d_lock); 297 spin_unlock(&inode->i_lock); 298 if (!inode->i_nlink) 299 fsnotify_inoderemove(inode); 300 if (dentry->d_op && dentry->d_op->d_iput) 301 dentry->d_op->d_iput(dentry, inode); 302 else 303 iput(inode); 304 } 305 306 /* 307 * dentry_lru_(add|del|prune|move_tail) must be called with d_lock held. 308 */ 309 static void dentry_lru_add(struct dentry *dentry) 310 { 311 if (list_empty(&dentry->d_lru)) { 312 spin_lock(&dcache_lru_lock); 313 list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru); 314 dentry->d_sb->s_nr_dentry_unused++; 315 dentry_stat.nr_unused++; 316 spin_unlock(&dcache_lru_lock); 317 } 318 } 319 320 static void __dentry_lru_del(struct dentry *dentry) 321 { 322 list_del_init(&dentry->d_lru); 323 dentry->d_flags &= ~DCACHE_SHRINK_LIST; 324 dentry->d_sb->s_nr_dentry_unused--; 325 dentry_stat.nr_unused--; 326 } 327 328 /* 329 * Remove a dentry with references from the LRU. 330 */ 331 static void dentry_lru_del(struct dentry *dentry) 332 { 333 if (!list_empty(&dentry->d_lru)) { 334 spin_lock(&dcache_lru_lock); 335 __dentry_lru_del(dentry); 336 spin_unlock(&dcache_lru_lock); 337 } 338 } 339 340 /* 341 * Remove a dentry that is unreferenced and about to be pruned 342 * (unhashed and destroyed) from the LRU, and inform the file system. 343 * This wrapper should be called _prior_ to unhashing a victim dentry. 344 */ 345 static void dentry_lru_prune(struct dentry *dentry) 346 { 347 if (!list_empty(&dentry->d_lru)) { 348 if (dentry->d_flags & DCACHE_OP_PRUNE) 349 dentry->d_op->d_prune(dentry); 350 351 spin_lock(&dcache_lru_lock); 352 __dentry_lru_del(dentry); 353 spin_unlock(&dcache_lru_lock); 354 } 355 } 356 357 static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list) 358 { 359 spin_lock(&dcache_lru_lock); 360 if (list_empty(&dentry->d_lru)) { 361 list_add_tail(&dentry->d_lru, list); 362 dentry->d_sb->s_nr_dentry_unused++; 363 dentry_stat.nr_unused++; 364 } else { 365 list_move_tail(&dentry->d_lru, list); 366 } 367 spin_unlock(&dcache_lru_lock); 368 } 369 370 /** 371 * d_kill - kill dentry and return parent 372 * @dentry: dentry to kill 373 * @parent: parent dentry 374 * 375 * The dentry must already be unhashed and removed from the LRU. 376 * 377 * If this is the root of the dentry tree, return NULL. 378 * 379 * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by 380 * d_kill. 381 */ 382 static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent) 383 __releases(dentry->d_lock) 384 __releases(parent->d_lock) 385 __releases(dentry->d_inode->i_lock) 386 { 387 list_del(&dentry->d_u.d_child); 388 /* 389 * Inform try_to_ascend() that we are no longer attached to the 390 * dentry tree 391 */ 392 dentry->d_flags |= DCACHE_DISCONNECTED; 393 if (parent) 394 spin_unlock(&parent->d_lock); 395 dentry_iput(dentry); 396 /* 397 * dentry_iput drops the locks, at which point nobody (except 398 * transient RCU lookups) can reach this dentry. 399 */ 400 d_free(dentry); 401 return parent; 402 } 403 404 /* 405 * Unhash a dentry without inserting an RCU walk barrier or checking that 406 * dentry->d_lock is locked. The caller must take care of that, if 407 * appropriate. 408 */ 409 static void __d_shrink(struct dentry *dentry) 410 { 411 if (!d_unhashed(dentry)) { 412 struct hlist_bl_head *b; 413 if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED)) 414 b = &dentry->d_sb->s_anon; 415 else 416 b = d_hash(dentry->d_parent, dentry->d_name.hash); 417 418 hlist_bl_lock(b); 419 __hlist_bl_del(&dentry->d_hash); 420 dentry->d_hash.pprev = NULL; 421 hlist_bl_unlock(b); 422 } 423 } 424 425 /** 426 * d_drop - drop a dentry 427 * @dentry: dentry to drop 428 * 429 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't 430 * be found through a VFS lookup any more. Note that this is different from 431 * deleting the dentry - d_delete will try to mark the dentry negative if 432 * possible, giving a successful _negative_ lookup, while d_drop will 433 * just make the cache lookup fail. 434 * 435 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some 436 * reason (NFS timeouts or autofs deletes). 437 * 438 * __d_drop requires dentry->d_lock. 439 */ 440 void __d_drop(struct dentry *dentry) 441 { 442 if (!d_unhashed(dentry)) { 443 __d_shrink(dentry); 444 dentry_rcuwalk_barrier(dentry); 445 } 446 } 447 EXPORT_SYMBOL(__d_drop); 448 449 void d_drop(struct dentry *dentry) 450 { 451 spin_lock(&dentry->d_lock); 452 __d_drop(dentry); 453 spin_unlock(&dentry->d_lock); 454 } 455 EXPORT_SYMBOL(d_drop); 456 457 /* 458 * d_clear_need_lookup - drop a dentry from cache and clear the need lookup flag 459 * @dentry: dentry to drop 460 * 461 * This is called when we do a lookup on a placeholder dentry that needed to be 462 * looked up. The dentry should have been hashed in order for it to be found by 463 * the lookup code, but now needs to be unhashed while we do the actual lookup 464 * and clear the DCACHE_NEED_LOOKUP flag. 465 */ 466 void d_clear_need_lookup(struct dentry *dentry) 467 { 468 spin_lock(&dentry->d_lock); 469 __d_drop(dentry); 470 dentry->d_flags &= ~DCACHE_NEED_LOOKUP; 471 spin_unlock(&dentry->d_lock); 472 } 473 EXPORT_SYMBOL(d_clear_need_lookup); 474 475 /* 476 * Finish off a dentry we've decided to kill. 477 * dentry->d_lock must be held, returns with it unlocked. 478 * If ref is non-zero, then decrement the refcount too. 479 * Returns dentry requiring refcount drop, or NULL if we're done. 480 */ 481 static inline struct dentry *dentry_kill(struct dentry *dentry, int ref) 482 __releases(dentry->d_lock) 483 { 484 struct inode *inode; 485 struct dentry *parent; 486 487 inode = dentry->d_inode; 488 if (inode && !spin_trylock(&inode->i_lock)) { 489 relock: 490 spin_unlock(&dentry->d_lock); 491 cpu_relax(); 492 return dentry; /* try again with same dentry */ 493 } 494 if (IS_ROOT(dentry)) 495 parent = NULL; 496 else 497 parent = dentry->d_parent; 498 if (parent && !spin_trylock(&parent->d_lock)) { 499 if (inode) 500 spin_unlock(&inode->i_lock); 501 goto relock; 502 } 503 504 if (ref) 505 dentry->d_count--; 506 /* 507 * if dentry was on the d_lru list delete it from there. 508 * inform the fs via d_prune that this dentry is about to be 509 * unhashed and destroyed. 510 */ 511 dentry_lru_prune(dentry); 512 /* if it was on the hash then remove it */ 513 __d_drop(dentry); 514 return d_kill(dentry, parent); 515 } 516 517 /* 518 * This is dput 519 * 520 * This is complicated by the fact that we do not want to put 521 * dentries that are no longer on any hash chain on the unused 522 * list: we'd much rather just get rid of them immediately. 523 * 524 * However, that implies that we have to traverse the dentry 525 * tree upwards to the parents which might _also_ now be 526 * scheduled for deletion (it may have been only waiting for 527 * its last child to go away). 528 * 529 * This tail recursion is done by hand as we don't want to depend 530 * on the compiler to always get this right (gcc generally doesn't). 531 * Real recursion would eat up our stack space. 532 */ 533 534 /* 535 * dput - release a dentry 536 * @dentry: dentry to release 537 * 538 * Release a dentry. This will drop the usage count and if appropriate 539 * call the dentry unlink method as well as removing it from the queues and 540 * releasing its resources. If the parent dentries were scheduled for release 541 * they too may now get deleted. 542 */ 543 void dput(struct dentry *dentry) 544 { 545 if (!dentry) 546 return; 547 548 repeat: 549 if (dentry->d_count == 1) 550 might_sleep(); 551 spin_lock(&dentry->d_lock); 552 BUG_ON(!dentry->d_count); 553 if (dentry->d_count > 1) { 554 dentry->d_count--; 555 spin_unlock(&dentry->d_lock); 556 return; 557 } 558 559 if (dentry->d_flags & DCACHE_OP_DELETE) { 560 if (dentry->d_op->d_delete(dentry)) 561 goto kill_it; 562 } 563 564 /* Unreachable? Get rid of it */ 565 if (d_unhashed(dentry)) 566 goto kill_it; 567 568 /* 569 * If this dentry needs lookup, don't set the referenced flag so that it 570 * is more likely to be cleaned up by the dcache shrinker in case of 571 * memory pressure. 572 */ 573 if (!d_need_lookup(dentry)) 574 dentry->d_flags |= DCACHE_REFERENCED; 575 dentry_lru_add(dentry); 576 577 dentry->d_count--; 578 spin_unlock(&dentry->d_lock); 579 return; 580 581 kill_it: 582 dentry = dentry_kill(dentry, 1); 583 if (dentry) 584 goto repeat; 585 } 586 EXPORT_SYMBOL(dput); 587 588 /** 589 * d_invalidate - invalidate a dentry 590 * @dentry: dentry to invalidate 591 * 592 * Try to invalidate the dentry if it turns out to be 593 * possible. If there are other dentries that can be 594 * reached through this one we can't delete it and we 595 * return -EBUSY. On success we return 0. 596 * 597 * no dcache lock. 598 */ 599 600 int d_invalidate(struct dentry * dentry) 601 { 602 /* 603 * If it's already been dropped, return OK. 604 */ 605 spin_lock(&dentry->d_lock); 606 if (d_unhashed(dentry)) { 607 spin_unlock(&dentry->d_lock); 608 return 0; 609 } 610 /* 611 * Check whether to do a partial shrink_dcache 612 * to get rid of unused child entries. 613 */ 614 if (!list_empty(&dentry->d_subdirs)) { 615 spin_unlock(&dentry->d_lock); 616 shrink_dcache_parent(dentry); 617 spin_lock(&dentry->d_lock); 618 } 619 620 /* 621 * Somebody else still using it? 622 * 623 * If it's a directory, we can't drop it 624 * for fear of somebody re-populating it 625 * with children (even though dropping it 626 * would make it unreachable from the root, 627 * we might still populate it if it was a 628 * working directory or similar). 629 * We also need to leave mountpoints alone, 630 * directory or not. 631 */ 632 if (dentry->d_count > 1 && dentry->d_inode) { 633 if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) { 634 spin_unlock(&dentry->d_lock); 635 return -EBUSY; 636 } 637 } 638 639 __d_drop(dentry); 640 spin_unlock(&dentry->d_lock); 641 return 0; 642 } 643 EXPORT_SYMBOL(d_invalidate); 644 645 /* This must be called with d_lock held */ 646 static inline void __dget_dlock(struct dentry *dentry) 647 { 648 dentry->d_count++; 649 } 650 651 static inline void __dget(struct dentry *dentry) 652 { 653 spin_lock(&dentry->d_lock); 654 __dget_dlock(dentry); 655 spin_unlock(&dentry->d_lock); 656 } 657 658 struct dentry *dget_parent(struct dentry *dentry) 659 { 660 struct dentry *ret; 661 662 repeat: 663 /* 664 * Don't need rcu_dereference because we re-check it was correct under 665 * the lock. 666 */ 667 rcu_read_lock(); 668 ret = dentry->d_parent; 669 spin_lock(&ret->d_lock); 670 if (unlikely(ret != dentry->d_parent)) { 671 spin_unlock(&ret->d_lock); 672 rcu_read_unlock(); 673 goto repeat; 674 } 675 rcu_read_unlock(); 676 BUG_ON(!ret->d_count); 677 ret->d_count++; 678 spin_unlock(&ret->d_lock); 679 return ret; 680 } 681 EXPORT_SYMBOL(dget_parent); 682 683 /** 684 * d_find_alias - grab a hashed alias of inode 685 * @inode: inode in question 686 * @want_discon: flag, used by d_splice_alias, to request 687 * that only a DISCONNECTED alias be returned. 688 * 689 * If inode has a hashed alias, or is a directory and has any alias, 690 * acquire the reference to alias and return it. Otherwise return NULL. 691 * Notice that if inode is a directory there can be only one alias and 692 * it can be unhashed only if it has no children, or if it is the root 693 * of a filesystem. 694 * 695 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer 696 * any other hashed alias over that one unless @want_discon is set, 697 * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias. 698 */ 699 static struct dentry *__d_find_alias(struct inode *inode, int want_discon) 700 { 701 struct dentry *alias, *discon_alias; 702 703 again: 704 discon_alias = NULL; 705 list_for_each_entry(alias, &inode->i_dentry, d_alias) { 706 spin_lock(&alias->d_lock); 707 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) { 708 if (IS_ROOT(alias) && 709 (alias->d_flags & DCACHE_DISCONNECTED)) { 710 discon_alias = alias; 711 } else if (!want_discon) { 712 __dget_dlock(alias); 713 spin_unlock(&alias->d_lock); 714 return alias; 715 } 716 } 717 spin_unlock(&alias->d_lock); 718 } 719 if (discon_alias) { 720 alias = discon_alias; 721 spin_lock(&alias->d_lock); 722 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) { 723 if (IS_ROOT(alias) && 724 (alias->d_flags & DCACHE_DISCONNECTED)) { 725 __dget_dlock(alias); 726 spin_unlock(&alias->d_lock); 727 return alias; 728 } 729 } 730 spin_unlock(&alias->d_lock); 731 goto again; 732 } 733 return NULL; 734 } 735 736 struct dentry *d_find_alias(struct inode *inode) 737 { 738 struct dentry *de = NULL; 739 740 if (!list_empty(&inode->i_dentry)) { 741 spin_lock(&inode->i_lock); 742 de = __d_find_alias(inode, 0); 743 spin_unlock(&inode->i_lock); 744 } 745 return de; 746 } 747 EXPORT_SYMBOL(d_find_alias); 748 749 /* 750 * Try to kill dentries associated with this inode. 751 * WARNING: you must own a reference to inode. 752 */ 753 void d_prune_aliases(struct inode *inode) 754 { 755 struct dentry *dentry; 756 restart: 757 spin_lock(&inode->i_lock); 758 list_for_each_entry(dentry, &inode->i_dentry, d_alias) { 759 spin_lock(&dentry->d_lock); 760 if (!dentry->d_count) { 761 __dget_dlock(dentry); 762 __d_drop(dentry); 763 spin_unlock(&dentry->d_lock); 764 spin_unlock(&inode->i_lock); 765 dput(dentry); 766 goto restart; 767 } 768 spin_unlock(&dentry->d_lock); 769 } 770 spin_unlock(&inode->i_lock); 771 } 772 EXPORT_SYMBOL(d_prune_aliases); 773 774 /* 775 * Try to throw away a dentry - free the inode, dput the parent. 776 * Requires dentry->d_lock is held, and dentry->d_count == 0. 777 * Releases dentry->d_lock. 778 * 779 * This may fail if locks cannot be acquired no problem, just try again. 780 */ 781 static void try_prune_one_dentry(struct dentry *dentry) 782 __releases(dentry->d_lock) 783 { 784 struct dentry *parent; 785 786 parent = dentry_kill(dentry, 0); 787 /* 788 * If dentry_kill returns NULL, we have nothing more to do. 789 * if it returns the same dentry, trylocks failed. In either 790 * case, just loop again. 791 * 792 * Otherwise, we need to prune ancestors too. This is necessary 793 * to prevent quadratic behavior of shrink_dcache_parent(), but 794 * is also expected to be beneficial in reducing dentry cache 795 * fragmentation. 796 */ 797 if (!parent) 798 return; 799 if (parent == dentry) 800 return; 801 802 /* Prune ancestors. */ 803 dentry = parent; 804 while (dentry) { 805 spin_lock(&dentry->d_lock); 806 if (dentry->d_count > 1) { 807 dentry->d_count--; 808 spin_unlock(&dentry->d_lock); 809 return; 810 } 811 dentry = dentry_kill(dentry, 1); 812 } 813 } 814 815 static void shrink_dentry_list(struct list_head *list) 816 { 817 struct dentry *dentry; 818 819 rcu_read_lock(); 820 for (;;) { 821 dentry = list_entry_rcu(list->prev, struct dentry, d_lru); 822 if (&dentry->d_lru == list) 823 break; /* empty */ 824 spin_lock(&dentry->d_lock); 825 if (dentry != list_entry(list->prev, struct dentry, d_lru)) { 826 spin_unlock(&dentry->d_lock); 827 continue; 828 } 829 830 /* 831 * We found an inuse dentry which was not removed from 832 * the LRU because of laziness during lookup. Do not free 833 * it - just keep it off the LRU list. 834 */ 835 if (dentry->d_count) { 836 dentry_lru_del(dentry); 837 spin_unlock(&dentry->d_lock); 838 continue; 839 } 840 841 rcu_read_unlock(); 842 843 try_prune_one_dentry(dentry); 844 845 rcu_read_lock(); 846 } 847 rcu_read_unlock(); 848 } 849 850 /** 851 * prune_dcache_sb - shrink the dcache 852 * @sb: superblock 853 * @count: number of entries to try to free 854 * 855 * Attempt to shrink the superblock dcache LRU by @count entries. This is 856 * done when we need more memory an called from the superblock shrinker 857 * function. 858 * 859 * This function may fail to free any resources if all the dentries are in 860 * use. 861 */ 862 void prune_dcache_sb(struct super_block *sb, int count) 863 { 864 struct dentry *dentry; 865 LIST_HEAD(referenced); 866 LIST_HEAD(tmp); 867 868 relock: 869 spin_lock(&dcache_lru_lock); 870 while (!list_empty(&sb->s_dentry_lru)) { 871 dentry = list_entry(sb->s_dentry_lru.prev, 872 struct dentry, d_lru); 873 BUG_ON(dentry->d_sb != sb); 874 875 if (!spin_trylock(&dentry->d_lock)) { 876 spin_unlock(&dcache_lru_lock); 877 cpu_relax(); 878 goto relock; 879 } 880 881 if (dentry->d_flags & DCACHE_REFERENCED) { 882 dentry->d_flags &= ~DCACHE_REFERENCED; 883 list_move(&dentry->d_lru, &referenced); 884 spin_unlock(&dentry->d_lock); 885 } else { 886 list_move_tail(&dentry->d_lru, &tmp); 887 dentry->d_flags |= DCACHE_SHRINK_LIST; 888 spin_unlock(&dentry->d_lock); 889 if (!--count) 890 break; 891 } 892 cond_resched_lock(&dcache_lru_lock); 893 } 894 if (!list_empty(&referenced)) 895 list_splice(&referenced, &sb->s_dentry_lru); 896 spin_unlock(&dcache_lru_lock); 897 898 shrink_dentry_list(&tmp); 899 } 900 901 /** 902 * shrink_dcache_sb - shrink dcache for a superblock 903 * @sb: superblock 904 * 905 * Shrink the dcache for the specified super block. This is used to free 906 * the dcache before unmounting a file system. 907 */ 908 void shrink_dcache_sb(struct super_block *sb) 909 { 910 LIST_HEAD(tmp); 911 912 spin_lock(&dcache_lru_lock); 913 while (!list_empty(&sb->s_dentry_lru)) { 914 list_splice_init(&sb->s_dentry_lru, &tmp); 915 spin_unlock(&dcache_lru_lock); 916 shrink_dentry_list(&tmp); 917 spin_lock(&dcache_lru_lock); 918 } 919 spin_unlock(&dcache_lru_lock); 920 } 921 EXPORT_SYMBOL(shrink_dcache_sb); 922 923 /* 924 * destroy a single subtree of dentries for unmount 925 * - see the comments on shrink_dcache_for_umount() for a description of the 926 * locking 927 */ 928 static void shrink_dcache_for_umount_subtree(struct dentry *dentry) 929 { 930 struct dentry *parent; 931 932 BUG_ON(!IS_ROOT(dentry)); 933 934 for (;;) { 935 /* descend to the first leaf in the current subtree */ 936 while (!list_empty(&dentry->d_subdirs)) 937 dentry = list_entry(dentry->d_subdirs.next, 938 struct dentry, d_u.d_child); 939 940 /* consume the dentries from this leaf up through its parents 941 * until we find one with children or run out altogether */ 942 do { 943 struct inode *inode; 944 945 /* 946 * remove the dentry from the lru, and inform 947 * the fs that this dentry is about to be 948 * unhashed and destroyed. 949 */ 950 dentry_lru_prune(dentry); 951 __d_shrink(dentry); 952 953 if (dentry->d_count != 0) { 954 printk(KERN_ERR 955 "BUG: Dentry %p{i=%lx,n=%s}" 956 " still in use (%d)" 957 " [unmount of %s %s]\n", 958 dentry, 959 dentry->d_inode ? 960 dentry->d_inode->i_ino : 0UL, 961 dentry->d_name.name, 962 dentry->d_count, 963 dentry->d_sb->s_type->name, 964 dentry->d_sb->s_id); 965 BUG(); 966 } 967 968 if (IS_ROOT(dentry)) { 969 parent = NULL; 970 list_del(&dentry->d_u.d_child); 971 } else { 972 parent = dentry->d_parent; 973 parent->d_count--; 974 list_del(&dentry->d_u.d_child); 975 } 976 977 inode = dentry->d_inode; 978 if (inode) { 979 dentry->d_inode = NULL; 980 list_del_init(&dentry->d_alias); 981 if (dentry->d_op && dentry->d_op->d_iput) 982 dentry->d_op->d_iput(dentry, inode); 983 else 984 iput(inode); 985 } 986 987 d_free(dentry); 988 989 /* finished when we fall off the top of the tree, 990 * otherwise we ascend to the parent and move to the 991 * next sibling if there is one */ 992 if (!parent) 993 return; 994 dentry = parent; 995 } while (list_empty(&dentry->d_subdirs)); 996 997 dentry = list_entry(dentry->d_subdirs.next, 998 struct dentry, d_u.d_child); 999 } 1000 } 1001 1002 /* 1003 * destroy the dentries attached to a superblock on unmounting 1004 * - we don't need to use dentry->d_lock because: 1005 * - the superblock is detached from all mountings and open files, so the 1006 * dentry trees will not be rearranged by the VFS 1007 * - s_umount is write-locked, so the memory pressure shrinker will ignore 1008 * any dentries belonging to this superblock that it comes across 1009 * - the filesystem itself is no longer permitted to rearrange the dentries 1010 * in this superblock 1011 */ 1012 void shrink_dcache_for_umount(struct super_block *sb) 1013 { 1014 struct dentry *dentry; 1015 1016 if (down_read_trylock(&sb->s_umount)) 1017 BUG(); 1018 1019 dentry = sb->s_root; 1020 sb->s_root = NULL; 1021 dentry->d_count--; 1022 shrink_dcache_for_umount_subtree(dentry); 1023 1024 while (!hlist_bl_empty(&sb->s_anon)) { 1025 dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash); 1026 shrink_dcache_for_umount_subtree(dentry); 1027 } 1028 } 1029 1030 /* 1031 * This tries to ascend one level of parenthood, but 1032 * we can race with renaming, so we need to re-check 1033 * the parenthood after dropping the lock and check 1034 * that the sequence number still matches. 1035 */ 1036 static struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq) 1037 { 1038 struct dentry *new = old->d_parent; 1039 1040 rcu_read_lock(); 1041 spin_unlock(&old->d_lock); 1042 spin_lock(&new->d_lock); 1043 1044 /* 1045 * might go back up the wrong parent if we have had a rename 1046 * or deletion 1047 */ 1048 if (new != old->d_parent || 1049 (old->d_flags & DCACHE_DISCONNECTED) || 1050 (!locked && read_seqretry(&rename_lock, seq))) { 1051 spin_unlock(&new->d_lock); 1052 new = NULL; 1053 } 1054 rcu_read_unlock(); 1055 return new; 1056 } 1057 1058 1059 /* 1060 * Search for at least 1 mount point in the dentry's subdirs. 1061 * We descend to the next level whenever the d_subdirs 1062 * list is non-empty and continue searching. 1063 */ 1064 1065 /** 1066 * have_submounts - check for mounts over a dentry 1067 * @parent: dentry to check. 1068 * 1069 * Return true if the parent or its subdirectories contain 1070 * a mount point 1071 */ 1072 int have_submounts(struct dentry *parent) 1073 { 1074 struct dentry *this_parent; 1075 struct list_head *next; 1076 unsigned seq; 1077 int locked = 0; 1078 1079 seq = read_seqbegin(&rename_lock); 1080 again: 1081 this_parent = parent; 1082 1083 if (d_mountpoint(parent)) 1084 goto positive; 1085 spin_lock(&this_parent->d_lock); 1086 repeat: 1087 next = this_parent->d_subdirs.next; 1088 resume: 1089 while (next != &this_parent->d_subdirs) { 1090 struct list_head *tmp = next; 1091 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); 1092 next = tmp->next; 1093 1094 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); 1095 /* Have we found a mount point ? */ 1096 if (d_mountpoint(dentry)) { 1097 spin_unlock(&dentry->d_lock); 1098 spin_unlock(&this_parent->d_lock); 1099 goto positive; 1100 } 1101 if (!list_empty(&dentry->d_subdirs)) { 1102 spin_unlock(&this_parent->d_lock); 1103 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_); 1104 this_parent = dentry; 1105 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_); 1106 goto repeat; 1107 } 1108 spin_unlock(&dentry->d_lock); 1109 } 1110 /* 1111 * All done at this level ... ascend and resume the search. 1112 */ 1113 if (this_parent != parent) { 1114 struct dentry *child = this_parent; 1115 this_parent = try_to_ascend(this_parent, locked, seq); 1116 if (!this_parent) 1117 goto rename_retry; 1118 next = child->d_u.d_child.next; 1119 goto resume; 1120 } 1121 spin_unlock(&this_parent->d_lock); 1122 if (!locked && read_seqretry(&rename_lock, seq)) 1123 goto rename_retry; 1124 if (locked) 1125 write_sequnlock(&rename_lock); 1126 return 0; /* No mount points found in tree */ 1127 positive: 1128 if (!locked && read_seqretry(&rename_lock, seq)) 1129 goto rename_retry; 1130 if (locked) 1131 write_sequnlock(&rename_lock); 1132 return 1; 1133 1134 rename_retry: 1135 locked = 1; 1136 write_seqlock(&rename_lock); 1137 goto again; 1138 } 1139 EXPORT_SYMBOL(have_submounts); 1140 1141 /* 1142 * Search the dentry child list for the specified parent, 1143 * and move any unused dentries to the end of the unused 1144 * list for prune_dcache(). We descend to the next level 1145 * whenever the d_subdirs list is non-empty and continue 1146 * searching. 1147 * 1148 * It returns zero iff there are no unused children, 1149 * otherwise it returns the number of children moved to 1150 * the end of the unused list. This may not be the total 1151 * number of unused children, because select_parent can 1152 * drop the lock and return early due to latency 1153 * constraints. 1154 */ 1155 static int select_parent(struct dentry *parent, struct list_head *dispose) 1156 { 1157 struct dentry *this_parent; 1158 struct list_head *next; 1159 unsigned seq; 1160 int found = 0; 1161 int locked = 0; 1162 1163 seq = read_seqbegin(&rename_lock); 1164 again: 1165 this_parent = parent; 1166 spin_lock(&this_parent->d_lock); 1167 repeat: 1168 next = this_parent->d_subdirs.next; 1169 resume: 1170 while (next != &this_parent->d_subdirs) { 1171 struct list_head *tmp = next; 1172 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); 1173 next = tmp->next; 1174 1175 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); 1176 1177 /* 1178 * move only zero ref count dentries to the dispose list. 1179 * 1180 * Those which are presently on the shrink list, being processed 1181 * by shrink_dentry_list(), shouldn't be moved. Otherwise the 1182 * loop in shrink_dcache_parent() might not make any progress 1183 * and loop forever. 1184 */ 1185 if (dentry->d_count) { 1186 dentry_lru_del(dentry); 1187 } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) { 1188 dentry_lru_move_list(dentry, dispose); 1189 dentry->d_flags |= DCACHE_SHRINK_LIST; 1190 found++; 1191 } 1192 /* 1193 * We can return to the caller if we have found some (this 1194 * ensures forward progress). We'll be coming back to find 1195 * the rest. 1196 */ 1197 if (found && need_resched()) { 1198 spin_unlock(&dentry->d_lock); 1199 goto out; 1200 } 1201 1202 /* 1203 * Descend a level if the d_subdirs list is non-empty. 1204 */ 1205 if (!list_empty(&dentry->d_subdirs)) { 1206 spin_unlock(&this_parent->d_lock); 1207 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_); 1208 this_parent = dentry; 1209 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_); 1210 goto repeat; 1211 } 1212 1213 spin_unlock(&dentry->d_lock); 1214 } 1215 /* 1216 * All done at this level ... ascend and resume the search. 1217 */ 1218 if (this_parent != parent) { 1219 struct dentry *child = this_parent; 1220 this_parent = try_to_ascend(this_parent, locked, seq); 1221 if (!this_parent) 1222 goto rename_retry; 1223 next = child->d_u.d_child.next; 1224 goto resume; 1225 } 1226 out: 1227 spin_unlock(&this_parent->d_lock); 1228 if (!locked && read_seqretry(&rename_lock, seq)) 1229 goto rename_retry; 1230 if (locked) 1231 write_sequnlock(&rename_lock); 1232 return found; 1233 1234 rename_retry: 1235 if (found) 1236 return found; 1237 locked = 1; 1238 write_seqlock(&rename_lock); 1239 goto again; 1240 } 1241 1242 /** 1243 * shrink_dcache_parent - prune dcache 1244 * @parent: parent of entries to prune 1245 * 1246 * Prune the dcache to remove unused children of the parent dentry. 1247 */ 1248 void shrink_dcache_parent(struct dentry * parent) 1249 { 1250 LIST_HEAD(dispose); 1251 int found; 1252 1253 while ((found = select_parent(parent, &dispose)) != 0) 1254 shrink_dentry_list(&dispose); 1255 } 1256 EXPORT_SYMBOL(shrink_dcache_parent); 1257 1258 /** 1259 * __d_alloc - allocate a dcache entry 1260 * @sb: filesystem it will belong to 1261 * @name: qstr of the name 1262 * 1263 * Allocates a dentry. It returns %NULL if there is insufficient memory 1264 * available. On a success the dentry is returned. The name passed in is 1265 * copied and the copy passed in may be reused after this call. 1266 */ 1267 1268 struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) 1269 { 1270 struct dentry *dentry; 1271 char *dname; 1272 1273 dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL); 1274 if (!dentry) 1275 return NULL; 1276 1277 /* 1278 * We guarantee that the inline name is always NUL-terminated. 1279 * This way the memcpy() done by the name switching in rename 1280 * will still always have a NUL at the end, even if we might 1281 * be overwriting an internal NUL character 1282 */ 1283 dentry->d_iname[DNAME_INLINE_LEN-1] = 0; 1284 if (name->len > DNAME_INLINE_LEN-1) { 1285 dname = kmalloc(name->len + 1, GFP_KERNEL); 1286 if (!dname) { 1287 kmem_cache_free(dentry_cache, dentry); 1288 return NULL; 1289 } 1290 } else { 1291 dname = dentry->d_iname; 1292 } 1293 1294 dentry->d_name.len = name->len; 1295 dentry->d_name.hash = name->hash; 1296 memcpy(dname, name->name, name->len); 1297 dname[name->len] = 0; 1298 1299 /* Make sure we always see the terminating NUL character */ 1300 smp_wmb(); 1301 dentry->d_name.name = dname; 1302 1303 dentry->d_count = 1; 1304 dentry->d_flags = 0; 1305 spin_lock_init(&dentry->d_lock); 1306 seqcount_init(&dentry->d_seq); 1307 dentry->d_inode = NULL; 1308 dentry->d_parent = dentry; 1309 dentry->d_sb = sb; 1310 dentry->d_op = NULL; 1311 dentry->d_fsdata = NULL; 1312 INIT_HLIST_BL_NODE(&dentry->d_hash); 1313 INIT_LIST_HEAD(&dentry->d_lru); 1314 INIT_LIST_HEAD(&dentry->d_subdirs); 1315 INIT_LIST_HEAD(&dentry->d_alias); 1316 INIT_LIST_HEAD(&dentry->d_u.d_child); 1317 d_set_d_op(dentry, dentry->d_sb->s_d_op); 1318 1319 this_cpu_inc(nr_dentry); 1320 1321 return dentry; 1322 } 1323 1324 /** 1325 * d_alloc - allocate a dcache entry 1326 * @parent: parent of entry to allocate 1327 * @name: qstr of the name 1328 * 1329 * Allocates a dentry. It returns %NULL if there is insufficient memory 1330 * available. On a success the dentry is returned. The name passed in is 1331 * copied and the copy passed in may be reused after this call. 1332 */ 1333 struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) 1334 { 1335 struct dentry *dentry = __d_alloc(parent->d_sb, name); 1336 if (!dentry) 1337 return NULL; 1338 1339 spin_lock(&parent->d_lock); 1340 /* 1341 * don't need child lock because it is not subject 1342 * to concurrency here 1343 */ 1344 __dget_dlock(parent); 1345 dentry->d_parent = parent; 1346 list_add(&dentry->d_u.d_child, &parent->d_subdirs); 1347 spin_unlock(&parent->d_lock); 1348 1349 return dentry; 1350 } 1351 EXPORT_SYMBOL(d_alloc); 1352 1353 struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name) 1354 { 1355 struct dentry *dentry = __d_alloc(sb, name); 1356 if (dentry) 1357 dentry->d_flags |= DCACHE_DISCONNECTED; 1358 return dentry; 1359 } 1360 EXPORT_SYMBOL(d_alloc_pseudo); 1361 1362 struct dentry *d_alloc_name(struct dentry *parent, const char *name) 1363 { 1364 struct qstr q; 1365 1366 q.name = name; 1367 q.len = strlen(name); 1368 q.hash = full_name_hash(q.name, q.len); 1369 return d_alloc(parent, &q); 1370 } 1371 EXPORT_SYMBOL(d_alloc_name); 1372 1373 void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op) 1374 { 1375 WARN_ON_ONCE(dentry->d_op); 1376 WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH | 1377 DCACHE_OP_COMPARE | 1378 DCACHE_OP_REVALIDATE | 1379 DCACHE_OP_DELETE )); 1380 dentry->d_op = op; 1381 if (!op) 1382 return; 1383 if (op->d_hash) 1384 dentry->d_flags |= DCACHE_OP_HASH; 1385 if (op->d_compare) 1386 dentry->d_flags |= DCACHE_OP_COMPARE; 1387 if (op->d_revalidate) 1388 dentry->d_flags |= DCACHE_OP_REVALIDATE; 1389 if (op->d_delete) 1390 dentry->d_flags |= DCACHE_OP_DELETE; 1391 if (op->d_prune) 1392 dentry->d_flags |= DCACHE_OP_PRUNE; 1393 1394 } 1395 EXPORT_SYMBOL(d_set_d_op); 1396 1397 static void __d_instantiate(struct dentry *dentry, struct inode *inode) 1398 { 1399 spin_lock(&dentry->d_lock); 1400 if (inode) { 1401 if (unlikely(IS_AUTOMOUNT(inode))) 1402 dentry->d_flags |= DCACHE_NEED_AUTOMOUNT; 1403 list_add(&dentry->d_alias, &inode->i_dentry); 1404 } 1405 dentry->d_inode = inode; 1406 dentry_rcuwalk_barrier(dentry); 1407 spin_unlock(&dentry->d_lock); 1408 fsnotify_d_instantiate(dentry, inode); 1409 } 1410 1411 /** 1412 * d_instantiate - fill in inode information for a dentry 1413 * @entry: dentry to complete 1414 * @inode: inode to attach to this dentry 1415 * 1416 * Fill in inode information in the entry. 1417 * 1418 * This turns negative dentries into productive full members 1419 * of society. 1420 * 1421 * NOTE! This assumes that the inode count has been incremented 1422 * (or otherwise set) by the caller to indicate that it is now 1423 * in use by the dcache. 1424 */ 1425 1426 void d_instantiate(struct dentry *entry, struct inode * inode) 1427 { 1428 BUG_ON(!list_empty(&entry->d_alias)); 1429 if (inode) 1430 spin_lock(&inode->i_lock); 1431 __d_instantiate(entry, inode); 1432 if (inode) 1433 spin_unlock(&inode->i_lock); 1434 security_d_instantiate(entry, inode); 1435 } 1436 EXPORT_SYMBOL(d_instantiate); 1437 1438 /** 1439 * d_instantiate_unique - instantiate a non-aliased dentry 1440 * @entry: dentry to instantiate 1441 * @inode: inode to attach to this dentry 1442 * 1443 * Fill in inode information in the entry. On success, it returns NULL. 1444 * If an unhashed alias of "entry" already exists, then we return the 1445 * aliased dentry instead and drop one reference to inode. 1446 * 1447 * Note that in order to avoid conflicts with rename() etc, the caller 1448 * had better be holding the parent directory semaphore. 1449 * 1450 * This also assumes that the inode count has been incremented 1451 * (or otherwise set) by the caller to indicate that it is now 1452 * in use by the dcache. 1453 */ 1454 static struct dentry *__d_instantiate_unique(struct dentry *entry, 1455 struct inode *inode) 1456 { 1457 struct dentry *alias; 1458 int len = entry->d_name.len; 1459 const char *name = entry->d_name.name; 1460 unsigned int hash = entry->d_name.hash; 1461 1462 if (!inode) { 1463 __d_instantiate(entry, NULL); 1464 return NULL; 1465 } 1466 1467 list_for_each_entry(alias, &inode->i_dentry, d_alias) { 1468 /* 1469 * Don't need alias->d_lock here, because aliases with 1470 * d_parent == entry->d_parent are not subject to name or 1471 * parent changes, because the parent inode i_mutex is held. 1472 */ 1473 if (alias->d_name.hash != hash) 1474 continue; 1475 if (alias->d_parent != entry->d_parent) 1476 continue; 1477 if (alias->d_name.len != len) 1478 continue; 1479 if (dentry_cmp(alias, name, len)) 1480 continue; 1481 __dget(alias); 1482 return alias; 1483 } 1484 1485 __d_instantiate(entry, inode); 1486 return NULL; 1487 } 1488 1489 struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode) 1490 { 1491 struct dentry *result; 1492 1493 BUG_ON(!list_empty(&entry->d_alias)); 1494 1495 if (inode) 1496 spin_lock(&inode->i_lock); 1497 result = __d_instantiate_unique(entry, inode); 1498 if (inode) 1499 spin_unlock(&inode->i_lock); 1500 1501 if (!result) { 1502 security_d_instantiate(entry, inode); 1503 return NULL; 1504 } 1505 1506 BUG_ON(!d_unhashed(result)); 1507 iput(inode); 1508 return result; 1509 } 1510 1511 EXPORT_SYMBOL(d_instantiate_unique); 1512 1513 struct dentry *d_make_root(struct inode *root_inode) 1514 { 1515 struct dentry *res = NULL; 1516 1517 if (root_inode) { 1518 static const struct qstr name = QSTR_INIT("/", 1); 1519 1520 res = __d_alloc(root_inode->i_sb, &name); 1521 if (res) 1522 d_instantiate(res, root_inode); 1523 else 1524 iput(root_inode); 1525 } 1526 return res; 1527 } 1528 EXPORT_SYMBOL(d_make_root); 1529 1530 static struct dentry * __d_find_any_alias(struct inode *inode) 1531 { 1532 struct dentry *alias; 1533 1534 if (list_empty(&inode->i_dentry)) 1535 return NULL; 1536 alias = list_first_entry(&inode->i_dentry, struct dentry, d_alias); 1537 __dget(alias); 1538 return alias; 1539 } 1540 1541 /** 1542 * d_find_any_alias - find any alias for a given inode 1543 * @inode: inode to find an alias for 1544 * 1545 * If any aliases exist for the given inode, take and return a 1546 * reference for one of them. If no aliases exist, return %NULL. 1547 */ 1548 struct dentry *d_find_any_alias(struct inode *inode) 1549 { 1550 struct dentry *de; 1551 1552 spin_lock(&inode->i_lock); 1553 de = __d_find_any_alias(inode); 1554 spin_unlock(&inode->i_lock); 1555 return de; 1556 } 1557 EXPORT_SYMBOL(d_find_any_alias); 1558 1559 /** 1560 * d_obtain_alias - find or allocate a dentry for a given inode 1561 * @inode: inode to allocate the dentry for 1562 * 1563 * Obtain a dentry for an inode resulting from NFS filehandle conversion or 1564 * similar open by handle operations. The returned dentry may be anonymous, 1565 * or may have a full name (if the inode was already in the cache). 1566 * 1567 * When called on a directory inode, we must ensure that the inode only ever 1568 * has one dentry. If a dentry is found, that is returned instead of 1569 * allocating a new one. 1570 * 1571 * On successful return, the reference to the inode has been transferred 1572 * to the dentry. In case of an error the reference on the inode is released. 1573 * To make it easier to use in export operations a %NULL or IS_ERR inode may 1574 * be passed in and will be the error will be propagate to the return value, 1575 * with a %NULL @inode replaced by ERR_PTR(-ESTALE). 1576 */ 1577 struct dentry *d_obtain_alias(struct inode *inode) 1578 { 1579 static const struct qstr anonstring = { .name = "" }; 1580 struct dentry *tmp; 1581 struct dentry *res; 1582 1583 if (!inode) 1584 return ERR_PTR(-ESTALE); 1585 if (IS_ERR(inode)) 1586 return ERR_CAST(inode); 1587 1588 res = d_find_any_alias(inode); 1589 if (res) 1590 goto out_iput; 1591 1592 tmp = __d_alloc(inode->i_sb, &anonstring); 1593 if (!tmp) { 1594 res = ERR_PTR(-ENOMEM); 1595 goto out_iput; 1596 } 1597 1598 spin_lock(&inode->i_lock); 1599 res = __d_find_any_alias(inode); 1600 if (res) { 1601 spin_unlock(&inode->i_lock); 1602 dput(tmp); 1603 goto out_iput; 1604 } 1605 1606 /* attach a disconnected dentry */ 1607 spin_lock(&tmp->d_lock); 1608 tmp->d_inode = inode; 1609 tmp->d_flags |= DCACHE_DISCONNECTED; 1610 list_add(&tmp->d_alias, &inode->i_dentry); 1611 hlist_bl_lock(&tmp->d_sb->s_anon); 1612 hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon); 1613 hlist_bl_unlock(&tmp->d_sb->s_anon); 1614 spin_unlock(&tmp->d_lock); 1615 spin_unlock(&inode->i_lock); 1616 security_d_instantiate(tmp, inode); 1617 1618 return tmp; 1619 1620 out_iput: 1621 if (res && !IS_ERR(res)) 1622 security_d_instantiate(res, inode); 1623 iput(inode); 1624 return res; 1625 } 1626 EXPORT_SYMBOL(d_obtain_alias); 1627 1628 /** 1629 * d_splice_alias - splice a disconnected dentry into the tree if one exists 1630 * @inode: the inode which may have a disconnected dentry 1631 * @dentry: a negative dentry which we want to point to the inode. 1632 * 1633 * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and 1634 * DCACHE_DISCONNECTED), then d_move that in place of the given dentry 1635 * and return it, else simply d_add the inode to the dentry and return NULL. 1636 * 1637 * This is needed in the lookup routine of any filesystem that is exportable 1638 * (via knfsd) so that we can build dcache paths to directories effectively. 1639 * 1640 * If a dentry was found and moved, then it is returned. Otherwise NULL 1641 * is returned. This matches the expected return value of ->lookup. 1642 * 1643 */ 1644 struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) 1645 { 1646 struct dentry *new = NULL; 1647 1648 if (IS_ERR(inode)) 1649 return ERR_CAST(inode); 1650 1651 if (inode && S_ISDIR(inode->i_mode)) { 1652 spin_lock(&inode->i_lock); 1653 new = __d_find_alias(inode, 1); 1654 if (new) { 1655 BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED)); 1656 spin_unlock(&inode->i_lock); 1657 security_d_instantiate(new, inode); 1658 d_move(new, dentry); 1659 iput(inode); 1660 } else { 1661 /* already taking inode->i_lock, so d_add() by hand */ 1662 __d_instantiate(dentry, inode); 1663 spin_unlock(&inode->i_lock); 1664 security_d_instantiate(dentry, inode); 1665 d_rehash(dentry); 1666 } 1667 } else 1668 d_add(dentry, inode); 1669 return new; 1670 } 1671 EXPORT_SYMBOL(d_splice_alias); 1672 1673 /** 1674 * d_add_ci - lookup or allocate new dentry with case-exact name 1675 * @inode: the inode case-insensitive lookup has found 1676 * @dentry: the negative dentry that was passed to the parent's lookup func 1677 * @name: the case-exact name to be associated with the returned dentry 1678 * 1679 * This is to avoid filling the dcache with case-insensitive names to the 1680 * same inode, only the actual correct case is stored in the dcache for 1681 * case-insensitive filesystems. 1682 * 1683 * For a case-insensitive lookup match and if the the case-exact dentry 1684 * already exists in in the dcache, use it and return it. 1685 * 1686 * If no entry exists with the exact case name, allocate new dentry with 1687 * the exact case, and return the spliced entry. 1688 */ 1689 struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode, 1690 struct qstr *name) 1691 { 1692 int error; 1693 struct dentry *found; 1694 struct dentry *new; 1695 1696 /* 1697 * First check if a dentry matching the name already exists, 1698 * if not go ahead and create it now. 1699 */ 1700 found = d_hash_and_lookup(dentry->d_parent, name); 1701 if (!found) { 1702 new = d_alloc(dentry->d_parent, name); 1703 if (!new) { 1704 error = -ENOMEM; 1705 goto err_out; 1706 } 1707 1708 found = d_splice_alias(inode, new); 1709 if (found) { 1710 dput(new); 1711 return found; 1712 } 1713 return new; 1714 } 1715 1716 /* 1717 * If a matching dentry exists, and it's not negative use it. 1718 * 1719 * Decrement the reference count to balance the iget() done 1720 * earlier on. 1721 */ 1722 if (found->d_inode) { 1723 if (unlikely(found->d_inode != inode)) { 1724 /* This can't happen because bad inodes are unhashed. */ 1725 BUG_ON(!is_bad_inode(inode)); 1726 BUG_ON(!is_bad_inode(found->d_inode)); 1727 } 1728 iput(inode); 1729 return found; 1730 } 1731 1732 /* 1733 * We are going to instantiate this dentry, unhash it and clear the 1734 * lookup flag so we can do that. 1735 */ 1736 if (unlikely(d_need_lookup(found))) 1737 d_clear_need_lookup(found); 1738 1739 /* 1740 * Negative dentry: instantiate it unless the inode is a directory and 1741 * already has a dentry. 1742 */ 1743 new = d_splice_alias(inode, found); 1744 if (new) { 1745 dput(found); 1746 found = new; 1747 } 1748 return found; 1749 1750 err_out: 1751 iput(inode); 1752 return ERR_PTR(error); 1753 } 1754 EXPORT_SYMBOL(d_add_ci); 1755 1756 /* 1757 * Do the slow-case of the dentry name compare. 1758 * 1759 * Unlike the dentry_cmp() function, we need to atomically 1760 * load the name, length and inode information, so that the 1761 * filesystem can rely on them, and can use the 'name' and 1762 * 'len' information without worrying about walking off the 1763 * end of memory etc. 1764 * 1765 * Thus the read_seqcount_retry() and the "duplicate" info 1766 * in arguments (the low-level filesystem should not look 1767 * at the dentry inode or name contents directly, since 1768 * rename can change them while we're in RCU mode). 1769 */ 1770 enum slow_d_compare { 1771 D_COMP_OK, 1772 D_COMP_NOMATCH, 1773 D_COMP_SEQRETRY, 1774 }; 1775 1776 static noinline enum slow_d_compare slow_dentry_cmp( 1777 const struct dentry *parent, 1778 struct inode *inode, 1779 struct dentry *dentry, 1780 unsigned int seq, 1781 const struct qstr *name) 1782 { 1783 int tlen = dentry->d_name.len; 1784 const char *tname = dentry->d_name.name; 1785 struct inode *i = dentry->d_inode; 1786 1787 if (read_seqcount_retry(&dentry->d_seq, seq)) { 1788 cpu_relax(); 1789 return D_COMP_SEQRETRY; 1790 } 1791 if (parent->d_op->d_compare(parent, inode, 1792 dentry, i, 1793 tlen, tname, name)) 1794 return D_COMP_NOMATCH; 1795 return D_COMP_OK; 1796 } 1797 1798 /** 1799 * __d_lookup_rcu - search for a dentry (racy, store-free) 1800 * @parent: parent dentry 1801 * @name: qstr of name we wish to find 1802 * @seqp: returns d_seq value at the point where the dentry was found 1803 * @inode: returns dentry->d_inode when the inode was found valid. 1804 * Returns: dentry, or NULL 1805 * 1806 * __d_lookup_rcu is the dcache lookup function for rcu-walk name 1807 * resolution (store-free path walking) design described in 1808 * Documentation/filesystems/path-lookup.txt. 1809 * 1810 * This is not to be used outside core vfs. 1811 * 1812 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock 1813 * held, and rcu_read_lock held. The returned dentry must not be stored into 1814 * without taking d_lock and checking d_seq sequence count against @seq 1815 * returned here. 1816 * 1817 * A refcount may be taken on the found dentry with the __d_rcu_to_refcount 1818 * function. 1819 * 1820 * Alternatively, __d_lookup_rcu may be called again to look up the child of 1821 * the returned dentry, so long as its parent's seqlock is checked after the 1822 * child is looked up. Thus, an interlocking stepping of sequence lock checks 1823 * is formed, giving integrity down the path walk. 1824 * 1825 * NOTE! The caller *has* to check the resulting dentry against the sequence 1826 * number we've returned before using any of the resulting dentry state! 1827 */ 1828 struct dentry *__d_lookup_rcu(const struct dentry *parent, 1829 const struct qstr *name, 1830 unsigned *seqp, struct inode *inode) 1831 { 1832 u64 hashlen = name->hash_len; 1833 const unsigned char *str = name->name; 1834 struct hlist_bl_head *b = d_hash(parent, hashlen_hash(hashlen)); 1835 struct hlist_bl_node *node; 1836 struct dentry *dentry; 1837 1838 /* 1839 * Note: There is significant duplication with __d_lookup_rcu which is 1840 * required to prevent single threaded performance regressions 1841 * especially on architectures where smp_rmb (in seqcounts) are costly. 1842 * Keep the two functions in sync. 1843 */ 1844 1845 /* 1846 * The hash list is protected using RCU. 1847 * 1848 * Carefully use d_seq when comparing a candidate dentry, to avoid 1849 * races with d_move(). 1850 * 1851 * It is possible that concurrent renames can mess up our list 1852 * walk here and result in missing our dentry, resulting in the 1853 * false-negative result. d_lookup() protects against concurrent 1854 * renames using rename_lock seqlock. 1855 * 1856 * See Documentation/filesystems/path-lookup.txt for more details. 1857 */ 1858 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) { 1859 unsigned seq; 1860 1861 seqretry: 1862 /* 1863 * The dentry sequence count protects us from concurrent 1864 * renames, and thus protects inode, parent and name fields. 1865 * 1866 * The caller must perform a seqcount check in order 1867 * to do anything useful with the returned dentry, 1868 * including using the 'd_inode' pointer. 1869 * 1870 * NOTE! We do a "raw" seqcount_begin here. That means that 1871 * we don't wait for the sequence count to stabilize if it 1872 * is in the middle of a sequence change. If we do the slow 1873 * dentry compare, we will do seqretries until it is stable, 1874 * and if we end up with a successful lookup, we actually 1875 * want to exit RCU lookup anyway. 1876 */ 1877 seq = raw_seqcount_begin(&dentry->d_seq); 1878 if (dentry->d_parent != parent) 1879 continue; 1880 if (d_unhashed(dentry)) 1881 continue; 1882 *seqp = seq; 1883 1884 if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) { 1885 if (dentry->d_name.hash != hashlen_hash(hashlen)) 1886 continue; 1887 switch (slow_dentry_cmp(parent, inode, dentry, seq, name)) { 1888 case D_COMP_OK: 1889 return dentry; 1890 case D_COMP_NOMATCH: 1891 continue; 1892 default: 1893 goto seqretry; 1894 } 1895 } 1896 1897 if (dentry->d_name.hash_len != hashlen) 1898 continue; 1899 if (!dentry_cmp(dentry, str, hashlen_len(hashlen))) 1900 return dentry; 1901 } 1902 return NULL; 1903 } 1904 1905 /** 1906 * d_lookup - search for a dentry 1907 * @parent: parent dentry 1908 * @name: qstr of name we wish to find 1909 * Returns: dentry, or NULL 1910 * 1911 * d_lookup searches the children of the parent dentry for the name in 1912 * question. If the dentry is found its reference count is incremented and the 1913 * dentry is returned. The caller must use dput to free the entry when it has 1914 * finished using it. %NULL is returned if the dentry does not exist. 1915 */ 1916 struct dentry *d_lookup(struct dentry *parent, struct qstr *name) 1917 { 1918 struct dentry *dentry; 1919 unsigned seq; 1920 1921 do { 1922 seq = read_seqbegin(&rename_lock); 1923 dentry = __d_lookup(parent, name); 1924 if (dentry) 1925 break; 1926 } while (read_seqretry(&rename_lock, seq)); 1927 return dentry; 1928 } 1929 EXPORT_SYMBOL(d_lookup); 1930 1931 /** 1932 * __d_lookup - search for a dentry (racy) 1933 * @parent: parent dentry 1934 * @name: qstr of name we wish to find 1935 * Returns: dentry, or NULL 1936 * 1937 * __d_lookup is like d_lookup, however it may (rarely) return a 1938 * false-negative result due to unrelated rename activity. 1939 * 1940 * __d_lookup is slightly faster by avoiding rename_lock read seqlock, 1941 * however it must be used carefully, eg. with a following d_lookup in 1942 * the case of failure. 1943 * 1944 * __d_lookup callers must be commented. 1945 */ 1946 struct dentry *__d_lookup(struct dentry *parent, struct qstr *name) 1947 { 1948 unsigned int len = name->len; 1949 unsigned int hash = name->hash; 1950 const unsigned char *str = name->name; 1951 struct hlist_bl_head *b = d_hash(parent, hash); 1952 struct hlist_bl_node *node; 1953 struct dentry *found = NULL; 1954 struct dentry *dentry; 1955 1956 /* 1957 * Note: There is significant duplication with __d_lookup_rcu which is 1958 * required to prevent single threaded performance regressions 1959 * especially on architectures where smp_rmb (in seqcounts) are costly. 1960 * Keep the two functions in sync. 1961 */ 1962 1963 /* 1964 * The hash list is protected using RCU. 1965 * 1966 * Take d_lock when comparing a candidate dentry, to avoid races 1967 * with d_move(). 1968 * 1969 * It is possible that concurrent renames can mess up our list 1970 * walk here and result in missing our dentry, resulting in the 1971 * false-negative result. d_lookup() protects against concurrent 1972 * renames using rename_lock seqlock. 1973 * 1974 * See Documentation/filesystems/path-lookup.txt for more details. 1975 */ 1976 rcu_read_lock(); 1977 1978 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) { 1979 1980 if (dentry->d_name.hash != hash) 1981 continue; 1982 1983 spin_lock(&dentry->d_lock); 1984 if (dentry->d_parent != parent) 1985 goto next; 1986 if (d_unhashed(dentry)) 1987 goto next; 1988 1989 /* 1990 * It is safe to compare names since d_move() cannot 1991 * change the qstr (protected by d_lock). 1992 */ 1993 if (parent->d_flags & DCACHE_OP_COMPARE) { 1994 int tlen = dentry->d_name.len; 1995 const char *tname = dentry->d_name.name; 1996 if (parent->d_op->d_compare(parent, parent->d_inode, 1997 dentry, dentry->d_inode, 1998 tlen, tname, name)) 1999 goto next; 2000 } else { 2001 if (dentry->d_name.len != len) 2002 goto next; 2003 if (dentry_cmp(dentry, str, len)) 2004 goto next; 2005 } 2006 2007 dentry->d_count++; 2008 found = dentry; 2009 spin_unlock(&dentry->d_lock); 2010 break; 2011 next: 2012 spin_unlock(&dentry->d_lock); 2013 } 2014 rcu_read_unlock(); 2015 2016 return found; 2017 } 2018 2019 /** 2020 * d_hash_and_lookup - hash the qstr then search for a dentry 2021 * @dir: Directory to search in 2022 * @name: qstr of name we wish to find 2023 * 2024 * On hash failure or on lookup failure NULL is returned. 2025 */ 2026 struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name) 2027 { 2028 struct dentry *dentry = NULL; 2029 2030 /* 2031 * Check for a fs-specific hash function. Note that we must 2032 * calculate the standard hash first, as the d_op->d_hash() 2033 * routine may choose to leave the hash value unchanged. 2034 */ 2035 name->hash = full_name_hash(name->name, name->len); 2036 if (dir->d_flags & DCACHE_OP_HASH) { 2037 if (dir->d_op->d_hash(dir, dir->d_inode, name) < 0) 2038 goto out; 2039 } 2040 dentry = d_lookup(dir, name); 2041 out: 2042 return dentry; 2043 } 2044 2045 /** 2046 * d_validate - verify dentry provided from insecure source (deprecated) 2047 * @dentry: The dentry alleged to be valid child of @dparent 2048 * @dparent: The parent dentry (known to be valid) 2049 * 2050 * An insecure source has sent us a dentry, here we verify it and dget() it. 2051 * This is used by ncpfs in its readdir implementation. 2052 * Zero is returned in the dentry is invalid. 2053 * 2054 * This function is slow for big directories, and deprecated, do not use it. 2055 */ 2056 int d_validate(struct dentry *dentry, struct dentry *dparent) 2057 { 2058 struct dentry *child; 2059 2060 spin_lock(&dparent->d_lock); 2061 list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) { 2062 if (dentry == child) { 2063 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); 2064 __dget_dlock(dentry); 2065 spin_unlock(&dentry->d_lock); 2066 spin_unlock(&dparent->d_lock); 2067 return 1; 2068 } 2069 } 2070 spin_unlock(&dparent->d_lock); 2071 2072 return 0; 2073 } 2074 EXPORT_SYMBOL(d_validate); 2075 2076 /* 2077 * When a file is deleted, we have two options: 2078 * - turn this dentry into a negative dentry 2079 * - unhash this dentry and free it. 2080 * 2081 * Usually, we want to just turn this into 2082 * a negative dentry, but if anybody else is 2083 * currently using the dentry or the inode 2084 * we can't do that and we fall back on removing 2085 * it from the hash queues and waiting for 2086 * it to be deleted later when it has no users 2087 */ 2088 2089 /** 2090 * d_delete - delete a dentry 2091 * @dentry: The dentry to delete 2092 * 2093 * Turn the dentry into a negative dentry if possible, otherwise 2094 * remove it from the hash queues so it can be deleted later 2095 */ 2096 2097 void d_delete(struct dentry * dentry) 2098 { 2099 struct inode *inode; 2100 int isdir = 0; 2101 /* 2102 * Are we the only user? 2103 */ 2104 again: 2105 spin_lock(&dentry->d_lock); 2106 inode = dentry->d_inode; 2107 isdir = S_ISDIR(inode->i_mode); 2108 if (dentry->d_count == 1) { 2109 if (inode && !spin_trylock(&inode->i_lock)) { 2110 spin_unlock(&dentry->d_lock); 2111 cpu_relax(); 2112 goto again; 2113 } 2114 dentry->d_flags &= ~DCACHE_CANT_MOUNT; 2115 dentry_unlink_inode(dentry); 2116 fsnotify_nameremove(dentry, isdir); 2117 return; 2118 } 2119 2120 if (!d_unhashed(dentry)) 2121 __d_drop(dentry); 2122 2123 spin_unlock(&dentry->d_lock); 2124 2125 fsnotify_nameremove(dentry, isdir); 2126 } 2127 EXPORT_SYMBOL(d_delete); 2128 2129 static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b) 2130 { 2131 BUG_ON(!d_unhashed(entry)); 2132 hlist_bl_lock(b); 2133 entry->d_flags |= DCACHE_RCUACCESS; 2134 hlist_bl_add_head_rcu(&entry->d_hash, b); 2135 hlist_bl_unlock(b); 2136 } 2137 2138 static void _d_rehash(struct dentry * entry) 2139 { 2140 __d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash)); 2141 } 2142 2143 /** 2144 * d_rehash - add an entry back to the hash 2145 * @entry: dentry to add to the hash 2146 * 2147 * Adds a dentry to the hash according to its name. 2148 */ 2149 2150 void d_rehash(struct dentry * entry) 2151 { 2152 spin_lock(&entry->d_lock); 2153 _d_rehash(entry); 2154 spin_unlock(&entry->d_lock); 2155 } 2156 EXPORT_SYMBOL(d_rehash); 2157 2158 /** 2159 * dentry_update_name_case - update case insensitive dentry with a new name 2160 * @dentry: dentry to be updated 2161 * @name: new name 2162 * 2163 * Update a case insensitive dentry with new case of name. 2164 * 2165 * dentry must have been returned by d_lookup with name @name. Old and new 2166 * name lengths must match (ie. no d_compare which allows mismatched name 2167 * lengths). 2168 * 2169 * Parent inode i_mutex must be held over d_lookup and into this call (to 2170 * keep renames and concurrent inserts, and readdir(2) away). 2171 */ 2172 void dentry_update_name_case(struct dentry *dentry, struct qstr *name) 2173 { 2174 BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex)); 2175 BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */ 2176 2177 spin_lock(&dentry->d_lock); 2178 write_seqcount_begin(&dentry->d_seq); 2179 memcpy((unsigned char *)dentry->d_name.name, name->name, name->len); 2180 write_seqcount_end(&dentry->d_seq); 2181 spin_unlock(&dentry->d_lock); 2182 } 2183 EXPORT_SYMBOL(dentry_update_name_case); 2184 2185 static void switch_names(struct dentry *dentry, struct dentry *target) 2186 { 2187 if (dname_external(target)) { 2188 if (dname_external(dentry)) { 2189 /* 2190 * Both external: swap the pointers 2191 */ 2192 swap(target->d_name.name, dentry->d_name.name); 2193 } else { 2194 /* 2195 * dentry:internal, target:external. Steal target's 2196 * storage and make target internal. 2197 */ 2198 memcpy(target->d_iname, dentry->d_name.name, 2199 dentry->d_name.len + 1); 2200 dentry->d_name.name = target->d_name.name; 2201 target->d_name.name = target->d_iname; 2202 } 2203 } else { 2204 if (dname_external(dentry)) { 2205 /* 2206 * dentry:external, target:internal. Give dentry's 2207 * storage to target and make dentry internal 2208 */ 2209 memcpy(dentry->d_iname, target->d_name.name, 2210 target->d_name.len + 1); 2211 target->d_name.name = dentry->d_name.name; 2212 dentry->d_name.name = dentry->d_iname; 2213 } else { 2214 /* 2215 * Both are internal. Just copy target to dentry 2216 */ 2217 memcpy(dentry->d_iname, target->d_name.name, 2218 target->d_name.len + 1); 2219 dentry->d_name.len = target->d_name.len; 2220 return; 2221 } 2222 } 2223 swap(dentry->d_name.len, target->d_name.len); 2224 } 2225 2226 static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target) 2227 { 2228 /* 2229 * XXXX: do we really need to take target->d_lock? 2230 */ 2231 if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent) 2232 spin_lock(&target->d_parent->d_lock); 2233 else { 2234 if (d_ancestor(dentry->d_parent, target->d_parent)) { 2235 spin_lock(&dentry->d_parent->d_lock); 2236 spin_lock_nested(&target->d_parent->d_lock, 2237 DENTRY_D_LOCK_NESTED); 2238 } else { 2239 spin_lock(&target->d_parent->d_lock); 2240 spin_lock_nested(&dentry->d_parent->d_lock, 2241 DENTRY_D_LOCK_NESTED); 2242 } 2243 } 2244 if (target < dentry) { 2245 spin_lock_nested(&target->d_lock, 2); 2246 spin_lock_nested(&dentry->d_lock, 3); 2247 } else { 2248 spin_lock_nested(&dentry->d_lock, 2); 2249 spin_lock_nested(&target->d_lock, 3); 2250 } 2251 } 2252 2253 static void dentry_unlock_parents_for_move(struct dentry *dentry, 2254 struct dentry *target) 2255 { 2256 if (target->d_parent != dentry->d_parent) 2257 spin_unlock(&dentry->d_parent->d_lock); 2258 if (target->d_parent != target) 2259 spin_unlock(&target->d_parent->d_lock); 2260 } 2261 2262 /* 2263 * When switching names, the actual string doesn't strictly have to 2264 * be preserved in the target - because we're dropping the target 2265 * anyway. As such, we can just do a simple memcpy() to copy over 2266 * the new name before we switch. 2267 * 2268 * Note that we have to be a lot more careful about getting the hash 2269 * switched - we have to switch the hash value properly even if it 2270 * then no longer matches the actual (corrupted) string of the target. 2271 * The hash value has to match the hash queue that the dentry is on.. 2272 */ 2273 /* 2274 * __d_move - move a dentry 2275 * @dentry: entry to move 2276 * @target: new dentry 2277 * 2278 * Update the dcache to reflect the move of a file name. Negative 2279 * dcache entries should not be moved in this way. Caller must hold 2280 * rename_lock, the i_mutex of the source and target directories, 2281 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename(). 2282 */ 2283 static void __d_move(struct dentry * dentry, struct dentry * target) 2284 { 2285 if (!dentry->d_inode) 2286 printk(KERN_WARNING "VFS: moving negative dcache entry\n"); 2287 2288 BUG_ON(d_ancestor(dentry, target)); 2289 BUG_ON(d_ancestor(target, dentry)); 2290 2291 dentry_lock_for_move(dentry, target); 2292 2293 write_seqcount_begin(&dentry->d_seq); 2294 write_seqcount_begin(&target->d_seq); 2295 2296 /* __d_drop does write_seqcount_barrier, but they're OK to nest. */ 2297 2298 /* 2299 * Move the dentry to the target hash queue. Don't bother checking 2300 * for the same hash queue because of how unlikely it is. 2301 */ 2302 __d_drop(dentry); 2303 __d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash)); 2304 2305 /* Unhash the target: dput() will then get rid of it */ 2306 __d_drop(target); 2307 2308 list_del(&dentry->d_u.d_child); 2309 list_del(&target->d_u.d_child); 2310 2311 /* Switch the names.. */ 2312 switch_names(dentry, target); 2313 swap(dentry->d_name.hash, target->d_name.hash); 2314 2315 /* ... and switch the parents */ 2316 if (IS_ROOT(dentry)) { 2317 dentry->d_parent = target->d_parent; 2318 target->d_parent = target; 2319 INIT_LIST_HEAD(&target->d_u.d_child); 2320 } else { 2321 swap(dentry->d_parent, target->d_parent); 2322 2323 /* And add them back to the (new) parent lists */ 2324 list_add(&target->d_u.d_child, &target->d_parent->d_subdirs); 2325 } 2326 2327 list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs); 2328 2329 write_seqcount_end(&target->d_seq); 2330 write_seqcount_end(&dentry->d_seq); 2331 2332 dentry_unlock_parents_for_move(dentry, target); 2333 spin_unlock(&target->d_lock); 2334 fsnotify_d_move(dentry); 2335 spin_unlock(&dentry->d_lock); 2336 } 2337 2338 /* 2339 * d_move - move a dentry 2340 * @dentry: entry to move 2341 * @target: new dentry 2342 * 2343 * Update the dcache to reflect the move of a file name. Negative 2344 * dcache entries should not be moved in this way. See the locking 2345 * requirements for __d_move. 2346 */ 2347 void d_move(struct dentry *dentry, struct dentry *target) 2348 { 2349 write_seqlock(&rename_lock); 2350 __d_move(dentry, target); 2351 write_sequnlock(&rename_lock); 2352 } 2353 EXPORT_SYMBOL(d_move); 2354 2355 /** 2356 * d_ancestor - search for an ancestor 2357 * @p1: ancestor dentry 2358 * @p2: child dentry 2359 * 2360 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is 2361 * an ancestor of p2, else NULL. 2362 */ 2363 struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2) 2364 { 2365 struct dentry *p; 2366 2367 for (p = p2; !IS_ROOT(p); p = p->d_parent) { 2368 if (p->d_parent == p1) 2369 return p; 2370 } 2371 return NULL; 2372 } 2373 2374 /* 2375 * This helper attempts to cope with remotely renamed directories 2376 * 2377 * It assumes that the caller is already holding 2378 * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock 2379 * 2380 * Note: If ever the locking in lock_rename() changes, then please 2381 * remember to update this too... 2382 */ 2383 static struct dentry *__d_unalias(struct inode *inode, 2384 struct dentry *dentry, struct dentry *alias) 2385 { 2386 struct mutex *m1 = NULL, *m2 = NULL; 2387 struct dentry *ret; 2388 2389 /* If alias and dentry share a parent, then no extra locks required */ 2390 if (alias->d_parent == dentry->d_parent) 2391 goto out_unalias; 2392 2393 /* See lock_rename() */ 2394 ret = ERR_PTR(-EBUSY); 2395 if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex)) 2396 goto out_err; 2397 m1 = &dentry->d_sb->s_vfs_rename_mutex; 2398 if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex)) 2399 goto out_err; 2400 m2 = &alias->d_parent->d_inode->i_mutex; 2401 out_unalias: 2402 __d_move(alias, dentry); 2403 ret = alias; 2404 out_err: 2405 spin_unlock(&inode->i_lock); 2406 if (m2) 2407 mutex_unlock(m2); 2408 if (m1) 2409 mutex_unlock(m1); 2410 return ret; 2411 } 2412 2413 /* 2414 * Prepare an anonymous dentry for life in the superblock's dentry tree as a 2415 * named dentry in place of the dentry to be replaced. 2416 * returns with anon->d_lock held! 2417 */ 2418 static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon) 2419 { 2420 struct dentry *dparent, *aparent; 2421 2422 dentry_lock_for_move(anon, dentry); 2423 2424 write_seqcount_begin(&dentry->d_seq); 2425 write_seqcount_begin(&anon->d_seq); 2426 2427 dparent = dentry->d_parent; 2428 aparent = anon->d_parent; 2429 2430 switch_names(dentry, anon); 2431 swap(dentry->d_name.hash, anon->d_name.hash); 2432 2433 dentry->d_parent = (aparent == anon) ? dentry : aparent; 2434 list_del(&dentry->d_u.d_child); 2435 if (!IS_ROOT(dentry)) 2436 list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs); 2437 else 2438 INIT_LIST_HEAD(&dentry->d_u.d_child); 2439 2440 anon->d_parent = (dparent == dentry) ? anon : dparent; 2441 list_del(&anon->d_u.d_child); 2442 if (!IS_ROOT(anon)) 2443 list_add(&anon->d_u.d_child, &anon->d_parent->d_subdirs); 2444 else 2445 INIT_LIST_HEAD(&anon->d_u.d_child); 2446 2447 write_seqcount_end(&dentry->d_seq); 2448 write_seqcount_end(&anon->d_seq); 2449 2450 dentry_unlock_parents_for_move(anon, dentry); 2451 spin_unlock(&dentry->d_lock); 2452 2453 /* anon->d_lock still locked, returns locked */ 2454 anon->d_flags &= ~DCACHE_DISCONNECTED; 2455 } 2456 2457 /** 2458 * d_materialise_unique - introduce an inode into the tree 2459 * @dentry: candidate dentry 2460 * @inode: inode to bind to the dentry, to which aliases may be attached 2461 * 2462 * Introduces an dentry into the tree, substituting an extant disconnected 2463 * root directory alias in its place if there is one. Caller must hold the 2464 * i_mutex of the parent directory. 2465 */ 2466 struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) 2467 { 2468 struct dentry *actual; 2469 2470 BUG_ON(!d_unhashed(dentry)); 2471 2472 if (!inode) { 2473 actual = dentry; 2474 __d_instantiate(dentry, NULL); 2475 d_rehash(actual); 2476 goto out_nolock; 2477 } 2478 2479 spin_lock(&inode->i_lock); 2480 2481 if (S_ISDIR(inode->i_mode)) { 2482 struct dentry *alias; 2483 2484 /* Does an aliased dentry already exist? */ 2485 alias = __d_find_alias(inode, 0); 2486 if (alias) { 2487 actual = alias; 2488 write_seqlock(&rename_lock); 2489 2490 if (d_ancestor(alias, dentry)) { 2491 /* Check for loops */ 2492 actual = ERR_PTR(-ELOOP); 2493 spin_unlock(&inode->i_lock); 2494 } else if (IS_ROOT(alias)) { 2495 /* Is this an anonymous mountpoint that we 2496 * could splice into our tree? */ 2497 __d_materialise_dentry(dentry, alias); 2498 write_sequnlock(&rename_lock); 2499 __d_drop(alias); 2500 goto found; 2501 } else { 2502 /* Nope, but we must(!) avoid directory 2503 * aliasing. This drops inode->i_lock */ 2504 actual = __d_unalias(inode, dentry, alias); 2505 } 2506 write_sequnlock(&rename_lock); 2507 if (IS_ERR(actual)) { 2508 if (PTR_ERR(actual) == -ELOOP) 2509 pr_warn_ratelimited( 2510 "VFS: Lookup of '%s' in %s %s" 2511 " would have caused loop\n", 2512 dentry->d_name.name, 2513 inode->i_sb->s_type->name, 2514 inode->i_sb->s_id); 2515 dput(alias); 2516 } 2517 goto out_nolock; 2518 } 2519 } 2520 2521 /* Add a unique reference */ 2522 actual = __d_instantiate_unique(dentry, inode); 2523 if (!actual) 2524 actual = dentry; 2525 else 2526 BUG_ON(!d_unhashed(actual)); 2527 2528 spin_lock(&actual->d_lock); 2529 found: 2530 _d_rehash(actual); 2531 spin_unlock(&actual->d_lock); 2532 spin_unlock(&inode->i_lock); 2533 out_nolock: 2534 if (actual == dentry) { 2535 security_d_instantiate(dentry, inode); 2536 return NULL; 2537 } 2538 2539 iput(inode); 2540 return actual; 2541 } 2542 EXPORT_SYMBOL_GPL(d_materialise_unique); 2543 2544 static int prepend(char **buffer, int *buflen, const char *str, int namelen) 2545 { 2546 *buflen -= namelen; 2547 if (*buflen < 0) 2548 return -ENAMETOOLONG; 2549 *buffer -= namelen; 2550 memcpy(*buffer, str, namelen); 2551 return 0; 2552 } 2553 2554 static int prepend_name(char **buffer, int *buflen, struct qstr *name) 2555 { 2556 return prepend(buffer, buflen, name->name, name->len); 2557 } 2558 2559 /** 2560 * prepend_path - Prepend path string to a buffer 2561 * @path: the dentry/vfsmount to report 2562 * @root: root vfsmnt/dentry 2563 * @buffer: pointer to the end of the buffer 2564 * @buflen: pointer to buffer length 2565 * 2566 * Caller holds the rename_lock. 2567 */ 2568 static int prepend_path(const struct path *path, 2569 const struct path *root, 2570 char **buffer, int *buflen) 2571 { 2572 struct dentry *dentry = path->dentry; 2573 struct vfsmount *vfsmnt = path->mnt; 2574 struct mount *mnt = real_mount(vfsmnt); 2575 bool slash = false; 2576 int error = 0; 2577 2578 br_read_lock(&vfsmount_lock); 2579 while (dentry != root->dentry || vfsmnt != root->mnt) { 2580 struct dentry * parent; 2581 2582 if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) { 2583 /* Global root? */ 2584 if (!mnt_has_parent(mnt)) 2585 goto global_root; 2586 dentry = mnt->mnt_mountpoint; 2587 mnt = mnt->mnt_parent; 2588 vfsmnt = &mnt->mnt; 2589 continue; 2590 } 2591 parent = dentry->d_parent; 2592 prefetch(parent); 2593 spin_lock(&dentry->d_lock); 2594 error = prepend_name(buffer, buflen, &dentry->d_name); 2595 spin_unlock(&dentry->d_lock); 2596 if (!error) 2597 error = prepend(buffer, buflen, "/", 1); 2598 if (error) 2599 break; 2600 2601 slash = true; 2602 dentry = parent; 2603 } 2604 2605 if (!error && !slash) 2606 error = prepend(buffer, buflen, "/", 1); 2607 2608 out: 2609 br_read_unlock(&vfsmount_lock); 2610 return error; 2611 2612 global_root: 2613 /* 2614 * Filesystems needing to implement special "root names" 2615 * should do so with ->d_dname() 2616 */ 2617 if (IS_ROOT(dentry) && 2618 (dentry->d_name.len != 1 || dentry->d_name.name[0] != '/')) { 2619 WARN(1, "Root dentry has weird name <%.*s>\n", 2620 (int) dentry->d_name.len, dentry->d_name.name); 2621 } 2622 if (!slash) 2623 error = prepend(buffer, buflen, "/", 1); 2624 if (!error) 2625 error = real_mount(vfsmnt)->mnt_ns ? 1 : 2; 2626 goto out; 2627 } 2628 2629 /** 2630 * __d_path - return the path of a dentry 2631 * @path: the dentry/vfsmount to report 2632 * @root: root vfsmnt/dentry 2633 * @buf: buffer to return value in 2634 * @buflen: buffer length 2635 * 2636 * Convert a dentry into an ASCII path name. 2637 * 2638 * Returns a pointer into the buffer or an error code if the 2639 * path was too long. 2640 * 2641 * "buflen" should be positive. 2642 * 2643 * If the path is not reachable from the supplied root, return %NULL. 2644 */ 2645 char *__d_path(const struct path *path, 2646 const struct path *root, 2647 char *buf, int buflen) 2648 { 2649 char *res = buf + buflen; 2650 int error; 2651 2652 prepend(&res, &buflen, "\0", 1); 2653 write_seqlock(&rename_lock); 2654 error = prepend_path(path, root, &res, &buflen); 2655 write_sequnlock(&rename_lock); 2656 2657 if (error < 0) 2658 return ERR_PTR(error); 2659 if (error > 0) 2660 return NULL; 2661 return res; 2662 } 2663 2664 char *d_absolute_path(const struct path *path, 2665 char *buf, int buflen) 2666 { 2667 struct path root = {}; 2668 char *res = buf + buflen; 2669 int error; 2670 2671 prepend(&res, &buflen, "\0", 1); 2672 write_seqlock(&rename_lock); 2673 error = prepend_path(path, &root, &res, &buflen); 2674 write_sequnlock(&rename_lock); 2675 2676 if (error > 1) 2677 error = -EINVAL; 2678 if (error < 0) 2679 return ERR_PTR(error); 2680 return res; 2681 } 2682 2683 /* 2684 * same as __d_path but appends "(deleted)" for unlinked files. 2685 */ 2686 static int path_with_deleted(const struct path *path, 2687 const struct path *root, 2688 char **buf, int *buflen) 2689 { 2690 prepend(buf, buflen, "\0", 1); 2691 if (d_unlinked(path->dentry)) { 2692 int error = prepend(buf, buflen, " (deleted)", 10); 2693 if (error) 2694 return error; 2695 } 2696 2697 return prepend_path(path, root, buf, buflen); 2698 } 2699 2700 static int prepend_unreachable(char **buffer, int *buflen) 2701 { 2702 return prepend(buffer, buflen, "(unreachable)", 13); 2703 } 2704 2705 /** 2706 * d_path - return the path of a dentry 2707 * @path: path to report 2708 * @buf: buffer to return value in 2709 * @buflen: buffer length 2710 * 2711 * Convert a dentry into an ASCII path name. If the entry has been deleted 2712 * the string " (deleted)" is appended. Note that this is ambiguous. 2713 * 2714 * Returns a pointer into the buffer or an error code if the path was 2715 * too long. Note: Callers should use the returned pointer, not the passed 2716 * in buffer, to use the name! The implementation often starts at an offset 2717 * into the buffer, and may leave 0 bytes at the start. 2718 * 2719 * "buflen" should be positive. 2720 */ 2721 char *d_path(const struct path *path, char *buf, int buflen) 2722 { 2723 char *res = buf + buflen; 2724 struct path root; 2725 int error; 2726 2727 /* 2728 * We have various synthetic filesystems that never get mounted. On 2729 * these filesystems dentries are never used for lookup purposes, and 2730 * thus don't need to be hashed. They also don't need a name until a 2731 * user wants to identify the object in /proc/pid/fd/. The little hack 2732 * below allows us to generate a name for these objects on demand: 2733 */ 2734 if (path->dentry->d_op && path->dentry->d_op->d_dname) 2735 return path->dentry->d_op->d_dname(path->dentry, buf, buflen); 2736 2737 get_fs_root(current->fs, &root); 2738 write_seqlock(&rename_lock); 2739 error = path_with_deleted(path, &root, &res, &buflen); 2740 if (error < 0) 2741 res = ERR_PTR(error); 2742 write_sequnlock(&rename_lock); 2743 path_put(&root); 2744 return res; 2745 } 2746 EXPORT_SYMBOL(d_path); 2747 2748 /** 2749 * d_path_with_unreachable - return the path of a dentry 2750 * @path: path to report 2751 * @buf: buffer to return value in 2752 * @buflen: buffer length 2753 * 2754 * The difference from d_path() is that this prepends "(unreachable)" 2755 * to paths which are unreachable from the current process' root. 2756 */ 2757 char *d_path_with_unreachable(const struct path *path, char *buf, int buflen) 2758 { 2759 char *res = buf + buflen; 2760 struct path root; 2761 int error; 2762 2763 if (path->dentry->d_op && path->dentry->d_op->d_dname) 2764 return path->dentry->d_op->d_dname(path->dentry, buf, buflen); 2765 2766 get_fs_root(current->fs, &root); 2767 write_seqlock(&rename_lock); 2768 error = path_with_deleted(path, &root, &res, &buflen); 2769 if (error > 0) 2770 error = prepend_unreachable(&res, &buflen); 2771 write_sequnlock(&rename_lock); 2772 path_put(&root); 2773 if (error) 2774 res = ERR_PTR(error); 2775 2776 return res; 2777 } 2778 2779 /* 2780 * Helper function for dentry_operations.d_dname() members 2781 */ 2782 char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen, 2783 const char *fmt, ...) 2784 { 2785 va_list args; 2786 char temp[64]; 2787 int sz; 2788 2789 va_start(args, fmt); 2790 sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1; 2791 va_end(args); 2792 2793 if (sz > sizeof(temp) || sz > buflen) 2794 return ERR_PTR(-ENAMETOOLONG); 2795 2796 buffer += buflen - sz; 2797 return memcpy(buffer, temp, sz); 2798 } 2799 2800 /* 2801 * Write full pathname from the root of the filesystem into the buffer. 2802 */ 2803 static char *__dentry_path(struct dentry *dentry, char *buf, int buflen) 2804 { 2805 char *end = buf + buflen; 2806 char *retval; 2807 2808 prepend(&end, &buflen, "\0", 1); 2809 if (buflen < 1) 2810 goto Elong; 2811 /* Get '/' right */ 2812 retval = end-1; 2813 *retval = '/'; 2814 2815 while (!IS_ROOT(dentry)) { 2816 struct dentry *parent = dentry->d_parent; 2817 int error; 2818 2819 prefetch(parent); 2820 spin_lock(&dentry->d_lock); 2821 error = prepend_name(&end, &buflen, &dentry->d_name); 2822 spin_unlock(&dentry->d_lock); 2823 if (error != 0 || prepend(&end, &buflen, "/", 1) != 0) 2824 goto Elong; 2825 2826 retval = end; 2827 dentry = parent; 2828 } 2829 return retval; 2830 Elong: 2831 return ERR_PTR(-ENAMETOOLONG); 2832 } 2833 2834 char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen) 2835 { 2836 char *retval; 2837 2838 write_seqlock(&rename_lock); 2839 retval = __dentry_path(dentry, buf, buflen); 2840 write_sequnlock(&rename_lock); 2841 2842 return retval; 2843 } 2844 EXPORT_SYMBOL(dentry_path_raw); 2845 2846 char *dentry_path(struct dentry *dentry, char *buf, int buflen) 2847 { 2848 char *p = NULL; 2849 char *retval; 2850 2851 write_seqlock(&rename_lock); 2852 if (d_unlinked(dentry)) { 2853 p = buf + buflen; 2854 if (prepend(&p, &buflen, "//deleted", 10) != 0) 2855 goto Elong; 2856 buflen++; 2857 } 2858 retval = __dentry_path(dentry, buf, buflen); 2859 write_sequnlock(&rename_lock); 2860 if (!IS_ERR(retval) && p) 2861 *p = '/'; /* restore '/' overriden with '\0' */ 2862 return retval; 2863 Elong: 2864 return ERR_PTR(-ENAMETOOLONG); 2865 } 2866 2867 /* 2868 * NOTE! The user-level library version returns a 2869 * character pointer. The kernel system call just 2870 * returns the length of the buffer filled (which 2871 * includes the ending '\0' character), or a negative 2872 * error value. So libc would do something like 2873 * 2874 * char *getcwd(char * buf, size_t size) 2875 * { 2876 * int retval; 2877 * 2878 * retval = sys_getcwd(buf, size); 2879 * if (retval >= 0) 2880 * return buf; 2881 * errno = -retval; 2882 * return NULL; 2883 * } 2884 */ 2885 SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size) 2886 { 2887 int error; 2888 struct path pwd, root; 2889 char *page = (char *) __get_free_page(GFP_USER); 2890 2891 if (!page) 2892 return -ENOMEM; 2893 2894 get_fs_root_and_pwd(current->fs, &root, &pwd); 2895 2896 error = -ENOENT; 2897 write_seqlock(&rename_lock); 2898 if (!d_unlinked(pwd.dentry)) { 2899 unsigned long len; 2900 char *cwd = page + PAGE_SIZE; 2901 int buflen = PAGE_SIZE; 2902 2903 prepend(&cwd, &buflen, "\0", 1); 2904 error = prepend_path(&pwd, &root, &cwd, &buflen); 2905 write_sequnlock(&rename_lock); 2906 2907 if (error < 0) 2908 goto out; 2909 2910 /* Unreachable from current root */ 2911 if (error > 0) { 2912 error = prepend_unreachable(&cwd, &buflen); 2913 if (error) 2914 goto out; 2915 } 2916 2917 error = -ERANGE; 2918 len = PAGE_SIZE + page - cwd; 2919 if (len <= size) { 2920 error = len; 2921 if (copy_to_user(buf, cwd, len)) 2922 error = -EFAULT; 2923 } 2924 } else { 2925 write_sequnlock(&rename_lock); 2926 } 2927 2928 out: 2929 path_put(&pwd); 2930 path_put(&root); 2931 free_page((unsigned long) page); 2932 return error; 2933 } 2934 2935 /* 2936 * Test whether new_dentry is a subdirectory of old_dentry. 2937 * 2938 * Trivially implemented using the dcache structure 2939 */ 2940 2941 /** 2942 * is_subdir - is new dentry a subdirectory of old_dentry 2943 * @new_dentry: new dentry 2944 * @old_dentry: old dentry 2945 * 2946 * Returns 1 if new_dentry is a subdirectory of the parent (at any depth). 2947 * Returns 0 otherwise. 2948 * Caller must ensure that "new_dentry" is pinned before calling is_subdir() 2949 */ 2950 2951 int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry) 2952 { 2953 int result; 2954 unsigned seq; 2955 2956 if (new_dentry == old_dentry) 2957 return 1; 2958 2959 do { 2960 /* for restarting inner loop in case of seq retry */ 2961 seq = read_seqbegin(&rename_lock); 2962 /* 2963 * Need rcu_readlock to protect against the d_parent trashing 2964 * due to d_move 2965 */ 2966 rcu_read_lock(); 2967 if (d_ancestor(old_dentry, new_dentry)) 2968 result = 1; 2969 else 2970 result = 0; 2971 rcu_read_unlock(); 2972 } while (read_seqretry(&rename_lock, seq)); 2973 2974 return result; 2975 } 2976 2977 void d_genocide(struct dentry *root) 2978 { 2979 struct dentry *this_parent; 2980 struct list_head *next; 2981 unsigned seq; 2982 int locked = 0; 2983 2984 seq = read_seqbegin(&rename_lock); 2985 again: 2986 this_parent = root; 2987 spin_lock(&this_parent->d_lock); 2988 repeat: 2989 next = this_parent->d_subdirs.next; 2990 resume: 2991 while (next != &this_parent->d_subdirs) { 2992 struct list_head *tmp = next; 2993 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); 2994 next = tmp->next; 2995 2996 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); 2997 if (d_unhashed(dentry) || !dentry->d_inode) { 2998 spin_unlock(&dentry->d_lock); 2999 continue; 3000 } 3001 if (!list_empty(&dentry->d_subdirs)) { 3002 spin_unlock(&this_parent->d_lock); 3003 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_); 3004 this_parent = dentry; 3005 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_); 3006 goto repeat; 3007 } 3008 if (!(dentry->d_flags & DCACHE_GENOCIDE)) { 3009 dentry->d_flags |= DCACHE_GENOCIDE; 3010 dentry->d_count--; 3011 } 3012 spin_unlock(&dentry->d_lock); 3013 } 3014 if (this_parent != root) { 3015 struct dentry *child = this_parent; 3016 if (!(this_parent->d_flags & DCACHE_GENOCIDE)) { 3017 this_parent->d_flags |= DCACHE_GENOCIDE; 3018 this_parent->d_count--; 3019 } 3020 this_parent = try_to_ascend(this_parent, locked, seq); 3021 if (!this_parent) 3022 goto rename_retry; 3023 next = child->d_u.d_child.next; 3024 goto resume; 3025 } 3026 spin_unlock(&this_parent->d_lock); 3027 if (!locked && read_seqretry(&rename_lock, seq)) 3028 goto rename_retry; 3029 if (locked) 3030 write_sequnlock(&rename_lock); 3031 return; 3032 3033 rename_retry: 3034 locked = 1; 3035 write_seqlock(&rename_lock); 3036 goto again; 3037 } 3038 3039 /** 3040 * find_inode_number - check for dentry with name 3041 * @dir: directory to check 3042 * @name: Name to find. 3043 * 3044 * Check whether a dentry already exists for the given name, 3045 * and return the inode number if it has an inode. Otherwise 3046 * 0 is returned. 3047 * 3048 * This routine is used to post-process directory listings for 3049 * filesystems using synthetic inode numbers, and is necessary 3050 * to keep getcwd() working. 3051 */ 3052 3053 ino_t find_inode_number(struct dentry *dir, struct qstr *name) 3054 { 3055 struct dentry * dentry; 3056 ino_t ino = 0; 3057 3058 dentry = d_hash_and_lookup(dir, name); 3059 if (dentry) { 3060 if (dentry->d_inode) 3061 ino = dentry->d_inode->i_ino; 3062 dput(dentry); 3063 } 3064 return ino; 3065 } 3066 EXPORT_SYMBOL(find_inode_number); 3067 3068 static __initdata unsigned long dhash_entries; 3069 static int __init set_dhash_entries(char *str) 3070 { 3071 if (!str) 3072 return 0; 3073 dhash_entries = simple_strtoul(str, &str, 0); 3074 return 1; 3075 } 3076 __setup("dhash_entries=", set_dhash_entries); 3077 3078 static void __init dcache_init_early(void) 3079 { 3080 unsigned int loop; 3081 3082 /* If hashes are distributed across NUMA nodes, defer 3083 * hash allocation until vmalloc space is available. 3084 */ 3085 if (hashdist) 3086 return; 3087 3088 dentry_hashtable = 3089 alloc_large_system_hash("Dentry cache", 3090 sizeof(struct hlist_bl_head), 3091 dhash_entries, 3092 13, 3093 HASH_EARLY, 3094 &d_hash_shift, 3095 &d_hash_mask, 3096 0, 3097 0); 3098 3099 for (loop = 0; loop < (1U << d_hash_shift); loop++) 3100 INIT_HLIST_BL_HEAD(dentry_hashtable + loop); 3101 } 3102 3103 static void __init dcache_init(void) 3104 { 3105 unsigned int loop; 3106 3107 /* 3108 * A constructor could be added for stable state like the lists, 3109 * but it is probably not worth it because of the cache nature 3110 * of the dcache. 3111 */ 3112 dentry_cache = KMEM_CACHE(dentry, 3113 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD); 3114 3115 /* Hash may have been set up in dcache_init_early */ 3116 if (!hashdist) 3117 return; 3118 3119 dentry_hashtable = 3120 alloc_large_system_hash("Dentry cache", 3121 sizeof(struct hlist_bl_head), 3122 dhash_entries, 3123 13, 3124 0, 3125 &d_hash_shift, 3126 &d_hash_mask, 3127 0, 3128 0); 3129 3130 for (loop = 0; loop < (1U << d_hash_shift); loop++) 3131 INIT_HLIST_BL_HEAD(dentry_hashtable + loop); 3132 } 3133 3134 /* SLAB cache for __getname() consumers */ 3135 struct kmem_cache *names_cachep __read_mostly; 3136 EXPORT_SYMBOL(names_cachep); 3137 3138 EXPORT_SYMBOL(d_genocide); 3139 3140 void __init vfs_caches_init_early(void) 3141 { 3142 dcache_init_early(); 3143 inode_init_early(); 3144 } 3145 3146 void __init vfs_caches_init(unsigned long mempages) 3147 { 3148 unsigned long reserve; 3149 3150 /* Base hash sizes on available memory, with a reserve equal to 3151 150% of current kernel size */ 3152 3153 reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1); 3154 mempages -= reserve; 3155 3156 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, 3157 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 3158 3159 dcache_init(); 3160 inode_init(); 3161 files_init(mempages); 3162 mnt_init(); 3163 bdev_cache_init(); 3164 chrdev_init(); 3165 } 3166