Lines Matching +full:parent +full:- +full:locked
1 // SPDX-License-Identifier: GPL-2.0-only
6 * (C) 1997 Thomas Schoebel-Theuer,
13 * The dcache is a master of the icache - whenever a dcache entry
38 #include <asm/runtime-const.h>
42 * dcache->d_inode->i_lock protects:
43 * - i_dentry, d_u.d_alias, d_inode of aliases
45 * - the dcache hash table
47 * - the s_roots list (see __d_drop)
48 * dentry->d_sb->s_dentry_lru_lock protects:
49 * - the dcache lru lists and counters
51 * - d_flags
52 * - d_name
53 * - d_lru
54 * - d_count
55 * - d_unhashed()
56 * - d_parent and d_children
57 * - children's d_sib and d_parent
58 * - d_u.d_alias, d_inode
61 * dentry->d_inode->i_lock
62 * dentry->d_lock
63 * dentry->d_sb->s_dentry_lru_lock
68 * dentry->d_parent->...->d_parent->d_lock
70 * dentry->d_parent->d_lock
71 * dentry->d_lock
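Editor's sketch (not from dcache.c): the ancestor-before-descendant rule above, in code. lock_parent_and_child() is a hypothetical helper; DENTRY_D_LOCK_NESTED is the real nesting class dcache uses so lockdep can tell the two d_lock instances apart.

#include <linux/dcache.h>
#include <linux/spinlock.h>

/* Hypothetical helper: caller guarantees child->d_parent == parent. */
static void lock_parent_and_child(struct dentry *parent, struct dentry *child)
{
	spin_lock(&parent->d_lock);
	spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
	/* both ->d_name and the parent/child links are stable here */
	spin_unlock(&child->d_lock);
	spin_unlock(&parent->d_lock);
}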
101 * to make this good - I've just made it work.
103 * This hash-function tries to avoid losing too many bits of hash
104 * information, yet avoid using a prime hash-size or similar.
125 static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent, in in_lookup_hash() argument
128 hash += (unsigned long) parent / L1_CACHE_BYTES; in in_lookup_hash()
153 * Here we resort to our own counters instead of using generic per-cpu counters
203 .procname = "dentry-state",
210 .procname = "dentry-negative",
249 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
250 * The strings are both count bytes long, and count is non-zero.
254 #include <asm/word-at-a-time.h>
277 tcount -= sizeof(unsigned long); in dentry_string_cmp()
294 tcount--; in dentry_string_cmp()
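Editor's sketch: the fragments above come from the word-at-a-time variant of dentry_string_cmp(); below is a hedged reconstruction of the equivalent byte-at-a-time loop (the kernel's generic fallback has this shape), returning 0 on a match and non-zero otherwise.

static inline int dentry_string_cmp_bytewise(const unsigned char *cs,
					     const unsigned char *ct,
					     unsigned int tcount)
{
	do {
		if (*cs != *ct)
			return 1;	/* mismatch within the first tcount bytes */
		cs++;
		ct++;
		tcount--;
	} while (tcount);
	return 0;
}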
319 const unsigned char *cs = READ_ONCE(dentry->d_name.name); in dentry_cmp()
326 * Refcounted, freeing is RCU-delayed. See take_dentry_name_snapshot()
327 * for the reason why ->count and ->head can't be combined into a union.
328 * dentry_string_cmp() relies upon ->name[] being word-aligned.
338 return container_of(dentry->d_name.name, struct external_name, name[0]); in external_name()
357 return dentry->d_name.name != dentry->d_shortname.string; in dname_external()
367 seq = read_seqcount_begin(&dentry->d_seq); in take_dentry_name_snapshot()
368 s = READ_ONCE(dentry->d_name.name); in take_dentry_name_snapshot()
369 name->name.hash_len = dentry->d_name.hash_len; in take_dentry_name_snapshot()
370 name->name.name = name->inline_name.string; in take_dentry_name_snapshot()
371 if (likely(s == dentry->d_shortname.string)) { in take_dentry_name_snapshot()
372 name->inline_name = dentry->d_shortname; in take_dentry_name_snapshot()
377 if (unlikely(!atomic_inc_not_zero(&p->count))) in take_dentry_name_snapshot()
379 name->name.name = s; in take_dentry_name_snapshot()
381 if (read_seqcount_retry(&dentry->d_seq, seq)) { in take_dentry_name_snapshot()
391 if (unlikely(name->name.name != name->inline_name.string)) { in release_dentry_name_snapshot()
393 p = container_of(name->name.name, struct external_name, name[0]); in release_dentry_name_snapshot()
394 if (unlikely(atomic_dec_and_test(&p->count))) in release_dentry_name_snapshot()
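Editor's usage sketch (hypothetical caller): a name snapshot pins a stable copy of ->d_name across concurrent renames, so the name stays usable after ->d_lock and the RCU read side are gone.

#include <linux/dcache.h>
#include <linux/printk.h>

static void print_dentry_name(struct dentry *dentry)
{
	struct name_snapshot name;

	take_dentry_name_snapshot(&name, dentry);
	pr_info("dentry name: %s\n", name.name.name);	/* stable copy */
	release_dentry_name_snapshot(&name);
}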
406 dentry->d_inode = inode; in __d_set_inode_and_type()
407 flags = READ_ONCE(dentry->d_flags); in __d_set_inode_and_type()
410 smp_store_release(&dentry->d_flags, flags); in __d_set_inode_and_type()
415 unsigned flags = READ_ONCE(dentry->d_flags); in __d_clear_type_and_inode()
418 WRITE_ONCE(dentry->d_flags, flags); in __d_clear_type_and_inode()
419 dentry->d_inode = NULL; in __d_clear_type_and_inode()
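Editor's sketch: the lockless reader that the smp_store_release() in __d_set_inode_and_type() pairs with. An acquire load of ->d_flags that observes non-miss type bits is guaranteed to also observe the ->d_inode store; peek_positive_inode() is a hypothetical name.

static struct inode *peek_positive_inode(struct dentry *dentry)
{
	unsigned int flags = smp_load_acquire(&dentry->d_flags);

	if ((flags & DCACHE_ENTRY_TYPE) == DCACHE_MISS_TYPE)
		return NULL;			/* negative dentry */
	return READ_ONCE(dentry->d_inode);	/* ordered after the flags load */
}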
430 WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias)); in dentry_free()
433 if (likely(atomic_dec_and_test(&p->count))) { in dentry_free()
434 call_rcu(&dentry->d_u.d_rcu, __d_free_external); in dentry_free()
439 if (dentry->d_flags & DCACHE_NORCU) in dentry_free()
440 __d_free(&dentry->d_u.d_rcu); in dentry_free()
442 call_rcu(&dentry->d_u.d_rcu, __d_free); in dentry_free()
450 __releases(dentry->d_lock) in dentry_unlink_inode()
451 __releases(dentry->d_inode->i_lock) in dentry_unlink_inode()
453 struct inode *inode = dentry->d_inode; in dentry_unlink_inode()
455 raw_write_seqcount_begin(&dentry->d_seq); in dentry_unlink_inode()
457 hlist_del_init(&dentry->d_u.d_alias); in dentry_unlink_inode()
458 raw_write_seqcount_end(&dentry->d_seq); in dentry_unlink_inode()
459 spin_unlock(&dentry->d_lock); in dentry_unlink_inode()
460 spin_unlock(&inode->i_lock); in dentry_unlink_inode()
461 if (!inode->i_nlink) in dentry_unlink_inode()
463 if (dentry->d_op && dentry->d_op->d_iput) in dentry_unlink_inode()
464 dentry->d_op->d_iput(dentry, inode); in dentry_unlink_inode()
471 * is in use - which includes both the "real" per-superblock
477 * The per-cpu "nr_dentry_unused" counters are updated with
480 * The per-cpu "nr_dentry_negative" counters are only updated
481 * when deleted from or added to the per-superblock LRU list, not
488 #define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_…
492 dentry->d_flags |= DCACHE_LRU_LIST; in d_lru_add()
497 &dentry->d_sb->s_dentry_lru, &dentry->d_lru)); in d_lru_add()
503 dentry->d_flags &= ~DCACHE_LRU_LIST; in d_lru_del()
508 &dentry->d_sb->s_dentry_lru, &dentry->d_lru)); in d_lru_del()
514 list_del_init(&dentry->d_lru); in d_shrink_del()
515 dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST); in d_shrink_del()
522 list_add(&dentry->d_lru, list); in d_shrink_add()
523 dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST; in d_shrink_add()
536 dentry->d_flags &= ~DCACHE_LRU_LIST; in d_lru_isolate()
540 list_lru_isolate(lru, &dentry->d_lru); in d_lru_isolate()
547 dentry->d_flags |= DCACHE_SHRINK_LIST; in d_lru_shrink_move()
550 list_lru_isolate_move(lru, &dentry->d_lru, list); in d_lru_shrink_move()
562 b = &dentry->d_sb->s_roots; in ___d_drop()
564 b = d_hash(dentry->d_name.hash); in ___d_drop()
567 __hlist_bl_del(&dentry->d_hash); in ___d_drop()
575 dentry->d_hash.pprev = NULL; in __d_drop()
576 write_seqcount_invalidate(&dentry->d_seq); in __d_drop()
582 * d_drop - drop a dentry
585 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
587 * deleting the dentry - d_delete will try to mark the dentry negative if
594 * __d_drop requires dentry->d_lock
597 * (dentry->d_hash.pprev will be LIST_POISON2, not NULL).
601 spin_lock(&dentry->d_lock); in d_drop()
603 spin_unlock(&dentry->d_lock); in d_drop()
614 dentry->d_flags |= DCACHE_DENTRY_KILLED; in dentry_unlist()
615 if (unlikely(hlist_unhashed(&dentry->d_sib))) in dentry_unlist()
617 __hlist_del(&dentry->d_sib); in dentry_unlist()
620 * a normal list member, it didn't matter - ->d_sib.next would've in dentry_unlist()
623 * Normally d_walk() doesn't care about cursors moving around - in dentry_unlist()
624 * ->d_lock on parent prevents that and since a cursor has no children in dentry_unlist()
625 * of its own, we get through it without ever unlocking the parent. in dentry_unlist()
626 * There is one exception, though - if we ascend from a child that in dentry_unlist()
628 * using the value left in its ->d_sib.next. And if _that_ in dentry_unlist()
630 * before d_walk() regains parent->d_lock, we'll end up skipping in dentry_unlist()
633 * Solution: make sure that the pointer left behind in ->d_sib.next in dentry_unlist()
637 while (dentry->d_sib.next) { in dentry_unlist()
638 next = hlist_entry(dentry->d_sib.next, struct dentry, d_sib); in dentry_unlist()
639 if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR))) in dentry_unlist()
641 dentry->d_sib.next = next->d_sib.next; in dentry_unlist()
647 struct dentry *parent = NULL; in __dentry_kill() local
653 lockref_mark_dead(&dentry->d_lockref); in __dentry_kill()
659 if (dentry->d_flags & DCACHE_OP_PRUNE) in __dentry_kill()
660 dentry->d_op->d_prune(dentry); in __dentry_kill()
662 if (dentry->d_flags & DCACHE_LRU_LIST) { in __dentry_kill()
663 if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) in __dentry_kill()
668 if (dentry->d_inode) in __dentry_kill()
671 spin_unlock(&dentry->d_lock); in __dentry_kill()
673 if (dentry->d_op && dentry->d_op->d_release) in __dentry_kill()
674 dentry->d_op->d_release(dentry); in __dentry_kill()
677 /* now that it's negative, ->d_parent is stable */ in __dentry_kill()
679 parent = dentry->d_parent; in __dentry_kill()
680 spin_lock(&parent->d_lock); in __dentry_kill()
682 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); in __dentry_kill()
684 if (dentry->d_flags & DCACHE_SHRINK_LIST) in __dentry_kill()
686 spin_unlock(&dentry->d_lock); in __dentry_kill()
689 if (parent && --parent->d_lockref.count) { in __dentry_kill()
690 spin_unlock(&parent->d_lock); in __dentry_kill()
693 return parent; in __dentry_kill()
698 * Called under rcu_read_lock() and dentry->d_lock; the former
704 * that dentry's inode locked.
709 struct inode *inode = dentry->d_inode; in lock_for_kill()
711 if (unlikely(dentry->d_lockref.count)) in lock_for_kill()
714 if (!inode || likely(spin_trylock(&inode->i_lock))) in lock_for_kill()
718 spin_unlock(&dentry->d_lock); in lock_for_kill()
719 spin_lock(&inode->i_lock); in lock_for_kill()
720 spin_lock(&dentry->d_lock); in lock_for_kill()
721 if (likely(inode == dentry->d_inode)) in lock_for_kill()
723 spin_unlock(&inode->i_lock); in lock_for_kill()
724 inode = dentry->d_inode; in lock_for_kill()
726 if (likely(!dentry->d_lockref.count)) in lock_for_kill()
729 spin_unlock(&inode->i_lock); in lock_for_kill()
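Editor's sketch: the generic shape of what lock_for_kill() does above. Lock order wants i_lock before d_lock, but the caller already holds d_lock, so try-lock first and fall back to drop/retake/revalidate only on contention. The helper name is hypothetical.

/*
 * Returns true if @inner was taken without dropping @outer; on false,
 * @outer was released and retaken, so the caller must recheck state.
 */
static bool lock_inner_out_of_order(spinlock_t *outer, spinlock_t *inner)
{
	if (spin_trylock(inner))
		return true;
	spin_unlock(outer);
	spin_lock(inner);
	spin_lock(outer);
	return false;
}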
735 * locked; if not locked, we are more limited and might not be able to tell
736 * without a lock. False in this case means "punt to locked path and recheck".
738 * In case we aren't locked, these predicates are not "stable". However, it is
741 * re-gotten a reference to the dentry and change that, but our work is done -
744 static inline bool retain_dentry(struct dentry *dentry, bool locked) in retain_dentry() argument
749 d_flags = READ_ONCE(dentry->d_flags); in retain_dentry()
759 // ->d_delete() might tell us not to bother, but that requires in retain_dentry()
760 // ->d_lock; can't decide without it in retain_dentry()
762 if (!locked || dentry->d_op->d_delete(dentry)) in retain_dentry()
771 // need to do something - put it on LRU if it wasn't there already in retain_dentry()
773 // Unfortunately, both actions require ->d_lock, so in lockless in retain_dentry()
776 if (!locked) in retain_dentry()
780 if (!locked) in retain_dentry()
782 dentry->d_flags |= DCACHE_REFERENCED; in retain_dentry()
791 spin_lock(&inode->i_lock); in d_mark_dontcache()
792 hlist_for_each_entry(de, &inode->i_dentry, d_u.d_alias) { in d_mark_dontcache()
793 spin_lock(&de->d_lock); in d_mark_dontcache()
794 de->d_flags |= DCACHE_DONTCACHE; in d_mark_dontcache()
795 spin_unlock(&de->d_lock); in d_mark_dontcache()
797 inode->i_state |= I_DONTCACHE; in d_mark_dontcache()
798 spin_unlock(&inode->i_lock); in d_mark_dontcache()
819 ret = lockref_put_return(&dentry->d_lockref); in fast_dput()
827 spin_lock(&dentry->d_lock); in fast_dput()
828 if (WARN_ON_ONCE(dentry->d_lockref.count <= 0)) { in fast_dput()
829 spin_unlock(&dentry->d_lock); in fast_dput()
832 dentry->d_lockref.count--; in fast_dput()
833 goto locked; in fast_dput()
844 * taking the lock? There's a very common case when it's all we need - in fast_dput()
854 * but we'll need to re-check the situation after getting the lock. in fast_dput()
856 spin_lock(&dentry->d_lock); in fast_dput()
864 locked: in fast_dput()
865 if (dentry->d_lockref.count || retain_dentry(dentry, true)) { in fast_dput()
866 spin_unlock(&dentry->d_lock); in fast_dput()
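Editor's sketch: the lockref primitive behind fast_dput(). lockref_put_return() decrements the combined lock+count word with a cmpxchg and returns the new count, or -1 when the spinlock is taken or contended -- the cue to fall back to the locked slow path. try_put_fast() is a hypothetical wrapper.

#include <linux/lockref.h>

static bool try_put_fast(struct lockref *ref)
{
	int count = lockref_put_return(ref);

	if (count < 0)
		return false;	/* contended: caller must take ref->lock */
	return true;		/* reference dropped without touching the lock */
}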
891 * dput - release a dentry
896 * releasing its resources. If the parent dentries were scheduled for release
915 spin_unlock(&dentry->d_lock); in dput()
921 spin_unlock(&dentry->d_lock); in dput()
926 __must_hold(&dentry->d_lock) in to_shrink_list()
928 if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) { in to_shrink_list()
929 if (dentry->d_flags & DCACHE_LRU_LIST) in to_shrink_list()
944 spin_unlock(&dentry->d_lock); in dput_to_list()
954 * Do optimistic parent lookup without any in dget_parent()
958 seq = raw_seqcount_begin(&dentry->d_seq); in dget_parent()
959 ret = READ_ONCE(dentry->d_parent); in dget_parent()
960 gotref = lockref_get_not_zero(&ret->d_lockref); in dget_parent()
963 if (!read_seqcount_retry(&dentry->d_seq, seq)) in dget_parent()
970 * Don't need rcu_dereference because we re-check it was correct under in dget_parent()
974 ret = dentry->d_parent; in dget_parent()
975 spin_lock(&ret->d_lock); in dget_parent()
976 if (unlikely(ret != dentry->d_parent)) { in dget_parent()
977 spin_unlock(&ret->d_lock); in dget_parent()
982 BUG_ON(!ret->d_lockref.count); in dget_parent()
983 ret->d_lockref.count++; in dget_parent()
984 spin_unlock(&ret->d_lock); in dget_parent()
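Editor's usage sketch: dget_parent() is the safe way to take a counted reference on ->d_parent without holding the child's ->d_lock. grab_parent_inode() is not a real kernel helper.

static struct inode *grab_parent_inode(struct dentry *dentry)
{
	struct dentry *parent = dget_parent(dentry);
	struct inode *inode = d_inode(parent) ? igrab(d_inode(parent)) : NULL;

	dput(parent);
	return inode;	/* NULL if the parent had no inode to pin */
}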
993 if (hlist_empty(&inode->i_dentry)) in __d_find_any_alias()
995 alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias); in __d_find_any_alias()
996 lockref_get(&alias->d_lockref); in __d_find_any_alias()
1001 * d_find_any_alias - find any alias for a given inode
1011 spin_lock(&inode->i_lock); in d_find_any_alias()
1013 spin_unlock(&inode->i_lock); in d_find_any_alias()
1022 if (S_ISDIR(inode->i_mode)) in __d_find_alias()
1025 hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) { in __d_find_alias()
1026 spin_lock(&alias->d_lock); in __d_find_alias()
1029 spin_unlock(&alias->d_lock); in __d_find_alias()
1032 spin_unlock(&alias->d_lock); in __d_find_alias()
1038 * d_find_alias - grab a hashed alias of inode
1055 if (!hlist_empty(&inode->i_dentry)) { in d_find_alias()
1056 spin_lock(&inode->i_lock); in d_find_alias()
1058 spin_unlock(&inode->i_lock); in d_find_alias()
1070 struct hlist_head *l = &inode->i_dentry; in d_find_alias_rcu()
1073 spin_lock(&inode->i_lock); in d_find_alias_rcu()
1074 // ->i_dentry and ->i_rcu are colocated, but the latter won't be in d_find_alias_rcu()
1076 if (likely(!(inode->i_state & I_FREEING) && !hlist_empty(l))) { in d_find_alias_rcu()
1077 if (S_ISDIR(inode->i_mode)) { in d_find_alias_rcu()
1078 de = hlist_entry(l->first, struct dentry, d_u.d_alias); in d_find_alias_rcu()
1085 spin_unlock(&inode->i_lock); in d_find_alias_rcu()
1098 spin_lock(&inode->i_lock); in d_prune_aliases()
1099 hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) { in d_prune_aliases()
1100 spin_lock(&dentry->d_lock); in d_prune_aliases()
1101 if (!dentry->d_lockref.count) in d_prune_aliases()
1103 spin_unlock(&dentry->d_lock); in d_prune_aliases()
1105 spin_unlock(&inode->i_lock); in d_prune_aliases()
1119 spin_unlock(&victim->d_lock); in shrink_kill()
1127 dentry = list_entry(list->prev, struct dentry, d_lru); in shrink_dentry_list()
1128 spin_lock(&dentry->d_lock); in shrink_dentry_list()
1134 can_free = dentry->d_flags & DCACHE_DENTRY_KILLED; in shrink_dentry_list()
1135 spin_unlock(&dentry->d_lock); in shrink_dentry_list()
1153 * we are inverting the lru lock/dentry->d_lock here, in dentry_lru_isolate()
1157 if (!spin_trylock(&dentry->d_lock)) in dentry_lru_isolate()
1165 if (dentry->d_lockref.count) { in dentry_lru_isolate()
1167 spin_unlock(&dentry->d_lock); in dentry_lru_isolate()
1171 if (dentry->d_flags & DCACHE_REFERENCED) { in dentry_lru_isolate()
1172 dentry->d_flags &= ~DCACHE_REFERENCED; in dentry_lru_isolate()
1173 spin_unlock(&dentry->d_lock); in dentry_lru_isolate()
1177 * this point, we've dropped the dentry->d_lock but keep the in dentry_lru_isolate()
1198 spin_unlock(&dentry->d_lock); in dentry_lru_isolate()
1204 * prune_dcache_sb - shrink the dcache
1208 * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
1220 freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc, in prune_dcache_sb()
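Editor's sketch: prune_dcache_sb() is normally driven by the superblock shrinker rather than called directly; this hypothetical wrapper only shows the shrink_control plumbing it expects.

#include <linux/shrinker.h>

static long shrink_some_dentries(struct super_block *sb, unsigned long nr)
{
	struct shrink_control sc = {
		.gfp_mask   = GFP_KERNEL,
		.nr_to_scan = nr,
	};

	return prune_dcache_sb(sb, &sc);	/* number of dentries freed */
}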
1233 * we are inverting the lru lock/dentry->d_lock here, in dentry_lru_isolate_shrink()
1237 if (!spin_trylock(&dentry->d_lock)) in dentry_lru_isolate_shrink()
1241 spin_unlock(&dentry->d_lock); in dentry_lru_isolate_shrink()
1248 * shrink_dcache_sb - shrink dcache for a superblock
1259 list_lru_walk(&sb->s_dentry_lru, in shrink_dcache_sb()
1262 } while (list_lru_count(&sb->s_dentry_lru) > 0); in shrink_dcache_sb()
1267 * enum d_walk_ret - action to take during tree walk
1281 * d_walk - walk the dentry tree
1282 * @parent: start of walk
1288 static void d_walk(struct dentry *parent, void *data, in d_walk() argument
1298 this_parent = parent; in d_walk()
1299 spin_lock(&this_parent->d_lock); in d_walk()
1316 if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR)) in d_walk()
1319 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); in d_walk()
1326 spin_unlock(&dentry->d_lock); in d_walk()
1332 spin_unlock(&dentry->d_lock); in d_walk()
1336 if (!hlist_empty(&dentry->d_children)) { in d_walk()
1337 spin_unlock(&this_parent->d_lock); in d_walk()
1338 spin_release(&dentry->d_lock.dep_map, _RET_IP_); in d_walk()
1340 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_); in d_walk()
1343 spin_unlock(&dentry->d_lock); in d_walk()
1350 if (this_parent != parent) { in d_walk()
1352 this_parent = dentry->d_parent; in d_walk()
1354 spin_unlock(&dentry->d_lock); in d_walk()
1355 spin_lock(&this_parent->d_lock); in d_walk()
1357 /* might go back up the wrong parent if we have had a rename. */ in d_walk()
1362 if (likely(!(dentry->d_flags & DCACHE_DENTRY_KILLED))) { in d_walk()
1374 spin_unlock(&this_parent->d_lock); in d_walk()
1379 spin_unlock(&this_parent->d_lock); in d_walk()
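Editor's sketch (necessarily file-local, since d_walk() is static to dcache.c): a minimal enter callback. The callback's enum d_walk_ret return value tells the walker what to do next -- D_WALK_CONTINUE to descend, D_WALK_SKIP to prune the subtree, D_WALK_QUIT to stop.

static enum d_walk_ret count_one_dentry(void *data, struct dentry *dentry)
{
	unsigned long *count = data;

	(*count)++;
	return D_WALK_CONTINUE;
}

static unsigned long count_subtree(struct dentry *root)
{
	unsigned long count = 0;

	d_walk(root, &count, count_one_dentry);
	return count;
}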
1393 /* locks: mount_locked_reader && dentry->d_lock */
1397 struct path path = { .mnt = info->mnt, .dentry = dentry }; in path_check_mount()
1402 info->mounted = 1; in path_check_mount()
1409 * path_has_submounts - check for mounts over a dentry in the
1411 * @parent: path to check.
1413 * Return true if the parent or its subdirectories contain
1416 int path_has_submounts(const struct path *parent) in path_has_submounts() argument
1418 struct check_mount data = { .mnt = parent->mnt, .mounted = 0 }; in path_has_submounts()
1421 d_walk(parent->dentry, &data, path_check_mount); in path_has_submounts()
1438 int ret = -ENOENT; in d_set_mounted()
1440 for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) { in d_set_mounted()
1442 spin_lock(&p->d_lock); in d_set_mounted()
1444 spin_unlock(&p->d_lock); in d_set_mounted()
1447 spin_unlock(&p->d_lock); in d_set_mounted()
1449 spin_lock(&dentry->d_lock); in d_set_mounted()
1451 ret = -EBUSY; in d_set_mounted()
1453 dentry->d_flags |= DCACHE_MOUNTED; in d_set_mounted()
1457 spin_unlock(&dentry->d_lock); in d_set_mounted()
1464 * Search the dentry child list of the specified parent,
1467 * whenever the d_children list is non-empty and continue
1492 if (data->start == dentry) in select_collect()
1495 if (dentry->d_flags & DCACHE_SHRINK_LIST) { in select_collect()
1496 data->found++; in select_collect()
1497 } else if (!dentry->d_lockref.count) { in select_collect()
1498 to_shrink_list(dentry, &data->dispose); in select_collect()
1499 data->found++; in select_collect()
1500 } else if (dentry->d_lockref.count < 0) { in select_collect()
1501 data->found++; in select_collect()
1508 if (!list_empty(&data->dispose)) in select_collect()
1519 if (data->start == dentry) in select_collect2()
1522 if (!dentry->d_lockref.count) { in select_collect2()
1523 if (dentry->d_flags & DCACHE_SHRINK_LIST) { in select_collect2()
1525 data->victim = dentry; in select_collect2()
1528 to_shrink_list(dentry, &data->dispose); in select_collect2()
1535 if (!list_empty(&data->dispose)) in select_collect2()
1542 * shrink_dcache_parent - prune dcache
1543 * @parent: parent of entries to prune
1545 * Prune the dcache to remove unused children of the parent dentry.
1547 void shrink_dcache_parent(struct dentry *parent) in shrink_dcache_parent() argument
1550 struct select_data data = {.start = parent}; in shrink_dcache_parent()
1553 d_walk(parent, &data, select_collect); in shrink_dcache_parent()
1564 d_walk(parent, &data, select_collect2); in shrink_dcache_parent()
1566 spin_lock(&data.victim->d_lock); in shrink_dcache_parent()
1568 spin_unlock(&data.victim->d_lock); in shrink_dcache_parent()
1583 if (!hlist_empty(&dentry->d_children)) in umount_check()
1587 if (dentry == _data && dentry->d_lockref.count == 1) in umount_check()
1593 dentry->d_inode ? in umount_check()
1594 dentry->d_inode->i_ino : 0UL, in umount_check()
1596 dentry->d_lockref.count, in umount_check()
1597 dentry->d_sb->s_type->name, in umount_check()
1598 dentry->d_sb->s_id); in umount_check()
1617 rwsem_assert_held_write(&sb->s_umount); in shrink_dcache_for_umount()
1619 dentry = sb->s_root; in shrink_dcache_for_umount()
1620 sb->s_root = NULL; in shrink_dcache_for_umount()
1623 while (!hlist_bl_empty(&sb->s_roots)) { in shrink_dcache_for_umount()
1624 dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_roots), struct dentry, d_hash)); in shrink_dcache_for_umount()
1640 * d_invalidate - detach submounts, prune dcache, and drop
1646 spin_lock(&dentry->d_lock); in d_invalidate()
1648 spin_unlock(&dentry->d_lock); in d_invalidate()
1652 spin_unlock(&dentry->d_lock); in d_invalidate()
1655 if (!dentry->d_inode) in d_invalidate()
1675 * __d_alloc - allocate a dcache entry
1690 dentry = kmem_cache_alloc_lru(dentry_cache, &sb->s_dentry_lru, in __d_alloc()
1696 * We guarantee that the inline name is always NUL-terminated. in __d_alloc()
1701 dentry->d_shortname.string[DNAME_INLINE_LEN-1] = 0; in __d_alloc()
1704 dname = dentry->d_shortname.string; in __d_alloc()
1705 } else if (name->len > DNAME_INLINE_LEN-1) { in __d_alloc()
1707 struct external_name *p = kmalloc(size + name->len, in __d_alloc()
1714 atomic_set(&p->count, 1); in __d_alloc()
1715 dname = p->name; in __d_alloc()
1717 dname = dentry->d_shortname.string; in __d_alloc()
1720 dentry->__d_name.len = name->len; in __d_alloc()
1721 dentry->__d_name.hash = name->hash; in __d_alloc()
1722 memcpy(dname, name->name, name->len); in __d_alloc()
1723 dname[name->len] = 0; in __d_alloc()
1726 smp_store_release(&dentry->__d_name.name, dname); /* ^^^ */ in __d_alloc()
1728 dentry->d_flags = 0; in __d_alloc()
1729 lockref_init(&dentry->d_lockref); in __d_alloc()
1730 seqcount_spinlock_init(&dentry->d_seq, &dentry->d_lock); in __d_alloc()
1731 dentry->d_inode = NULL; in __d_alloc()
1732 dentry->d_parent = dentry; in __d_alloc()
1733 dentry->d_sb = sb; in __d_alloc()
1734 dentry->d_op = sb->__s_d_op; in __d_alloc()
1735 dentry->d_flags = sb->s_d_flags; in __d_alloc()
1736 dentry->d_fsdata = NULL; in __d_alloc()
1737 INIT_HLIST_BL_NODE(&dentry->d_hash); in __d_alloc()
1738 INIT_LIST_HEAD(&dentry->d_lru); in __d_alloc()
1739 INIT_HLIST_HEAD(&dentry->d_children); in __d_alloc()
1740 INIT_HLIST_NODE(&dentry->d_u.d_alias); in __d_alloc()
1741 INIT_HLIST_NODE(&dentry->d_sib); in __d_alloc()
1743 if (dentry->d_op && dentry->d_op->d_init) { in __d_alloc()
1744 err = dentry->d_op->d_init(dentry); in __d_alloc()
1759 * d_alloc - allocate a dcache entry
1760 * @parent: parent of entry to allocate
1767 struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) in d_alloc() argument
1769 struct dentry *dentry = __d_alloc(parent->d_sb, name); in d_alloc()
1772 spin_lock(&parent->d_lock); in d_alloc()
1777 dentry->d_parent = dget_dlock(parent); in d_alloc()
1778 hlist_add_head(&dentry->d_sib, &parent->d_children); in d_alloc()
1779 spin_unlock(&parent->d_lock); in d_alloc()
1791 struct dentry *d_alloc_cursor(struct dentry * parent) in d_alloc_cursor() argument
1793 struct dentry *dentry = d_alloc_anon(parent->d_sb); in d_alloc_cursor()
1795 dentry->d_flags |= DCACHE_DENTRY_CURSOR; in d_alloc_cursor()
1796 dentry->d_parent = dget(parent); in d_alloc_cursor()
1802 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
1808 * This is used for pipes, sockets et al. - the stuff that should
1823 dentry->d_flags |= DCACHE_NORCU; in d_alloc_pseudo()
1825 if (!dentry->d_op) in d_alloc_pseudo()
1826 dentry->d_op = &anon_ops; in d_alloc_pseudo()
1831 struct dentry *d_alloc_name(struct dentry *parent, const char *name) in d_alloc_name() argument
1836 q.hash_len = hashlen_string(parent, name); in d_alloc_name()
1837 return d_alloc(parent, &q); in d_alloc_name()
1850 if (op->d_hash) in d_op_flags()
1852 if (op->d_compare) in d_op_flags()
1854 if (op->d_revalidate) in d_op_flags()
1856 if (op->d_weak_revalidate) in d_op_flags()
1858 if (op->d_delete) in d_op_flags()
1860 if (op->d_prune) in d_op_flags()
1862 if (op->d_real) in d_op_flags()
1871 WARN_ON_ONCE(dentry->d_op); in d_set_d_op()
1872 WARN_ON_ONCE(dentry->d_flags & DCACHE_OP_FLAGS); in d_set_d_op()
1873 dentry->d_op = op; in d_set_d_op()
1875 dentry->d_flags |= flags; in d_set_d_op()
1881 s->__s_d_op = ops; in set_default_d_op()
1882 s->s_d_flags = (s->s_d_flags & ~DCACHE_OP_FLAGS) | flags; in set_default_d_op()
1893 if (S_ISDIR(inode->i_mode)) { in d_flags_for_inode()
1895 if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) { in d_flags_for_inode()
1896 if (unlikely(!inode->i_op->lookup)) in d_flags_for_inode()
1899 inode->i_opflags |= IOP_LOOKUP; in d_flags_for_inode()
1904 if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) { in d_flags_for_inode()
1905 if (unlikely(inode->i_op->get_link)) { in d_flags_for_inode()
1909 inode->i_opflags |= IOP_NOFOLLOW; in d_flags_for_inode()
1912 if (unlikely(!S_ISREG(inode->i_mode))) in d_flags_for_inode()
1926 spin_lock(&dentry->d_lock); in __d_instantiate()
1931 if ((dentry->d_flags & in __d_instantiate()
1934 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); in __d_instantiate()
1935 raw_write_seqcount_begin(&dentry->d_seq); in __d_instantiate()
1937 raw_write_seqcount_end(&dentry->d_seq); in __d_instantiate()
1939 spin_unlock(&dentry->d_lock); in __d_instantiate()
1943 * d_instantiate - fill in inode information for a dentry
1959 BUG_ON(!hlist_unhashed(&entry->d_u.d_alias)); in d_instantiate()
1962 spin_lock(&inode->i_lock); in d_instantiate()
1964 spin_unlock(&inode->i_lock); in d_instantiate()
1971 * with lockdep-related part of unlock_new_inode() done before
1972 * anything else. Use that instead of open-coding d_instantiate()/
1977 BUG_ON(!hlist_unhashed(&entry->d_u.d_alias)); in d_instantiate_new()
1981 spin_lock(&inode->i_lock); in d_instantiate_new()
1983 WARN_ON(!(inode->i_state & I_NEW)); in d_instantiate_new()
1984 inode->i_state &= ~I_NEW & ~I_CREATING; in d_instantiate_new()
1992 spin_unlock(&inode->i_lock); in d_instantiate_new()
2001 res = d_alloc_anon(root_inode->i_sb); in d_make_root()
2017 return ERR_PTR(-ESTALE); in __d_obtain_alias()
2021 sb = inode->i_sb; in __d_obtain_alias()
2029 res = ERR_PTR(-ENOMEM); in __d_obtain_alias()
2034 spin_lock(&inode->i_lock); in __d_obtain_alias()
2042 spin_lock(&new->d_lock); in __d_obtain_alias()
2044 hlist_add_head(&new->d_u.d_alias, &inode->i_dentry); in __d_obtain_alias()
2046 hlist_bl_lock(&sb->s_roots); in __d_obtain_alias()
2047 hlist_bl_add_head(&new->d_hash, &sb->s_roots); in __d_obtain_alias()
2048 hlist_bl_unlock(&sb->s_roots); in __d_obtain_alias()
2050 spin_unlock(&new->d_lock); in __d_obtain_alias()
2051 spin_unlock(&inode->i_lock); in __d_obtain_alias()
2052 inode = NULL; /* consumed by new->d_inode */ in __d_obtain_alias()
2055 spin_unlock(&inode->i_lock); in __d_obtain_alias()
2065 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
2080 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
2089 * d_obtain_root - find or allocate a dentry for a given inode
2101 * replaced by ERR_PTR(-ESTALE).
2110 * d_add_ci - lookup or allocate new dentry with case-exact name
2111 * @dentry: the negative dentry that was passed to the parent's lookup func
2112 * @inode: the inode case-insensitive lookup has found
2113 * @name: the case-exact name to be associated with the returned dentry
2115 * This is to avoid filling the dcache with case-insensitive names to the
2117 * case-insensitive filesystems.
2119 * For a case-insensitive lookup match and if the case-exact dentry
2134 found = d_hash_and_lookup(dentry->d_parent, name); in d_add_ci()
2140 found = d_alloc_parallel(dentry->d_parent, name, in d_add_ci()
2141 dentry->d_wait); in d_add_ci()
2147 found = d_alloc(dentry->d_parent, name); in d_add_ci()
2150 return ERR_PTR(-ENOMEM); in d_add_ci()
2164 * d_same_name - compare dentry name with case-exact name
2165 * @dentry: the negative dentry that was passed to the parent's lookup func
2166 * @parent: parent dentry
2167 * @name: the case-exact name to be associated with the returned dentry
2171 bool d_same_name(const struct dentry *dentry, const struct dentry *parent, in d_same_name() argument
2174 if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) { in d_same_name()
2175 if (dentry->d_name.len != name->len) in d_same_name()
2177 return dentry_cmp(dentry, name->name, name->len) == 0; in d_same_name()
2179 return parent->d_op->d_compare(dentry, in d_same_name()
2180 dentry->d_name.len, dentry->d_name.name, in d_same_name()
2186 * This is __d_lookup_rcu() when the parent dentry has
2190 const struct dentry *parent, in __d_lookup_rcu_op_compare() argument
2194 u64 hashlen = name->hash_len; in __d_lookup_rcu_op_compare()
2205 seq = raw_seqcount_begin(&dentry->d_seq); in __d_lookup_rcu_op_compare()
2206 if (dentry->d_parent != parent) in __d_lookup_rcu_op_compare()
2210 if (dentry->d_name.hash != hashlen_hash(hashlen)) in __d_lookup_rcu_op_compare()
2212 tlen = dentry->d_name.len; in __d_lookup_rcu_op_compare()
2213 tname = dentry->d_name.name; in __d_lookup_rcu_op_compare()
2215 if (read_seqcount_retry(&dentry->d_seq, seq)) { in __d_lookup_rcu_op_compare()
2219 if (parent->d_op->d_compare(dentry, tlen, tname, name) != 0) in __d_lookup_rcu_op_compare()
2228 * __d_lookup_rcu - search for a dentry (racy, store-free)
2229 * @parent: parent dentry
2234 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
2235 * resolution (store-free path walking) design described in
2236 * Documentation/filesystems/path-lookup.txt.
2240 * __d_lookup_rcu must only be used in rcu-walk mode, i.e. with vfsmount lock
2246 * the returned dentry, so long as its parent's seqlock is checked after the
2253 struct dentry *__d_lookup_rcu(const struct dentry *parent, in __d_lookup_rcu() argument
2257 u64 hashlen = name->hash_len; in __d_lookup_rcu()
2258 const unsigned char *str = name->name; in __d_lookup_rcu()
2270 if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) in __d_lookup_rcu()
2271 return __d_lookup_rcu_op_compare(parent, name, seqp); in __d_lookup_rcu()
2281 * false-negative result. d_lookup() protects against concurrent in __d_lookup_rcu()
2284 * See Documentation/filesystems/path-lookup.txt for more details. in __d_lookup_rcu()
2291 * renames, and thus protects parent and name fields. in __d_lookup_rcu()
2304 * we are still guaranteed NUL-termination of ->d_name.name. in __d_lookup_rcu()
2306 seq = raw_seqcount_begin(&dentry->d_seq); in __d_lookup_rcu()
2307 if (dentry->d_parent != parent) in __d_lookup_rcu()
2311 if (dentry->d_name.hash_len != hashlen) in __d_lookup_rcu()
2322 * d_lookup - search for a dentry
2323 * @parent: parent dentry
2327 * d_lookup searches the children of the parent dentry for the name in
2332 struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name) in d_lookup() argument
2339 dentry = __d_lookup(parent, name); in d_lookup()
2348 * __d_lookup - search for a dentry (racy)
2349 * @parent: parent dentry
2354 * false-negative result due to unrelated rename activity.
2362 struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name) in __d_lookup() argument
2364 unsigned int hash = name->hash; in __d_lookup()
2385 * false-negative result. d_lookup() protects against concurrent in __d_lookup()
2388 * See Documentation/filesystems/path-lookup.txt for more details. in __d_lookup()
2394 if (dentry->d_name.hash != hash) in __d_lookup()
2397 spin_lock(&dentry->d_lock); in __d_lookup()
2398 if (dentry->d_parent != parent) in __d_lookup()
2403 if (!d_same_name(dentry, parent, name)) in __d_lookup()
2406 dentry->d_lockref.count++; in __d_lookup()
2408 spin_unlock(&dentry->d_lock); in __d_lookup()
2411 spin_unlock(&dentry->d_lock); in __d_lookup()
2419 * d_hash_and_lookup - hash the qstr then search for a dentry
2423 * On lookup failure NULL is returned; on a bad name, ERR_PTR(-error)
2428 * Check for a fs-specific hash function. Note that we must in d_hash_and_lookup()
2429 * calculate the standard hash first, as the d_op->d_hash() in d_hash_and_lookup()
2432 name->hash = full_name_hash(dir, name->name, name->len); in d_hash_and_lookup()
2433 if (dir->d_flags & DCACHE_OP_HASH) { in d_hash_and_lookup()
2434 int err = dir->d_op->d_hash(dir, name); in d_hash_and_lookup()
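Editor's usage sketch (hypothetical helper): build the qstr and let d_hash_and_lookup() apply the fs-specific ->d_hash before searching. NULL means not found; an ERR_PTR() means the name itself was rejected.

#include <linux/dcache.h>
#include <linux/string.h>

static struct dentry *lookup_child_by_name(struct dentry *dir, const char *s)
{
	struct qstr q = QSTR_INIT(s, strlen(s));

	return d_hash_and_lookup(dir, &q);
}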
2443 * - turn this dentry into a negative dentry
2444 * - unhash this dentry and free it.
2455 * d_delete - delete a dentry
2464 struct inode *inode = dentry->d_inode; in d_delete()
2466 spin_lock(&inode->i_lock); in d_delete()
2467 spin_lock(&dentry->d_lock); in d_delete()
2471 if (dentry->d_lockref.count == 1) { in d_delete()
2474 dentry->d_flags &= ~DCACHE_CANT_MOUNT; in d_delete()
2478 spin_unlock(&dentry->d_lock); in d_delete()
2479 spin_unlock(&inode->i_lock); in d_delete()
2486 struct hlist_bl_head *b = d_hash(entry->d_name.hash); in __d_rehash()
2489 hlist_bl_add_head_rcu(&entry->d_hash, b); in __d_rehash()
2494 * d_rehash - add an entry back to the hash
2502 spin_lock(&entry->d_lock); in d_rehash()
2504 spin_unlock(&entry->d_lock); in d_rehash()
2512 unsigned n = READ_ONCE(dir->i_dir_seq); in start_dir_add()
2513 if (!(n & 1) && try_cmpxchg(&dir->i_dir_seq, &n, n + 1)) in start_dir_add()
2522 smp_store_release(&dir->i_dir_seq, n + 2); in end_dir_add()
2532 add_wait_queue(dentry->d_wait, &wait); in d_wait_lookup()
2535 spin_unlock(&dentry->d_lock); in d_wait_lookup()
2537 spin_lock(&dentry->d_lock); in d_wait_lookup()
2542 struct dentry *d_alloc_parallel(struct dentry *parent, in d_alloc_parallel() argument
2546 unsigned int hash = name->hash; in d_alloc_parallel()
2547 struct hlist_bl_head *b = in_lookup_hash(parent, hash); in d_alloc_parallel()
2549 struct dentry *new = __d_alloc(parent->d_sb, name); in d_alloc_parallel()
2554 return ERR_PTR(-ENOMEM); in d_alloc_parallel()
2556 new->d_flags |= DCACHE_PAR_LOOKUP; in d_alloc_parallel()
2557 spin_lock(&parent->d_lock); in d_alloc_parallel()
2558 new->d_parent = dget_dlock(parent); in d_alloc_parallel()
2559 hlist_add_head(&new->d_sib, &parent->d_children); in d_alloc_parallel()
2560 if (parent->d_flags & DCACHE_DISCONNECTED) in d_alloc_parallel()
2561 new->d_flags |= DCACHE_DISCONNECTED; in d_alloc_parallel()
2562 spin_unlock(&parent->d_lock); in d_alloc_parallel()
2566 seq = smp_load_acquire(&parent->d_inode->i_dir_seq); in d_alloc_parallel()
2568 dentry = __d_lookup_rcu(parent, name, &d_seq); in d_alloc_parallel()
2570 if (!lockref_get_not_dead(&dentry->d_lockref)) { in d_alloc_parallel()
2574 if (read_seqcount_retry(&dentry->d_seq, d_seq)) { in d_alloc_parallel()
2594 if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) { in d_alloc_parallel()
2600 * No changes for the parent since the beginning of d_lookup(). in d_alloc_parallel()
2602 * any potential in-lookup matches are going to stay here until in d_alloc_parallel()
2607 if (dentry->d_name.hash != hash) in d_alloc_parallel()
2609 if (dentry->d_parent != parent) in d_alloc_parallel()
2611 if (!d_same_name(dentry, parent, name)) in d_alloc_parallel()
2615 if (!lockref_get_not_dead(&dentry->d_lockref)) { in d_alloc_parallel()
2625 spin_lock(&dentry->d_lock); in d_alloc_parallel()
2628 * it's not in-lookup anymore; in principle we should repeat in d_alloc_parallel()
2633 if (unlikely(dentry->d_name.hash != hash)) in d_alloc_parallel()
2635 if (unlikely(dentry->d_parent != parent)) in d_alloc_parallel()
2639 if (unlikely(!d_same_name(dentry, parent, name))) in d_alloc_parallel()
2642 spin_unlock(&dentry->d_lock); in d_alloc_parallel()
2647 new->d_wait = wq; in d_alloc_parallel()
2648 hlist_bl_add_head(&new->d_u.d_in_lookup_hash, b); in d_alloc_parallel()
2652 spin_unlock(&dentry->d_lock); in d_alloc_parallel()
2659 * - Unhash the dentry
2660 * - Retrieve and clear the waitqueue head in dentry
2661 * - Return the waitqueue head
2668 lockdep_assert_held(&dentry->d_lock); in __d_lookup_unhash()
2670 b = in_lookup_hash(dentry->d_parent, dentry->d_name.hash); in __d_lookup_unhash()
2672 dentry->d_flags &= ~DCACHE_PAR_LOOKUP; in __d_lookup_unhash()
2673 __hlist_bl_del(&dentry->d_u.d_in_lookup_hash); in __d_lookup_unhash()
2674 d_wait = dentry->d_wait; in __d_lookup_unhash()
2675 dentry->d_wait = NULL; in __d_lookup_unhash()
2677 INIT_HLIST_NODE(&dentry->d_u.d_alias); in __d_lookup_unhash()
2678 INIT_LIST_HEAD(&dentry->d_lru); in __d_lookup_unhash()
2684 spin_lock(&dentry->d_lock); in __d_lookup_unhash_wake()
2686 spin_unlock(&dentry->d_lock); in __d_lookup_unhash_wake()
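Editor's sketch (hypothetical filesystem lookup path): the d_alloc_parallel() contract. If the returned dentry is still marked in-lookup, this caller owns the lookup and must end it with d_lookup_done() (often indirectly, via d_splice_alias()); otherwise another walker finished first and the result is simply used.

#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/wait.h>

static struct dentry *start_parallel_lookup(struct dentry *parent,
					    const struct qstr *name)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	struct dentry *dentry = d_alloc_parallel(parent, name, &wq);

	if (IS_ERR(dentry) || !d_in_lookup(dentry))
		return dentry;	/* error, or somebody else's finished lookup */

	/* ... the filesystem would do the real lookup here ... */

	d_lookup_done(dentry);	/* drop it from the in-lookup hash, wake waiters */
	return dentry;
}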
2690 /* inode->i_lock held if inode is non-NULL */
2698 spin_lock(&dentry->d_lock); in __d_add()
2700 dir = dentry->d_parent->d_inode; in __d_add()
2708 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); in __d_add()
2709 raw_write_seqcount_begin(&dentry->d_seq); in __d_add()
2711 raw_write_seqcount_end(&dentry->d_seq); in __d_add()
2717 spin_unlock(&dentry->d_lock); in __d_add()
2719 spin_unlock(&inode->i_lock); in __d_add()
2723 * d_add - add dentry to hash queues
2735 spin_lock(&inode->i_lock); in d_add()
2748 swap(target->__d_name.name, dentry->__d_name.name); in swap_names()
2754 dentry->__d_name.name = target->__d_name.name; in swap_names()
2755 target->d_shortname = dentry->d_shortname; in swap_names()
2756 target->__d_name.name = target->d_shortname.string; in swap_names()
2764 target->__d_name.name = dentry->__d_name.name; in swap_names()
2765 dentry->d_shortname = target->d_shortname; in swap_names()
2766 dentry->__d_name.name = dentry->d_shortname.string; in swap_names()
2772 swap(dentry->d_shortname.words[i], in swap_names()
2773 target->d_shortname.words[i]); in swap_names()
2776 swap(dentry->__d_name.hash_len, target->__d_name.hash_len); in swap_names()
2785 atomic_inc(&external_name(target)->count); in copy_name()
2786 dentry->__d_name = target->__d_name; in copy_name()
2788 dentry->d_shortname = target->d_shortname; in copy_name()
2789 dentry->__d_name.name = dentry->d_shortname.string; in copy_name()
2790 dentry->__d_name.hash_len = target->__d_name.hash_len; in copy_name()
2792 if (old_name && likely(atomic_dec_and_test(&old_name->count))) in copy_name()
2797 * __d_move - move a dentry
2804 * i_rwsem of the source and target directories (exclusively), and the sb->
2815 WARN_ON(!dentry->d_inode); in __d_move()
2820 old_parent = dentry->d_parent; in __d_move()
2824 spin_lock(&target->d_parent->d_lock); in __d_move()
2826 /* target is not a descendent of dentry->d_parent */ in __d_move()
2827 spin_lock(&target->d_parent->d_lock); in __d_move()
2828 spin_lock_nested(&old_parent->d_lock, DENTRY_D_LOCK_NESTED); in __d_move()
2831 spin_lock(&old_parent->d_lock); in __d_move()
2833 spin_lock_nested(&target->d_parent->d_lock, in __d_move()
2836 spin_lock_nested(&dentry->d_lock, 2); in __d_move()
2837 spin_lock_nested(&target->d_lock, 3); in __d_move()
2840 dir = target->d_parent->d_inode; in __d_move()
2845 write_seqcount_begin(&dentry->d_seq); in __d_move()
2846 write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED); in __d_move()
2855 dentry->d_parent = target->d_parent; in __d_move()
2858 target->d_hash.pprev = NULL; in __d_move()
2859 dentry->d_parent->d_lockref.count++; in __d_move()
2861 WARN_ON(!--old_parent->d_lockref.count); in __d_move()
2863 target->d_parent = old_parent; in __d_move()
2865 if (!hlist_unhashed(&target->d_sib)) in __d_move()
2866 __hlist_del(&target->d_sib); in __d_move()
2867 hlist_add_head(&target->d_sib, &target->d_parent->d_children); in __d_move()
2871 if (!hlist_unhashed(&dentry->d_sib)) in __d_move()
2872 __hlist_del(&dentry->d_sib); in __d_move()
2873 hlist_add_head(&dentry->d_sib, &dentry->d_parent->d_children); in __d_move()
2878 write_seqcount_end(&target->d_seq); in __d_move()
2879 write_seqcount_end(&dentry->d_seq); in __d_move()
2884 if (dentry->d_parent != old_parent) in __d_move()
2885 spin_unlock(&dentry->d_parent->d_lock); in __d_move()
2887 spin_unlock(&old_parent->d_lock); in __d_move()
2888 spin_unlock(&target->d_lock); in __d_move()
2889 spin_unlock(&dentry->d_lock); in __d_move()
2893 * d_move - move a dentry
2910 * d_exchange - exchange two dentries
2918 WARN_ON(!dentry1->d_inode); in d_exchange()
2919 WARN_ON(!dentry2->d_inode); in d_exchange()
2930 * d_ancestor - search for an ancestor
2941 for (p = p2; !IS_ROOT(p); p = p->d_parent) { in d_ancestor()
2942 if (p->d_parent == p1) in d_ancestor()
2952 * dentry->d_parent->d_inode->i_rwsem, and rename_lock
2961 int ret = -ESTALE; in __d_unalias()
2963 /* If alias and dentry share a parent, then no extra locks required */ in __d_unalias()
2964 if (alias->d_parent == dentry->d_parent) in __d_unalias()
2968 if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex)) in __d_unalias()
2970 m1 = &dentry->d_sb->s_vfs_rename_mutex; in __d_unalias()
2971 if (!inode_trylock_shared(alias->d_parent->d_inode)) in __d_unalias()
2973 m2 = &alias->d_parent->d_inode->i_rwsem; in __d_unalias()
2975 if (alias->d_op && alias->d_op->d_unalias_trylock && in __d_unalias()
2976 !alias->d_op->d_unalias_trylock(alias)) in __d_unalias()
2979 if (alias->d_op && alias->d_op->d_unalias_unlock) in __d_unalias()
2980 alias->d_op->d_unalias_unlock(alias); in __d_unalias()
3002 spin_lock(&inode->i_lock); in d_splice_alias_ops()
3003 if (S_ISDIR(inode->i_mode)) { in d_splice_alias_ops()
3007 spin_unlock(&inode->i_lock); in d_splice_alias_ops()
3012 new = ERR_PTR(-ELOOP); in d_splice_alias_ops()
3016 dentry->d_name.name, in d_splice_alias_ops()
3017 inode->i_sb->s_type->name, in d_splice_alias_ops()
3018 inode->i_sb->s_id); in d_splice_alias_ops()
3020 struct dentry *old_parent = dget(new->d_parent); in d_splice_alias_ops()
3042 * d_splice_alias - splice a disconnected dentry into the tree if one exists
3050 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
3057 * is returned. This matches the expected return value of ->lookup.
3077 * is_subdir - is new dentry a subdirectory of old_dentry
3081 * Returns true if new_dentry is a subdirectory of the parent (at any depth).
3114 if (d_unhashed(dentry) || !dentry->d_inode) in d_genocide_kill()
3117 if (!(dentry->d_flags & DCACHE_GENOCIDE)) { in d_genocide_kill()
3118 dentry->d_flags |= DCACHE_GENOCIDE; in d_genocide_kill()
3119 dentry->d_lockref.count--; in d_genocide_kill()
3125 void d_genocide(struct dentry *parent) in d_genocide() argument
3127 d_walk(parent, parent, d_genocide_kill); in d_genocide()
3132 struct dentry *dentry = file->f_path.dentry; in d_mark_tmpfile()
3135 !hlist_unhashed(&dentry->d_u.d_alias) || in d_mark_tmpfile()
3137 spin_lock(&dentry->d_parent->d_lock); in d_mark_tmpfile()
3138 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); in d_mark_tmpfile()
3139 dentry->__d_name.len = sprintf(dentry->d_shortname.string, "#%llu", in d_mark_tmpfile()
3140 (unsigned long long)inode->i_ino); in d_mark_tmpfile()
3141 spin_unlock(&dentry->d_lock); in d_mark_tmpfile()
3142 spin_unlock(&dentry->d_parent->d_lock); in d_mark_tmpfile()
3148 struct dentry *dentry = file->f_path.dentry; in d_tmpfile()
3157 * Obtain inode number of the parent dentry.
3161 struct dentry *parent; in d_parent_ino() local
3167 seq = raw_seqcount_begin(&dentry->d_seq); in d_parent_ino()
3168 parent = READ_ONCE(dentry->d_parent); in d_parent_ino()
3169 iparent = d_inode_rcu(parent); in d_parent_ino()
3171 ret = iparent->i_ino; in d_parent_ino()
3172 if (!read_seqcount_retry(&dentry->d_seq, seq)) in d_parent_ino()
3177 spin_lock(&dentry->d_lock); in d_parent_ino()
3178 ret = dentry->d_parent->d_inode->i_ino; in d_parent_ino()
3179 spin_unlock(&dentry->d_lock); in d_parent_ino()
3212 d_hash_shift = 32 - d_hash_shift; in dcache_init_early()
3243 d_hash_shift = 32 - d_hash_shift; in dcache_init()