
1 // SPDX-License-Identifier: GPL-2.0-only
6 * (C) 1997 Thomas Schoebel-Theuer,
13 * The dcache is a master of the icache - whenever a dcache entry
38 #include <asm/runtime-const.h>
42 * dcache->d_inode->i_lock protects:
43 * - i_dentry, d_u.d_alias, d_inode of aliases
45 * - the dcache hash table
47 * - the s_roots list (see __d_drop)
48 * dentry->d_sb->s_dentry_lru_lock protects:
49 * - the dcache lru lists and counters
51 * - d_flags
52 * - d_name
53 * - d_lru
54 * - d_count
55 * - d_unhashed()
56 * - d_parent and d_children
57 * - children's d_sib and d_parent
58 * - d_u.d_alias, d_inode
61 * dentry->d_inode->i_lock
62 * dentry->d_lock
63 * dentry->d_sb->s_dentry_lru_lock
68 * dentry->d_parent->...->d_parent->d_lock
70 * dentry->d_parent->d_lock
71 * dentry->d_lock
74 * arbitrary, since it's serialized on rename_lock
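A minimal sketch of honouring the ordering documented above when both
locks are needed (illustrative, not taken from dcache.c; assumes
->d_inode is known to be non-NULL and stable):

	spin_lock(&dentry->d_inode->i_lock);	/* inode lock first */
	spin_lock(&dentry->d_lock);		/* then the dentry lock */
	/* ... work on the (dentry, inode) pair ... */
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dentry->d_inode->i_lock);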
95 * to make this good - I've just made it work.
97 * This hash-function tries to avoid losing too many bits of hash
98 * information, yet avoid using a prime hash-size or similar.
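The shift-based bucketing that comment describes can be sketched like
this (names illustrative; the real d_hash() indexes through
d_hash_shift, which dcache_init() sets to "32 - bits", as seen at the
end of this listing):

	static inline unsigned int example_bucket(unsigned int hash,
						  unsigned int bits)
	{
		return hash >> (32 - bits);	/* index by the top bits */
	}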
147 * Here we resort to our own counters instead of using generic per-cpu counters
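The "own counters" in question are plain per-cpu longs; the shape is
roughly as follows (hedged sketch matching what dcache.c does for
nr_dentry):

	static DEFINE_PER_CPU(long, nr_dentry);

	this_cpu_inc(nr_dentry);	/* on allocation */
	this_cpu_dec(nr_dentry);	/* on free */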
197 .procname = "dentry-state",
204 .procname = "dentry-negative",
223 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
224 * The strings are both count bytes long, and count is non-zero.
228 #include <asm/word-at-a-time.h>
251 tcount -= sizeof(unsigned long); in dentry_string_cmp()
268 tcount--; in dentry_string_cmp()
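A portable sketch of the same word-at-a-time idea (the real
dentry_string_cmp() uses the <asm/word-at-a-time.h> helpers and copes
with mixed alignment; this simplified form compares whole words first,
then the tail):

	#include <string.h>

	static int example_string_cmp(const unsigned char *cs,
				      const unsigned char *ct, size_t tcount)
	{
		while (tcount >= sizeof(unsigned long)) {
			unsigned long a, b;

			memcpy(&a, cs, sizeof(a));	/* alignment-safe loads */
			memcpy(&b, ct, sizeof(b));
			if (a != b)
				return 1;		/* non-zero: mismatch */
			cs += sizeof(unsigned long);
			ct += sizeof(unsigned long);
			tcount -= sizeof(unsigned long);
		}
		while (tcount--)
			if (*cs++ != *ct++)
				return 1;
		return 0;				/* match */
	}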
293 const unsigned char *cs = READ_ONCE(dentry->d_name.name); in dentry_cmp()
300 * Refcounted, freeing is RCU-delayed. See take_dentry_name_snapshot()
301 * for the reason why ->count and ->head can't be combined into a union.
302 * dentry_string_cmp() relies upon ->name[] being word-aligned.
312 return container_of(dentry->d_name.name, struct external_name, name[0]); in external_name()
331 return dentry->d_name.name != dentry->d_shortname.string; in dname_external()
341 seq = read_seqcount_begin(&dentry->d_seq); in take_dentry_name_snapshot()
342 s = READ_ONCE(dentry->d_name.name); in take_dentry_name_snapshot()
343 name->name.hash_len = dentry->d_name.hash_len; in take_dentry_name_snapshot()
344 name->name.name = name->inline_name.string; in take_dentry_name_snapshot()
345 if (likely(s == dentry->d_shortname.string)) { in take_dentry_name_snapshot()
346 name->inline_name = dentry->d_shortname; in take_dentry_name_snapshot()
351 if (unlikely(!atomic_inc_not_zero(&p->count))) in take_dentry_name_snapshot()
353 name->name.name = s; in take_dentry_name_snapshot()
355 if (read_seqcount_retry(&dentry->d_seq, seq)) { in take_dentry_name_snapshot()
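The read side above is the usual seqcount sampling pattern, shown here
in its canonical retry-loop form (illustrative; the function itself
falls back rather than looping blindly, since on a retry it may have
grabbed an external-name reference it must drop first):

	unsigned seq;

	do {
		seq = read_seqcount_begin(&dentry->d_seq);
		/* ... copy out the ->d_name fields being sampled ... */
	} while (read_seqcount_retry(&dentry->d_seq, seq));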
365 if (unlikely(name->name.name != name->inline_name.string)) { in release_dentry_name_snapshot()
367 p = container_of(name->name.name, struct external_name, name[0]); in release_dentry_name_snapshot()
368 if (unlikely(atomic_dec_and_test(&p->count))) in release_dentry_name_snapshot()
380 dentry->d_inode = inode; in __d_set_inode_and_type()
381 flags = READ_ONCE(dentry->d_flags); in __d_set_inode_and_type()
384 smp_store_release(&dentry->d_flags, flags); in __d_set_inode_and_type()
389 unsigned flags = READ_ONCE(dentry->d_flags); in __d_clear_type_and_inode()
392 WRITE_ONCE(dentry->d_flags, flags); in __d_clear_type_and_inode()
393 dentry->d_inode = NULL; in __d_clear_type_and_inode()
404 WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias)); in dentry_free()
407 if (likely(atomic_dec_and_test(&p->count))) { in dentry_free()
408 call_rcu(&dentry->d_u.d_rcu, __d_free_external); in dentry_free()
413 if (dentry->d_flags & DCACHE_NORCU) in dentry_free()
414 __d_free(&dentry->d_u.d_rcu); in dentry_free()
416 call_rcu(&dentry->d_u.d_rcu, __d_free); in dentry_free()
424 __releases(dentry->d_lock) in dentry_unlink_inode()
425 __releases(dentry->d_inode->i_lock) in dentry_unlink_inode()
427 struct inode *inode = dentry->d_inode; in dentry_unlink_inode()
429 raw_write_seqcount_begin(&dentry->d_seq); in dentry_unlink_inode()
431 hlist_del_init(&dentry->d_u.d_alias); in dentry_unlink_inode()
432 raw_write_seqcount_end(&dentry->d_seq); in dentry_unlink_inode()
433 spin_unlock(&dentry->d_lock); in dentry_unlink_inode()
434 spin_unlock(&inode->i_lock); in dentry_unlink_inode()
435 if (!inode->i_nlink) in dentry_unlink_inode()
437 if (dentry->d_op && dentry->d_op->d_iput) in dentry_unlink_inode()
438 dentry->d_op->d_iput(dentry, inode); in dentry_unlink_inode()
445 * is in use - which includes both the "real" per-superblock
451 * The per-cpu "nr_dentry_unused" counters are updated with
454 * The per-cpu "nr_dentry_negative" counters are only updated
455 * when deleted from or added to the per-superblock LRU list, not
462 #define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_…
466 dentry->d_flags |= DCACHE_LRU_LIST; in d_lru_add()
471 &dentry->d_sb->s_dentry_lru, &dentry->d_lru)); in d_lru_add()
477 dentry->d_flags &= ~DCACHE_LRU_LIST; in d_lru_del()
482 &dentry->d_sb->s_dentry_lru, &dentry->d_lru)); in d_lru_del()
488 list_del_init(&dentry->d_lru); in d_shrink_del()
489 dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST); in d_shrink_del()
496 list_add(&dentry->d_lru, list); in d_shrink_add()
497 dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST; in d_shrink_add()
510 dentry->d_flags &= ~DCACHE_LRU_LIST; in d_lru_isolate()
514 list_lru_isolate(lru, &dentry->d_lru); in d_lru_isolate()
521 dentry->d_flags |= DCACHE_SHRINK_LIST; in d_lru_shrink_move()
524 list_lru_isolate_move(lru, &dentry->d_lru, list); in d_lru_shrink_move()
536 b = &dentry->d_sb->s_roots; in ___d_drop()
538 b = d_hash(dentry->d_name.hash); in ___d_drop()
541 __hlist_bl_del(&dentry->d_hash); in ___d_drop()
549 dentry->d_hash.pprev = NULL; in __d_drop()
550 write_seqcount_invalidate(&dentry->d_seq); in __d_drop()
556 * d_drop - drop a dentry
561 * deleting the dentry - d_delete will try to mark the dentry negative if
568 * __d_drop requires dentry->d_lock
571 * (dentry->d_hash.pprev will be LIST_POISON2, not NULL).
575 spin_lock(&dentry->d_lock); in d_drop()
577 spin_unlock(&dentry->d_lock); in d_drop()
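Typical caller pattern (illustrative): a filesystem that discovers a
cached name no longer matches backing storage unhashes it, so future
lookups miss in the dcache:

	if (name_is_stale)		/* condition is fs-specific */
		d_drop(dentry);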
588 dentry->d_flags |= DCACHE_DENTRY_KILLED; in dentry_unlist()
589 if (unlikely(hlist_unhashed(&dentry->d_sib))) in dentry_unlist()
591 __hlist_del(&dentry->d_sib); in dentry_unlist()
594 * a normal list member, it didn't matter - ->d_sib.next would've in dentry_unlist()
597 * Normally d_walk() doesn't care about cursors moving around - in dentry_unlist()
598 * ->d_lock on parent prevents that and since a cursor has no children in dentry_unlist()
600 * There is one exception, though - if we ascend from a child that in dentry_unlist()
602 * using the value left in its ->d_sib.next. And if _that_ in dentry_unlist()
604 * before d_walk() regains parent->d_lock, we'll end up skipping in dentry_unlist()
607 * Solution: make sure that the pointer left behind in ->d_sib.next in dentry_unlist()
611 while (dentry->d_sib.next) { in dentry_unlist()
612 next = hlist_entry(dentry->d_sib.next, struct dentry, d_sib); in dentry_unlist()
613 if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR))) in dentry_unlist()
615 dentry->d_sib.next = next->d_sib.next; in dentry_unlist()
627 lockref_mark_dead(&dentry->d_lockref); in __dentry_kill()
633 if (dentry->d_flags & DCACHE_OP_PRUNE) in __dentry_kill()
634 dentry->d_op->d_prune(dentry); in __dentry_kill()
636 if (dentry->d_flags & DCACHE_LRU_LIST) { in __dentry_kill()
637 if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) in __dentry_kill()
642 if (dentry->d_inode) in __dentry_kill()
645 spin_unlock(&dentry->d_lock); in __dentry_kill()
647 if (dentry->d_op && dentry->d_op->d_release) in __dentry_kill()
648 dentry->d_op->d_release(dentry); in __dentry_kill()
651 /* now that it's negative, ->d_parent is stable */ in __dentry_kill()
653 parent = dentry->d_parent; in __dentry_kill()
654 spin_lock(&parent->d_lock); in __dentry_kill()
656 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); in __dentry_kill()
658 if (dentry->d_flags & DCACHE_SHRINK_LIST) in __dentry_kill()
660 spin_unlock(&dentry->d_lock); in __dentry_kill()
663 if (parent && --parent->d_lockref.count) { in __dentry_kill()
664 spin_unlock(&parent->d_lock); in __dentry_kill()
672 * Called under rcu_read_lock() and dentry->d_lock; the former
683 struct inode *inode = dentry->d_inode; in lock_for_kill()
685 if (unlikely(dentry->d_lockref.count)) in lock_for_kill()
688 if (!inode || likely(spin_trylock(&inode->i_lock))) in lock_for_kill()
692 spin_unlock(&dentry->d_lock); in lock_for_kill()
693 spin_lock(&inode->i_lock); in lock_for_kill()
694 spin_lock(&dentry->d_lock); in lock_for_kill()
695 if (likely(inode == dentry->d_inode)) in lock_for_kill()
697 spin_unlock(&inode->i_lock); in lock_for_kill()
698 inode = dentry->d_inode; in lock_for_kill()
700 if (likely(!dentry->d_lockref.count)) in lock_for_kill()
703 spin_unlock(&inode->i_lock); in lock_for_kill()
715 * re-gotten a reference to the dentry and change that, but our work is done -
723 d_flags = READ_ONCE(dentry->d_flags); in retain_dentry()
733 // ->d_delete() might tell us not to bother, but that requires in retain_dentry()
734 // ->d_lock; can't decide without it in retain_dentry()
736 if (!locked || dentry->d_op->d_delete(dentry)) in retain_dentry()
745 // need to do something - put it on LRU if it wasn't there already in retain_dentry()
747 // Unfortunately, both actions require ->d_lock, so in lockless in retain_dentry()
756 dentry->d_flags |= DCACHE_REFERENCED; in retain_dentry()
763 struct dentry *de; in d_mark_dontcache() local
765 spin_lock(&inode->i_lock); in d_mark_dontcache()
766 hlist_for_each_entry(de, &inode->i_dentry, d_u.d_alias) { in d_mark_dontcache()
767 spin_lock(&de->d_lock); in d_mark_dontcache()
768 de->d_flags |= DCACHE_DONTCACHE; in d_mark_dontcache()
769 spin_unlock(&de->d_lock); in d_mark_dontcache()
771 inode->i_state |= I_DONTCACHE; in d_mark_dontcache()
772 spin_unlock(&inode->i_lock); in d_mark_dontcache()
793 ret = lockref_put_return(&dentry->d_lockref); in fast_dput()
801 spin_lock(&dentry->d_lock); in fast_dput()
802 if (WARN_ON_ONCE(dentry->d_lockref.count <= 0)) { in fast_dput()
803 spin_unlock(&dentry->d_lock); in fast_dput()
806 dentry->d_lockref.count--; in fast_dput()
818 * taking the lock? There's a very common case when it's all we need - in fast_dput()
828 * but we'll need to re-check the situation after getting the lock. in fast_dput()
830 spin_lock(&dentry->d_lock); in fast_dput()
839 if (dentry->d_lockref.count || retain_dentry(dentry, true)) { in fast_dput()
840 spin_unlock(&dentry->d_lock); in fast_dput()
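The lockref fast path that fast_dput() leans on can be sketched as
follows (hedged; lockref_put_return() returns the new count on
success, or a negative value when the cmpxchg-based update could not
be done because the lock was held):

	int ret = lockref_put_return(&dentry->d_lockref);

	if (ret > 0)
		return;		/* not the last reference: done */
	if (ret < 0) {
		/* lock was held: take ->d_lock and decrement under it */
	}
	/* ret == 0: we dropped the last reference, slow work follows */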
865 * dput - release a dentry
889 spin_unlock(&dentry->d_lock); in dput()
895 spin_unlock(&dentry->d_lock); in dput()
900 __must_hold(&dentry->d_lock) in to_shrink_list()
902 if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) { in to_shrink_list()
903 if (dentry->d_flags & DCACHE_LRU_LIST) in to_shrink_list()
918 spin_unlock(&dentry->d_lock); in dput_to_list()
932 seq = raw_seqcount_begin(&dentry->d_seq); in dget_parent()
933 ret = READ_ONCE(dentry->d_parent); in dget_parent()
934 gotref = lockref_get_not_zero(&ret->d_lockref); in dget_parent()
937 if (!read_seqcount_retry(&dentry->d_seq, seq)) in dget_parent()
944 * Don't need rcu_dereference because we re-check it was correct under in dget_parent()
948 ret = dentry->d_parent; in dget_parent()
949 spin_lock(&ret->d_lock); in dget_parent()
950 if (unlikely(ret != dentry->d_parent)) { in dget_parent()
951 spin_unlock(&ret->d_lock); in dget_parent()
956 BUG_ON(!ret->d_lockref.count); in dget_parent()
957 ret->d_lockref.count++; in dget_parent()
958 spin_unlock(&ret->d_lock); in dget_parent()
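Typical use (illustrative): pin the parent across an operation that
must not race with rename, then drop the reference:

	struct dentry *parent = dget_parent(dentry);
	/* ... parent cannot go away here ... */
	dput(parent);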
967 if (hlist_empty(&inode->i_dentry)) in __d_find_any_alias()
969 alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias); in __d_find_any_alias()
970 lockref_get(&alias->d_lockref); in __d_find_any_alias()
975 * d_find_any_alias - find any alias for a given inode
983 struct dentry *de; in d_find_any_alias() local
985 spin_lock(&inode->i_lock); in d_find_any_alias()
986 de = __d_find_any_alias(inode); in d_find_any_alias()
987 spin_unlock(&inode->i_lock); in d_find_any_alias()
988 return de; in d_find_any_alias()
996 if (S_ISDIR(inode->i_mode)) in __d_find_alias()
999 hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) { in __d_find_alias()
1000 spin_lock(&alias->d_lock); in __d_find_alias()
1003 spin_unlock(&alias->d_lock); in __d_find_alias()
1006 spin_unlock(&alias->d_lock); in __d_find_alias()
1012 * d_find_alias - grab a hashed alias of inode
1027 struct dentry *de = NULL; in d_find_alias() local
1029 if (!hlist_empty(&inode->i_dentry)) { in d_find_alias()
1030 spin_lock(&inode->i_lock); in d_find_alias()
1031 de = __d_find_alias(inode); in d_find_alias()
1032 spin_unlock(&inode->i_lock); in d_find_alias()
1034 return de; in d_find_alias()
1044 struct hlist_head *l = &inode->i_dentry; in d_find_alias_rcu()
1045 struct dentry *de = NULL; in d_find_alias_rcu() local
1047 spin_lock(&inode->i_lock); in d_find_alias_rcu()
1048 // ->i_dentry and ->i_rcu are colocated, but the latter won't be in d_find_alias_rcu()
1050 if (likely(!(inode->i_state & I_FREEING) && !hlist_empty(l))) { in d_find_alias_rcu()
1051 if (S_ISDIR(inode->i_mode)) { in d_find_alias_rcu()
1052 de = hlist_entry(l->first, struct dentry, d_u.d_alias); in d_find_alias_rcu()
1054 hlist_for_each_entry(de, l, d_u.d_alias) in d_find_alias_rcu()
1055 if (!d_unhashed(de)) in d_find_alias_rcu()
1059 spin_unlock(&inode->i_lock); in d_find_alias_rcu()
1060 return de; in d_find_alias_rcu()
1072 spin_lock(&inode->i_lock); in d_prune_aliases()
1073 hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) { in d_prune_aliases()
1074 spin_lock(&dentry->d_lock); in d_prune_aliases()
1075 if (!dentry->d_lockref.count) in d_prune_aliases()
1077 spin_unlock(&dentry->d_lock); in d_prune_aliases()
1079 spin_unlock(&inode->i_lock); in d_prune_aliases()
1093 spin_unlock(&victim->d_lock); in shrink_kill()
1101 dentry = list_entry(list->prev, struct dentry, d_lru); in shrink_dentry_list()
1102 spin_lock(&dentry->d_lock); in shrink_dentry_list()
1108 can_free = dentry->d_flags & DCACHE_DENTRY_KILLED; in shrink_dentry_list()
1109 spin_unlock(&dentry->d_lock); in shrink_dentry_list()
1127 * we are inverting the lru lock/dentry->d_lock here, in dentry_lru_isolate()
1131 if (!spin_trylock(&dentry->d_lock)) in dentry_lru_isolate()
1139 if (dentry->d_lockref.count) { in dentry_lru_isolate()
1141 spin_unlock(&dentry->d_lock); in dentry_lru_isolate()
1145 if (dentry->d_flags & DCACHE_REFERENCED) { in dentry_lru_isolate()
1146 dentry->d_flags &= ~DCACHE_REFERENCED; in dentry_lru_isolate()
1147 spin_unlock(&dentry->d_lock); in dentry_lru_isolate()
1151 * this point, we've dropped the dentry->d_lock but keep the in dentry_lru_isolate()
1172 spin_unlock(&dentry->d_lock); in dentry_lru_isolate()
1178 * prune_dcache_sb - shrink the dcache
1182 * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
1194 freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc, in prune_dcache_sb()
1207 * we are inverting the lru lock/dentry->d_lock here, in dentry_lru_isolate_shrink()
1211 if (!spin_trylock(&dentry->d_lock)) in dentry_lru_isolate_shrink()
1215 spin_unlock(&dentry->d_lock); in dentry_lru_isolate_shrink()
1222 * shrink_dcache_sb - shrink dcache for a superblock
1233 list_lru_walk(&sb->s_dentry_lru, in shrink_dcache_sb()
1236 } while (list_lru_count(&sb->s_dentry_lru) > 0); in shrink_dcache_sb()
1241 * enum d_walk_ret - action to take during tree walk
1255 * d_walk - walk the dentry tree
1273 spin_lock(&this_parent->d_lock); in d_walk()
1290 if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR)) in d_walk()
1293 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); in d_walk()
1300 spin_unlock(&dentry->d_lock); in d_walk()
1306 spin_unlock(&dentry->d_lock); in d_walk()
1310 if (!hlist_empty(&dentry->d_children)) { in d_walk()
1311 spin_unlock(&this_parent->d_lock); in d_walk()
1312 spin_release(&dentry->d_lock.dep_map, _RET_IP_); in d_walk()
1314 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_); in d_walk()
1317 spin_unlock(&dentry->d_lock); in d_walk()
1326 this_parent = dentry->d_parent; in d_walk()
1328 spin_unlock(&dentry->d_lock); in d_walk()
1329 spin_lock(&this_parent->d_lock); in d_walk()
1336 if (likely(!(dentry->d_flags & DCACHE_DENTRY_KILLED))) { in d_walk()
1348 spin_unlock(&this_parent->d_lock); in d_walk()
1353 spin_unlock(&this_parent->d_lock); in d_walk()
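Shape of a d_walk() callback (illustrative; the D_WALK_* values come
from the enum documented above, and the test shown is made up):

	static enum d_walk_ret example_enter(void *data, struct dentry *dentry)
	{
		if (dentry->d_flags & DCACHE_DENTRY_CURSOR)
			return D_WALK_SKIP;
		return D_WALK_CONTINUE;
	}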
1370 struct path path = { .mnt = info->mnt, .dentry = dentry }; in path_check_mount()
1375 info->mounted = 1; in path_check_mount()
1382 * path_has_submounts - check for mounts over a dentry in the
1391 struct check_mount data = { .mnt = parent->mnt, .mounted = 0 }; in path_has_submounts()
1394 d_walk(parent->dentry, &data, path_check_mount); in path_has_submounts()
1412 int ret = -ENOENT; in d_set_mounted()
1414 for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) { in d_set_mounted()
1416 spin_lock(&p->d_lock); in d_set_mounted()
1418 spin_unlock(&p->d_lock); in d_set_mounted()
1421 spin_unlock(&p->d_lock); in d_set_mounted()
1423 spin_lock(&dentry->d_lock); in d_set_mounted()
1425 ret = -EBUSY; in d_set_mounted()
1427 dentry->d_flags |= DCACHE_MOUNTED; in d_set_mounted()
1431 spin_unlock(&dentry->d_lock); in d_set_mounted()
1441 * whenever the d_children list is non-empty and continue
1466 if (data->start == dentry) in select_collect()
1469 if (dentry->d_flags & DCACHE_SHRINK_LIST) { in select_collect()
1470 data->found++; in select_collect()
1471 } else if (!dentry->d_lockref.count) { in select_collect()
1472 to_shrink_list(dentry, &data->dispose); in select_collect()
1473 data->found++; in select_collect()
1474 } else if (dentry->d_lockref.count < 0) { in select_collect()
1475 data->found++; in select_collect()
1482 if (!list_empty(&data->dispose)) in select_collect()
1493 if (data->start == dentry) in select_collect2()
1496 if (!dentry->d_lockref.count) { in select_collect2()
1497 if (dentry->d_flags & DCACHE_SHRINK_LIST) { in select_collect2()
1499 data->victim = dentry; in select_collect2()
1502 to_shrink_list(dentry, &data->dispose); in select_collect2()
1509 if (!list_empty(&data->dispose)) in select_collect2()
1516 * shrink_dcache_parent - prune dcache
1540 spin_lock(&data.victim->d_lock); in shrink_dcache_parent()
1542 spin_unlock(&data.victim->d_lock); in shrink_dcache_parent()
1557 if (!hlist_empty(&dentry->d_children)) in umount_check()
1561 if (dentry == _data && dentry->d_lockref.count == 1) in umount_check()
1567 dentry->d_inode ? in umount_check()
1568 dentry->d_inode->i_ino : 0UL, in umount_check()
1570 dentry->d_lockref.count, in umount_check()
1571 dentry->d_sb->s_type->name, in umount_check()
1572 dentry->d_sb->s_id); in umount_check()
1591 rwsem_assert_held_write(&sb->s_umount); in shrink_dcache_for_umount()
1593 dentry = sb->s_root; in shrink_dcache_for_umount()
1594 sb->s_root = NULL; in shrink_dcache_for_umount()
1597 while (!hlist_bl_empty(&sb->s_roots)) { in shrink_dcache_for_umount()
1598 dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_roots), struct dentry, d_hash)); in shrink_dcache_for_umount()
1614 * d_invalidate - detach submounts, prune dcache, and drop
1620 spin_lock(&dentry->d_lock); in d_invalidate()
1622 spin_unlock(&dentry->d_lock); in d_invalidate()
1626 spin_unlock(&dentry->d_lock); in d_invalidate()
1629 if (!dentry->d_inode) in d_invalidate()
1649 * __d_alloc - allocate a dcache entry
1664 dentry = kmem_cache_alloc_lru(dentry_cache, &sb->s_dentry_lru, in __d_alloc()
1670 * We guarantee that the inline name is always NUL-terminated. in __d_alloc()
1675 dentry->d_shortname.string[DNAME_INLINE_LEN-1] = 0; in __d_alloc()
1678 dname = dentry->d_shortname.string; in __d_alloc()
1679 } else if (name->len > DNAME_INLINE_LEN-1) { in __d_alloc()
1681 struct external_name *p = kmalloc(size + name->len, in __d_alloc()
1688 atomic_set(&p->count, 1); in __d_alloc()
1689 dname = p->name; in __d_alloc()
1691 dname = dentry->d_shortname.string; in __d_alloc()
1694 dentry->d_name.len = name->len; in __d_alloc()
1695 dentry->d_name.hash = name->hash; in __d_alloc()
1696 memcpy(dname, name->name, name->len); in __d_alloc()
1697 dname[name->len] = 0; in __d_alloc()
1700 smp_store_release(&dentry->d_name.name, dname); /* ^^^ */ in __d_alloc()
1702 dentry->d_flags = 0; in __d_alloc()
1703 lockref_init(&dentry->d_lockref); in __d_alloc()
1704 seqcount_spinlock_init(&dentry->d_seq, &dentry->d_lock); in __d_alloc()
1705 dentry->d_inode = NULL; in __d_alloc()
1706 dentry->d_parent = dentry; in __d_alloc()
1707 dentry->d_sb = sb; in __d_alloc()
1708 dentry->d_op = NULL; in __d_alloc()
1709 dentry->d_fsdata = NULL; in __d_alloc()
1710 INIT_HLIST_BL_NODE(&dentry->d_hash); in __d_alloc()
1711 INIT_LIST_HEAD(&dentry->d_lru); in __d_alloc()
1712 INIT_HLIST_HEAD(&dentry->d_children); in __d_alloc()
1713 INIT_HLIST_NODE(&dentry->d_u.d_alias); in __d_alloc()
1714 INIT_HLIST_NODE(&dentry->d_sib); in __d_alloc()
1715 d_set_d_op(dentry, dentry->d_sb->s_d_op); in __d_alloc()
1717 if (dentry->d_op && dentry->d_op->d_init) { in __d_alloc()
1718 err = dentry->d_op->d_init(dentry); in __d_alloc()
1733 * d_alloc - allocate a dcache entry
1743 struct dentry *dentry = __d_alloc(parent->d_sb, name); in d_alloc()
1746 spin_lock(&parent->d_lock); in d_alloc()
1751 dentry->d_parent = dget_dlock(parent); in d_alloc()
1752 hlist_add_head(&dentry->d_sib, &parent->d_children); in d_alloc()
1753 spin_unlock(&parent->d_lock); in d_alloc()
1767 struct dentry *dentry = d_alloc_anon(parent->d_sb); in d_alloc_cursor()
1769 dentry->d_flags |= DCACHE_DENTRY_CURSOR; in d_alloc_cursor()
1770 dentry->d_parent = dget(parent); in d_alloc_cursor()
1776 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
1782 * This is used for pipes, sockets et al. - the stuff that should
1797 dentry->d_flags |= DCACHE_NORCU; in d_alloc_pseudo()
1798 if (!sb->s_d_op) in d_alloc_pseudo()
1816 WARN_ON_ONCE(dentry->d_op); in d_set_d_op()
1817 WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH | in d_set_d_op()
1823 dentry->d_op = op; in d_set_d_op()
1826 if (op->d_hash) in d_set_d_op()
1827 dentry->d_flags |= DCACHE_OP_HASH; in d_set_d_op()
1828 if (op->d_compare) in d_set_d_op()
1829 dentry->d_flags |= DCACHE_OP_COMPARE; in d_set_d_op()
1830 if (op->d_revalidate) in d_set_d_op()
1831 dentry->d_flags |= DCACHE_OP_REVALIDATE; in d_set_d_op()
1832 if (op->d_weak_revalidate) in d_set_d_op()
1833 dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE; in d_set_d_op()
1834 if (op->d_delete) in d_set_d_op()
1835 dentry->d_flags |= DCACHE_OP_DELETE; in d_set_d_op()
1836 if (op->d_prune) in d_set_d_op()
1837 dentry->d_flags |= DCACHE_OP_PRUNE; in d_set_d_op()
1838 if (op->d_real) in d_set_d_op()
1839 dentry->d_flags |= DCACHE_OP_REAL; in d_set_d_op()
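Illustrative operations table: which hooks a filesystem fills in is
exactly what drives the DCACHE_OP_* flag setting above (the
always_delete_dentry() helper is a real exported helper; the table
itself is made up):

	static const struct dentry_operations example_dops = {
		.d_delete	= always_delete_dentry,	/* never keep negatives */
	};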
1851 if (S_ISDIR(inode->i_mode)) { in d_flags_for_inode()
1853 if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) { in d_flags_for_inode()
1854 if (unlikely(!inode->i_op->lookup)) in d_flags_for_inode()
1857 inode->i_opflags |= IOP_LOOKUP; in d_flags_for_inode()
1862 if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) { in d_flags_for_inode()
1863 if (unlikely(inode->i_op->get_link)) { in d_flags_for_inode()
1867 inode->i_opflags |= IOP_NOFOLLOW; in d_flags_for_inode()
1870 if (unlikely(!S_ISREG(inode->i_mode))) in d_flags_for_inode()
1884 spin_lock(&dentry->d_lock); in __d_instantiate()
1889 if ((dentry->d_flags & in __d_instantiate()
1892 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); in __d_instantiate()
1893 raw_write_seqcount_begin(&dentry->d_seq); in __d_instantiate()
1895 raw_write_seqcount_end(&dentry->d_seq); in __d_instantiate()
1897 spin_unlock(&dentry->d_lock); in __d_instantiate()
1901 * d_instantiate - fill in inode information for a dentry
1917 BUG_ON(!hlist_unhashed(&entry->d_u.d_alias)); in d_instantiate()
1920 spin_lock(&inode->i_lock); in d_instantiate()
1922 spin_unlock(&inode->i_lock); in d_instantiate()
1929 * with lockdep-related part of unlock_new_inode() done before
1930 * anything else. Use that instead of open-coding d_instantiate()/
1935 BUG_ON(!hlist_unhashed(&entry->d_u.d_alias)); in d_instantiate_new()
1939 spin_lock(&inode->i_lock); in d_instantiate_new()
1941 WARN_ON(!(inode->i_state & I_NEW)); in d_instantiate_new()
1942 inode->i_state &= ~I_NEW & ~I_CREATING; in d_instantiate_new()
1950 spin_unlock(&inode->i_lock); in d_instantiate_new()
1959 res = d_alloc_anon(root_inode->i_sb); in d_make_root()
1975 return ERR_PTR(-ESTALE); in __d_obtain_alias()
1979 sb = inode->i_sb; in __d_obtain_alias()
1987 res = ERR_PTR(-ENOMEM); in __d_obtain_alias()
1992 spin_lock(&inode->i_lock); in __d_obtain_alias()
2000 spin_lock(&new->d_lock); in __d_obtain_alias()
2002 hlist_add_head(&new->d_u.d_alias, &inode->i_dentry); in __d_obtain_alias()
2004 hlist_bl_lock(&sb->s_roots); in __d_obtain_alias()
2005 hlist_bl_add_head(&new->d_hash, &sb->s_roots); in __d_obtain_alias()
2006 hlist_bl_unlock(&sb->s_roots); in __d_obtain_alias()
2008 spin_unlock(&new->d_lock); in __d_obtain_alias()
2009 spin_unlock(&inode->i_lock); in __d_obtain_alias()
2010 inode = NULL; /* consumed by new->d_inode */ in __d_obtain_alias()
2013 spin_unlock(&inode->i_lock); in __d_obtain_alias()
2023 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
2038 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
2047 * d_obtain_root - find or allocate a dentry for a given inode
2059 * replaced by ERR_PTR(-ESTALE).
2068 * d_add_ci - lookup or allocate new dentry with case-exact name
2070 * @inode: the inode case-insensitive lookup has found
2071 * @name: the case-exact name to be associated with the returned dentry
2073 * This is to avoid filling the dcache with case-insensitive names to the
2075 * case-insensitive filesystems.
2077 * For a case-insensitive lookup match and if the case-exact dentry
2092 found = d_hash_and_lookup(dentry->d_parent, name); in d_add_ci()
2098 found = d_alloc_parallel(dentry->d_parent, name, in d_add_ci()
2099 dentry->d_wait); in d_add_ci()
2105 found = d_alloc(dentry->d_parent, name); in d_add_ci()
2108 return ERR_PTR(-ENOMEM); in d_add_ci()
2122 * d_same_name - compare dentry name with case-exact name
2125 * @name: the case-exact name to be associated with the returned dentry
2132 if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) { in d_same_name()
2133 if (dentry->d_name.len != name->len) in d_same_name()
2135 return dentry_cmp(dentry, name->name, name->len) == 0; in d_same_name()
2137 return parent->d_op->d_compare(dentry, in d_same_name()
2138 dentry->d_name.len, dentry->d_name.name, in d_same_name()
2152 u64 hashlen = name->hash_len; in __d_lookup_rcu_op_compare()
2163 seq = raw_seqcount_begin(&dentry->d_seq); in __d_lookup_rcu_op_compare()
2164 if (dentry->d_parent != parent) in __d_lookup_rcu_op_compare()
2168 if (dentry->d_name.hash != hashlen_hash(hashlen)) in __d_lookup_rcu_op_compare()
2170 tlen = dentry->d_name.len; in __d_lookup_rcu_op_compare()
2171 tname = dentry->d_name.name; in __d_lookup_rcu_op_compare()
2173 if (read_seqcount_retry(&dentry->d_seq, seq)) { in __d_lookup_rcu_op_compare()
2177 if (parent->d_op->d_compare(dentry, tlen, tname, name) != 0) in __d_lookup_rcu_op_compare()
2186 * __d_lookup_rcu - search for a dentry (racy, store-free)
2192 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
2193 * resolution (store-free path walking) design described in
2194 * Documentation/filesystems/path-lookup.txt.
2198 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
2215 u64 hashlen = name->hash_len; in __d_lookup_rcu()
2216 const unsigned char *str = name->name; in __d_lookup_rcu()
2228 if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) in __d_lookup_rcu()
2239 * false-negative result. d_lookup() protects against concurrent in __d_lookup_rcu()
2242 * See Documentation/filesystems/path-lookup.txt for more details. in __d_lookup_rcu()
2262 * we are still guaranteed NUL-termination of ->d_name.name. in __d_lookup_rcu()
2264 seq = raw_seqcount_begin(&dentry->d_seq); in __d_lookup_rcu()
2265 if (dentry->d_parent != parent) in __d_lookup_rcu()
2269 if (dentry->d_name.hash_len != hashlen) in __d_lookup_rcu()
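Caller-side shape (hedged sketch): rcu-walk tries the store-free
lookup first and, when it finds nothing or cannot proceed safely,
drops out of rcu-walk and retries with the locked variant:

	unsigned seq;
	struct dentry *dentry;

	dentry = __d_lookup_rcu(parent, name, &seq);
	if (!dentry) {
		/* leave rcu-walk mode, then ... */
		dentry = __d_lookup(parent, name);
	}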
2280 * d_lookup - search for a dentry
2306 * __d_lookup - search for a dentry (racy)
2312 * false-negative result due to unrelated rename activity.
2322 unsigned int hash = name->hash; in __d_lookup()
2343 * false-negative result. d_lookup() protects against concurrent in __d_lookup()
2346 * See Documentation/filesystems/path-lookup.txt for more details. in __d_lookup()
2352 if (dentry->d_name.hash != hash) in __d_lookup()
2355 spin_lock(&dentry->d_lock); in __d_lookup()
2356 if (dentry->d_parent != parent) in __d_lookup()
2364 dentry->d_lockref.count++; in __d_lookup()
2366 spin_unlock(&dentry->d_lock); in __d_lookup()
2369 spin_unlock(&dentry->d_lock); in __d_lookup()
2377 * d_hash_and_lookup - hash the qstr then search for a dentry
2381 * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
2386 * Check for a fs-specific hash function. Note that we must in d_hash_and_lookup()
2387 * calculate the standard hash first, as the d_op->d_hash() in d_hash_and_lookup()
2390 name->hash = full_name_hash(dir, name->name, name->len); in d_hash_and_lookup()
2391 if (dir->d_flags & DCACHE_OP_HASH) { in d_hash_and_lookup()
2392 int err = dir->d_op->d_hash(dir, name); in d_hash_and_lookup()
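Illustrative use: build a qstr, let the parent's ->d_hash (if any)
adjust the hash, and search (the name here is made up):

	struct qstr q = QSTR_INIT("child", 5);
	struct dentry *found = d_hash_and_lookup(dir, &q);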
2402 * - turn this dentry into a negative dentry
2403 * - unhash this dentry and free it.
2414 * d_delete - delete a dentry
2423 struct inode *inode = dentry->d_inode; in d_delete()
2425 spin_lock(&inode->i_lock); in d_delete()
2426 spin_lock(&dentry->d_lock); in d_delete()
2430 if (dentry->d_lockref.count == 1) { in d_delete()
2433 dentry->d_flags &= ~DCACHE_CANT_MOUNT; in d_delete()
2437 spin_unlock(&dentry->d_lock); in d_delete()
2438 spin_unlock(&inode->i_lock); in d_delete()
2445 struct hlist_bl_head *b = d_hash(entry->d_name.hash); in __d_rehash()
2448 hlist_bl_add_head_rcu(&entry->d_hash, b); in __d_rehash()
2453 * d_rehash - add an entry back to the hash
2461 spin_lock(&entry->d_lock); in d_rehash()
2463 spin_unlock(&entry->d_lock); in d_rehash()
2471 unsigned n = dir->i_dir_seq; in start_dir_add()
2472 if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n) in start_dir_add()
2481 smp_store_release(&dir->i_dir_seq, n + 2); in end_dir_add()
2490 add_wait_queue(dentry->d_wait, &wait); in d_wait_lookup()
2493 spin_unlock(&dentry->d_lock); in d_wait_lookup()
2495 spin_lock(&dentry->d_lock); in d_wait_lookup()
2504 unsigned int hash = name->hash; in d_alloc_parallel()
2512 return ERR_PTR(-ENOMEM); in d_alloc_parallel()
2516 seq = smp_load_acquire(&parent->d_inode->i_dir_seq); in d_alloc_parallel()
2520 if (!lockref_get_not_dead(&dentry->d_lockref)) { in d_alloc_parallel()
2524 if (read_seqcount_retry(&dentry->d_seq, d_seq)) { in d_alloc_parallel()
2544 if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) { in d_alloc_parallel()
2552 * any potential in-lookup matches are going to stay here until in d_alloc_parallel()
2557 if (dentry->d_name.hash != hash) in d_alloc_parallel()
2559 if (dentry->d_parent != parent) in d_alloc_parallel()
2565 if (!lockref_get_not_dead(&dentry->d_lockref)) { in d_alloc_parallel()
2575 spin_lock(&dentry->d_lock); in d_alloc_parallel()
2578 * it's not in-lookup anymore; in principle we should repeat in d_alloc_parallel()
2583 if (unlikely(dentry->d_name.hash != hash)) in d_alloc_parallel()
2585 if (unlikely(dentry->d_parent != parent)) in d_alloc_parallel()
2592 spin_unlock(&dentry->d_lock); in d_alloc_parallel()
2597 /* we can't take ->d_lock here; it's OK, though. */ in d_alloc_parallel()
2598 new->d_flags |= DCACHE_PAR_LOOKUP; in d_alloc_parallel()
2599 new->d_wait = wq; in d_alloc_parallel()
2600 hlist_bl_add_head(&new->d_u.d_in_lookup_hash, b); in d_alloc_parallel()
2604 spin_unlock(&dentry->d_lock); in d_alloc_parallel()
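Typical ->lookup use (illustrative): join or start an in-lookup
dentry; only the caller that still sees d_in_lookup() true goes on to
do the actual directory I/O:

	struct dentry *res = d_alloc_parallel(parent, name, wq);

	if (IS_ERR(res))
		return res;
	if (!d_in_lookup(res))
		return res;	/* somebody else already finished it */
	/* ... read the directory, then d_add(res, inode) ... */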
2611 * - Unhash the dentry
2612 * - Retrieve and clear the waitqueue head in dentry
2613 * - Return the waitqueue head
2620 lockdep_assert_held(&dentry->d_lock); in __d_lookup_unhash()
2622 b = in_lookup_hash(dentry->d_parent, dentry->d_name.hash); in __d_lookup_unhash()
2624 dentry->d_flags &= ~DCACHE_PAR_LOOKUP; in __d_lookup_unhash()
2625 __hlist_bl_del(&dentry->d_u.d_in_lookup_hash); in __d_lookup_unhash()
2626 d_wait = dentry->d_wait; in __d_lookup_unhash()
2627 dentry->d_wait = NULL; in __d_lookup_unhash()
2629 INIT_HLIST_NODE(&dentry->d_u.d_alias); in __d_lookup_unhash()
2630 INIT_LIST_HEAD(&dentry->d_lru); in __d_lookup_unhash()
2636 spin_lock(&dentry->d_lock); in __d_lookup_unhash_wake()
2638 spin_unlock(&dentry->d_lock); in __d_lookup_unhash_wake()
2642 /* inode->i_lock held if inode is non-NULL */
2649 spin_lock(&dentry->d_lock); in __d_add()
2651 dir = dentry->d_parent->d_inode; in __d_add()
2657 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); in __d_add()
2658 raw_write_seqcount_begin(&dentry->d_seq); in __d_add()
2660 raw_write_seqcount_end(&dentry->d_seq); in __d_add()
2666 spin_unlock(&dentry->d_lock); in __d_add()
2668 spin_unlock(&inode->i_lock); in __d_add()
2672 * d_add - add dentry to hash queues
2684 spin_lock(&inode->i_lock); in d_add()
2691 * d_exact_alias - find and hash an exact unhashed alias
2704 unsigned int hash = entry->d_name.hash; in d_exact_alias()
2706 spin_lock(&inode->i_lock); in d_exact_alias()
2707 hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) { in d_exact_alias()
2709 * Don't need alias->d_lock here, because aliases with in d_exact_alias()
2710 * d_parent == entry->d_parent are not subject to name or in d_exact_alias()
2713 if (alias->d_name.hash != hash) in d_exact_alias()
2715 if (alias->d_parent != entry->d_parent) in d_exact_alias()
2717 if (!d_same_name(alias, entry->d_parent, &entry->d_name)) in d_exact_alias()
2719 spin_lock(&alias->d_lock); in d_exact_alias()
2721 spin_unlock(&alias->d_lock); in d_exact_alias()
2726 spin_unlock(&alias->d_lock); in d_exact_alias()
2728 spin_unlock(&inode->i_lock); in d_exact_alias()
2731 spin_unlock(&inode->i_lock); in d_exact_alias()
2743 swap(target->d_name.name, dentry->d_name.name); in swap_names()
2749 dentry->d_name.name = target->d_name.name; in swap_names()
2750 target->d_shortname = dentry->d_shortname; in swap_names()
2751 target->d_name.name = target->d_shortname.string; in swap_names()
2759 target->d_name.name = dentry->d_name.name; in swap_names()
2760 dentry->d_shortname = target->d_shortname; in swap_names()
2761 dentry->d_name.name = dentry->d_shortname.string; in swap_names()
2767 swap(dentry->d_shortname.words[i], in swap_names()
2768 target->d_shortname.words[i]); in swap_names()
2771 swap(dentry->d_name.hash_len, target->d_name.hash_len); in swap_names()
2780 atomic_inc(&external_name(target)->count); in copy_name()
2781 dentry->d_name = target->d_name; in copy_name()
2783 dentry->d_shortname = target->d_shortname; in copy_name()
2784 dentry->d_name.name = dentry->d_shortname.string; in copy_name()
2785 dentry->d_name.hash_len = target->d_name.hash_len; in copy_name()
2787 if (old_name && likely(atomic_dec_and_test(&old_name->count))) in copy_name()
2792 * __d_move - move a dentry
2800 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
2810 WARN_ON(!dentry->d_inode); in __d_move()
2815 old_parent = dentry->d_parent; in __d_move()
2819 spin_lock(&target->d_parent->d_lock); in __d_move()
2821 /* target is not a descendant of dentry->d_parent */ in __d_move()
2822 spin_lock(&target->d_parent->d_lock); in __d_move()
2823 spin_lock_nested(&old_parent->d_lock, DENTRY_D_LOCK_NESTED); in __d_move()
2826 spin_lock(&old_parent->d_lock); in __d_move()
2828 spin_lock_nested(&target->d_parent->d_lock, in __d_move()
2831 spin_lock_nested(&dentry->d_lock, 2); in __d_move()
2832 spin_lock_nested(&target->d_lock, 3); in __d_move()
2835 dir = target->d_parent->d_inode; in __d_move()
2840 write_seqcount_begin(&dentry->d_seq); in __d_move()
2841 write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED); in __d_move()
2850 dentry->d_parent = target->d_parent; in __d_move()
2853 target->d_hash.pprev = NULL; in __d_move()
2854 dentry->d_parent->d_lockref.count++; in __d_move()
2856 WARN_ON(!--old_parent->d_lockref.count); in __d_move()
2858 target->d_parent = old_parent; in __d_move()
2860 if (!hlist_unhashed(&target->d_sib)) in __d_move()
2861 __hlist_del(&target->d_sib); in __d_move()
2862 hlist_add_head(&target->d_sib, &target->d_parent->d_children); in __d_move()
2866 if (!hlist_unhashed(&dentry->d_sib)) in __d_move()
2867 __hlist_del(&dentry->d_sib); in __d_move()
2868 hlist_add_head(&dentry->d_sib, &dentry->d_parent->d_children); in __d_move()
2873 write_seqcount_end(&target->d_seq); in __d_move()
2874 write_seqcount_end(&dentry->d_seq); in __d_move()
2879 if (dentry->d_parent != old_parent) in __d_move()
2880 spin_unlock(&dentry->d_parent->d_lock); in __d_move()
2882 spin_unlock(&old_parent->d_lock); in __d_move()
2883 spin_unlock(&target->d_lock); in __d_move()
2884 spin_unlock(&dentry->d_lock); in __d_move()
2888 * d_move - move a dentry
2905 * d_exchange - exchange two dentries
2913 WARN_ON(!dentry1->d_inode); in d_exchange()
2914 WARN_ON(!dentry2->d_inode); in d_exchange()
2924 * d_ancestor - search for an ancestor
2935 for (p = p2; !IS_ROOT(p); p = p->d_parent) { in d_ancestor()
2936 if (p->d_parent == p1) in d_ancestor()
2946 * dentry->d_parent->d_inode->i_mutex, and rename_lock
2955 int ret = -ESTALE; in __d_unalias()
2958 if (alias->d_parent == dentry->d_parent) in __d_unalias()
2962 if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex)) in __d_unalias()
2964 m1 = &dentry->d_sb->s_vfs_rename_mutex; in __d_unalias()
2965 if (!inode_trylock_shared(alias->d_parent->d_inode)) in __d_unalias()
2967 m2 = &alias->d_parent->d_inode->i_rwsem; in __d_unalias()
2969 if (alias->d_op && alias->d_op->d_unalias_trylock && in __d_unalias()
2970 !alias->d_op->d_unalias_trylock(alias)) in __d_unalias()
2973 if (alias->d_op && alias->d_op->d_unalias_unlock) in __d_unalias()
2974 alias->d_op->d_unalias_unlock(alias); in __d_unalias()
2985 * d_splice_alias - splice a disconnected dentry into the tree if one exists
2993 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
3000 * is returned. This matches the expected return value of ->lookup.
3018 spin_lock(&inode->i_lock); in d_splice_alias()
3019 if (S_ISDIR(inode->i_mode)) { in d_splice_alias()
3023 spin_unlock(&inode->i_lock); in d_splice_alias()
3028 new = ERR_PTR(-ELOOP); in d_splice_alias()
3032 dentry->d_name.name, in d_splice_alias()
3033 inode->i_sb->s_type->name, in d_splice_alias()
3034 inode->i_sb->s_id); in d_splice_alias()
3036 struct dentry *old_parent = dget(new->d_parent); in d_splice_alias()
3065 * is_subdir - is new dentry a subdirectory of old_dentry
3102 if (d_unhashed(dentry) || !dentry->d_inode) in d_genocide_kill()
3105 if (!(dentry->d_flags & DCACHE_GENOCIDE)) { in d_genocide_kill()
3106 dentry->d_flags |= DCACHE_GENOCIDE; in d_genocide_kill()
3107 dentry->d_lockref.count--; in d_genocide_kill()
3120 struct dentry *dentry = file->f_path.dentry; in d_mark_tmpfile()
3123 !hlist_unhashed(&dentry->d_u.d_alias) || in d_mark_tmpfile()
3125 spin_lock(&dentry->d_parent->d_lock); in d_mark_tmpfile()
3126 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); in d_mark_tmpfile()
3127 dentry->d_name.len = sprintf(dentry->d_shortname.string, "#%llu", in d_mark_tmpfile()
3128 (unsigned long long)inode->i_ino); in d_mark_tmpfile()
3129 spin_unlock(&dentry->d_lock); in d_mark_tmpfile()
3130 spin_unlock(&dentry->d_parent->d_lock); in d_mark_tmpfile()
3136 struct dentry *dentry = file->f_path.dentry; in d_tmpfile()
3155 seq = raw_seqcount_begin(&dentry->d_seq); in d_parent_ino()
3156 parent = READ_ONCE(dentry->d_parent); in d_parent_ino()
3159 ret = iparent->i_ino; in d_parent_ino()
3160 if (!read_seqcount_retry(&dentry->d_seq, seq)) in d_parent_ino()
3165 spin_lock(&dentry->d_lock); in d_parent_ino()
3166 ret = dentry->d_parent->d_inode->i_ino; in d_parent_ino()
3167 spin_unlock(&dentry->d_lock); in d_parent_ino()
3200 d_hash_shift = 32 - d_hash_shift; in dcache_init_early()
3231 d_hash_shift = 32 - d_hash_shift; in dcache_init()