xref: /linux/fs/dcache.c (revision 5ea5880764cbb164afb17a62e76ca75dc371409d)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * fs/dcache.c
4  *
5  * Complete reimplementation
6  * (C) 1997 Thomas Schoebel-Theuer,
7  * with heavy changes by Linus Torvalds
8  */
9 
10 /*
11  * Notes on the allocation strategy:
12  *
13  * The dcache is a master of the icache - whenever a dcache entry
14  * exists, the inode will always exist. "iput()" is done either when
15  * the dcache entry is deleted or garbage collected.
16  */
17 
18 #include <linux/ratelimit.h>
19 #include <linux/string.h>
20 #include <linux/mm.h>
21 #include <linux/fs.h>
22 #include <linux/fscrypt.h>
23 #include <linux/fsnotify.h>
24 #include <linux/slab.h>
25 #include <linux/init.h>
26 #include <linux/hash.h>
27 #include <linux/cache.h>
28 #include <linux/export.h>
29 #include <linux/security.h>
30 #include <linux/seqlock.h>
31 #include <linux/memblock.h>
32 #include <linux/bit_spinlock.h>
33 #include <linux/rculist_bl.h>
34 #include <linux/list_lru.h>
35 #include "internal.h"
36 #include "mount.h"
37 
38 #include <asm/runtime-const.h>
39 
40 /*
41  * Usage:
42  * dcache->d_inode->i_lock protects:
43  *   - i_dentry, d_alias, d_inode of aliases
44  * dcache_hash_bucket lock protects:
45  *   - the dcache hash table
46  * s_roots bl list spinlock protects:
47  *   - the s_roots list (see __d_drop)
48  * dentry->d_sb->s_dentry_lru_lock protects:
49  *   - the dcache lru lists and counters
50  * d_lock protects:
51  *   - d_flags
52  *   - d_name
53  *   - d_lru
54  *   - d_count
55  *   - d_unhashed()
56  *   - d_parent and d_chilren
57  *   - childrens' d_sib and d_parent
58  *   - d_alias, d_inode
59  *
60  * Ordering:
61  * dentry->d_inode->i_lock
62  *   dentry->d_lock
63  *     dentry->d_sb->s_dentry_lru_lock
64  *     dcache_hash_bucket lock
65  *     s_roots lock
66  *
67  * If there is an ancestor relationship:
68  * dentry->d_parent->...->d_parent->d_lock
69  *   ...
70  *     dentry->d_parent->d_lock
71  *       dentry->d_lock
72  *
73  * If no ancestor relationship:
74  * arbitrary, since it's serialized on rename_lock
75  */
76 static int sysctl_vfs_cache_pressure __read_mostly = 100;
77 static int sysctl_vfs_cache_pressure_denom __read_mostly = 100;
78 
79 unsigned long vfs_pressure_ratio(unsigned long val)
80 {
81 	return mult_frac(val, sysctl_vfs_cache_pressure, sysctl_vfs_cache_pressure_denom);
82 }
83 EXPORT_SYMBOL_GPL(vfs_pressure_ratio);
84 
85 __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
86 
87 EXPORT_SYMBOL(rename_lock);
88 
89 static struct kmem_cache *__dentry_cache __ro_after_init;
90 #define dentry_cache runtime_const_ptr(__dentry_cache)
91 
92 const struct qstr empty_name = QSTR_INIT("", 0);
93 EXPORT_SYMBOL(empty_name);
94 const struct qstr slash_name = QSTR_INIT("/", 1);
95 EXPORT_SYMBOL(slash_name);
96 const struct qstr dotdot_name = QSTR_INIT("..", 2);
97 EXPORT_SYMBOL(dotdot_name);
98 
99 /*
100  * This is the single most critical data structure when it comes
101  * to the dcache: the hashtable for lookups. Somebody should try
102  * to make this good - I've just made it work.
103  *
104  * This hash-function tries to avoid losing too many bits of hash
105  * information, yet avoid using a prime hash-size or similar.
106  *
107  * Marking the variables "used" ensures that the compiler doesn't
108  * optimize them away completely on architectures with runtime
109  * constant infrastructure, this allows debuggers to see their
110  * values. But updating these values has no effect on those arches.
111  */
112 
113 static unsigned int d_hash_shift __ro_after_init __used;
114 
115 static struct hlist_bl_head *dentry_hashtable __ro_after_init __used;
116 
117 static inline struct hlist_bl_head *d_hash(unsigned long hashlen)
118 {
119 	return runtime_const_ptr(dentry_hashtable) +
120 		runtime_const_shift_right_32(hashlen, d_hash_shift);
121 }
122 
123 #define IN_LOOKUP_SHIFT 10
124 static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];
125 
126 static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
127 					unsigned int hash)
128 {
129 	hash += (unsigned long) parent / L1_CACHE_BYTES;
130 	return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
131 }
132 
133 struct dentry_stat_t {
134 	long nr_dentry;
135 	long nr_unused;
136 	long age_limit;		/* age in seconds */
137 	long want_pages;	/* pages requested by system */
138 	long nr_negative;	/* # of unused negative dentries */
139 	long dummy;		/* Reserved for future use */
140 };
141 
142 static DEFINE_PER_CPU(long, nr_dentry);
143 static DEFINE_PER_CPU(long, nr_dentry_unused);
144 static DEFINE_PER_CPU(long, nr_dentry_negative);
145 static int dentry_negative_policy;
146 
147 #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
148 /* Statistics gathering. */
149 static struct dentry_stat_t dentry_stat = {
150 	.age_limit = 45,
151 };
152 
153 /*
154  * Here we resort to our own counters instead of using generic per-cpu counters
155  * for consistency with what the vfs inode code does. We are expected to harvest
156  * better code and performance by having our own specialized counters.
157  *
158  * Please note that the loop is done over all possible CPUs, not over all online
159  * CPUs. The reason for this is that we don't want to play games with CPUs going
160  * on and off. If one of them goes off, we will just keep their counters.
161  *
162  * glommer: See cffbc8a for details, and if you ever intend to change this,
163  * please update all vfs counters to match.
164  */
165 static long get_nr_dentry(void)
166 {
167 	int i;
168 	long sum = 0;
169 	for_each_possible_cpu(i)
170 		sum += per_cpu(nr_dentry, i);
171 	return sum < 0 ? 0 : sum;
172 }
173 
174 static long get_nr_dentry_unused(void)
175 {
176 	int i;
177 	long sum = 0;
178 	for_each_possible_cpu(i)
179 		sum += per_cpu(nr_dentry_unused, i);
180 	return sum < 0 ? 0 : sum;
181 }
182 
183 static long get_nr_dentry_negative(void)
184 {
185 	int i;
186 	long sum = 0;
187 
188 	for_each_possible_cpu(i)
189 		sum += per_cpu(nr_dentry_negative, i);
190 	return sum < 0 ? 0 : sum;
191 }
192 
193 static int proc_nr_dentry(const struct ctl_table *table, int write, void *buffer,
194 			  size_t *lenp, loff_t *ppos)
195 {
196 	dentry_stat.nr_dentry = get_nr_dentry();
197 	dentry_stat.nr_unused = get_nr_dentry_unused();
198 	dentry_stat.nr_negative = get_nr_dentry_negative();
199 	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
200 }
201 
202 static const struct ctl_table fs_dcache_sysctls[] = {
203 	{
204 		.procname	= "dentry-state",
205 		.data		= &dentry_stat,
206 		.maxlen		= 6*sizeof(long),
207 		.mode		= 0444,
208 		.proc_handler	= proc_nr_dentry,
209 	},
210 	{
211 		.procname	= "dentry-negative",
212 		.data		= &dentry_negative_policy,
213 		.maxlen		= sizeof(dentry_negative_policy),
214 		.mode		= 0644,
215 		.proc_handler	= proc_dointvec_minmax,
216 		.extra1		= SYSCTL_ZERO,
217 		.extra2		= SYSCTL_ONE,
218 	},
219 };
220 
221 static const struct ctl_table vm_dcache_sysctls[] = {
222 	{
223 		.procname	= "vfs_cache_pressure",
224 		.data		= &sysctl_vfs_cache_pressure,
225 		.maxlen		= sizeof(sysctl_vfs_cache_pressure),
226 		.mode		= 0644,
227 		.proc_handler	= proc_dointvec_minmax,
228 		.extra1		= SYSCTL_ZERO,
229 	},
230 	{
231 		.procname	= "vfs_cache_pressure_denom",
232 		.data		= &sysctl_vfs_cache_pressure_denom,
233 		.maxlen		= sizeof(sysctl_vfs_cache_pressure_denom),
234 		.mode		= 0644,
235 		.proc_handler	= proc_dointvec_minmax,
236 		.extra1		= SYSCTL_ONE_HUNDRED,
237 	},
238 };
239 
240 static int __init init_fs_dcache_sysctls(void)
241 {
242 	register_sysctl_init("vm", vm_dcache_sysctls);
243 	register_sysctl_init("fs", fs_dcache_sysctls);
244 	return 0;
245 }
246 fs_initcall(init_fs_dcache_sysctls);
247 #endif
248 
249 /*
250  * Compare 2 name strings, return 0 if they match, otherwise non-zero.
251  * The strings are both count bytes long, and count is non-zero.
252  */
253 #ifdef CONFIG_DCACHE_WORD_ACCESS
254 
255 #include <asm/word-at-a-time.h>
256 /*
257  * NOTE! 'cs' and 'scount' come from a dentry, so it has a
258  * aligned allocation for this particular component. We don't
259  * strictly need the load_unaligned_zeropad() safety, but it
260  * doesn't hurt either.
261  *
262  * In contrast, 'ct' and 'tcount' can be from a pathname, and do
263  * need the careful unaligned handling.
264  */
265 static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
266 {
267 	unsigned long a,b,mask;
268 
269 	for (;;) {
270 		a = read_word_at_a_time(cs);
271 		b = load_unaligned_zeropad(ct);
272 		if (tcount < sizeof(unsigned long))
273 			break;
274 		if (unlikely(a != b))
275 			return 1;
276 		cs += sizeof(unsigned long);
277 		ct += sizeof(unsigned long);
278 		tcount -= sizeof(unsigned long);
279 		if (!tcount)
280 			return 0;
281 	}
282 	mask = bytemask_from_count(tcount);
283 	return unlikely(!!((a ^ b) & mask));
284 }
285 
286 #else
287 
288 static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
289 {
290 	do {
291 		if (*cs != *ct)
292 			return 1;
293 		cs++;
294 		ct++;
295 		tcount--;
296 	} while (tcount);
297 	return 0;
298 }
299 
300 #endif
301 
302 static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
303 {
304 	/*
305 	 * Be careful about RCU walk racing with rename:
306 	 * use 'READ_ONCE' to fetch the name pointer.
307 	 *
308 	 * NOTE! Even if a rename will mean that the length
309 	 * was not loaded atomically, we don't care. The
310 	 * RCU walk will check the sequence count eventually,
311 	 * and catch it. And we won't overrun the buffer,
312 	 * because we're reading the name pointer atomically,
313 	 * and a dentry name is guaranteed to be properly
314 	 * terminated with a NUL byte.
315 	 *
316 	 * End result: even if 'len' is wrong, we'll exit
317 	 * early because the data cannot match (there can
318 	 * be no NUL in the ct/tcount data)
319 	 */
320 	const unsigned char *cs = READ_ONCE(dentry->d_name.name);
321 
322 	return dentry_string_cmp(cs, ct, tcount);
323 }
324 
325 /*
326  * long names are allocated separately from dentry and never modified.
327  * Refcounted, freeing is RCU-delayed.  See take_dentry_name_snapshot()
328  * for the reason why ->count and ->head can't be combined into a union.
329  * dentry_string_cmp() relies upon ->name[] being word-aligned.
330  */
331 struct external_name {
332 	atomic_t count;
333 	struct rcu_head head;
334 	unsigned char name[] __aligned(sizeof(unsigned long));
335 };
336 
337 static inline struct external_name *external_name(struct dentry *dentry)
338 {
339 	return container_of(dentry->d_name.name, struct external_name, name[0]);
340 }
341 
342 static void __d_free(struct rcu_head *head)
343 {
344 	struct dentry *dentry = container_of(head, struct dentry, d_rcu);
345 
346 	kmem_cache_free(dentry_cache, dentry);
347 }
348 
349 static void __d_free_external(struct rcu_head *head)
350 {
351 	struct dentry *dentry = container_of(head, struct dentry, d_rcu);
352 	kfree(external_name(dentry));
353 	kmem_cache_free(dentry_cache, dentry);
354 }
355 
356 static inline int dname_external(const struct dentry *dentry)
357 {
358 	return dentry->d_name.name != dentry->d_shortname.string;
359 }
360 
361 void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
362 {
363 	unsigned seq;
364 	const unsigned char *s;
365 
366 	rcu_read_lock();
367 retry:
368 	seq = read_seqcount_begin(&dentry->d_seq);
369 	s = READ_ONCE(dentry->d_name.name);
370 	name->name.hash_len = dentry->d_name.hash_len;
371 	name->name.name = name->inline_name.string;
372 	if (likely(s == dentry->d_shortname.string)) {
373 		name->inline_name = dentry->d_shortname;
374 	} else {
375 		struct external_name *p;
376 		p = container_of(s, struct external_name, name[0]);
377 		// get a valid reference
378 		if (unlikely(!atomic_inc_not_zero(&p->count)))
379 			goto retry;
380 		name->name.name = s;
381 	}
382 	if (read_seqcount_retry(&dentry->d_seq, seq)) {
383 		release_dentry_name_snapshot(name);
384 		goto retry;
385 	}
386 	rcu_read_unlock();
387 }
388 EXPORT_SYMBOL(take_dentry_name_snapshot);
389 
390 void release_dentry_name_snapshot(struct name_snapshot *name)
391 {
392 	if (unlikely(name->name.name != name->inline_name.string)) {
393 		struct external_name *p;
394 		p = container_of(name->name.name, struct external_name, name[0]);
395 		if (unlikely(atomic_dec_and_test(&p->count)))
396 			kfree_rcu(p, head);
397 	}
398 }
399 EXPORT_SYMBOL(release_dentry_name_snapshot);
400 
401 static inline void __d_set_inode_and_type(struct dentry *dentry,
402 					  struct inode *inode,
403 					  unsigned type_flags)
404 {
405 	unsigned flags;
406 
407 	dentry->d_inode = inode;
408 	flags = READ_ONCE(dentry->d_flags);
409 	flags &= ~DCACHE_ENTRY_TYPE;
410 	flags |= type_flags;
411 	smp_store_release(&dentry->d_flags, flags);
412 }
413 
414 static inline void __d_clear_type_and_inode(struct dentry *dentry)
415 {
416 	unsigned flags = READ_ONCE(dentry->d_flags);
417 
418 	flags &= ~DCACHE_ENTRY_TYPE;
419 	WRITE_ONCE(dentry->d_flags, flags);
420 	dentry->d_inode = NULL;
421 	/*
422 	 * The negative counter only tracks dentries on the LRU. Don't inc if
423 	 * d_lru is on another list.
424 	 */
425 	if ((flags & (DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST)
426 		this_cpu_inc(nr_dentry_negative);
427 }
428 
429 static void dentry_free(struct dentry *dentry)
430 {
431 	WARN_ON(d_really_is_positive(dentry));
432 	if (unlikely(dname_external(dentry))) {
433 		struct external_name *p = external_name(dentry);
434 		if (likely(atomic_dec_and_test(&p->count))) {
435 			call_rcu(&dentry->d_rcu, __d_free_external);
436 			return;
437 		}
438 	}
439 	/* if dentry was never visible to RCU, immediate free is OK */
440 	if (dentry->d_flags & DCACHE_NORCU)
441 		__d_free(&dentry->d_rcu);
442 	else
443 		call_rcu(&dentry->d_rcu, __d_free);
444 }
445 
446 /*
447  * Release the dentry's inode, using the filesystem
448  * d_iput() operation if defined.
449  */
450 static void dentry_unlink_inode(struct dentry * dentry)
451 	__releases(dentry->d_lock)
452 	__releases(dentry->d_inode->i_lock)
453 {
454 	struct inode *inode = dentry->d_inode;
455 
456 	raw_write_seqcount_begin(&dentry->d_seq);
457 	__d_clear_type_and_inode(dentry);
458 	hlist_del_init(&dentry->d_alias);
459 	/*
460 	 * dentry becomes negative, so the space occupied by ->d_alias
461 	 * belongs to ->waiters now; we could use __hlist_del() instead
462 	 * of hlist_del_init(), if not for the stunt pulled by nfs
463 	 * dummy root dentries - positive dentry *not* included into
464 	 * the alias list of its inode.  Open-coding hlist_del_init()
465 	 * and removing zeroing would be too clumsy...
466 	 */
467 	dentry->waiters = NULL;
468 	raw_write_seqcount_end(&dentry->d_seq);
469 	spin_unlock(&dentry->d_lock);
470 	spin_unlock(&inode->i_lock);
471 	if (!inode->i_nlink)
472 		fsnotify_inoderemove(inode);
473 	if (dentry->d_op && dentry->d_op->d_iput)
474 		dentry->d_op->d_iput(dentry, inode);
475 	else
476 		iput(inode);
477 }
478 
479 /*
480  * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
481  * is in use - which includes both the "real" per-superblock
482  * LRU list _and_ the DCACHE_SHRINK_LIST use.
483  *
484  * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
485  * on the shrink list (ie not on the superblock LRU list).
486  *
487  * The per-cpu "nr_dentry_unused" counters are updated with
488  * the DCACHE_LRU_LIST bit.
489  *
490  * The per-cpu "nr_dentry_negative" counters are only updated
491  * when deleted from or added to the per-superblock LRU list, not
492  * from/to the shrink list. That is to avoid an unneeded dec/inc
493  * pair when moving from LRU to shrink list in select_collect().
494  *
495  * These helper functions make sure we always follow the
496  * rules. d_lock must be held by the caller.
497  */
498 #define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
499 static void d_lru_add(struct dentry *dentry)
500 {
501 	D_FLAG_VERIFY(dentry, 0);
502 	dentry->d_flags |= DCACHE_LRU_LIST;
503 	this_cpu_inc(nr_dentry_unused);
504 	if (d_is_negative(dentry))
505 		this_cpu_inc(nr_dentry_negative);
506 	WARN_ON_ONCE(!list_lru_add_obj(
507 			&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
508 }
509 
510 static void d_lru_del(struct dentry *dentry)
511 {
512 	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
513 	dentry->d_flags &= ~DCACHE_LRU_LIST;
514 	this_cpu_dec(nr_dentry_unused);
515 	if (d_is_negative(dentry))
516 		this_cpu_dec(nr_dentry_negative);
517 	WARN_ON_ONCE(!list_lru_del_obj(
518 			&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
519 }
520 
521 static void d_shrink_del(struct dentry *dentry)
522 {
523 	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
524 	list_del_init(&dentry->d_lru);
525 	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
526 	this_cpu_dec(nr_dentry_unused);
527 }
528 
529 static void d_shrink_add(struct dentry *dentry, struct list_head *list)
530 {
531 	D_FLAG_VERIFY(dentry, 0);
532 	list_add(&dentry->d_lru, list);
533 	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
534 	this_cpu_inc(nr_dentry_unused);
535 }
536 
537 /*
538  * These can only be called under the global LRU lock, ie during the
539  * callback for freeing the LRU list. "isolate" removes it from the
540  * LRU lists entirely, while shrink_move moves it to the indicated
541  * private list.
542  */
543 static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
544 {
545 	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
546 	dentry->d_flags &= ~DCACHE_LRU_LIST;
547 	this_cpu_dec(nr_dentry_unused);
548 	if (d_is_negative(dentry))
549 		this_cpu_dec(nr_dentry_negative);
550 	list_lru_isolate(lru, &dentry->d_lru);
551 }
552 
553 static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
554 			      struct list_head *list)
555 {
556 	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
557 	dentry->d_flags |= DCACHE_SHRINK_LIST;
558 	if (d_is_negative(dentry))
559 		this_cpu_dec(nr_dentry_negative);
560 	list_lru_isolate_move(lru, &dentry->d_lru, list);
561 }
562 
563 static void ___d_drop(struct dentry *dentry)
564 {
565 	struct hlist_bl_head *b;
566 	/*
567 	 * Hashed dentries are normally on the dentry hashtable,
568 	 * with the exception of those newly allocated by
569 	 * d_obtain_root, which are always IS_ROOT:
570 	 */
571 	if (unlikely(IS_ROOT(dentry)))
572 		b = &dentry->d_sb->s_roots;
573 	else
574 		b = d_hash(dentry->d_name.hash);
575 
576 	hlist_bl_lock(b);
577 	__hlist_bl_del(&dentry->d_hash);
578 	hlist_bl_unlock(b);
579 }
580 
581 void __d_drop(struct dentry *dentry)
582 {
583 	if (!d_unhashed(dentry)) {
584 		___d_drop(dentry);
585 		dentry->d_hash.pprev = NULL;
586 		write_seqcount_invalidate(&dentry->d_seq);
587 	}
588 }
589 EXPORT_SYMBOL(__d_drop);
590 
591 /**
592  * d_drop - drop a dentry
593  * @dentry: dentry to drop
594  *
595  * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
596  * be found through a VFS lookup any more. Note that this is different from
597  * deleting the dentry - d_delete will try to mark the dentry negative if
598  * possible, giving a successful _negative_ lookup, while d_drop will
599  * just make the cache lookup fail.
600  *
601  * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
602  * reason (NFS timeouts or autofs deletes).
603  *
604  * __d_drop requires dentry->d_lock
605  *
606  * ___d_drop doesn't mark dentry as "unhashed"
607  * (dentry->d_hash.pprev will be LIST_POISON2, not NULL).
608  */
609 void d_drop(struct dentry *dentry)
610 {
611 	spin_lock(&dentry->d_lock);
612 	__d_drop(dentry);
613 	spin_unlock(&dentry->d_lock);
614 }
615 EXPORT_SYMBOL(d_drop);
616 
617 struct completion_list {
618 	struct completion_list *next;
619 	struct completion completion;
620 };
621 
622 /*
623  *  shrink_dcache_tree() needs to be notified when dentry in process of
624  *  being evicted finally gets unlisted.  Such dentries are
625  *	already with negative ->d_count
626  *	already negative
627  *	already not in in-lookup hash
628  *	reachable only via ->d_sib.
629  *
630  *  Use ->waiters for a single-linked list of struct completion_list of
631  *  waiters.
632  */
633 static inline void d_add_waiter(struct dentry *dentry, struct completion_list *p)
634 {
635 	struct completion_list *v = dentry->waiters;
636 	init_completion(&p->completion);
637 	p->next = v;
638 	dentry->waiters = p;
639 }
640 
641 static inline void d_complete_waiters(struct dentry *dentry)
642 {
643 	struct completion_list *v = dentry->waiters;
644 	if (unlikely(v)) {
645 		/* some shrink_dcache_tree() instances are waiting */
646 		dentry->waiters = NULL;
647 		while (v) {
648 			struct completion *r = &v->completion;
649 			v = v->next;
650 			complete(r);
651 		}
652 	}
653 }
654 
655 static inline void dentry_unlist(struct dentry *dentry)
656 {
657 	struct dentry *next;
658 	/*
659 	 * Inform d_walk() and shrink_dentry_list() that we are no longer
660 	 * attached to the dentry tree
661 	 */
662 	dentry->d_flags |= DCACHE_DENTRY_KILLED;
663 	d_complete_waiters(dentry);
664 	if (unlikely(hlist_unhashed(&dentry->d_sib)))
665 		return;
666 	__hlist_del(&dentry->d_sib);
667 	/*
668 	 * Cursors can move around the list of children.  While we'd been
669 	 * a normal list member, it didn't matter - ->d_sib.next would've
670 	 * been updated.  However, from now on it won't be and for the
671 	 * things like d_walk() it might end up with a nasty surprise.
672 	 * Normally d_walk() doesn't care about cursors moving around -
673 	 * ->d_lock on parent prevents that and since a cursor has no children
674 	 * of its own, we get through it without ever unlocking the parent.
675 	 * There is one exception, though - if we ascend from a child that
676 	 * gets killed as soon as we unlock it, the next sibling is found
677 	 * using the value left in its ->d_sib.next.  And if _that_
678 	 * pointed to a cursor, and cursor got moved (e.g. by lseek())
679 	 * before d_walk() regains parent->d_lock, we'll end up skipping
680 	 * everything the cursor had been moved past.
681 	 *
682 	 * Solution: make sure that the pointer left behind in ->d_sib.next
683 	 * points to something that won't be moving around.  I.e. skip the
684 	 * cursors.
685 	 */
686 	while (dentry->d_sib.next) {
687 		next = hlist_entry(dentry->d_sib.next, struct dentry, d_sib);
688 		if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
689 			break;
690 		dentry->d_sib.next = next->d_sib.next;
691 	}
692 }
693 
694 static struct dentry *__dentry_kill(struct dentry *dentry)
695 {
696 	struct dentry *parent = NULL;
697 	bool can_free = true;
698 
699 	/*
700 	 * The dentry is now unrecoverably dead to the world.
701 	 */
702 	lockref_mark_dead(&dentry->d_lockref);
703 
704 	/*
705 	 * inform the fs via d_prune that this dentry is about to be
706 	 * unhashed and destroyed.
707 	 */
708 	if (dentry->d_flags & DCACHE_OP_PRUNE)
709 		dentry->d_op->d_prune(dentry);
710 
711 	if (dentry->d_flags & DCACHE_LRU_LIST) {
712 		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
713 			d_lru_del(dentry);
714 	}
715 	/* if it was on the hash then remove it */
716 	__d_drop(dentry);
717 	if (dentry->d_inode)
718 		dentry_unlink_inode(dentry);
719 	else
720 		spin_unlock(&dentry->d_lock);
721 	this_cpu_dec(nr_dentry);
722 	if (dentry->d_op && dentry->d_op->d_release)
723 		dentry->d_op->d_release(dentry);
724 
725 	cond_resched();
726 	/* now that it's negative, ->d_parent is stable */
727 	if (!IS_ROOT(dentry)) {
728 		parent = dentry->d_parent;
729 		spin_lock(&parent->d_lock);
730 	}
731 	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
732 	dentry_unlist(dentry);
733 	if (dentry->d_flags & DCACHE_SHRINK_LIST)
734 		can_free = false;
735 	spin_unlock(&dentry->d_lock);
736 	if (likely(can_free))
737 		dentry_free(dentry);
738 	if (parent && --parent->d_lockref.count) {
739 		spin_unlock(&parent->d_lock);
740 		return NULL;
741 	}
742 	return parent;
743 }
744 
745 /*
746  * Lock a dentry for feeding it to __dentry_kill().
747  * Called under rcu_read_lock() and dentry->d_lock; the former
748  * guarantees that nothing we access will be freed under us.
749  * Note that dentry is *not* protected from concurrent dentry_kill(),
750  * d_delete(), etc.
751  *
752  * Return false if dentry is busy.  Otherwise, return true and have
753  * that dentry's inode locked.
754  */
755 
756 static bool lock_for_kill(struct dentry *dentry)
757 {
758 	struct inode *inode = dentry->d_inode;
759 
760 	if (unlikely(dentry->d_lockref.count))
761 		return false;
762 
763 	if (!inode || likely(spin_trylock(&inode->i_lock)))
764 		return true;
765 
766 	do {
767 		spin_unlock(&dentry->d_lock);
768 		spin_lock(&inode->i_lock);
769 		spin_lock(&dentry->d_lock);
770 		if (likely(inode == dentry->d_inode))
771 			break;
772 		spin_unlock(&inode->i_lock);
773 		inode = dentry->d_inode;
774 	} while (inode);
775 	if (likely(!dentry->d_lockref.count))
776 		return true;
777 	if (inode)
778 		spin_unlock(&inode->i_lock);
779 	return false;
780 }
781 
782 /*
783  * Decide if dentry is worth retaining.  Usually this is called with dentry
784  * locked; if not locked, we are more limited and might not be able to tell
785  * without a lock.  False in this case means "punt to locked path and recheck".
786  *
787  * In case we aren't locked, these predicates are not "stable". However, it is
788  * sufficient that at some point after we dropped the reference the dentry was
789  * hashed and the flags had the proper value. Other dentry users may have
790  * re-gotten a reference to the dentry and change that, but our work is done -
791  * we can leave the dentry around with a zero refcount.
792  */
793 static inline bool retain_dentry(struct dentry *dentry, bool locked)
794 {
795 	unsigned int d_flags;
796 
797 	smp_rmb();
798 	d_flags = READ_ONCE(dentry->d_flags);
799 
800 	// Unreachable? Nobody would be able to look it up, no point retaining
801 	if (unlikely(d_unhashed(dentry)))
802 		return false;
803 
804 	// Same if it's disconnected
805 	if (unlikely(d_flags & DCACHE_DISCONNECTED))
806 		return false;
807 
808 	// ->d_delete() might tell us not to bother, but that requires
809 	// ->d_lock; can't decide without it
810 	if (unlikely(d_flags & DCACHE_OP_DELETE)) {
811 		if (!locked || dentry->d_op->d_delete(dentry))
812 			return false;
813 	}
814 
815 	// Explicitly told not to bother
816 	if (unlikely(d_flags & DCACHE_DONTCACHE))
817 		return false;
818 
819 	// At this point it looks like we ought to keep it.  We also might
820 	// need to do something - put it on LRU if it wasn't there already
821 	// and mark it referenced if it was on LRU, but not marked yet.
822 	// Unfortunately, both actions require ->d_lock, so in lockless
823 	// case we'd have to punt rather than doing those.
824 	if (unlikely(!(d_flags & DCACHE_LRU_LIST))) {
825 		if (!locked)
826 			return false;
827 		d_lru_add(dentry);
828 	} else if (unlikely(!(d_flags & DCACHE_REFERENCED))) {
829 		if (!locked)
830 			return false;
831 		dentry->d_flags |= DCACHE_REFERENCED;
832 	}
833 	return true;
834 }
835 
836 void d_mark_dontcache(struct inode *inode)
837 {
838 	struct dentry *de;
839 
840 	spin_lock(&inode->i_lock);
841 	for_each_alias(de, inode) {
842 		spin_lock(&de->d_lock);
843 		de->d_flags |= DCACHE_DONTCACHE;
844 		spin_unlock(&de->d_lock);
845 	}
846 	inode_state_set(inode, I_DONTCACHE);
847 	spin_unlock(&inode->i_lock);
848 }
849 EXPORT_SYMBOL(d_mark_dontcache);
850 
851 /*
852  * Try to do a lockless dput(), and return whether that was successful.
853  *
854  * If unsuccessful, we return false, having already taken the dentry lock.
855  * In that case refcount is guaranteed to be zero and we have already
856  * decided that it's not worth keeping around.
857  *
858  * The caller needs to hold the RCU read lock, so that the dentry is
859  * guaranteed to stay around even if the refcount goes down to zero!
860  */
861 static inline bool fast_dput(struct dentry *dentry)
862 {
863 	int ret;
864 
865 	/*
866 	 * try to decrement the lockref optimistically.
867 	 */
868 	ret = lockref_put_return(&dentry->d_lockref);
869 
870 	/*
871 	 * If the lockref_put_return() failed due to the lock being held
872 	 * by somebody else, the fast path has failed. We will need to
873 	 * get the lock, and then check the count again.
874 	 */
875 	if (unlikely(ret < 0)) {
876 		spin_lock(&dentry->d_lock);
877 		if (WARN_ON_ONCE(dentry->d_lockref.count <= 0)) {
878 			spin_unlock(&dentry->d_lock);
879 			return true;
880 		}
881 		dentry->d_lockref.count--;
882 		goto locked;
883 	}
884 
885 	/*
886 	 * If we weren't the last ref, we're done.
887 	 */
888 	if (ret)
889 		return true;
890 
891 	/*
892 	 * Can we decide that decrement of refcount is all we needed without
893 	 * taking the lock?  There's a very common case when it's all we need -
894 	 * dentry looks like it ought to be retained and there's nothing else
895 	 * to do.
896 	 */
897 	if (retain_dentry(dentry, false))
898 		return true;
899 
900 	/*
901 	 * Either not worth retaining or we can't tell without the lock.
902 	 * Get the lock, then.  We've already decremented the refcount to 0,
903 	 * but we'll need to re-check the situation after getting the lock.
904 	 */
905 	spin_lock(&dentry->d_lock);
906 
907 	/*
908 	 * Did somebody else grab a reference to it in the meantime, and
909 	 * we're no longer the last user after all? Alternatively, somebody
910 	 * else could have killed it and marked it dead. Either way, we
911 	 * don't need to do anything else.
912 	 */
913 locked:
914 	if (dentry->d_lockref.count || retain_dentry(dentry, true)) {
915 		spin_unlock(&dentry->d_lock);
916 		return true;
917 	}
918 	return false;
919 }
920 
921 static void finish_dput(struct dentry *dentry)
922 	__releases(dentry->d_lock)
923 	__releases(RCU)
924 {
925 	while (lock_for_kill(dentry)) {
926 		rcu_read_unlock();
927 		dentry = __dentry_kill(dentry);
928 		if (!dentry)
929 			return;
930 		if (retain_dentry(dentry, true)) {
931 			spin_unlock(&dentry->d_lock);
932 			return;
933 		}
934 		rcu_read_lock();
935 	}
936 	rcu_read_unlock();
937 	spin_unlock(&dentry->d_lock);
938 }
939 
940 /*
941  * This is dput
942  *
943  * This is complicated by the fact that we do not want to put
944  * dentries that are no longer on any hash chain on the unused
945  * list: we'd much rather just get rid of them immediately.
946  *
947  * However, that implies that we have to traverse the dentry
948  * tree upwards to the parents which might _also_ now be
949  * scheduled for deletion (it may have been only waiting for
950  * its last child to go away).
951  *
952  * This tail recursion is done by hand as we don't want to depend
953  * on the compiler to always get this right (gcc generally doesn't).
954  * Real recursion would eat up our stack space.
955  */
956 
957 /*
958  * dput - release a dentry
959  * @dentry: dentry to release
960  *
961  * Release a dentry. This will drop the usage count and if appropriate
962  * call the dentry unlink method as well as removing it from the queues and
963  * releasing its resources. If the parent dentries were scheduled for release
964  * they too may now get deleted.
965  */
966 void dput(struct dentry *dentry)
967 {
968 	if (!dentry)
969 		return;
970 	might_sleep();
971 	rcu_read_lock();
972 	if (likely(fast_dput(dentry))) {
973 		rcu_read_unlock();
974 		return;
975 	}
976 	finish_dput(dentry);
977 }
978 EXPORT_SYMBOL(dput);
979 
980 void d_make_discardable(struct dentry *dentry)
981 {
982 	spin_lock(&dentry->d_lock);
983 	WARN_ON(!(dentry->d_flags & DCACHE_PERSISTENT));
984 	dentry->d_flags &= ~DCACHE_PERSISTENT;
985 	dentry->d_lockref.count--;
986 	rcu_read_lock();
987 	finish_dput(dentry);
988 }
989 EXPORT_SYMBOL(d_make_discardable);
990 
991 static void to_shrink_list(struct dentry *dentry, struct list_head *list)
992 __must_hold(&dentry->d_lock)
993 {
994 	if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
995 		if (dentry->d_flags & DCACHE_LRU_LIST)
996 			d_lru_del(dentry);
997 		d_shrink_add(dentry, list);
998 	}
999 }
1000 
1001 void dput_to_list(struct dentry *dentry, struct list_head *list)
1002 {
1003 	rcu_read_lock();
1004 	if (likely(fast_dput(dentry))) {
1005 		rcu_read_unlock();
1006 		return;
1007 	}
1008 	rcu_read_unlock();
1009 	to_shrink_list(dentry, list);
1010 	spin_unlock(&dentry->d_lock);
1011 }
1012 
1013 struct dentry *dget_parent(struct dentry *dentry)
1014 {
1015 	int gotref;
1016 	struct dentry *ret;
1017 	unsigned seq;
1018 
1019 	/*
1020 	 * Do optimistic parent lookup without any
1021 	 * locking.
1022 	 */
1023 	rcu_read_lock();
1024 	seq = raw_seqcount_begin(&dentry->d_seq);
1025 	ret = READ_ONCE(dentry->d_parent);
1026 	gotref = lockref_get_not_zero(&ret->d_lockref);
1027 	rcu_read_unlock();
1028 	if (likely(gotref)) {
1029 		if (!read_seqcount_retry(&dentry->d_seq, seq))
1030 			return ret;
1031 		dput(ret);
1032 	}
1033 
1034 repeat:
1035 	/*
1036 	 * Don't need rcu_dereference because we re-check it was correct under
1037 	 * the lock.
1038 	 */
1039 	rcu_read_lock();
1040 	ret = dentry->d_parent;
1041 	spin_lock(&ret->d_lock);
1042 	if (unlikely(ret != dentry->d_parent)) {
1043 		spin_unlock(&ret->d_lock);
1044 		rcu_read_unlock();
1045 		goto repeat;
1046 	}
1047 	rcu_read_unlock();
1048 	BUG_ON(!ret->d_lockref.count);
1049 	ret->d_lockref.count++;
1050 	spin_unlock(&ret->d_lock);
1051 	return ret;
1052 }
1053 EXPORT_SYMBOL(dget_parent);
1054 
1055 static struct dentry * __d_find_any_alias(struct inode *inode)
1056 {
1057 	struct dentry *alias;
1058 
1059 	if (hlist_empty(&inode->i_dentry))
1060 		return NULL;
1061 	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
1062 	lockref_get(&alias->d_lockref);
1063 	return alias;
1064 }
1065 
1066 /**
1067  * d_find_any_alias - find any alias for a given inode
1068  * @inode: inode to find an alias for
1069  *
1070  * If any aliases exist for the given inode, take and return a
1071  * reference for one of them.  If no aliases exist, return %NULL.
1072  */
1073 struct dentry *d_find_any_alias(struct inode *inode)
1074 {
1075 	struct dentry *de;
1076 
1077 	spin_lock(&inode->i_lock);
1078 	de = __d_find_any_alias(inode);
1079 	spin_unlock(&inode->i_lock);
1080 	return de;
1081 }
1082 EXPORT_SYMBOL(d_find_any_alias);
1083 
1084 static struct dentry *__d_find_alias(struct inode *inode)
1085 {
1086 	struct dentry *alias;
1087 
1088 	if (S_ISDIR(inode->i_mode))
1089 		return __d_find_any_alias(inode);
1090 
1091 	for_each_alias(alias, inode) {
1092 		spin_lock(&alias->d_lock);
1093  		if (!d_unhashed(alias)) {
1094 			dget_dlock(alias);
1095 			spin_unlock(&alias->d_lock);
1096 			return alias;
1097 		}
1098 		spin_unlock(&alias->d_lock);
1099 	}
1100 	return NULL;
1101 }
1102 
1103 /**
1104  * d_find_alias - grab a hashed alias of inode
1105  * @inode: inode in question
1106  *
1107  * If inode has a hashed alias, or is a directory and has any alias,
1108  * acquire the reference to alias and return it. Otherwise return NULL.
1109  * Notice that if inode is a directory there can be only one alias and
1110  * it can be unhashed only if it has no children, or if it is the root
1111  * of a filesystem, or if the directory was renamed and d_revalidate
1112  * was the first vfs operation to notice.
1113  *
1114  * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
1115  * any other hashed alias over that one.
1116  */
1117 struct dentry *d_find_alias(struct inode *inode)
1118 {
1119 	struct dentry *de = NULL;
1120 
1121 	if (!hlist_empty(&inode->i_dentry)) {
1122 		spin_lock(&inode->i_lock);
1123 		de = __d_find_alias(inode);
1124 		spin_unlock(&inode->i_lock);
1125 	}
1126 	return de;
1127 }
1128 EXPORT_SYMBOL(d_find_alias);
1129 
1130 /*
1131  *  Caller MUST be holding rcu_read_lock() and be guaranteed
1132  *  that inode won't get freed until rcu_read_unlock().
1133  */
1134 struct dentry *d_find_alias_rcu(struct inode *inode)
1135 {
1136 	struct hlist_head *l = &inode->i_dentry;
1137 	struct dentry *de = NULL;
1138 
1139 	spin_lock(&inode->i_lock);
1140 	// ->i_dentry and ->i_rcu are colocated, but the latter won't be
1141 	// used without having I_FREEING set, which means no aliases left
1142 	if (likely(!(inode_state_read(inode) & I_FREEING) && !hlist_empty(l))) {
1143 		if (S_ISDIR(inode->i_mode)) {
1144 			de = hlist_entry(l->first, struct dentry, d_alias);
1145 		} else {
1146 			hlist_for_each_entry(de, l, d_alias)
1147 				if (!d_unhashed(de))
1148 					break;
1149 		}
1150 	}
1151 	spin_unlock(&inode->i_lock);
1152 	return de;
1153 }
1154 
1155 /**
1156  * d_dispose_if_unused - move unreferenced dentries to shrink list
1157  * @dentry: dentry in question
1158  * @dispose: head of shrink list
1159  *
1160  * If dentry has no external references, move it to shrink list.
1161  *
1162  * NOTE!!! The caller is responsible for preventing eviction of the dentry by
1163  * holding dentry->d_inode->i_lock or equivalent.
1164  */
1165 void d_dispose_if_unused(struct dentry *dentry, struct list_head *dispose)
1166 {
1167 	spin_lock(&dentry->d_lock);
1168 	if (!dentry->d_lockref.count)
1169 		to_shrink_list(dentry, dispose);
1170 	spin_unlock(&dentry->d_lock);
1171 }
1172 EXPORT_SYMBOL(d_dispose_if_unused);
1173 
1174 /*
1175  *	Try to kill dentries associated with this inode.
1176  * WARNING: you must own a reference to inode.
1177  */
1178 void d_prune_aliases(struct inode *inode)
1179 {
1180 	LIST_HEAD(dispose);
1181 	struct dentry *dentry;
1182 
1183 	spin_lock(&inode->i_lock);
1184 	for_each_alias(dentry, inode)
1185 		d_dispose_if_unused(dentry, &dispose);
1186 	spin_unlock(&inode->i_lock);
1187 	shrink_dentry_list(&dispose);
1188 }
1189 EXPORT_SYMBOL(d_prune_aliases);
1190 
1191 static inline void shrink_kill(struct dentry *victim)
1192 {
1193 	do {
1194 		rcu_read_unlock();
1195 		victim = __dentry_kill(victim);
1196 		rcu_read_lock();
1197 	} while (victim && lock_for_kill(victim));
1198 	rcu_read_unlock();
1199 	if (victim)
1200 		spin_unlock(&victim->d_lock);
1201 }
1202 
1203 void shrink_dentry_list(struct list_head *list)
1204 {
1205 	while (!list_empty(list)) {
1206 		struct dentry *dentry;
1207 
1208 		dentry = list_entry(list->prev, struct dentry, d_lru);
1209 		spin_lock(&dentry->d_lock);
1210 		rcu_read_lock();
1211 		if (!lock_for_kill(dentry)) {
1212 			bool can_free;
1213 			rcu_read_unlock();
1214 			d_shrink_del(dentry);
1215 			can_free = dentry->d_flags & DCACHE_DENTRY_KILLED;
1216 			spin_unlock(&dentry->d_lock);
1217 			if (can_free)
1218 				dentry_free(dentry);
1219 			continue;
1220 		}
1221 		d_shrink_del(dentry);
1222 		shrink_kill(dentry);
1223 	}
1224 }
1225 EXPORT_SYMBOL(shrink_dentry_list);
1226 
1227 static enum lru_status dentry_lru_isolate(struct list_head *item,
1228 		struct list_lru_one *lru, void *arg)
1229 {
1230 	struct list_head *freeable = arg;
1231 	struct dentry	*dentry = container_of(item, struct dentry, d_lru);
1232 
1233 
1234 	/*
1235 	 * we are inverting the lru lock/dentry->d_lock here,
1236 	 * so use a trylock. If we fail to get the lock, just skip
1237 	 * it
1238 	 */
1239 	if (!spin_trylock(&dentry->d_lock))
1240 		return LRU_SKIP;
1241 
1242 	/*
1243 	 * Referenced dentries are still in use. If they have active
1244 	 * counts, just remove them from the LRU. Otherwise give them
1245 	 * another pass through the LRU.
1246 	 */
1247 	if (dentry->d_lockref.count) {
1248 		d_lru_isolate(lru, dentry);
1249 		spin_unlock(&dentry->d_lock);
1250 		return LRU_REMOVED;
1251 	}
1252 
1253 	if (dentry->d_flags & DCACHE_REFERENCED) {
1254 		dentry->d_flags &= ~DCACHE_REFERENCED;
1255 		spin_unlock(&dentry->d_lock);
1256 
1257 		/*
1258 		 * The list move itself will be made by the common LRU code. At
1259 		 * this point, we've dropped the dentry->d_lock but keep the
1260 		 * lru lock. This is safe to do, since every list movement is
1261 		 * protected by the lru lock even if both locks are held.
1262 		 *
1263 		 * This is guaranteed by the fact that all LRU management
1264 		 * functions are intermediated by the LRU API calls like
1265 		 * list_lru_add_obj and list_lru_del_obj. List movement in this file
1266 		 * only ever occur through this functions or through callbacks
1267 		 * like this one, that are called from the LRU API.
1268 		 *
1269 		 * The only exceptions to this are functions like
1270 		 * shrink_dentry_list, and code that first checks for the
1271 		 * DCACHE_SHRINK_LIST flag.  Those are guaranteed to be
1272 		 * operating only with stack provided lists after they are
1273 		 * properly isolated from the main list.  It is thus, always a
1274 		 * local access.
1275 		 */
1276 		return LRU_ROTATE;
1277 	}
1278 
1279 	d_lru_shrink_move(lru, dentry, freeable);
1280 	spin_unlock(&dentry->d_lock);
1281 
1282 	return LRU_REMOVED;
1283 }
1284 
1285 /**
1286  * prune_dcache_sb - shrink the dcache
1287  * @sb: superblock
1288  * @sc: shrink control, passed to list_lru_shrink_walk()
1289  *
1290  * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
1291  * is done when we need more memory and called from the superblock shrinker
1292  * function.
1293  *
1294  * This function may fail to free any resources if all the dentries are in
1295  * use.
1296  */
1297 long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
1298 {
1299 	LIST_HEAD(dispose);
1300 	long freed;
1301 
1302 	freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
1303 				     dentry_lru_isolate, &dispose);
1304 	shrink_dentry_list(&dispose);
1305 	return freed;
1306 }
1307 
1308 static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
1309 		struct list_lru_one *lru, void *arg)
1310 {
1311 	struct list_head *freeable = arg;
1312 	struct dentry	*dentry = container_of(item, struct dentry, d_lru);
1313 
1314 	/*
1315 	 * we are inverting the lru lock/dentry->d_lock here,
1316 	 * so use a trylock. If we fail to get the lock, just skip
1317 	 * it
1318 	 */
1319 	if (!spin_trylock(&dentry->d_lock))
1320 		return LRU_SKIP;
1321 
1322 	d_lru_shrink_move(lru, dentry, freeable);
1323 	spin_unlock(&dentry->d_lock);
1324 
1325 	return LRU_REMOVED;
1326 }
1327 
1328 
1329 /**
1330  * shrink_dcache_sb - shrink dcache for a superblock
1331  * @sb: superblock
1332  *
1333  * Shrink the dcache for the specified super block. This is used to free
1334  * the dcache before unmounting a file system.
1335  */
1336 void shrink_dcache_sb(struct super_block *sb)
1337 {
1338 	do {
1339 		LIST_HEAD(dispose);
1340 
1341 		list_lru_walk(&sb->s_dentry_lru,
1342 			dentry_lru_isolate_shrink, &dispose, 1024);
1343 		shrink_dentry_list(&dispose);
1344 	} while (list_lru_count(&sb->s_dentry_lru) > 0);
1345 }
1346 EXPORT_SYMBOL(shrink_dcache_sb);
1347 
1348 /**
1349  * enum d_walk_ret - action to take during tree walk
1350  * @D_WALK_CONTINUE:	continue walk
1351  * @D_WALK_QUIT:	quit walk
1352  * @D_WALK_NORETRY:	quit when retry is needed
1353  * @D_WALK_SKIP:	skip this dentry and its children
1354  */
1355 enum d_walk_ret {
1356 	D_WALK_CONTINUE,
1357 	D_WALK_QUIT,
1358 	D_WALK_NORETRY,
1359 	D_WALK_SKIP,
1360 };
1361 
1362 /**
1363  * d_walk - walk the dentry tree
1364  * @parent:	start of walk
1365  * @data:	data passed to @enter() and @finish()
1366  * @enter:	callback when first entering the dentry
1367  *
1368  * The @enter() callbacks are called with d_lock held.
1369  */
1370 static void d_walk(struct dentry *parent, void *data,
1371 		   enum d_walk_ret (*enter)(void *, struct dentry *))
1372 {
1373 	struct dentry *this_parent, *dentry;
1374 	unsigned seq = 0;
1375 	enum d_walk_ret ret;
1376 	bool retry = true;
1377 
1378 again:
1379 	read_seqbegin_or_lock(&rename_lock, &seq);
1380 	this_parent = parent;
1381 	spin_lock(&this_parent->d_lock);
1382 
1383 	ret = enter(data, this_parent);
1384 	switch (ret) {
1385 	case D_WALK_CONTINUE:
1386 		break;
1387 	case D_WALK_QUIT:
1388 	case D_WALK_SKIP:
1389 		goto out_unlock;
1390 	case D_WALK_NORETRY:
1391 		retry = false;
1392 		break;
1393 	}
1394 repeat:
1395 	dentry = d_first_child(this_parent);
1396 resume:
1397 	hlist_for_each_entry_from(dentry, d_sib) {
1398 		if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
1399 			continue;
1400 
1401 		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1402 
1403 		ret = enter(data, dentry);
1404 		switch (ret) {
1405 		case D_WALK_CONTINUE:
1406 			break;
1407 		case D_WALK_QUIT:
1408 			spin_unlock(&dentry->d_lock);
1409 			goto out_unlock;
1410 		case D_WALK_NORETRY:
1411 			retry = false;
1412 			break;
1413 		case D_WALK_SKIP:
1414 			spin_unlock(&dentry->d_lock);
1415 			continue;
1416 		}
1417 
1418 		if (!hlist_empty(&dentry->d_children)) {
1419 			spin_unlock(&this_parent->d_lock);
1420 			spin_release(&dentry->d_lock.dep_map, _RET_IP_);
1421 			this_parent = dentry;
1422 			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1423 			goto repeat;
1424 		}
1425 		spin_unlock(&dentry->d_lock);
1426 	}
1427 	/*
1428 	 * All done at this level ... ascend and resume the search.
1429 	 */
1430 	rcu_read_lock();
1431 ascend:
1432 	if (this_parent != parent) {
1433 		dentry = this_parent;
1434 		this_parent = dentry->d_parent;
1435 
1436 		spin_unlock(&dentry->d_lock);
1437 		spin_lock(&this_parent->d_lock);
1438 
1439 		/* might go back up the wrong parent if we have had a rename. */
1440 		if (need_seqretry(&rename_lock, seq))
1441 			goto rename_retry;
1442 		/* go into the first sibling still alive */
1443 		hlist_for_each_entry_continue(dentry, d_sib) {
1444 			if (likely(!(dentry->d_flags & DCACHE_DENTRY_KILLED))) {
1445 				rcu_read_unlock();
1446 				goto resume;
1447 			}
1448 		}
1449 		goto ascend;
1450 	}
1451 	if (need_seqretry(&rename_lock, seq))
1452 		goto rename_retry;
1453 	rcu_read_unlock();
1454 
1455 out_unlock:
1456 	spin_unlock(&this_parent->d_lock);
1457 	done_seqretry(&rename_lock, seq);
1458 	return;
1459 
1460 rename_retry:
1461 	spin_unlock(&this_parent->d_lock);
1462 	rcu_read_unlock();
1463 	BUG_ON(seq & 1);
1464 	if (!retry)
1465 		return;
1466 	seq = 1;
1467 	goto again;
1468 }
1469 
1470 struct check_mount {
1471 	struct vfsmount *mnt;
1472 	unsigned int mounted;
1473 };
1474 
1475 /* locks: mount_locked_reader && dentry->d_lock */
1476 static enum d_walk_ret path_check_mount(void *data, struct dentry *dentry)
1477 {
1478 	struct check_mount *info = data;
1479 	struct path path = { .mnt = info->mnt, .dentry = dentry };
1480 
1481 	if (likely(!d_mountpoint(dentry)))
1482 		return D_WALK_CONTINUE;
1483 	if (__path_is_mountpoint(&path)) {
1484 		info->mounted = 1;
1485 		return D_WALK_QUIT;
1486 	}
1487 	return D_WALK_CONTINUE;
1488 }
1489 
1490 /**
1491  * path_has_submounts - check for mounts over a dentry in the
1492  *                      current namespace.
1493  * @parent: path to check.
1494  *
1495  * Return true if the parent or its subdirectories contain
1496  * a mount point in the current namespace.
1497  */
1498 int path_has_submounts(const struct path *parent)
1499 {
1500 	struct check_mount data = { .mnt = parent->mnt, .mounted = 0 };
1501 
1502 	guard(mount_locked_reader)();
1503 	d_walk(parent->dentry, &data, path_check_mount);
1504 
1505 	return data.mounted;
1506 }
1507 EXPORT_SYMBOL(path_has_submounts);
1508 
1509 /*
1510  * Called by mount code to set a mountpoint and check if the mountpoint is
1511  * reachable (e.g. NFS can unhash a directory dentry and then the complete
1512  * subtree can become unreachable).
1513  *
1514  * Only one of d_invalidate() and d_set_mounted() must succeed.  For
1515  * this reason take rename_lock and d_lock on dentry and ancestors.
1516  */
1517 int d_set_mounted(struct dentry *dentry)
1518 {
1519 	struct dentry *p;
1520 	int ret = -ENOENT;
1521 	read_seqlock_excl(&rename_lock);
1522 	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
1523 		/* Need exclusion wrt. d_invalidate() */
1524 		spin_lock(&p->d_lock);
1525 		if (unlikely(d_unhashed(p))) {
1526 			spin_unlock(&p->d_lock);
1527 			goto out;
1528 		}
1529 		spin_unlock(&p->d_lock);
1530 	}
1531 	spin_lock(&dentry->d_lock);
1532 	if (!d_unlinked(dentry)) {
1533 		ret = -EBUSY;
1534 		if (!d_mountpoint(dentry)) {
1535 			dentry->d_flags |= DCACHE_MOUNTED;
1536 			ret = 0;
1537 		}
1538 	}
1539  	spin_unlock(&dentry->d_lock);
1540 out:
1541 	read_sequnlock_excl(&rename_lock);
1542 	return ret;
1543 }
1544 
1545 /*
1546  * Search the dentry child list of the specified parent,
1547  * and move any unused dentries to the end of the unused
1548  * list for prune_dcache(). We descend to the next level
1549  * whenever the d_children list is non-empty and continue
1550  * searching.
1551  *
1552  * It returns zero iff there are no unused children,
1553  * otherwise  it returns the number of children moved to
1554  * the end of the unused list. This may not be the total
1555  * number of unused children, because select_parent can
1556  * drop the lock and return early due to latency
1557  * constraints.
1558  */
1559 
1560 struct select_data {
1561 	struct dentry *start;
1562 	union {
1563 		long found;
1564 		struct dentry *victim;
1565 	};
1566 	struct list_head dispose;
1567 };
1568 
1569 static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
1570 {
1571 	struct select_data *data = _data;
1572 	enum d_walk_ret ret = D_WALK_CONTINUE;
1573 
1574 	if (data->start == dentry)
1575 		goto out;
1576 
1577 	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
1578 		data->found++;
1579 	} else if (!dentry->d_lockref.count) {
1580 		to_shrink_list(dentry, &data->dispose);
1581 		data->found++;
1582 	} else if (dentry->d_lockref.count < 0) {
1583 		data->found++;
1584 	}
1585 	/*
1586 	 * We can return to the caller if we have found some (this
1587 	 * ensures forward progress). We'll be coming back to find
1588 	 * the rest.
1589 	 */
1590 	if (!list_empty(&data->dispose))
1591 		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
1592 out:
1593 	return ret;
1594 }
1595 
1596 static enum d_walk_ret select_collect_umount(void *_data, struct dentry *dentry)
1597 {
1598 	if (dentry->d_flags & DCACHE_PERSISTENT) {
1599 		dentry->d_flags &= ~DCACHE_PERSISTENT;
1600 		dentry->d_lockref.count--;
1601 	}
1602 	return select_collect(_data, dentry);
1603 }
1604 
1605 static enum d_walk_ret select_collect2(void *_data, struct dentry *dentry)
1606 {
1607 	struct select_data *data = _data;
1608 	enum d_walk_ret ret = D_WALK_CONTINUE;
1609 
1610 	if (data->start == dentry)
1611 		goto out;
1612 
1613 	if (!dentry->d_lockref.count) {
1614 		if (dentry->d_flags & DCACHE_SHRINK_LIST) {
1615 			rcu_read_lock();
1616 			data->victim = dentry;
1617 			return D_WALK_QUIT;
1618 		}
1619 		to_shrink_list(dentry, &data->dispose);
1620 	} else if (dentry->d_lockref.count < 0) {
1621 		rcu_read_lock();
1622 		data->victim = dentry;
1623 		return D_WALK_QUIT;
1624 	}
1625 	/*
1626 	 * We can return to the caller if we have found some (this
1627 	 * ensures forward progress). We'll be coming back to find
1628 	 * the rest.
1629 	 */
1630 	if (!list_empty(&data->dispose))
1631 		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
1632 out:
1633 	return ret;
1634 }
1635 
1636 /**
1637  * shrink_dcache_tree - prune dcache
1638  * @parent: parent of entries to prune
1639  * @for_umount: true if we want to unpin the persistent ones
1640  *
1641  * Prune the dcache to remove unused children of the parent dentry.
1642  */
1643 static void shrink_dcache_tree(struct dentry *parent, bool for_umount)
1644 {
1645 	for (;;) {
1646 		struct select_data data = {.start = parent};
1647 
1648 		INIT_LIST_HEAD(&data.dispose);
1649 		d_walk(parent, &data,
1650 			for_umount ? select_collect_umount : select_collect);
1651 
1652 		if (!list_empty(&data.dispose)) {
1653 			shrink_dentry_list(&data.dispose);
1654 			continue;
1655 		}
1656 
1657 		cond_resched();
1658 		if (!data.found)
1659 			break;
1660 		data.victim = NULL;
1661 		d_walk(parent, &data, select_collect2);
1662 		if (data.victim) {
1663 			struct dentry *v = data.victim;
1664 
1665 			spin_lock(&v->d_lock);
1666 			if (v->d_lockref.count < 0 &&
1667 			    !(v->d_flags & DCACHE_DENTRY_KILLED)) {
1668 				struct completion_list wait;
1669 				// It's busy dying; have it notify us once
1670 				// it becomes invisible to d_walk().
1671 				d_add_waiter(v, &wait);
1672 				spin_unlock(&v->d_lock);
1673 				rcu_read_unlock();
1674 				if (!list_empty(&data.dispose))
1675 					shrink_dentry_list(&data.dispose);
1676 				wait_for_completion(&wait.completion);
1677 				continue;
1678 			}
1679 			if (!lock_for_kill(v)) {
1680 				spin_unlock(&v->d_lock);
1681 				rcu_read_unlock();
1682 			} else {
1683 				shrink_kill(v);
1684 			}
1685 		}
1686 		if (!list_empty(&data.dispose))
1687 			shrink_dentry_list(&data.dispose);
1688 	}
1689 }
1690 
1691 void shrink_dcache_parent(struct dentry *parent)
1692 {
1693 	shrink_dcache_tree(parent, false);
1694 }
1695 EXPORT_SYMBOL(shrink_dcache_parent);
1696 
1697 static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
1698 {
1699 	/* it has busy descendents; complain about those instead */
1700 	if (!hlist_empty(&dentry->d_children))
1701 		return D_WALK_CONTINUE;
1702 
1703 	/* root with refcount 1 is fine */
1704 	if (dentry == _data && dentry->d_lockref.count == 1)
1705 		return D_WALK_CONTINUE;
1706 
1707 	WARN(1, "BUG: Dentry %p{i=%llx,n=%pd} "
1708 			" still in use (%d) [unmount of %s %s]\n",
1709 		       dentry,
1710 		       dentry->d_inode ?
1711 		       dentry->d_inode->i_ino : (u64)0,
1712 		       dentry,
1713 		       dentry->d_lockref.count,
1714 		       dentry->d_sb->s_type->name,
1715 		       dentry->d_sb->s_id);
1716 	return D_WALK_CONTINUE;
1717 }
1718 
1719 static void do_one_tree(struct dentry *dentry)
1720 {
1721 	shrink_dcache_tree(dentry, true);
1722 	d_walk(dentry, dentry, umount_check);
1723 	d_drop(dentry);
1724 	dput(dentry);
1725 }
1726 
1727 /*
1728  * destroy the dentries attached to a superblock on unmounting
1729  */
1730 void shrink_dcache_for_umount(struct super_block *sb)
1731 {
1732 	struct dentry *dentry;
1733 
1734 	rwsem_assert_held_write(&sb->s_umount);
1735 
1736 	dentry = sb->s_root;
1737 	sb->s_root = NULL;
1738 	do_one_tree(dentry);
1739 
1740 	while (!hlist_bl_empty(&sb->s_roots)) {
1741 		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_roots), struct dentry, d_hash));
1742 		do_one_tree(dentry);
1743 	}
1744 }
1745 
1746 static enum d_walk_ret find_submount(void *_data, struct dentry *dentry)
1747 {
1748 	struct dentry **victim = _data;
1749 	if (d_mountpoint(dentry)) {
1750 		*victim = dget_dlock(dentry);
1751 		return D_WALK_QUIT;
1752 	}
1753 	return D_WALK_CONTINUE;
1754 }
1755 
1756 /**
1757  * d_invalidate - detach submounts, prune dcache, and drop
1758  * @dentry: dentry to invalidate (aka detach, prune and drop)
1759  */
1760 void d_invalidate(struct dentry *dentry)
1761 {
1762 	bool had_submounts = false;
1763 	spin_lock(&dentry->d_lock);
1764 	if (d_unhashed(dentry)) {
1765 		spin_unlock(&dentry->d_lock);
1766 		return;
1767 	}
1768 	__d_drop(dentry);
1769 	spin_unlock(&dentry->d_lock);
1770 
1771 	/* Negative dentries can be dropped without further checks */
1772 	if (!dentry->d_inode)
1773 		return;
1774 
1775 	shrink_dcache_parent(dentry);
1776 	for (;;) {
1777 		struct dentry *victim = NULL;
1778 		d_walk(dentry, &victim, find_submount);
1779 		if (!victim) {
1780 			if (had_submounts)
1781 				shrink_dcache_parent(dentry);
1782 			return;
1783 		}
1784 		had_submounts = true;
1785 		detach_mounts(victim);
1786 		dput(victim);
1787 	}
1788 }
1789 EXPORT_SYMBOL(d_invalidate);
1790 
1791 /**
1792  * __d_alloc - allocate a dcache entry
1793  * @sb: filesystem it will belong to
1794  * @name: qstr of the name
1795  *
1796  * Allocates a dentry. It returns %NULL if there is insufficient memory
1797  * available. On a success the dentry is returned. The name passed in is
1798  * copied and the copy passed in may be reused after this call.
1799  */
1800 
1801 static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
1802 {
1803 	struct dentry *dentry;
1804 	char *dname;
1805 	int err;
1806 
1807 	dentry = kmem_cache_alloc_lru(dentry_cache, &sb->s_dentry_lru,
1808 				      GFP_KERNEL);
1809 	if (!dentry)
1810 		return NULL;
1811 
1812 	/*
1813 	 * We guarantee that the inline name is always NUL-terminated.
1814 	 * This way the memcpy() done by the name switching in rename
1815 	 * will still always have a NUL at the end, even if we might
1816 	 * be overwriting an internal NUL character
1817 	 */
1818 	dentry->d_shortname.string[DNAME_INLINE_LEN-1] = 0;
1819 	if (unlikely(!name)) {
1820 		name = &slash_name;
1821 		dname = dentry->d_shortname.string;
1822 	} else if (name->len > DNAME_INLINE_LEN-1) {
1823 		size_t size = offsetof(struct external_name, name[1]);
1824 		struct external_name *p = kmalloc(size + name->len,
1825 						  GFP_KERNEL_ACCOUNT |
1826 						  __GFP_RECLAIMABLE);
1827 		if (!p) {
1828 			kmem_cache_free(dentry_cache, dentry);
1829 			return NULL;
1830 		}
1831 		atomic_set(&p->count, 1);
1832 		dname = p->name;
1833 	} else  {
1834 		dname = dentry->d_shortname.string;
1835 	}
1836 
1837 	dentry->__d_name.len = name->len;
1838 	dentry->__d_name.hash = name->hash;
1839 	memcpy(dname, name->name, name->len);
1840 	dname[name->len] = 0;
1841 
1842 	/* Make sure we always see the terminating NUL character */
1843 	smp_store_release(&dentry->__d_name.name, dname); /* ^^^ */
1844 
1845 	dentry->d_flags = 0;
1846 	lockref_init(&dentry->d_lockref);
1847 	seqcount_spinlock_init(&dentry->d_seq, &dentry->d_lock);
1848 	dentry->d_inode = NULL;
1849 	dentry->d_parent = dentry;
1850 	dentry->d_sb = sb;
1851 	dentry->d_op = sb->__s_d_op;
1852 	dentry->d_flags = sb->s_d_flags;
1853 	dentry->d_fsdata = NULL;
1854 	INIT_HLIST_BL_NODE(&dentry->d_hash);
1855 	INIT_LIST_HEAD(&dentry->d_lru);
1856 	INIT_HLIST_HEAD(&dentry->d_children);
1857 	dentry->waiters = NULL;
1858 	INIT_HLIST_NODE(&dentry->d_sib);
1859 
1860 	if (dentry->d_op && dentry->d_op->d_init) {
1861 		err = dentry->d_op->d_init(dentry);
1862 		if (err) {
1863 			if (dname_external(dentry))
1864 				kfree(external_name(dentry));
1865 			kmem_cache_free(dentry_cache, dentry);
1866 			return NULL;
1867 		}
1868 	}
1869 
1870 	this_cpu_inc(nr_dentry);
1871 
1872 	return dentry;
1873 }
1874 
1875 /**
1876  * d_alloc - allocate a dcache entry
1877  * @parent: parent of entry to allocate
1878  * @name: qstr of the name
1879  *
1880  * Allocates a dentry. It returns %NULL if there is insufficient memory
1881  * available. On a success the dentry is returned. The name passed in is
1882  * copied and the copy passed in may be reused after this call.
1883  */
1884 struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1885 {
1886 	struct dentry *dentry = __d_alloc(parent->d_sb, name);
1887 	if (!dentry)
1888 		return NULL;
1889 	spin_lock(&parent->d_lock);
1890 	/*
1891 	 * don't need child lock because it is not subject
1892 	 * to concurrency here
1893 	 */
1894 	dentry->d_parent = dget_dlock(parent);
1895 	hlist_add_head(&dentry->d_sib, &parent->d_children);
1896 	spin_unlock(&parent->d_lock);
1897 
1898 	return dentry;
1899 }
1900 EXPORT_SYMBOL(d_alloc);
1901 
1902 struct dentry *d_alloc_anon(struct super_block *sb)
1903 {
1904 	return __d_alloc(sb, NULL);
1905 }
1906 EXPORT_SYMBOL(d_alloc_anon);
1907 
1908 struct dentry *d_alloc_cursor(struct dentry * parent)
1909 {
1910 	struct dentry *dentry = d_alloc_anon(parent->d_sb);
1911 	if (dentry) {
1912 		dentry->d_flags |= DCACHE_DENTRY_CURSOR;
1913 		dentry->d_parent = dget(parent);
1914 	}
1915 	return dentry;
1916 }
1917 
1918 /**
1919  * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
1920  * @sb: the superblock
1921  * @name: qstr of the name
1922  *
1923  * For a filesystem that just pins its dentries in memory and never
1924  * performs lookups at all, return an unhashed IS_ROOT dentry.
1925  * This is used for pipes, sockets et.al. - the stuff that should
1926  * never be anyone's children or parents.  Unlike all other
1927  * dentries, these will not have RCU delay between dropping the
1928  * last reference and freeing them.
1929  *
1930  * The only user is alloc_file_pseudo() and that's what should
1931  * be considered a public interface.  Don't use directly.
1932  */
1933 struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
1934 {
1935 	static const struct dentry_operations anon_ops = {
1936 		.d_dname = simple_dname
1937 	};
1938 	struct dentry *dentry = __d_alloc(sb, name);
1939 	if (likely(dentry)) {
1940 		dentry->d_flags |= DCACHE_NORCU;
1941 		/* d_op_flags(&anon_ops) is 0 */
1942 		if (!dentry->d_op)
1943 			dentry->d_op = &anon_ops;
1944 	}
1945 	return dentry;
1946 }
1947 
1948 struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1949 {
1950 	struct qstr q;
1951 
1952 	q.name = name;
1953 	q.hash_len = hashlen_string(parent, name);
1954 	return d_alloc(parent, &q);
1955 }
1956 EXPORT_SYMBOL(d_alloc_name);
1957 
1958 #define DCACHE_OP_FLAGS \
1959 	(DCACHE_OP_HASH | DCACHE_OP_COMPARE | DCACHE_OP_REVALIDATE | \
1960 	 DCACHE_OP_WEAK_REVALIDATE | DCACHE_OP_DELETE | DCACHE_OP_PRUNE | \
1961 	 DCACHE_OP_REAL)
1962 
1963 static unsigned int d_op_flags(const struct dentry_operations *op)
1964 {
1965 	unsigned int flags = 0;
1966 	if (op) {
1967 		if (op->d_hash)
1968 			flags |= DCACHE_OP_HASH;
1969 		if (op->d_compare)
1970 			flags |= DCACHE_OP_COMPARE;
1971 		if (op->d_revalidate)
1972 			flags |= DCACHE_OP_REVALIDATE;
1973 		if (op->d_weak_revalidate)
1974 			flags |= DCACHE_OP_WEAK_REVALIDATE;
1975 		if (op->d_delete)
1976 			flags |= DCACHE_OP_DELETE;
1977 		if (op->d_prune)
1978 			flags |= DCACHE_OP_PRUNE;
1979 		if (op->d_real)
1980 			flags |= DCACHE_OP_REAL;
1981 	}
1982 	return flags;
1983 }
1984 
1985 static void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1986 {
1987 	unsigned int flags = d_op_flags(op);
1988 	WARN_ON_ONCE(dentry->d_op);
1989 	WARN_ON_ONCE(dentry->d_flags & DCACHE_OP_FLAGS);
1990 	dentry->d_op = op;
1991 	if (flags)
1992 		dentry->d_flags |= flags;
1993 }
1994 
1995 void set_default_d_op(struct super_block *s, const struct dentry_operations *ops)
1996 {
1997 	unsigned int flags = d_op_flags(ops);
1998 	s->__s_d_op = ops;
1999 	s->s_d_flags = (s->s_d_flags & ~DCACHE_OP_FLAGS) | flags;
2000 }
2001 EXPORT_SYMBOL(set_default_d_op);
2002 
2003 static unsigned d_flags_for_inode(struct inode *inode)
2004 {
2005 	unsigned add_flags = DCACHE_REGULAR_TYPE;
2006 
2007 	if (!inode)
2008 		return DCACHE_MISS_TYPE;
2009 
2010 	if (S_ISDIR(inode->i_mode)) {
2011 		add_flags = DCACHE_DIRECTORY_TYPE;
2012 		if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
2013 			if (unlikely(!inode->i_op->lookup))
2014 				add_flags = DCACHE_AUTODIR_TYPE;
2015 			else
2016 				inode->i_opflags |= IOP_LOOKUP;
2017 		}
2018 		goto type_determined;
2019 	}
2020 
2021 	if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
2022 		if (unlikely(inode->i_op->get_link)) {
2023 			add_flags = DCACHE_SYMLINK_TYPE;
2024 			goto type_determined;
2025 		}
2026 		inode->i_opflags |= IOP_NOFOLLOW;
2027 	}
2028 
2029 	if (unlikely(!S_ISREG(inode->i_mode)))
2030 		add_flags = DCACHE_SPECIAL_TYPE;
2031 
2032 type_determined:
2033 	if (unlikely(IS_AUTOMOUNT(inode)))
2034 		add_flags |= DCACHE_NEED_AUTOMOUNT;
2035 	return add_flags;
2036 }
2037 
2038 static void __d_instantiate(struct dentry *dentry, struct inode *inode)
2039 {
2040 	unsigned add_flags = d_flags_for_inode(inode);
2041 	WARN_ON(d_in_lookup(dentry));
2042 
2043 	/*
2044 	 * The negative counter only tracks dentries on the LRU. Don't dec if
2045 	 * d_lru is on another list.
2046 	 */
2047 	if ((dentry->d_flags &
2048 	     (DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST)
2049 		this_cpu_dec(nr_dentry_negative);
2050 	hlist_add_head(&dentry->d_alias, &inode->i_dentry);
2051 	raw_write_seqcount_begin(&dentry->d_seq);
2052 	__d_set_inode_and_type(dentry, inode, add_flags);
2053 	raw_write_seqcount_end(&dentry->d_seq);
2054 	fsnotify_update_flags(dentry);
2055 }
2056 
2057 /**
2058  * d_instantiate - fill in inode information for a dentry
2059  * @entry: dentry to complete
2060  * @inode: inode to attach to this dentry
2061  *
2062  * Fill in inode information in the entry.
2063  *
2064  * This turns negative dentries into productive full members
2065  * of society.
2066  *
2067  * NOTE! This assumes that the inode count has been incremented
2068  * (or otherwise set) by the caller to indicate that it is now
2069  * in use by the dcache.
2070  */
2071 
2072 void d_instantiate(struct dentry *entry, struct inode * inode)
2073 {
2074 	BUG_ON(d_really_is_positive(entry));
2075 	if (inode) {
2076 		security_d_instantiate(entry, inode);
2077 		spin_lock(&inode->i_lock);
2078 		spin_lock(&entry->d_lock);
2079 		__d_instantiate(entry, inode);
2080 		spin_unlock(&entry->d_lock);
2081 		spin_unlock(&inode->i_lock);
2082 	}
2083 }
2084 EXPORT_SYMBOL(d_instantiate);
2085 
2086 /*
2087  * This should be equivalent to d_instantiate() + unlock_new_inode(),
2088  * with lockdep-related part of unlock_new_inode() done before
2089  * anything else.  Use that instead of open-coding d_instantiate()/
2090  * unlock_new_inode() combinations.
2091  */
2092 void d_instantiate_new(struct dentry *entry, struct inode *inode)
2093 {
2094 	BUG_ON(d_really_is_positive(entry));
2095 	BUG_ON(!inode);
2096 	lockdep_annotate_inode_mutex_key(inode);
2097 	security_d_instantiate(entry, inode);
2098 	spin_lock(&inode->i_lock);
2099 	spin_lock(&entry->d_lock);
2100 	__d_instantiate(entry, inode);
2101 	spin_unlock(&entry->d_lock);
2102 	WARN_ON(!(inode_state_read(inode) & I_NEW));
2103 	inode_state_clear(inode, I_NEW | I_CREATING);
2104 	inode_wake_up_bit(inode, __I_NEW);
2105 	spin_unlock(&inode->i_lock);
2106 }
2107 EXPORT_SYMBOL(d_instantiate_new);
2108 
2109 struct dentry *d_make_root(struct inode *root_inode)
2110 {
2111 	struct dentry *res = NULL;
2112 
2113 	if (root_inode) {
2114 		res = d_alloc_anon(root_inode->i_sb);
2115 		if (res)
2116 			d_instantiate(res, root_inode);
2117 		else
2118 			iput(root_inode);
2119 	}
2120 	return res;
2121 }
2122 EXPORT_SYMBOL(d_make_root);
2123 
2124 static struct dentry *__d_obtain_alias(struct inode *inode, bool disconnected)
2125 {
2126 	struct super_block *sb;
2127 	struct dentry *new, *res;
2128 
2129 	if (!inode)
2130 		return ERR_PTR(-ESTALE);
2131 	if (IS_ERR(inode))
2132 		return ERR_CAST(inode);
2133 
2134 	sb = inode->i_sb;
2135 
2136 	res = d_find_any_alias(inode); /* existing alias? */
2137 	if (res)
2138 		goto out;
2139 
2140 	new = d_alloc_anon(sb);
2141 	if (!new) {
2142 		res = ERR_PTR(-ENOMEM);
2143 		goto out;
2144 	}
2145 
2146 	security_d_instantiate(new, inode);
2147 	spin_lock(&inode->i_lock);
2148 	res = __d_find_any_alias(inode); /* recheck under lock */
2149 	if (likely(!res)) { /* still no alias, attach a disconnected dentry */
2150 		unsigned add_flags = d_flags_for_inode(inode);
2151 
2152 		if (disconnected)
2153 			add_flags |= DCACHE_DISCONNECTED;
2154 
2155 		spin_lock(&new->d_lock);
2156 		__d_set_inode_and_type(new, inode, add_flags);
2157 		hlist_add_head(&new->d_alias, &inode->i_dentry);
2158 		if (!disconnected) {
2159 			hlist_bl_lock(&sb->s_roots);
2160 			hlist_bl_add_head(&new->d_hash, &sb->s_roots);
2161 			hlist_bl_unlock(&sb->s_roots);
2162 		}
2163 		spin_unlock(&new->d_lock);
2164 		spin_unlock(&inode->i_lock);
2165 		inode = NULL; /* consumed by new->d_inode */
2166 		res = new;
2167 	} else {
2168 		spin_unlock(&inode->i_lock);
2169 		dput(new);
2170 	}
2171 
2172  out:
2173 	iput(inode);
2174 	return res;
2175 }
2176 
2177 /**
2178  * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
2179  * @inode: inode to allocate the dentry for
2180  *
2181  * Obtain a dentry for an inode resulting from NFS filehandle conversion or
2182  * similar open by handle operations.  The returned dentry may be anonymous,
2183  * or may have a full name (if the inode was already in the cache).
2184  *
2185  * When called on a directory inode, we must ensure that the inode only ever
2186  * has one dentry.  If a dentry is found, that is returned instead of
2187  * allocating a new one.
2188  *
2189  * On successful return, the reference to the inode has been transferred
2190  * to the dentry.  In case of an error the reference on the inode is released.
2191  * To make it easier to use in export operations a %NULL or IS_ERR inode may
2192  * be passed in and the error will be propagated to the return value,
2193  * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
2194  */
2195 struct dentry *d_obtain_alias(struct inode *inode)
2196 {
2197 	return __d_obtain_alias(inode, true);
2198 }
2199 EXPORT_SYMBOL(d_obtain_alias);
2200 
2201 /**
2202  * d_obtain_root - find or allocate a dentry for a given inode
2203  * @inode: inode to allocate the dentry for
2204  *
2205  * Obtain an IS_ROOT dentry for the root of a filesystem.
2206  *
2207  * We must ensure that directory inodes only ever have one dentry.  If a
2208  * dentry is found, that is returned instead of allocating a new one.
2209  *
2210  * On successful return, the reference to the inode has been transferred
2211  * to the dentry.  In case of an error the reference on the inode is
2212  * released.  A %NULL or IS_ERR inode may be passed in and will be the
2213  * error will be propagate to the return value, with a %NULL @inode
2214  * replaced by ERR_PTR(-ESTALE).
2215  */
2216 struct dentry *d_obtain_root(struct inode *inode)
2217 {
2218 	return __d_obtain_alias(inode, false);
2219 }
2220 EXPORT_SYMBOL(d_obtain_root);
2221 
2222 /**
2223  * d_add_ci - lookup or allocate new dentry with case-exact name
2224  * @dentry: the negative dentry that was passed to the parent's lookup func
2225  * @inode:  the inode case-insensitive lookup has found
2226  * @name:   the case-exact name to be associated with the returned dentry
2227  *
2228  * This is to avoid filling the dcache with case-insensitive names to the
2229  * same inode, only the actual correct case is stored in the dcache for
2230  * case-insensitive filesystems.
2231  *
2232  * For a case-insensitive lookup match and if the case-exact dentry
2233  * already exists in the dcache, use it and return it.
2234  *
2235  * If no entry exists with the exact case name, allocate new dentry with
2236  * the exact case, and return the spliced entry.
2237  */
2238 struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
2239 			struct qstr *name)
2240 {
2241 	struct dentry *found, *res;
2242 
2243 	/*
2244 	 * First check if a dentry matching the name already exists,
2245 	 * if not go ahead and create it now.
2246 	 */
2247 	found = d_hash_and_lookup(dentry->d_parent, name);
2248 	if (found) {
2249 		iput(inode);
2250 		return found;
2251 	}
2252 	if (d_in_lookup(dentry)) {
2253 		found = d_alloc_parallel(dentry->d_parent, name,
2254 					dentry->d_wait);
2255 		if (IS_ERR(found) || !d_in_lookup(found)) {
2256 			iput(inode);
2257 			return found;
2258 		}
2259 	} else {
2260 		found = d_alloc(dentry->d_parent, name);
2261 		if (!found) {
2262 			iput(inode);
2263 			return ERR_PTR(-ENOMEM);
2264 		}
2265 	}
2266 	res = d_splice_alias(inode, found);
2267 	if (res) {
2268 		d_lookup_done(found);
2269 		dput(found);
2270 		return res;
2271 	}
2272 	return found;
2273 }
2274 EXPORT_SYMBOL(d_add_ci);
2275 
2276 /**
2277  * d_same_name - compare dentry name with case-exact name
2278  * @dentry: the negative dentry that was passed to the parent's lookup func
2279  * @parent: parent dentry
2280  * @name:   the case-exact name to be associated with the returned dentry
2281  *
2282  * Return: true if names are same, or false
2283  */
2284 bool d_same_name(const struct dentry *dentry, const struct dentry *parent,
2285 		 const struct qstr *name)
2286 {
2287 	if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
2288 		if (dentry->d_name.len != name->len)
2289 			return false;
2290 		return dentry_cmp(dentry, name->name, name->len) == 0;
2291 	}
2292 	return parent->d_op->d_compare(dentry,
2293 				       dentry->d_name.len, dentry->d_name.name,
2294 				       name) == 0;
2295 }
2296 EXPORT_SYMBOL_GPL(d_same_name);
2297 
2298 /*
2299  * This is __d_lookup_rcu() when the parent dentry has
2300  * DCACHE_OP_COMPARE, which makes things much nastier.
2301  */
2302 static noinline struct dentry *__d_lookup_rcu_op_compare(
2303 	const struct dentry *parent,
2304 	const struct qstr *name,
2305 	unsigned *seqp)
2306 {
2307 	u64 hashlen = name->hash_len;
2308 	struct hlist_bl_head *b = d_hash(hashlen);
2309 	struct hlist_bl_node *node;
2310 	struct dentry *dentry;
2311 
2312 	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2313 		int tlen;
2314 		const char *tname;
2315 		unsigned seq;
2316 
2317 seqretry:
2318 		seq = raw_seqcount_begin(&dentry->d_seq);
2319 		if (dentry->d_parent != parent)
2320 			continue;
2321 		if (d_unhashed(dentry))
2322 			continue;
2323 		if (dentry->d_name.hash != hashlen_hash(hashlen))
2324 			continue;
2325 		tlen = dentry->d_name.len;
2326 		tname = dentry->d_name.name;
2327 		/* we want a consistent (name,len) pair */
2328 		if (read_seqcount_retry(&dentry->d_seq, seq)) {
2329 			cpu_relax();
2330 			goto seqretry;
2331 		}
2332 		if (parent->d_op->d_compare(dentry, tlen, tname, name) != 0)
2333 			continue;
2334 		*seqp = seq;
2335 		return dentry;
2336 	}
2337 	return NULL;
2338 }
2339 
2340 /**
2341  * __d_lookup_rcu - search for a dentry (racy, store-free)
2342  * @parent: parent dentry
2343  * @name: qstr of name we wish to find
2344  * @seqp: returns d_seq value at the point where the dentry was found
2345  * Returns: dentry, or NULL
2346  *
2347  * __d_lookup_rcu is the dcache lookup function for rcu-walk name
2348  * resolution (store-free path walking) design described in
2349  * Documentation/filesystems/path-lookup.txt.
2350  *
2351  * This is not to be used outside core vfs.
2352  *
2353  * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
2354  * held, and rcu_read_lock held. The returned dentry must not be stored into
2355  * without taking d_lock and checking d_seq sequence count against @seq
2356  * returned here.
2357  *
2358  * Alternatively, __d_lookup_rcu may be called again to look up the child of
2359  * the returned dentry, so long as its parent's seqlock is checked after the
2360  * child is looked up. Thus, an interlocking stepping of sequence lock checks
2361  * is formed, giving integrity down the path walk.
2362  *
2363  * NOTE! The caller *has* to check the resulting dentry against the sequence
2364  * number we've returned before using any of the resulting dentry state!
2365  */
2366 struct dentry *__d_lookup_rcu(const struct dentry *parent,
2367 				const struct qstr *name,
2368 				unsigned *seqp)
2369 {
2370 	u64 hashlen = name->hash_len;
2371 	const unsigned char *str = name->name;
2372 	struct hlist_bl_head *b = d_hash(hashlen);
2373 	struct hlist_bl_node *node;
2374 	struct dentry *dentry;
2375 
2376 	/*
2377 	 * Note: There is significant duplication with __d_lookup_rcu which is
2378 	 * required to prevent single threaded performance regressions
2379 	 * especially on architectures where smp_rmb (in seqcounts) are costly.
2380 	 * Keep the two functions in sync.
2381 	 */
2382 
2383 	if (unlikely(parent->d_flags & DCACHE_OP_COMPARE))
2384 		return __d_lookup_rcu_op_compare(parent, name, seqp);
2385 
2386 	/*
2387 	 * The hash list is protected using RCU.
2388 	 *
2389 	 * Carefully use d_seq when comparing a candidate dentry, to avoid
2390 	 * races with d_move().
2391 	 *
2392 	 * It is possible that concurrent renames can mess up our list
2393 	 * walk here and result in missing our dentry, resulting in the
2394 	 * false-negative result. d_lookup() protects against concurrent
2395 	 * renames using rename_lock seqlock.
2396 	 *
2397 	 * See Documentation/filesystems/path-lookup.txt for more details.
2398 	 */
2399 	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2400 		unsigned seq;
2401 
2402 		/*
2403 		 * The dentry sequence count protects us from concurrent
2404 		 * renames, and thus protects parent and name fields.
2405 		 *
2406 		 * The caller must perform a seqcount check in order
2407 		 * to do anything useful with the returned dentry.
2408 		 *
2409 		 * NOTE! We do a "raw" seqcount_begin here. That means that
2410 		 * we don't wait for the sequence count to stabilize if it
2411 		 * is in the middle of a sequence change. If we do the slow
2412 		 * dentry compare, we will do seqretries until it is stable,
2413 		 * and if we end up with a successful lookup, we actually
2414 		 * want to exit RCU lookup anyway.
2415 		 *
2416 		 * Note that raw_seqcount_begin still *does* smp_rmb(), so
2417 		 * we are still guaranteed NUL-termination of ->d_name.name.
2418 		 */
2419 		seq = raw_seqcount_begin(&dentry->d_seq);
2420 		if (dentry->d_parent != parent)
2421 			continue;
2422 		if (dentry->d_name.hash_len != hashlen)
2423 			continue;
2424 		if (unlikely(dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0))
2425 			continue;
2426 		/*
2427 		 * Check for the dentry being unhashed.
2428 		 *
2429 		 * As tempting as it is, we *can't* skip it because of a race window
2430 		 * between us finding the dentry before it gets unhashed and loading
2431 		 * the sequence counter after unhashing is finished.
2432 		 *
2433 		 * We can at least predict on it.
2434 		 */
2435 		if (unlikely(d_unhashed(dentry)))
2436 			continue;
2437 		*seqp = seq;
2438 		return dentry;
2439 	}
2440 	return NULL;
2441 }
2442 
2443 /**
2444  * d_lookup - search for a dentry
2445  * @parent: parent dentry
2446  * @name: qstr of name we wish to find
2447  * Returns: dentry, or NULL
2448  *
2449  * d_lookup searches the children of the parent dentry for the name in
2450  * question. If the dentry is found its reference count is incremented and the
2451  * dentry is returned. The caller must use dput to free the entry when it has
2452  * finished using it. %NULL is returned if the dentry does not exist.
2453  */
2454 struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
2455 {
2456 	struct dentry *dentry;
2457 	unsigned seq;
2458 
2459 	do {
2460 		seq = read_seqbegin(&rename_lock);
2461 		dentry = __d_lookup(parent, name);
2462 		if (dentry)
2463 			break;
2464 	} while (read_seqretry(&rename_lock, seq));
2465 	return dentry;
2466 }
2467 EXPORT_SYMBOL(d_lookup);
2468 
2469 /**
2470  * __d_lookup - search for a dentry (racy)
2471  * @parent: parent dentry
2472  * @name: qstr of name we wish to find
2473  * Returns: dentry, or NULL
2474  *
2475  * __d_lookup is like d_lookup, however it may (rarely) return a
2476  * false-negative result due to unrelated rename activity.
2477  *
2478  * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
2479  * however it must be used carefully, eg. with a following d_lookup in
2480  * the case of failure.
2481  *
2482  * __d_lookup callers must be commented.
2483  */
2484 struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
2485 {
2486 	unsigned int hash = name->hash;
2487 	struct hlist_bl_head *b = d_hash(hash);
2488 	struct hlist_bl_node *node;
2489 	struct dentry *found = NULL;
2490 	struct dentry *dentry;
2491 
2492 	/*
2493 	 * Note: There is significant duplication with __d_lookup_rcu which is
2494 	 * required to prevent single threaded performance regressions
2495 	 * especially on architectures where smp_rmb (in seqcounts) are costly.
2496 	 * Keep the two functions in sync.
2497 	 */
2498 
2499 	/*
2500 	 * The hash list is protected using RCU.
2501 	 *
2502 	 * Take d_lock when comparing a candidate dentry, to avoid races
2503 	 * with d_move().
2504 	 *
2505 	 * It is possible that concurrent renames can mess up our list
2506 	 * walk here and result in missing our dentry, resulting in the
2507 	 * false-negative result. d_lookup() protects against concurrent
2508 	 * renames using rename_lock seqlock.
2509 	 *
2510 	 * See Documentation/filesystems/path-lookup.txt for more details.
2511 	 */
2512 	rcu_read_lock();
2513 
2514 	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2515 
2516 		if (dentry->d_name.hash != hash)
2517 			continue;
2518 
2519 		spin_lock(&dentry->d_lock);
2520 		if (dentry->d_parent != parent)
2521 			goto next;
2522 		if (d_unhashed(dentry))
2523 			goto next;
2524 
2525 		if (!d_same_name(dentry, parent, name))
2526 			goto next;
2527 
2528 		dentry->d_lockref.count++;
2529 		found = dentry;
2530 		spin_unlock(&dentry->d_lock);
2531 		break;
2532 next:
2533 		spin_unlock(&dentry->d_lock);
2534  	}
2535  	rcu_read_unlock();
2536 
2537  	return found;
2538 }
2539 
2540 /**
2541  * d_hash_and_lookup - hash the qstr then search for a dentry
2542  * @dir: Directory to search in
2543  * @name: qstr of name we wish to find
2544  *
2545  * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
2546  */
2547 struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
2548 {
2549 	/*
2550 	 * Check for a fs-specific hash function. Note that we must
2551 	 * calculate the standard hash first, as the d_op->d_hash()
2552 	 * routine may choose to leave the hash value unchanged.
2553 	 */
2554 	name->hash = full_name_hash(dir, name->name, name->len);
2555 	if (dir->d_flags & DCACHE_OP_HASH) {
2556 		int err = dir->d_op->d_hash(dir, name);
2557 		if (unlikely(err < 0))
2558 			return ERR_PTR(err);
2559 	}
2560 	return d_lookup(dir, name);
2561 }
2562 
2563 /*
2564  * When a file is deleted, we have two options:
2565  * - turn this dentry into a negative dentry
2566  * - unhash this dentry and free it.
2567  *
2568  * Usually, we want to just turn this into
2569  * a negative dentry, but if anybody else is
2570  * currently using the dentry or the inode
2571  * we can't do that and we fall back on removing
2572  * it from the hash queues and waiting for
2573  * it to be deleted later when it has no users
2574  */
2575 
2576 /**
2577  * d_delete - delete a dentry
2578  * @dentry: The dentry to delete
2579  *
2580  * Turn the dentry into a negative dentry if possible, otherwise
2581  * remove it from the hash queues so it can be deleted later
2582  */
2583 
2584 void d_delete(struct dentry * dentry)
2585 {
2586 	struct inode *inode = dentry->d_inode;
2587 
2588 	spin_lock(&inode->i_lock);
2589 	spin_lock(&dentry->d_lock);
2590 	/*
2591 	 * Are we the only user?
2592 	 */
2593 	if (dentry->d_lockref.count == 1) {
2594 		if (dentry_negative_policy)
2595 			__d_drop(dentry);
2596 		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
2597 		dentry_unlink_inode(dentry);
2598 	} else {
2599 		__d_drop(dentry);
2600 		spin_unlock(&dentry->d_lock);
2601 		spin_unlock(&inode->i_lock);
2602 	}
2603 }
2604 EXPORT_SYMBOL(d_delete);
2605 
2606 static void __d_rehash(struct dentry *entry)
2607 {
2608 	struct hlist_bl_head *b = d_hash(entry->d_name.hash);
2609 
2610 	hlist_bl_lock(b);
2611 	hlist_bl_add_head_rcu(&entry->d_hash, b);
2612 	hlist_bl_unlock(b);
2613 }
2614 
2615 /**
2616  * d_rehash - add an entry back to the hash
2617  * @entry: dentry to add to the hash
2618  *
2619  * Adds a dentry to the hash according to its name.
2620  */
2621 
2622 void d_rehash(struct dentry * entry)
2623 {
2624 	spin_lock(&entry->d_lock);
2625 	__d_rehash(entry);
2626 	spin_unlock(&entry->d_lock);
2627 }
2628 EXPORT_SYMBOL(d_rehash);
2629 
2630 static inline unsigned start_dir_add(struct inode *dir)
2631 {
2632 	preempt_disable_nested();
2633 	for (;;) {
2634 		unsigned n = READ_ONCE(dir->i_dir_seq);
2635 		if (!(n & 1) && try_cmpxchg(&dir->i_dir_seq, &n, n + 1))
2636 			return n;
2637 		cpu_relax();
2638 	}
2639 }
2640 
2641 static inline void end_dir_add(struct inode *dir, unsigned int n,
2642 			       wait_queue_head_t *d_wait)
2643 {
2644 	smp_store_release(&dir->i_dir_seq, n + 2);
2645 	preempt_enable_nested();
2646 	if (wq_has_sleeper(d_wait))
2647 		wake_up_all(d_wait);
2648 }
2649 
2650 static void d_wait_lookup(struct dentry *dentry)
2651 {
2652 	if (d_in_lookup(dentry)) {
2653 		DECLARE_WAITQUEUE(wait, current);
2654 		add_wait_queue(dentry->d_wait, &wait);
2655 		do {
2656 			set_current_state(TASK_UNINTERRUPTIBLE);
2657 			spin_unlock(&dentry->d_lock);
2658 			schedule();
2659 			spin_lock(&dentry->d_lock);
2660 		} while (d_in_lookup(dentry));
2661 	}
2662 }
2663 
2664 struct dentry *d_alloc_parallel(struct dentry *parent,
2665 				const struct qstr *name,
2666 				wait_queue_head_t *wq)
2667 {
2668 	unsigned int hash = name->hash;
2669 	struct hlist_bl_head *b = in_lookup_hash(parent, hash);
2670 	struct hlist_bl_node *node;
2671 	struct dentry *new = __d_alloc(parent->d_sb, name);
2672 	struct dentry *dentry;
2673 	unsigned seq, r_seq, d_seq;
2674 
2675 	if (unlikely(!new))
2676 		return ERR_PTR(-ENOMEM);
2677 
2678 	new->d_flags |= DCACHE_PAR_LOOKUP;
2679 	spin_lock(&parent->d_lock);
2680 	new->d_parent = dget_dlock(parent);
2681 	hlist_add_head(&new->d_sib, &parent->d_children);
2682 	if (parent->d_flags & DCACHE_DISCONNECTED)
2683 		new->d_flags |= DCACHE_DISCONNECTED;
2684 	spin_unlock(&parent->d_lock);
2685 
2686 retry:
2687 	rcu_read_lock();
2688 	seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
2689 	r_seq = read_seqbegin(&rename_lock);
2690 	dentry = __d_lookup_rcu(parent, name, &d_seq);
2691 	if (unlikely(dentry)) {
2692 		if (!lockref_get_not_dead(&dentry->d_lockref)) {
2693 			rcu_read_unlock();
2694 			goto retry;
2695 		}
2696 		if (read_seqcount_retry(&dentry->d_seq, d_seq)) {
2697 			rcu_read_unlock();
2698 			dput(dentry);
2699 			goto retry;
2700 		}
2701 		rcu_read_unlock();
2702 		dput(new);
2703 		return dentry;
2704 	}
2705 	if (unlikely(read_seqretry(&rename_lock, r_seq))) {
2706 		rcu_read_unlock();
2707 		goto retry;
2708 	}
2709 
2710 	if (unlikely(seq & 1)) {
2711 		rcu_read_unlock();
2712 		goto retry;
2713 	}
2714 
2715 	hlist_bl_lock(b);
2716 	if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) {
2717 		hlist_bl_unlock(b);
2718 		rcu_read_unlock();
2719 		goto retry;
2720 	}
2721 	/*
2722 	 * No changes for the parent since the beginning of d_lookup().
2723 	 * Since all removals from the chain happen with hlist_bl_lock(),
2724 	 * any potential in-lookup matches are going to stay here until
2725 	 * we unlock the chain.  All fields are stable in everything
2726 	 * we encounter.
2727 	 */
2728 	hlist_bl_for_each_entry(dentry, node, b, d_in_lookup_hash) {
2729 		if (dentry->d_name.hash != hash)
2730 			continue;
2731 		if (dentry->d_parent != parent)
2732 			continue;
2733 		if (!d_same_name(dentry, parent, name))
2734 			continue;
2735 		hlist_bl_unlock(b);
2736 		/* now we can try to grab a reference */
2737 		if (!lockref_get_not_dead(&dentry->d_lockref)) {
2738 			rcu_read_unlock();
2739 			goto retry;
2740 		}
2741 
2742 		rcu_read_unlock();
2743 		/*
2744 		 * somebody is likely to be still doing lookup for it;
2745 		 * wait for them to finish
2746 		 */
2747 		spin_lock(&dentry->d_lock);
2748 		d_wait_lookup(dentry);
2749 		/*
2750 		 * it's not in-lookup anymore; in principle we should repeat
2751 		 * everything from dcache lookup, but it's likely to be what
2752 		 * d_lookup() would've found anyway.  If it is, just return it;
2753 		 * otherwise we really have to repeat the whole thing.
2754 		 */
2755 		if (unlikely(dentry->d_name.hash != hash))
2756 			goto mismatch;
2757 		if (unlikely(dentry->d_parent != parent))
2758 			goto mismatch;
2759 		if (unlikely(d_unhashed(dentry)))
2760 			goto mismatch;
2761 		if (unlikely(!d_same_name(dentry, parent, name)))
2762 			goto mismatch;
2763 		/* OK, it *is* a hashed match; return it */
2764 		spin_unlock(&dentry->d_lock);
2765 		dput(new);
2766 		return dentry;
2767 	}
2768 	rcu_read_unlock();
2769 	new->d_wait = wq;
2770 	hlist_bl_add_head(&new->d_in_lookup_hash, b);
2771 	hlist_bl_unlock(b);
2772 	return new;
2773 mismatch:
2774 	spin_unlock(&dentry->d_lock);
2775 	dput(dentry);
2776 	goto retry;
2777 }
2778 EXPORT_SYMBOL(d_alloc_parallel);
2779 
2780 /*
2781  * - Unhash the dentry
2782  * - Retrieve and clear the waitqueue head in dentry
2783  * - Return the waitqueue head
2784  */
2785 static wait_queue_head_t *__d_lookup_unhash(struct dentry *dentry)
2786 {
2787 	wait_queue_head_t *d_wait;
2788 	struct hlist_bl_head *b;
2789 
2790 	lockdep_assert_held(&dentry->d_lock);
2791 
2792 	b = in_lookup_hash(dentry->d_parent, dentry->d_name.hash);
2793 	hlist_bl_lock(b);
2794 	dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
2795 	__hlist_bl_del(&dentry->d_in_lookup_hash);
2796 	d_wait = dentry->d_wait;
2797 	dentry->d_wait = NULL;
2798 	hlist_bl_unlock(b);
2799 	dentry->waiters = NULL;
2800 	INIT_LIST_HEAD(&dentry->d_lru);
2801 	return d_wait;
2802 }
2803 
2804 void __d_lookup_unhash_wake(struct dentry *dentry)
2805 {
2806 	spin_lock(&dentry->d_lock);
2807 	wake_up_all(__d_lookup_unhash(dentry));
2808 	spin_unlock(&dentry->d_lock);
2809 }
2810 EXPORT_SYMBOL(__d_lookup_unhash_wake);
2811 
2812 /* inode->i_lock held if inode is non-NULL */
2813 
2814 static inline void __d_add(struct dentry *dentry, struct inode *inode,
2815 			   const struct dentry_operations *ops)
2816 {
2817 	wait_queue_head_t *d_wait;
2818 	struct inode *dir = NULL;
2819 	unsigned n;
2820 	spin_lock(&dentry->d_lock);
2821 	if (unlikely(d_in_lookup(dentry))) {
2822 		dir = dentry->d_parent->d_inode;
2823 		n = start_dir_add(dir);
2824 		d_wait = __d_lookup_unhash(dentry);
2825 	}
2826 	if (unlikely(ops))
2827 		d_set_d_op(dentry, ops);
2828 	if (inode) {
2829 		unsigned add_flags = d_flags_for_inode(inode);
2830 		hlist_add_head(&dentry->d_alias, &inode->i_dentry);
2831 		raw_write_seqcount_begin(&dentry->d_seq);
2832 		__d_set_inode_and_type(dentry, inode, add_flags);
2833 		raw_write_seqcount_end(&dentry->d_seq);
2834 		fsnotify_update_flags(dentry);
2835 	}
2836 	__d_rehash(dentry);
2837 	if (dir)
2838 		end_dir_add(dir, n, d_wait);
2839 	spin_unlock(&dentry->d_lock);
2840 	if (inode)
2841 		spin_unlock(&inode->i_lock);
2842 }
2843 
2844 /**
2845  * d_add - add dentry to hash queues
2846  * @entry: dentry to add
2847  * @inode: The inode to attach to this dentry
2848  *
2849  * This adds the entry to the hash queues and initializes @inode.
2850  * The entry was actually filled in earlier during d_alloc().
2851  */
2852 
2853 void d_add(struct dentry *entry, struct inode *inode)
2854 {
2855 	if (inode) {
2856 		security_d_instantiate(entry, inode);
2857 		spin_lock(&inode->i_lock);
2858 	}
2859 	__d_add(entry, inode, NULL);
2860 }
2861 EXPORT_SYMBOL(d_add);
2862 
2863 struct dentry *d_make_persistent(struct dentry *dentry, struct inode *inode)
2864 {
2865 	WARN_ON(d_really_is_positive(dentry));
2866 	WARN_ON(!inode);
2867 	security_d_instantiate(dentry, inode);
2868 	spin_lock(&inode->i_lock);
2869 	spin_lock(&dentry->d_lock);
2870 	__d_instantiate(dentry, inode);
2871 	dentry->d_flags |= DCACHE_PERSISTENT;
2872 	dget_dlock(dentry);
2873 	if (d_unhashed(dentry))
2874 		__d_rehash(dentry);
2875 	spin_unlock(&dentry->d_lock);
2876 	spin_unlock(&inode->i_lock);
2877 	return dentry;
2878 }
2879 EXPORT_SYMBOL(d_make_persistent);
2880 
2881 static void swap_names(struct dentry *dentry, struct dentry *target)
2882 {
2883 	if (unlikely(dname_external(target))) {
2884 		if (unlikely(dname_external(dentry))) {
2885 			/*
2886 			 * Both external: swap the pointers
2887 			 */
2888 			swap(target->__d_name.name, dentry->__d_name.name);
2889 		} else {
2890 			/*
2891 			 * dentry:internal, target:external.  Steal target's
2892 			 * storage and make target internal.
2893 			 */
2894 			dentry->__d_name.name = target->__d_name.name;
2895 			target->d_shortname = dentry->d_shortname;
2896 			target->__d_name.name = target->d_shortname.string;
2897 		}
2898 	} else {
2899 		if (unlikely(dname_external(dentry))) {
2900 			/*
2901 			 * dentry:external, target:internal.  Give dentry's
2902 			 * storage to target and make dentry internal
2903 			 */
2904 			target->__d_name.name = dentry->__d_name.name;
2905 			dentry->d_shortname = target->d_shortname;
2906 			dentry->__d_name.name = dentry->d_shortname.string;
2907 		} else {
2908 			/*
2909 			 * Both are internal.
2910 			 */
2911 			for (int i = 0; i < DNAME_INLINE_WORDS; i++)
2912 				swap(dentry->d_shortname.words[i],
2913 				     target->d_shortname.words[i]);
2914 		}
2915 	}
2916 	swap(dentry->__d_name.hash_len, target->__d_name.hash_len);
2917 }
2918 
2919 static void copy_name(struct dentry *dentry, struct dentry *target)
2920 {
2921 	struct external_name *old_name = NULL;
2922 	if (unlikely(dname_external(dentry)))
2923 		old_name = external_name(dentry);
2924 	if (unlikely(dname_external(target))) {
2925 		atomic_inc(&external_name(target)->count);
2926 		dentry->__d_name = target->__d_name;
2927 	} else {
2928 		dentry->d_shortname = target->d_shortname;
2929 		dentry->__d_name.name = dentry->d_shortname.string;
2930 		dentry->__d_name.hash_len = target->__d_name.hash_len;
2931 	}
2932 	if (old_name && likely(atomic_dec_and_test(&old_name->count)))
2933 		kfree_rcu(old_name, head);
2934 }
2935 
2936 /*
2937  * __d_move - move a dentry
2938  * @dentry: entry to move
2939  * @target: new dentry
2940  * @exchange: exchange the two dentries
2941  *
2942  * Update the dcache to reflect the move of a file name. Negative dcache
2943  * entries should not be moved in this way. Caller must hold rename_lock, the
2944  * i_rwsem of the source and target directories (exclusively), and the sb->
2945  * s_vfs_rename_mutex if they differ. See lock_rename().
2946  */
2947 static void __d_move(struct dentry *dentry, struct dentry *target,
2948 		     bool exchange)
2949 {
2950 	struct dentry *old_parent, *p;
2951 	wait_queue_head_t *d_wait;
2952 	struct inode *dir = NULL;
2953 	unsigned n;
2954 
2955 	WARN_ON(!dentry->d_inode);
2956 	if (WARN_ON(dentry == target))
2957 		return;
2958 
2959 	BUG_ON(d_ancestor(target, dentry));
2960 	old_parent = dentry->d_parent;
2961 	p = d_ancestor(old_parent, target);
2962 	if (IS_ROOT(dentry)) {
2963 		BUG_ON(p);
2964 		spin_lock(&target->d_parent->d_lock);
2965 	} else if (!p) {
2966 		/* target is not a descendent of dentry->d_parent */
2967 		spin_lock(&target->d_parent->d_lock);
2968 		spin_lock_nested(&old_parent->d_lock, DENTRY_D_LOCK_NESTED);
2969 	} else {
2970 		BUG_ON(p == dentry);
2971 		spin_lock(&old_parent->d_lock);
2972 		if (p != target)
2973 			spin_lock_nested(&target->d_parent->d_lock,
2974 					DENTRY_D_LOCK_NESTED);
2975 	}
2976 	spin_lock_nested(&dentry->d_lock, 2);
2977 	spin_lock_nested(&target->d_lock, 3);
2978 
2979 	if (unlikely(d_in_lookup(target))) {
2980 		dir = target->d_parent->d_inode;
2981 		n = start_dir_add(dir);
2982 		d_wait = __d_lookup_unhash(target);
2983 	}
2984 
2985 	write_seqcount_begin(&dentry->d_seq);
2986 	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
2987 
2988 	/* unhash both */
2989 	if (!d_unhashed(dentry))
2990 		___d_drop(dentry);
2991 	if (!d_unhashed(target))
2992 		___d_drop(target);
2993 
2994 	/* ... and switch them in the tree */
2995 	dentry->d_parent = target->d_parent;
2996 	if (!exchange) {
2997 		copy_name(dentry, target);
2998 		target->d_hash.pprev = NULL;
2999 		dentry->d_parent->d_lockref.count++;
3000 		if (dentry != old_parent) /* wasn't IS_ROOT */
3001 			WARN_ON(!--old_parent->d_lockref.count);
3002 	} else {
3003 		target->d_parent = old_parent;
3004 		swap_names(dentry, target);
3005 		if (!hlist_unhashed(&target->d_sib))
3006 			__hlist_del(&target->d_sib);
3007 		hlist_add_head(&target->d_sib, &target->d_parent->d_children);
3008 		__d_rehash(target);
3009 		fsnotify_update_flags(target);
3010 	}
3011 	if (!hlist_unhashed(&dentry->d_sib))
3012 		__hlist_del(&dentry->d_sib);
3013 	hlist_add_head(&dentry->d_sib, &dentry->d_parent->d_children);
3014 	__d_rehash(dentry);
3015 	fsnotify_update_flags(dentry);
3016 	fscrypt_handle_d_move(dentry);
3017 
3018 	write_seqcount_end(&target->d_seq);
3019 	write_seqcount_end(&dentry->d_seq);
3020 
3021 	if (dir)
3022 		end_dir_add(dir, n, d_wait);
3023 
3024 	if (dentry->d_parent != old_parent)
3025 		spin_unlock(&dentry->d_parent->d_lock);
3026 	if (dentry != old_parent)
3027 		spin_unlock(&old_parent->d_lock);
3028 	spin_unlock(&target->d_lock);
3029 	spin_unlock(&dentry->d_lock);
3030 }
3031 
3032 /*
3033  * d_move - move a dentry
3034  * @dentry: entry to move
3035  * @target: new dentry
3036  *
3037  * Update the dcache to reflect the move of a file name. Negative
3038  * dcache entries should not be moved in this way. See the locking
3039  * requirements for __d_move.
3040  */
3041 void d_move(struct dentry *dentry, struct dentry *target)
3042 {
3043 	write_seqlock(&rename_lock);
3044 	__d_move(dentry, target, false);
3045 	write_sequnlock(&rename_lock);
3046 }
3047 EXPORT_SYMBOL(d_move);
3048 
3049 /*
3050  * d_exchange - exchange two dentries
3051  * @dentry1: first dentry
3052  * @dentry2: second dentry
3053  */
3054 void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
3055 {
3056 	write_seqlock(&rename_lock);
3057 
3058 	WARN_ON(!dentry1->d_inode);
3059 	WARN_ON(!dentry2->d_inode);
3060 	WARN_ON(IS_ROOT(dentry1));
3061 	WARN_ON(IS_ROOT(dentry2));
3062 
3063 	__d_move(dentry1, dentry2, true);
3064 
3065 	write_sequnlock(&rename_lock);
3066 }
3067 EXPORT_SYMBOL(d_exchange);
3068 
3069 /**
3070  * d_ancestor - search for an ancestor
3071  * @p1: ancestor dentry
3072  * @p2: child dentry
3073  *
3074  * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
3075  * an ancestor of p2, else NULL.
3076  */
3077 struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
3078 {
3079 	struct dentry *p;
3080 
3081 	for (p = p2; !IS_ROOT(p); p = p->d_parent) {
3082 		if (p->d_parent == p1)
3083 			return p;
3084 	}
3085 	return NULL;
3086 }
3087 
3088 /*
3089  * This helper attempts to cope with remotely renamed directories
3090  *
3091  * It assumes that the caller is already holding
3092  * dentry->d_parent->d_inode->i_rwsem, and rename_lock
3093  *
3094  * Note: If ever the locking in lock_rename() changes, then please
3095  * remember to update this too...
3096  */
3097 static int __d_unalias(struct dentry *dentry, struct dentry *alias)
3098 {
3099 	struct mutex *m1 = NULL;
3100 	struct rw_semaphore *m2 = NULL;
3101 	int ret = -ESTALE;
3102 
3103 	/* If alias and dentry share a parent, then no extra locks required */
3104 	if (alias->d_parent == dentry->d_parent)
3105 		goto out_unalias;
3106 
3107 	/* See lock_rename() */
3108 	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
3109 		goto out_err;
3110 	m1 = &dentry->d_sb->s_vfs_rename_mutex;
3111 	if (!inode_trylock_shared(alias->d_parent->d_inode))
3112 		goto out_err;
3113 	m2 = &alias->d_parent->d_inode->i_rwsem;
3114 out_unalias:
3115 	if (alias->d_op && alias->d_op->d_unalias_trylock &&
3116 	    !alias->d_op->d_unalias_trylock(alias))
3117 		goto out_err;
3118 	__d_move(alias, dentry, false);
3119 	if (alias->d_op && alias->d_op->d_unalias_unlock)
3120 		alias->d_op->d_unalias_unlock(alias);
3121 	ret = 0;
3122 out_err:
3123 	if (m2)
3124 		up_read(m2);
3125 	if (m1)
3126 		mutex_unlock(m1);
3127 	return ret;
3128 }
3129 
3130 struct dentry *d_splice_alias_ops(struct inode *inode, struct dentry *dentry,
3131 				  const struct dentry_operations *ops)
3132 {
3133 	if (IS_ERR(inode))
3134 		return ERR_CAST(inode);
3135 
3136 	BUG_ON(!d_unhashed(dentry));
3137 
3138 	if (!inode)
3139 		goto out;
3140 
3141 	security_d_instantiate(dentry, inode);
3142 	spin_lock(&inode->i_lock);
3143 	if (S_ISDIR(inode->i_mode)) {
3144 		struct dentry *new = __d_find_any_alias(inode);
3145 		if (unlikely(new)) {
3146 			/* The reference to new ensures it remains an alias */
3147 			spin_unlock(&inode->i_lock);
3148 			write_seqlock(&rename_lock);
3149 			if (unlikely(d_ancestor(new, dentry))) {
3150 				write_sequnlock(&rename_lock);
3151 				dput(new);
3152 				new = ERR_PTR(-ELOOP);
3153 				pr_warn_ratelimited(
3154 					"VFS: Lookup of '%s' in %s %s"
3155 					" would have caused loop\n",
3156 					dentry->d_name.name,
3157 					inode->i_sb->s_type->name,
3158 					inode->i_sb->s_id);
3159 			} else if (!IS_ROOT(new)) {
3160 				struct dentry *old_parent = dget(new->d_parent);
3161 				int err = __d_unalias(dentry, new);
3162 				write_sequnlock(&rename_lock);
3163 				if (err) {
3164 					dput(new);
3165 					new = ERR_PTR(err);
3166 				}
3167 				dput(old_parent);
3168 			} else {
3169 				__d_move(new, dentry, false);
3170 				write_sequnlock(&rename_lock);
3171 			}
3172 			iput(inode);
3173 			return new;
3174 		}
3175 	}
3176 out:
3177 	__d_add(dentry, inode, ops);
3178 	return NULL;
3179 }
3180 
3181 /**
3182  * d_splice_alias - splice a disconnected dentry into the tree if one exists
3183  * @inode:  the inode which may have a disconnected dentry
3184  * @dentry: a negative dentry which we want to point to the inode.
3185  *
3186  * If inode is a directory and has an IS_ROOT alias, then d_move that in
3187  * place of the given dentry and return it, else simply d_add the inode
3188  * to the dentry and return NULL.
3189  *
3190  * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
3191  * we should error out: directories can't have multiple aliases.
3192  *
3193  * This is needed in the lookup routine of any filesystem that is exportable
3194  * (via knfsd) so that we can build dcache paths to directories effectively.
3195  *
3196  * If a dentry was found and moved, then it is returned.  Otherwise NULL
3197  * is returned.  This matches the expected return value of ->lookup.
3198  *
3199  * Cluster filesystems may call this function with a negative, hashed dentry.
3200  * In that case, we know that the inode will be a regular file, and also this
3201  * will only occur during atomic_open. So we need to check for the dentry
3202  * being already hashed only in the final case.
3203  */
3204 struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
3205 {
3206 	return d_splice_alias_ops(inode, dentry, NULL);
3207 }
3208 EXPORT_SYMBOL(d_splice_alias);
3209 
3210 /*
3211  * Test whether new_dentry is a subdirectory of old_dentry.
3212  *
3213  * Trivially implemented using the dcache structure
3214  */
3215 
3216 /**
3217  * is_subdir - is new dentry a subdirectory of old_dentry
3218  * @new_dentry: new dentry
3219  * @old_dentry: old dentry
3220  *
3221  * Returns true if new_dentry is a subdirectory of the parent (at any depth).
3222  * Returns false otherwise.
3223  * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
3224  */
3225 
3226 bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
3227 {
3228 	bool subdir;
3229 	unsigned seq;
3230 
3231 	if (new_dentry == old_dentry)
3232 		return true;
3233 
3234 	/* Access d_parent under rcu as d_move() may change it. */
3235 	rcu_read_lock();
3236 	seq = read_seqbegin(&rename_lock);
3237 	subdir = d_ancestor(old_dentry, new_dentry);
3238 	 /* Try lockless once... */
3239 	if (read_seqretry(&rename_lock, seq)) {
3240 		/* ...else acquire lock for progress even on deep chains. */
3241 		read_seqlock_excl(&rename_lock);
3242 		subdir = d_ancestor(old_dentry, new_dentry);
3243 		read_sequnlock_excl(&rename_lock);
3244 	}
3245 	rcu_read_unlock();
3246 	return subdir;
3247 }
3248 EXPORT_SYMBOL(is_subdir);
3249 
3250 void d_mark_tmpfile(struct file *file, struct inode *inode)
3251 {
3252 	struct dentry *dentry = file->f_path.dentry;
3253 
3254 	BUG_ON(dname_external(dentry) ||
3255 		d_really_is_positive(dentry) ||
3256 		!d_unlinked(dentry));
3257 	spin_lock(&dentry->d_parent->d_lock);
3258 	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
3259 	dentry->__d_name.len = sprintf(dentry->d_shortname.string, "#%llu",
3260 				(unsigned long long)inode->i_ino);
3261 	spin_unlock(&dentry->d_lock);
3262 	spin_unlock(&dentry->d_parent->d_lock);
3263 }
3264 EXPORT_SYMBOL(d_mark_tmpfile);
3265 
3266 int d_mark_tmpfile_name(struct file *file, const struct qstr *name)
3267 {
3268 	struct dentry *dentry = file->f_path.dentry;
3269 	char *dname = dentry->d_shortname.string;
3270 
3271 	if (unlikely(dname_external(dentry) ||
3272 		     d_really_is_positive(dentry) ||
3273 		     !d_unlinked(dentry)))
3274 		return -EINVAL;
3275 	if (unlikely(name->len > DNAME_INLINE_LEN - 1))
3276 		return -ENAMETOOLONG;
3277 
3278 	spin_lock(&dentry->d_parent->d_lock);
3279 	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
3280 	dentry->__d_name.len = name->len;
3281 	memcpy(dname, name->name, name->len);
3282 	dname[name->len] = '\0';
3283 	spin_unlock(&dentry->d_lock);
3284 	spin_unlock(&dentry->d_parent->d_lock);
3285 	return 0;
3286 }
3287 EXPORT_SYMBOL(d_mark_tmpfile_name);
3288 
3289 void d_tmpfile(struct file *file, struct inode *inode)
3290 {
3291 	struct dentry *dentry = file->f_path.dentry;
3292 
3293 	inode_dec_link_count(inode);
3294 	d_mark_tmpfile(file, inode);
3295 	d_instantiate(dentry, inode);
3296 }
3297 EXPORT_SYMBOL(d_tmpfile);
3298 
3299 /*
3300  * Obtain inode number of the parent dentry.
3301  */
3302 ino_t d_parent_ino(struct dentry *dentry)
3303 {
3304 	struct dentry *parent;
3305 	struct inode *iparent;
3306 	unsigned seq;
3307 	ino_t ret;
3308 
3309 	scoped_guard(rcu) {
3310 		seq = raw_seqcount_begin(&dentry->d_seq);
3311 		parent = READ_ONCE(dentry->d_parent);
3312 		iparent = d_inode_rcu(parent);
3313 		if (likely(iparent)) {
3314 			ret = iparent->i_ino;
3315 			if (!read_seqcount_retry(&dentry->d_seq, seq))
3316 				return ret;
3317 		}
3318 	}
3319 
3320 	spin_lock(&dentry->d_lock);
3321 	ret = dentry->d_parent->d_inode->i_ino;
3322 	spin_unlock(&dentry->d_lock);
3323 	return ret;
3324 }
3325 EXPORT_SYMBOL(d_parent_ino);
3326 
3327 static __initdata unsigned long dhash_entries;
3328 static int __init set_dhash_entries(char *str)
3329 {
3330 	return kstrtoul(str, 0, &dhash_entries) == 0;
3331 }
3332 __setup("dhash_entries=", set_dhash_entries);
3333 
3334 static void __init dcache_init_early(void)
3335 {
3336 	/* If hashes are distributed across NUMA nodes, defer
3337 	 * hash allocation until vmalloc space is available.
3338 	 */
3339 	if (hashdist)
3340 		return;
3341 
3342 	dentry_hashtable =
3343 		alloc_large_system_hash("Dentry cache",
3344 					sizeof(struct hlist_bl_head),
3345 					dhash_entries,
3346 					13,
3347 					HASH_EARLY | HASH_ZERO,
3348 					&d_hash_shift,
3349 					NULL,
3350 					2,
3351 					0);
3352 	d_hash_shift = 32 - d_hash_shift;
3353 
3354 	runtime_const_init(shift, d_hash_shift);
3355 	runtime_const_init(ptr, dentry_hashtable);
3356 }
3357 
3358 static void __init dcache_init(void)
3359 {
3360 	/*
3361 	 * A constructor could be added for stable state like the lists,
3362 	 * but it is probably not worth it because of the cache nature
3363 	 * of the dcache.
3364 	 */
3365 	__dentry_cache = KMEM_CACHE_USERCOPY(dentry,
3366 		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_ACCOUNT,
3367 		d_shortname.string);
3368 	runtime_const_init(ptr, __dentry_cache);
3369 
3370 	/* Hash may have been set up in dcache_init_early */
3371 	if (!hashdist)
3372 		return;
3373 
3374 	dentry_hashtable =
3375 		alloc_large_system_hash("Dentry cache",
3376 					sizeof(struct hlist_bl_head),
3377 					dhash_entries,
3378 					13,
3379 					HASH_ZERO,
3380 					&d_hash_shift,
3381 					NULL,
3382 					2,
3383 					0);
3384 	d_hash_shift = 32 - d_hash_shift;
3385 
3386 	runtime_const_init(shift, d_hash_shift);
3387 	runtime_const_init(ptr, dentry_hashtable);
3388 }
3389 
3390 void __init vfs_caches_init_early(void)
3391 {
3392 	int i;
3393 
3394 	for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++)
3395 		INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]);
3396 
3397 	dcache_init_early();
3398 	inode_init_early();
3399 }
3400 
3401 void __init vfs_caches_init(void)
3402 {
3403 	filename_init();
3404 	dcache_init();
3405 	inode_init();
3406 	files_init();
3407 	files_maxfiles_init();
3408 	mnt_init();
3409 	bdev_cache_init();
3410 	chrdev_init();
3411 }
3412