xref: /linux/fs/dcache.c (revision 109d59b900e78834c66657dd4748fcedb9a1fe8d)
1 /*
2  * fs/dcache.c
3  *
4  * Complete reimplementation
5  * (C) 1997 Thomas Schoebel-Theuer,
6  * with heavy changes by Linus Torvalds
7  */
8 
9 /*
10  * Notes on the allocation strategy:
11  *
12  * The dcache is a master of the icache - whenever a dcache entry
13  * exists, the inode will always exist. "iput()" is done either when
14  * the dcache entry is deleted or garbage collected.
15  */
16 
17 #include <linux/syscalls.h>
18 #include <linux/string.h>
19 #include <linux/mm.h>
20 #include <linux/fs.h>
21 #include <linux/fsnotify.h>
22 #include <linux/slab.h>
23 #include <linux/init.h>
24 #include <linux/hash.h>
25 #include <linux/cache.h>
26 #include <linux/export.h>
27 #include <linux/mount.h>
28 #include <linux/file.h>
29 #include <linux/uaccess.h>
30 #include <linux/security.h>
31 #include <linux/seqlock.h>
32 #include <linux/swap.h>
33 #include <linux/bootmem.h>
34 #include <linux/fs_struct.h>
35 #include <linux/bit_spinlock.h>
36 #include <linux/rculist_bl.h>
37 #include <linux/prefetch.h>
38 #include <linux/ratelimit.h>
39 #include <linux/list_lru.h>
40 #include "internal.h"
41 #include "mount.h"
42 
43 /*
44  * Usage:
45  * dentry->d_inode->i_lock protects:
46  *   - i_dentry, d_u.d_alias, d_inode of aliases
47  * dcache_hash_bucket lock protects:
48  *   - the dcache hash table
49  * s_roots bl list spinlock protects:
50  *   - the s_roots list (see __d_drop)
51  * dentry->d_sb->s_dentry_lru_lock protects:
52  *   - the dcache lru lists and counters
53  * d_lock protects:
54  *   - d_flags
55  *   - d_name
56  *   - d_lru
57  *   - d_count
58  *   - d_unhashed()
59  *   - d_parent and d_subdirs
60  *   - children's d_child and d_parent
61  *   - d_u.d_alias, d_inode
62  *
63  * Ordering:
64  * dentry->d_inode->i_lock
65  *   dentry->d_lock
66  *     dentry->d_sb->s_dentry_lru_lock
67  *     dcache_hash_bucket lock
68  *     s_roots lock
69  *
70  * If there is an ancestor relationship:
71  * dentry->d_parent->...->d_parent->d_lock
72  *   ...
73  *     dentry->d_parent->d_lock
74  *       dentry->d_lock
75  *
76  * If no ancestor relationship:
77  * if (dentry1 < dentry2)
78  *   dentry1->d_lock
79  *     dentry2->d_lock
80  */
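
/*
 * Illustrative sketch of the "no ancestor relationship" rule above;
 * example_lock_pair() is hypothetical, not a helper in this file:
 */
#if 0
static void example_lock_pair(struct dentry *d1, struct dentry *d2)
{
	if (d1 > d2)
		swap(d1, d2);		/* lower address locked first */
	spin_lock(&d1->d_lock);
	spin_lock_nested(&d2->d_lock, DENTRY_D_LOCK_NESTED);
}
#endif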
81 int sysctl_vfs_cache_pressure __read_mostly = 100;
82 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
83 
84 __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
85 
86 EXPORT_SYMBOL(rename_lock);
87 
88 static struct kmem_cache *dentry_cache __read_mostly;
89 
90 const struct qstr empty_name = QSTR_INIT("", 0);
91 EXPORT_SYMBOL(empty_name);
92 const struct qstr slash_name = QSTR_INIT("/", 1);
93 EXPORT_SYMBOL(slash_name);
94 
95 /*
96  * This is the single most critical data structure when it comes
97  * to the dcache: the hashtable for lookups. Somebody should try
98  * to make this good - I've just made it work.
99  *
100  * This hash-function tries to avoid losing too many bits of hash
101  * information, yet avoids using a prime hash-size or similar.
102  */
103 
104 static unsigned int d_hash_shift __read_mostly;
105 
106 static struct hlist_bl_head *dentry_hashtable __read_mostly;
107 
108 static inline struct hlist_bl_head *d_hash(unsigned int hash)
109 {
110 	return dentry_hashtable + (hash >> d_hash_shift);
111 }
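
/*
 * Illustrative sketch: dcache_init() (not part of this excerpt) stores
 * 32 minus the hash-table order in d_hash_shift, so the shift above
 * selects the top bits of the full 32-bit name hash. Assuming a
 * hypothetical 4096-bucket (2^12) table:
 */
#if 0
unsigned int hash = full_name_hash(parent, name, len);
struct hlist_bl_head *b = dentry_hashtable + (hash >> (32 - 12));
#endif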
112 
113 #define IN_LOOKUP_SHIFT 10
114 static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];
115 
116 static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
117 					unsigned int hash)
118 {
119 	hash += (unsigned long) parent / L1_CACHE_BYTES;
120 	return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
121 }
122 
123 
124 /* Statistics gathering. */
125 struct dentry_stat_t dentry_stat = {
126 	.age_limit = 45,
127 };
128 
129 static DEFINE_PER_CPU(long, nr_dentry);
130 static DEFINE_PER_CPU(long, nr_dentry_unused);
131 
132 #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
133 
134 /*
135  * Here we resort to our own counters instead of using generic per-cpu counters
136  * for consistency with what the vfs inode code does. We expect to get
137  * better code and performance by having our own specialized counters.
138  *
139  * Please note that the loop is done over all possible CPUs, not over all online
140  * CPUs. The reason for this is that we don't want to play games with CPUs going
141  * on and off. If one of them goes offline, we just keep its counters.
142  *
143  * glommer: See cffbc8a for details, and if you ever intend to change this,
144  * please update all vfs counters to match.
145  */
146 static long get_nr_dentry(void)
147 {
148 	int i;
149 	long sum = 0;
150 	for_each_possible_cpu(i)
151 		sum += per_cpu(nr_dentry, i);
152 	return sum < 0 ? 0 : sum;
153 }
154 
155 static long get_nr_dentry_unused(void)
156 {
157 	int i;
158 	long sum = 0;
159 	for_each_possible_cpu(i)
160 		sum += per_cpu(nr_dentry_unused, i);
161 	return sum < 0 ? 0 : sum;
162 }
163 
164 int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
165 		   size_t *lenp, loff_t *ppos)
166 {
167 	dentry_stat.nr_dentry = get_nr_dentry();
168 	dentry_stat.nr_unused = get_nr_dentry_unused();
169 	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
170 }
171 #endif
172 
173 /*
174  * Compare 2 name strings, return 0 if they match, otherwise non-zero.
175  * The strings are both count bytes long, and count is non-zero.
176  */
177 #ifdef CONFIG_DCACHE_WORD_ACCESS
178 
179 #include <asm/word-at-a-time.h>
180 /*
181  * NOTE! 'cs' comes from a dentry, so its allocation is
182  * aligned for this particular component. We don't
183  * strictly need the load_unaligned_zeropad() safety, but it
184  * doesn't hurt either.
185  *
186  * In contrast, 'ct' and 'tcount' can be from a pathname, and do
187  * need the careful unaligned handling.
188  */
189 static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
190 {
191 	unsigned long a, b, mask;
192 
193 	for (;;) {
194 		a = read_word_at_a_time(cs);
195 		b = load_unaligned_zeropad(ct);
196 		if (tcount < sizeof(unsigned long))
197 			break;
198 		if (unlikely(a != b))
199 			return 1;
200 		cs += sizeof(unsigned long);
201 		ct += sizeof(unsigned long);
202 		tcount -= sizeof(unsigned long);
203 		if (!tcount)
204 			return 0;
205 	}
206 	mask = bytemask_from_count(tcount);
207 	return unlikely(!!((a ^ b) & mask));
208 }
209 
210 #else
211 
212 static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
213 {
214 	do {
215 		if (*cs != *ct)
216 			return 1;
217 		cs++;
218 		ct++;
219 		tcount--;
220 	} while (tcount);
221 	return 0;
222 }
223 
224 #endif
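
/*
 * Illustrative sketch: on a 64-bit little-endian machine,
 * bytemask_from_count(n) evaluates to a mask keeping only the low
 * n bytes, which is what lets the final partial-word comparison in
 * dentry_string_cmp() ignore bytes beyond the name length:
 */
#if 0
/* for n in 1..7: same value as bytemask_from_count(n) */
unsigned long mask = ~0ul >> (8 * (sizeof(unsigned long) - n));
#endif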
225 
226 static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
227 {
228 	/*
229 	 * Be careful about RCU walk racing with rename:
230 	 * use 'READ_ONCE' to fetch the name pointer.
231 	 *
232 	 * NOTE! Even if a rename will mean that the length
233 	 * was not loaded atomically, we don't care. The
234 	 * RCU walk will check the sequence count eventually,
235 	 * and catch it. And we won't overrun the buffer,
236 	 * because we're reading the name pointer atomically,
237 	 * and a dentry name is guaranteed to be properly
238 	 * terminated with a NUL byte.
239 	 *
240 	 * End result: even if 'len' is wrong, we'll exit
241 	 * early because the data cannot match (there can
242 	 * be no NUL in the ct/tcount data)
243 	 */
244 	const unsigned char *cs = READ_ONCE(dentry->d_name.name);
245 
246 	return dentry_string_cmp(cs, ct, tcount);
247 }
248 
249 struct external_name {
250 	union {
251 		atomic_t count;
252 		struct rcu_head head;
253 	} u;
254 	unsigned char name[];
255 };
256 
257 static inline struct external_name *external_name(struct dentry *dentry)
258 {
259 	return container_of(dentry->d_name.name, struct external_name, name[0]);
260 }
261 
262 static void __d_free(struct rcu_head *head)
263 {
264 	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
265 
266 	kmem_cache_free(dentry_cache, dentry);
267 }
268 
269 static void __d_free_external(struct rcu_head *head)
270 {
271 	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
272 	kfree(external_name(dentry));
273 	kmem_cache_free(dentry_cache, dentry);
274 }
275 
276 static inline int dname_external(const struct dentry *dentry)
277 {
278 	return dentry->d_name.name != dentry->d_iname;
279 }
280 
281 void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
282 {
283 	spin_lock(&dentry->d_lock);
284 	if (unlikely(dname_external(dentry))) {
285 		struct external_name *p = external_name(dentry);
286 		atomic_inc(&p->u.count);
287 		spin_unlock(&dentry->d_lock);
288 		name->name = p->name;
289 	} else {
290 		memcpy(name->inline_name, dentry->d_iname, DNAME_INLINE_LEN);
291 		spin_unlock(&dentry->d_lock);
292 		name->name = name->inline_name;
293 	}
294 }
295 EXPORT_SYMBOL(take_dentry_name_snapshot);
296 
297 void release_dentry_name_snapshot(struct name_snapshot *name)
298 {
299 	if (unlikely(name->name != name->inline_name)) {
300 		struct external_name *p;
301 		p = container_of(name->name, struct external_name, name[0]);
302 		if (unlikely(atomic_dec_and_test(&p->u.count)))
303 			kfree_rcu(p, u.head);
304 	}
305 }
306 EXPORT_SYMBOL(release_dentry_name_snapshot);
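
/*
 * Illustrative sketch of a caller: take_dentry_name_snapshot() yields a
 * name that stays valid even if the dentry is renamed or killed while
 * we sleep (cf. fsnotify). Purely an example, not from this file:
 */
#if 0
struct name_snapshot snap;

take_dentry_name_snapshot(&snap, dentry);
/* ...may block; d_name can change under us, snap.name cannot... */
pr_info("processing %s\n", snap.name);
release_dentry_name_snapshot(&snap);
#endif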
307 
308 static inline void __d_set_inode_and_type(struct dentry *dentry,
309 					  struct inode *inode,
310 					  unsigned type_flags)
311 {
312 	unsigned flags;
313 
314 	dentry->d_inode = inode;
315 	flags = READ_ONCE(dentry->d_flags);
316 	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
317 	flags |= type_flags;
318 	WRITE_ONCE(dentry->d_flags, flags);
319 }
320 
321 static inline void __d_clear_type_and_inode(struct dentry *dentry)
322 {
323 	unsigned flags = READ_ONCE(dentry->d_flags);
324 
325 	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
326 	WRITE_ONCE(dentry->d_flags, flags);
327 	dentry->d_inode = NULL;
328 }
329 
330 static void dentry_free(struct dentry *dentry)
331 {
332 	WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
333 	if (unlikely(dname_external(dentry))) {
334 		struct external_name *p = external_name(dentry);
335 		if (likely(atomic_dec_and_test(&p->u.count))) {
336 			call_rcu(&dentry->d_u.d_rcu, __d_free_external);
337 			return;
338 		}
339 	}
340 	/* if dentry was never visible to RCU, immediate free is OK */
341 	if (!(dentry->d_flags & DCACHE_RCUACCESS))
342 		__d_free(&dentry->d_u.d_rcu);
343 	else
344 		call_rcu(&dentry->d_u.d_rcu, __d_free);
345 }
346 
347 /*
348  * Release the dentry's inode, using the filesystem
349  * d_iput() operation if defined.
350  */
351 static void dentry_unlink_inode(struct dentry * dentry)
352 	__releases(dentry->d_lock)
353 	__releases(dentry->d_inode->i_lock)
354 {
355 	struct inode *inode = dentry->d_inode;
356 	bool hashed = !d_unhashed(dentry);
357 
358 	if (hashed)
359 		raw_write_seqcount_begin(&dentry->d_seq);
360 	__d_clear_type_and_inode(dentry);
361 	hlist_del_init(&dentry->d_u.d_alias);
362 	if (hashed)
363 		raw_write_seqcount_end(&dentry->d_seq);
364 	spin_unlock(&dentry->d_lock);
365 	spin_unlock(&inode->i_lock);
366 	if (!inode->i_nlink)
367 		fsnotify_inoderemove(inode);
368 	if (dentry->d_op && dentry->d_op->d_iput)
369 		dentry->d_op->d_iput(dentry, inode);
370 	else
371 		iput(inode);
372 }
373 
374 /*
375  * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
376  * is in use - which includes both the "real" per-superblock
377  * LRU list _and_ the DCACHE_SHRINK_LIST use.
378  *
379  * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
380  * on the shrink list (ie not on the superblock LRU list).
381  *
382  * The per-cpu "nr_dentry_unused" counters are updated with
383  * the DCACHE_LRU_LIST bit.
384  *
385  * These helper functions make sure we always follow the
386  * rules. d_lock must be held by the caller.
387  */
388 #define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
389 static void d_lru_add(struct dentry *dentry)
390 {
391 	D_FLAG_VERIFY(dentry, 0);
392 	dentry->d_flags |= DCACHE_LRU_LIST;
393 	this_cpu_inc(nr_dentry_unused);
394 	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
395 }
396 
397 static void d_lru_del(struct dentry *dentry)
398 {
399 	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
400 	dentry->d_flags &= ~DCACHE_LRU_LIST;
401 	this_cpu_dec(nr_dentry_unused);
402 	WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
403 }
404 
405 static void d_shrink_del(struct dentry *dentry)
406 {
407 	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
408 	list_del_init(&dentry->d_lru);
409 	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
410 	this_cpu_dec(nr_dentry_unused);
411 }
412 
413 static void d_shrink_add(struct dentry *dentry, struct list_head *list)
414 {
415 	D_FLAG_VERIFY(dentry, 0);
416 	list_add(&dentry->d_lru, list);
417 	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
418 	this_cpu_inc(nr_dentry_unused);
419 }
420 
421 /*
422  * These can only be called under the global LRU lock, ie during the
423  * callback for freeing the LRU list. "isolate" removes it from the
424  * LRU lists entirely, while shrink_move moves it to the indicated
425  * private list.
426  */
427 static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
428 {
429 	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
430 	dentry->d_flags &= ~DCACHE_LRU_LIST;
431 	this_cpu_dec(nr_dentry_unused);
432 	list_lru_isolate(lru, &dentry->d_lru);
433 }
434 
435 static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
436 			      struct list_head *list)
437 {
438 	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
439 	dentry->d_flags |= DCACHE_SHRINK_LIST;
440 	list_lru_isolate_move(lru, &dentry->d_lru, list);
441 }
442 
443 /*
444  * dentry_lru_add() must be called with d_lock held.
445  */
446 static void dentry_lru_add(struct dentry *dentry)
447 {
448 	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
449 		d_lru_add(dentry);
450 	else if (unlikely(!(dentry->d_flags & DCACHE_REFERENCED)))
451 		dentry->d_flags |= DCACHE_REFERENCED;
452 }
453 
454 /**
455  * d_drop - drop a dentry
456  * @dentry: dentry to drop
457  *
458  * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
459  * be found through a VFS lookup any more. Note that this is different from
460  * deleting the dentry - d_delete will try to mark the dentry negative if
461  * possible, giving a successful _negative_ lookup, while d_drop will
462  * just make the cache lookup fail.
463  *
464  * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
465  * reason (NFS timeouts or autofs deletes).
466  *
467  * __d_drop requires dentry->d_lock
468  * ___d_drop doesn't mark dentry as "unhashed"
469  *   (dentry->d_hash.pprev will be LIST_POISON2, not NULL).
470  */
471 static void ___d_drop(struct dentry *dentry)
472 {
473 	if (!d_unhashed(dentry)) {
474 		struct hlist_bl_head *b;
475 		/*
476 		 * Hashed dentries are normally on the dentry hashtable,
477 		 * with the exception of those newly allocated by
478 		 * d_obtain_root, which are always IS_ROOT:
479 		 */
480 		if (unlikely(IS_ROOT(dentry)))
481 			b = &dentry->d_sb->s_roots;
482 		else
483 			b = d_hash(dentry->d_name.hash);
484 
485 		hlist_bl_lock(b);
486 		__hlist_bl_del(&dentry->d_hash);
487 		hlist_bl_unlock(b);
488 		/* After this call, in-progress rcu-walk path lookup will fail. */
489 		write_seqcount_invalidate(&dentry->d_seq);
490 	}
491 }
492 
493 void __d_drop(struct dentry *dentry)
494 {
495 	___d_drop(dentry);
496 	dentry->d_hash.pprev = NULL;
497 }
498 EXPORT_SYMBOL(__d_drop);
499 
500 void d_drop(struct dentry *dentry)
501 {
502 	spin_lock(&dentry->d_lock);
503 	__d_drop(dentry);
504 	spin_unlock(&dentry->d_lock);
505 }
506 EXPORT_SYMBOL(d_drop);
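
/*
 * Illustrative sketch: a filesystem that notices a cached entry has
 * gone stale (cf. the NFS-timeout case above) can simply unhash it;
 * 'info->expires' is a hypothetical per-dentry field:
 */
#if 0
if (time_after(jiffies, info->expires))
	d_drop(dentry);	/* further lookups miss; freed on last dput() */
#endif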
507 
508 static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
509 {
510 	struct dentry *next;
511 	/*
512 	 * Inform d_walk() and shrink_dentry_list() that we are no longer
513 	 * attached to the dentry tree
514 	 */
515 	dentry->d_flags |= DCACHE_DENTRY_KILLED;
516 	if (unlikely(list_empty(&dentry->d_child)))
517 		return;
518 	__list_del_entry(&dentry->d_child);
519 	/*
520 	 * Cursors can move around the list of children.  While we'd been
521 	 * a normal list member, it didn't matter - ->d_child.next would've
522 	 * been updated.  However, from now on it won't be and for the
523 	 * things like d_walk() it might end up with a nasty surprise.
524 	 * Normally d_walk() doesn't care about cursors moving around -
525 	 * ->d_lock on parent prevents that and since a cursor has no children
526 	 * of its own, we get through it without ever unlocking the parent.
527 	 * There is one exception, though - if we ascend from a child that
528 	 * gets killed as soon as we unlock it, the next sibling is found
529 	 * using the value left in its ->d_child.next.  And if _that_
530 	 * pointed to a cursor, and cursor got moved (e.g. by lseek())
531 	 * before d_walk() regains parent->d_lock, we'll end up skipping
532 	 * everything the cursor had been moved past.
533 	 *
534 	 * Solution: make sure that the pointer left behind in ->d_child.next
535 	 * points to something that won't be moving around.  I.e. skip the
536 	 * cursors.
537 	 */
538 	while (dentry->d_child.next != &parent->d_subdirs) {
539 		next = list_entry(dentry->d_child.next, struct dentry, d_child);
540 		if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
541 			break;
542 		dentry->d_child.next = next->d_child.next;
543 	}
544 }
545 
546 static void __dentry_kill(struct dentry *dentry)
547 {
548 	struct dentry *parent = NULL;
549 	bool can_free = true;
550 	if (!IS_ROOT(dentry))
551 		parent = dentry->d_parent;
552 
553 	/*
554 	 * The dentry is now unrecoverably dead to the world.
555 	 */
556 	lockref_mark_dead(&dentry->d_lockref);
557 
558 	/*
559 	 * inform the fs via d_prune that this dentry is about to be
560 	 * unhashed and destroyed.
561 	 */
562 	if (dentry->d_flags & DCACHE_OP_PRUNE)
563 		dentry->d_op->d_prune(dentry);
564 
565 	if (dentry->d_flags & DCACHE_LRU_LIST) {
566 		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
567 			d_lru_del(dentry);
568 	}
569 	/* if it was on the hash then remove it */
570 	__d_drop(dentry);
571 	dentry_unlist(dentry, parent);
572 	if (parent)
573 		spin_unlock(&parent->d_lock);
574 	if (dentry->d_inode)
575 		dentry_unlink_inode(dentry);
576 	else
577 		spin_unlock(&dentry->d_lock);
578 	this_cpu_dec(nr_dentry);
579 	if (dentry->d_op && dentry->d_op->d_release)
580 		dentry->d_op->d_release(dentry);
581 
582 	spin_lock(&dentry->d_lock);
583 	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
584 		dentry->d_flags |= DCACHE_MAY_FREE;
585 		can_free = false;
586 	}
587 	spin_unlock(&dentry->d_lock);
588 	if (likely(can_free))
589 		dentry_free(dentry);
590 }
591 
592 /*
593  * Finish off a dentry we've decided to kill.
594  * dentry->d_lock must be held, returns with it unlocked.
596  * Returns dentry requiring refcount drop, or NULL if we're done.
597  */
598 static struct dentry *dentry_kill(struct dentry *dentry)
599 	__releases(dentry->d_lock)
600 {
601 	struct inode *inode = dentry->d_inode;
602 	struct dentry *parent = NULL;
603 
604 	if (inode && unlikely(!spin_trylock(&inode->i_lock)))
605 		goto failed;
606 
607 	if (!IS_ROOT(dentry)) {
608 		parent = dentry->d_parent;
609 		if (unlikely(!spin_trylock(&parent->d_lock))) {
610 			if (inode)
611 				spin_unlock(&inode->i_lock);
612 			goto failed;
613 		}
614 	}
615 
616 	__dentry_kill(dentry);
617 	return parent;
618 
619 failed:
620 	spin_unlock(&dentry->d_lock);
621 	return dentry; /* try again with same dentry */
622 }
623 
624 static inline struct dentry *lock_parent(struct dentry *dentry)
625 {
626 	struct dentry *parent = dentry->d_parent;
627 	if (IS_ROOT(dentry))
628 		return NULL;
629 	if (unlikely(dentry->d_lockref.count < 0))
630 		return NULL;
631 	if (likely(spin_trylock(&parent->d_lock)))
632 		return parent;
633 	rcu_read_lock();
634 	spin_unlock(&dentry->d_lock);
635 again:
636 	parent = READ_ONCE(dentry->d_parent);
637 	spin_lock(&parent->d_lock);
638 	/*
639 	 * We can't blindly lock dentry until we are sure
640 	 * that we won't violate the locking order.
641 	 * Any changes of dentry->d_parent must have
642 	 * been done with parent->d_lock held, so
643 	 * spin_lock() above is enough of a barrier
644 	 * for checking if it's still our child.
645 	 */
646 	if (unlikely(parent != dentry->d_parent)) {
647 		spin_unlock(&parent->d_lock);
648 		goto again;
649 	}
650 	if (parent != dentry) {
651 		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
652 		if (unlikely(dentry->d_lockref.count < 0)) {
653 			spin_unlock(&parent->d_lock);
654 			parent = NULL;
655 		}
656 	} else {
657 		parent = NULL;
658 	}
659 	rcu_read_unlock();
660 	return parent;
661 }
662 
663 /*
664  * Try to do a lockless dput(), and return whether that was successful.
665  *
666  * If unsuccessful, we return false, having already taken the dentry lock.
667  *
668  * The caller needs to hold the RCU read lock, so that the dentry is
669  * guaranteed to stay around even if the refcount goes down to zero!
670  */
671 static inline bool fast_dput(struct dentry *dentry)
672 {
673 	int ret;
674 	unsigned int d_flags;
675 
676 	/*
677 	 * If we have a d_op->d_delete() operation, we should not
678 	 * let the dentry count go to zero, so use "put_or_lock".
679 	 */
680 	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
681 		return lockref_put_or_lock(&dentry->d_lockref);
682 
683 	/*
684 	 * .. otherwise, we can try to just decrement the
685 	 * lockref optimistically.
686 	 */
687 	ret = lockref_put_return(&dentry->d_lockref);
688 
689 	/*
690 	 * If the lockref_put_return() failed due to the lock being held
691 	 * by somebody else, the fast path has failed. We will need to
692 	 * get the lock, and then check the count again.
693 	 */
694 	if (unlikely(ret < 0)) {
695 		spin_lock(&dentry->d_lock);
696 		if (dentry->d_lockref.count > 1) {
697 			dentry->d_lockref.count--;
698 			spin_unlock(&dentry->d_lock);
699 			return true;
700 		}
701 		return false;
702 	}
703 
704 	/*
705 	 * If we weren't the last ref, we're done.
706 	 */
707 	if (ret)
708 		return true;
709 
710 	/*
711 	 * Careful, careful. The reference count went down
712 	 * to zero, but we don't hold the dentry lock, so
713 	 * somebody else could get it again, and do another
714 	 * dput(), and we need to not race with that.
715 	 *
716 	 * However, there is a very special and common case
717 	 * where we don't care, because there is nothing to
718 	 * do: the dentry is still hashed, it does not have
719 	 * a 'delete' op, and it's referenced and already on
720 	 * the LRU list.
721 	 *
722 	 * NOTE! Since we aren't locked, these values are
723 	 * not "stable". However, it is sufficient that at
724 	 * some point after we dropped the reference the
725 	 * dentry was hashed and the flags had the proper
726 	 * value. Other dentry users may have re-gotten
727 	 * a reference to the dentry and change that, but
728 	 * our work is done - we can leave the dentry
729 	 * around with a zero refcount.
730 	 */
731 	smp_rmb();
732 	d_flags = READ_ONCE(dentry->d_flags);
733 	d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;
734 
735 	/* Nothing to do? Dropping the reference was all we needed? */
736 	if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
737 		return true;
738 
739 	/*
740 	 * Not the fast normal case? Get the lock. We've already decremented
741 	 * the refcount, but we'll need to re-check the situation after
742 	 * getting the lock.
743 	 */
744 	spin_lock(&dentry->d_lock);
745 
746 	/*
747 	 * Did somebody else grab a reference to it in the meantime, and
748 	 * we're no longer the last user after all? Alternatively, somebody
749 	 * else could have killed it and marked it dead. Either way, we
750 	 * don't need to do anything else.
751 	 */
752 	if (dentry->d_lockref.count) {
753 		spin_unlock(&dentry->d_lock);
754 		return true;
755 	}
756 
757 	/*
758 	 * Re-get the reference we optimistically dropped. We hold the
759 	 * lock, and we just tested that it was zero, so we can just
760 	 * set it to 1.
761 	 */
762 	dentry->d_lockref.count = 1;
763 	return false;
764 }
765 
766 
767 /*
768  * This is dput
769  *
770  * This is complicated by the fact that we do not want to put
771  * dentries that are no longer on any hash chain on the unused
772  * list: we'd much rather just get rid of them immediately.
773  *
774  * However, that implies that we have to traverse the dentry
775  * tree upwards to the parents which might _also_ now be
776  * scheduled for deletion (it may have been only waiting for
777  * its last child to go away).
778  *
779  * This tail recursion is done by hand as we don't want to depend
780  * on the compiler to always get this right (gcc generally doesn't).
781  * Real recursion would eat up our stack space.
782  */
783 
784 /*
785  * dput - release a dentry
786  * @dentry: dentry to release
787  *
788  * Release a dentry. This will drop the usage count and if appropriate
789  * call the dentry unlink method as well as removing it from the queues and
790  * releasing its resources. If the parent dentries were scheduled for release
791  * they too may now get deleted.
792  */
793 void dput(struct dentry *dentry)
794 {
795 	if (unlikely(!dentry))
796 		return;
797 
798 repeat:
799 	might_sleep();
800 
801 	rcu_read_lock();
802 	if (likely(fast_dput(dentry))) {
803 		rcu_read_unlock();
804 		return;
805 	}
806 
807 	/* Slow case: now with the dentry lock held */
808 	rcu_read_unlock();
809 
810 	WARN_ON(d_in_lookup(dentry));
811 
812 	/* Unreachable? Get rid of it */
813 	if (unlikely(d_unhashed(dentry)))
814 		goto kill_it;
815 
816 	if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
817 		goto kill_it;
818 
819 	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
820 		if (dentry->d_op->d_delete(dentry))
821 			goto kill_it;
822 	}
823 
824 	dentry_lru_add(dentry);
825 
826 	dentry->d_lockref.count--;
827 	spin_unlock(&dentry->d_lock);
828 	return;
829 
830 kill_it:
831 	dentry = dentry_kill(dentry);
832 	if (dentry) {
833 		cond_resched();
834 		goto repeat;
835 	}
836 }
837 EXPORT_SYMBOL(dput);
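
/*
 * Illustrative sketch: the usual pairing. Every dget() (or other
 * reference-taking call) must be balanced by exactly one dput():
 */
#if 0
struct dentry *d = dget(file->f_path.dentry);

/* ... d stays allocated while we hold the reference ... */
dput(d);	/* may kill d and keep walking up through its parents */
#endif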
838 
839 
840 /* This must be called with d_lock held */
841 static inline void __dget_dlock(struct dentry *dentry)
842 {
843 	dentry->d_lockref.count++;
844 }
845 
846 static inline void __dget(struct dentry *dentry)
847 {
848 	lockref_get(&dentry->d_lockref);
849 }
850 
851 struct dentry *dget_parent(struct dentry *dentry)
852 {
853 	int gotref;
854 	struct dentry *ret;
855 
856 	/*
857 	 * Do optimistic parent lookup without any
858 	 * locking.
859 	 */
860 	rcu_read_lock();
861 	ret = READ_ONCE(dentry->d_parent);
862 	gotref = lockref_get_not_zero(&ret->d_lockref);
863 	rcu_read_unlock();
864 	if (likely(gotref)) {
865 		if (likely(ret == READ_ONCE(dentry->d_parent)))
866 			return ret;
867 		dput(ret);
868 	}
869 
870 repeat:
871 	/*
872 	 * Don't need rcu_dereference because we re-check it was correct under
873 	 * the lock.
874 	 */
875 	rcu_read_lock();
876 	ret = dentry->d_parent;
877 	spin_lock(&ret->d_lock);
878 	if (unlikely(ret != dentry->d_parent)) {
879 		spin_unlock(&ret->d_lock);
880 		rcu_read_unlock();
881 		goto repeat;
882 	}
883 	rcu_read_unlock();
884 	BUG_ON(!ret->d_lockref.count);
885 	ret->d_lockref.count++;
886 	spin_unlock(&ret->d_lock);
887 	return ret;
888 }
889 EXPORT_SYMBOL(dget_parent);
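
/*
 * Illustrative sketch: ->d_parent is only stable under d_lock or
 * rename_lock, so code that needs the parent across a sleep takes a
 * counted reference instead:
 */
#if 0
struct dentry *parent = dget_parent(dentry);

/* parent stays allocated even if dentry is moved concurrently */
dput(parent);
#endif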
890 
891 /**
892  * d_find_alias - grab a hashed alias of inode
893  * @inode: inode in question
894  *
895  * If inode has a hashed alias, or is a directory and has any alias,
896  * acquire the reference to alias and return it. Otherwise return NULL.
897  * Notice that if inode is a directory there can be only one alias and
898  * it can be unhashed only if it has no children, or if it is the root
899  * of a filesystem, or if the directory was renamed and d_revalidate
900  * was the first vfs operation to notice.
901  *
902  * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
903  * any other hashed alias over that one.
904  */
905 static struct dentry *__d_find_alias(struct inode *inode)
906 {
907 	struct dentry *alias, *discon_alias;
908 
909 again:
910 	discon_alias = NULL;
911 	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
912 		spin_lock(&alias->d_lock);
913 		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
914 			if (IS_ROOT(alias) &&
915 			    (alias->d_flags & DCACHE_DISCONNECTED)) {
916 				discon_alias = alias;
917 			} else {
918 				__dget_dlock(alias);
919 				spin_unlock(&alias->d_lock);
920 				return alias;
921 			}
922 		}
923 		spin_unlock(&alias->d_lock);
924 	}
925 	if (discon_alias) {
926 		alias = discon_alias;
927 		spin_lock(&alias->d_lock);
928 		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
929 			__dget_dlock(alias);
930 			spin_unlock(&alias->d_lock);
931 			return alias;
932 		}
933 		spin_unlock(&alias->d_lock);
934 		goto again;
935 	}
936 	return NULL;
937 }
938 
939 struct dentry *d_find_alias(struct inode *inode)
940 {
941 	struct dentry *de = NULL;
942 
943 	if (!hlist_empty(&inode->i_dentry)) {
944 		spin_lock(&inode->i_lock);
945 		de = __d_find_alias(inode);
946 		spin_unlock(&inode->i_lock);
947 	}
948 	return de;
949 }
950 EXPORT_SYMBOL(d_find_alias);
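
/*
 * Illustrative sketch: turning an inode into some connected dentry,
 * e.g. for logging with the %pd format:
 */
#if 0
struct dentry *alias = d_find_alias(inode);

if (alias) {
	pr_info("inode %lu has alias %pd\n", inode->i_ino, alias);
	dput(alias);	/* d_find_alias() returned a counted reference */
}
#endif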
951 
952 /*
953  *	Try to kill dentries associated with this inode.
954  * WARNING: you must own a reference to inode.
955  */
956 void d_prune_aliases(struct inode *inode)
957 {
958 	struct dentry *dentry;
959 restart:
960 	spin_lock(&inode->i_lock);
961 	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
962 		spin_lock(&dentry->d_lock);
963 		if (!dentry->d_lockref.count) {
964 			struct dentry *parent = lock_parent(dentry);
965 			if (likely(!dentry->d_lockref.count)) {
966 				__dentry_kill(dentry);
967 				dput(parent);
968 				goto restart;
969 			}
970 			if (parent)
971 				spin_unlock(&parent->d_lock);
972 		}
973 		spin_unlock(&dentry->d_lock);
974 	}
975 	spin_unlock(&inode->i_lock);
976 }
977 EXPORT_SYMBOL(d_prune_aliases);
978 
979 static void shrink_dentry_list(struct list_head *list)
980 {
981 	struct dentry *dentry, *parent;
982 
983 	while (!list_empty(list)) {
984 		struct inode *inode;
985 		dentry = list_entry(list->prev, struct dentry, d_lru);
986 		spin_lock(&dentry->d_lock);
987 		parent = lock_parent(dentry);
988 
989 		/*
990 		 * The dispose list is isolated and dentries are not accounted
991 		 * to the LRU here, so we can simply remove it from the list
992 		 * here regardless of whether it is referenced or not.
993 		 */
994 		d_shrink_del(dentry);
995 
996 		/*
997 		 * We found an inuse dentry which was not removed from
998 		 * the LRU because of laziness during lookup. Do not free it.
999 		 */
1000 		if (dentry->d_lockref.count > 0) {
1001 			spin_unlock(&dentry->d_lock);
1002 			if (parent)
1003 				spin_unlock(&parent->d_lock);
1004 			continue;
1005 		}
1006 
1007 
1008 		if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
1009 			bool can_free = dentry->d_flags & DCACHE_MAY_FREE;
1010 			spin_unlock(&dentry->d_lock);
1011 			if (parent)
1012 				spin_unlock(&parent->d_lock);
1013 			if (can_free)
1014 				dentry_free(dentry);
1015 			continue;
1016 		}
1017 
1018 		inode = dentry->d_inode;
1019 		if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
1020 			d_shrink_add(dentry, list);
1021 			spin_unlock(&dentry->d_lock);
1022 			if (parent)
1023 				spin_unlock(&parent->d_lock);
1024 			continue;
1025 		}
1026 
1027 		__dentry_kill(dentry);
1028 
1029 		/*
1030 		 * We need to prune ancestors too. This is necessary to prevent
1031 		 * quadratic behavior of shrink_dcache_parent(), but is also
1032 		 * expected to be beneficial in reducing dentry cache
1033 		 * fragmentation.
1034 		 */
1035 		dentry = parent;
1036 		while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
1037 			parent = lock_parent(dentry);
1038 			if (dentry->d_lockref.count != 1) {
1039 				dentry->d_lockref.count--;
1040 				spin_unlock(&dentry->d_lock);
1041 				if (parent)
1042 					spin_unlock(&parent->d_lock);
1043 				break;
1044 			}
1045 			inode = dentry->d_inode;	/* can't be NULL */
1046 			if (unlikely(!spin_trylock(&inode->i_lock))) {
1047 				spin_unlock(&dentry->d_lock);
1048 				if (parent)
1049 					spin_unlock(&parent->d_lock);
1050 				cpu_relax();
1051 				continue;
1052 			}
1053 			__dentry_kill(dentry);
1054 			dentry = parent;
1055 		}
1056 	}
1057 }
1058 
1059 static enum lru_status dentry_lru_isolate(struct list_head *item,
1060 		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
1061 {
1062 	struct list_head *freeable = arg;
1063 	struct dentry	*dentry = container_of(item, struct dentry, d_lru);
1064 
1065 
1066 	/*
1067 	 * we are inverting the lru lock/dentry->d_lock here,
1068 	 * so use a trylock. If we fail to get the lock, just skip
1069 	 * it
1070 	 */
1071 	if (!spin_trylock(&dentry->d_lock))
1072 		return LRU_SKIP;
1073 
1074 	/*
1075 	 * Referenced dentries are still in use. If they have active
1076 	 * counts, just remove them from the LRU. Otherwise give them
1077 	 * another pass through the LRU.
1078 	 */
1079 	if (dentry->d_lockref.count) {
1080 		d_lru_isolate(lru, dentry);
1081 		spin_unlock(&dentry->d_lock);
1082 		return LRU_REMOVED;
1083 	}
1084 
1085 	if (dentry->d_flags & DCACHE_REFERENCED) {
1086 		dentry->d_flags &= ~DCACHE_REFERENCED;
1087 		spin_unlock(&dentry->d_lock);
1088 
1089 		/*
1090 		 * The list move itself will be made by the common LRU code. At
1091 		 * this point, we've dropped the dentry->d_lock but keep the
1092 		 * lru lock. This is safe to do, since every list movement is
1093 		 * protected by the lru lock even if both locks are held.
1094 		 *
1095 		 * This is guaranteed by the fact that all LRU management
1096 		 * functions are intermediated by the LRU API calls like
1097 		 * list_lru_add and list_lru_del. List movement in this file
1098 		 * only ever occurs through these functions or through callbacks
1099 		 * like this one that are called from the LRU API.
1100 		 *
1101 		 * The only exceptions to this are functions like
1102 		 * shrink_dentry_list, and code that first checks for the
1103 		 * DCACHE_SHRINK_LIST flag.  Those are guaranteed to be
1104 		 * operating only with stack-provided lists after they are
1105 		 * properly isolated from the main list.  Access is thus
1106 		 * always local.
1107 		 */
1108 		return LRU_ROTATE;
1109 	}
1110 
1111 	d_lru_shrink_move(lru, dentry, freeable);
1112 	spin_unlock(&dentry->d_lock);
1113 
1114 	return LRU_REMOVED;
1115 }
1116 
1117 /**
1118  * prune_dcache_sb - shrink the dcache
1119  * @sb: superblock
1120  * @sc: shrink control, passed to list_lru_shrink_walk()
1121  *
1122  * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
1123  * is done when we need more memory and called from the superblock shrinker
1124  * function.
1125  *
1126  * This function may fail to free any resources if all the dentries are in
1127  * use.
1128  */
1129 long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
1130 {
1131 	LIST_HEAD(dispose);
1132 	long freed;
1133 
1134 	freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
1135 				     dentry_lru_isolate, &dispose);
1136 	shrink_dentry_list(&dispose);
1137 	return freed;
1138 }
1139 
1140 static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
1141 		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
1142 {
1143 	struct list_head *freeable = arg;
1144 	struct dentry	*dentry = container_of(item, struct dentry, d_lru);
1145 
1146 	/*
1147 	 * we are inverting the lru lock/dentry->d_lock here,
1148 	 * so use a trylock. If we fail to get the lock, just skip
1149 	 * it
1150 	 */
1151 	if (!spin_trylock(&dentry->d_lock))
1152 		return LRU_SKIP;
1153 
1154 	d_lru_shrink_move(lru, dentry, freeable);
1155 	spin_unlock(&dentry->d_lock);
1156 
1157 	return LRU_REMOVED;
1158 }
1159 
1160 
1161 /**
1162  * shrink_dcache_sb - shrink dcache for a superblock
1163  * @sb: superblock
1164  *
1165  * Shrink the dcache for the specified super block. This is used to free
1166  * the dcache before unmounting a file system.
1167  */
1168 void shrink_dcache_sb(struct super_block *sb)
1169 {
1170 	long freed;
1171 
1172 	do {
1173 		LIST_HEAD(dispose);
1174 
1175 		freed = list_lru_walk(&sb->s_dentry_lru,
1176 			dentry_lru_isolate_shrink, &dispose, 1024);
1177 
1178 		this_cpu_sub(nr_dentry_unused, freed);
1179 		shrink_dentry_list(&dispose);
1180 		cond_resched();
1181 	} while (list_lru_count(&sb->s_dentry_lru) > 0);
1182 }
1183 EXPORT_SYMBOL(shrink_dcache_sb);
1184 
1185 /**
1186  * enum d_walk_ret - action to take during tree walk
1187  * @D_WALK_CONTINUE:	continue walk
1188  * @D_WALK_QUIT:	quit walk
1189  * @D_WALK_NORETRY:	quit when retry is needed
1190  * @D_WALK_SKIP:	skip this dentry and its children
1191  */
1192 enum d_walk_ret {
1193 	D_WALK_CONTINUE,
1194 	D_WALK_QUIT,
1195 	D_WALK_NORETRY,
1196 	D_WALK_SKIP,
1197 };
1198 
1199 /**
1200  * d_walk - walk the dentry tree
1201  * @parent:	start of walk
1202  * @data:	data passed to @enter() and @finish()
1203  * @enter:	callback when first entering the dentry
1204  * @finish:	callback when successfully finished the walk
1205  *
1206  * The @enter() and @finish() callbacks are called with d_lock held.
1207  */
1208 static void d_walk(struct dentry *parent, void *data,
1209 		   enum d_walk_ret (*enter)(void *, struct dentry *),
1210 		   void (*finish)(void *))
1211 {
1212 	struct dentry *this_parent;
1213 	struct list_head *next;
1214 	unsigned seq = 0;
1215 	enum d_walk_ret ret;
1216 	bool retry = true;
1217 
1218 again:
1219 	read_seqbegin_or_lock(&rename_lock, &seq);
1220 	this_parent = parent;
1221 	spin_lock(&this_parent->d_lock);
1222 
1223 	ret = enter(data, this_parent);
1224 	switch (ret) {
1225 	case D_WALK_CONTINUE:
1226 		break;
1227 	case D_WALK_QUIT:
1228 	case D_WALK_SKIP:
1229 		goto out_unlock;
1230 	case D_WALK_NORETRY:
1231 		retry = false;
1232 		break;
1233 	}
1234 repeat:
1235 	next = this_parent->d_subdirs.next;
1236 resume:
1237 	while (next != &this_parent->d_subdirs) {
1238 		struct list_head *tmp = next;
1239 		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
1240 		next = tmp->next;
1241 
1242 		if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
1243 			continue;
1244 
1245 		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1246 
1247 		ret = enter(data, dentry);
1248 		switch (ret) {
1249 		case D_WALK_CONTINUE:
1250 			break;
1251 		case D_WALK_QUIT:
1252 			spin_unlock(&dentry->d_lock);
1253 			goto out_unlock;
1254 		case D_WALK_NORETRY:
1255 			retry = false;
1256 			break;
1257 		case D_WALK_SKIP:
1258 			spin_unlock(&dentry->d_lock);
1259 			continue;
1260 		}
1261 
1262 		if (!list_empty(&dentry->d_subdirs)) {
1263 			spin_unlock(&this_parent->d_lock);
1264 			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
1265 			this_parent = dentry;
1266 			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1267 			goto repeat;
1268 		}
1269 		spin_unlock(&dentry->d_lock);
1270 	}
1271 	/*
1272 	 * All done at this level ... ascend and resume the search.
1273 	 */
1274 	rcu_read_lock();
1275 ascend:
1276 	if (this_parent != parent) {
1277 		struct dentry *child = this_parent;
1278 		this_parent = child->d_parent;
1279 
1280 		spin_unlock(&child->d_lock);
1281 		spin_lock(&this_parent->d_lock);
1282 
1283 		/* might go back up the wrong parent if we have had a rename. */
1284 		if (need_seqretry(&rename_lock, seq))
1285 			goto rename_retry;
1286 		/* go into the first sibling still alive */
1287 		do {
1288 			next = child->d_child.next;
1289 			if (next == &this_parent->d_subdirs)
1290 				goto ascend;
1291 			child = list_entry(next, struct dentry, d_child);
1292 		} while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
1293 		rcu_read_unlock();
1294 		goto resume;
1295 	}
1296 	if (need_seqretry(&rename_lock, seq))
1297 		goto rename_retry;
1298 	rcu_read_unlock();
1299 	if (finish)
1300 		finish(data);
1301 
1302 out_unlock:
1303 	spin_unlock(&this_parent->d_lock);
1304 	done_seqretry(&rename_lock, seq);
1305 	return;
1306 
1307 rename_retry:
1308 	spin_unlock(&this_parent->d_lock);
1309 	rcu_read_unlock();
1310 	BUG_ON(seq & 1);
1311 	if (!retry)
1312 		return;
1313 	seq = 1;
1314 	goto again;
1315 }
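
/*
 * Illustrative sketch: a minimal d_walk() user. The callback runs with
 * d_lock held on each dentry, so it must not sleep. example_count()
 * and example_subtree_size() are hypothetical:
 */
#if 0
static enum d_walk_ret example_count(void *data, struct dentry *dentry)
{
	unsigned long *n = data;

	(*n)++;
	return D_WALK_CONTINUE;
}

static unsigned long example_subtree_size(struct dentry *root)
{
	unsigned long n = 0;

	d_walk(root, &n, example_count, NULL);
	return n;
}
#endif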
1316 
1317 struct check_mount {
1318 	struct vfsmount *mnt;
1319 	unsigned int mounted;
1320 };
1321 
1322 static enum d_walk_ret path_check_mount(void *data, struct dentry *dentry)
1323 {
1324 	struct check_mount *info = data;
1325 	struct path path = { .mnt = info->mnt, .dentry = dentry };
1326 
1327 	if (likely(!d_mountpoint(dentry)))
1328 		return D_WALK_CONTINUE;
1329 	if (__path_is_mountpoint(&path)) {
1330 		info->mounted = 1;
1331 		return D_WALK_QUIT;
1332 	}
1333 	return D_WALK_CONTINUE;
1334 }
1335 
1336 /**
1337  * path_has_submounts - check for mounts over a dentry in the
1338  *                      current namespace.
1339  * @parent: path to check.
1340  *
1341  * Return true if the parent or its subdirectories contain
1342  * a mount point in the current namespace.
1343  */
1344 int path_has_submounts(const struct path *parent)
1345 {
1346 	struct check_mount data = { .mnt = parent->mnt, .mounted = 0 };
1347 
1348 	read_seqlock_excl(&mount_lock);
1349 	d_walk(parent->dentry, &data, path_check_mount, NULL);
1350 	read_sequnlock_excl(&mount_lock);
1351 
1352 	return data.mounted;
1353 }
1354 EXPORT_SYMBOL(path_has_submounts);
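
/*
 * Illustrative sketch: an expiry-style check, along the lines of what
 * autofs does before letting a subtree go away; 'path' is assumed to
 * be the struct path of the tree in question:
 */
#if 0
if (path_has_submounts(&path))
	return -EBUSY;	/* something is still mounted below */
#endif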
1355 
1356 /*
1357  * Called by mount code to set a mountpoint and check if the mountpoint is
1358  * reachable (e.g. NFS can unhash a directory dentry and then the complete
1359  * subtree can become unreachable).
1360  *
1361  * Only one of d_invalidate() and d_set_mounted() must succeed.  For
1362  * this reason take rename_lock and d_lock on dentry and ancestors.
1363  */
1364 int d_set_mounted(struct dentry *dentry)
1365 {
1366 	struct dentry *p;
1367 	int ret = -ENOENT;
1368 	write_seqlock(&rename_lock);
1369 	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
1370 		/* Need exclusion wrt. d_invalidate() */
1371 		spin_lock(&p->d_lock);
1372 		if (unlikely(d_unhashed(p))) {
1373 			spin_unlock(&p->d_lock);
1374 			goto out;
1375 		}
1376 		spin_unlock(&p->d_lock);
1377 	}
1378 	spin_lock(&dentry->d_lock);
1379 	if (!d_unlinked(dentry)) {
1380 		ret = -EBUSY;
1381 		if (!d_mountpoint(dentry)) {
1382 			dentry->d_flags |= DCACHE_MOUNTED;
1383 			ret = 0;
1384 		}
1385 	}
1386 	spin_unlock(&dentry->d_lock);
1387 out:
1388 	write_sequnlock(&rename_lock);
1389 	return ret;
1390 }
1391 
1392 /*
1393  * Search the dentry child list of the specified parent,
1394  * and move any unused dentries to the end of the unused
1395  * list for shrink_dentry_list(). We descend to the next level
1396  * whenever the d_subdirs list is non-empty and continue
1397  * searching.
1398  *
1399  * It returns zero iff there are no unused children,
1400  * otherwise it returns the number of children moved to
1401  * the end of the unused list. This may not be the total
1402  * number of unused children, because select_collect() can
1403  * drop the lock and return early due to latency
1404  * constraints.
1405  */
1406 
1407 struct select_data {
1408 	struct dentry *start;
1409 	struct list_head dispose;
1410 	int found;
1411 };
1412 
1413 static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
1414 {
1415 	struct select_data *data = _data;
1416 	enum d_walk_ret ret = D_WALK_CONTINUE;
1417 
1418 	if (data->start == dentry)
1419 		goto out;
1420 
1421 	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
1422 		data->found++;
1423 	} else {
1424 		if (dentry->d_flags & DCACHE_LRU_LIST)
1425 			d_lru_del(dentry);
1426 		if (!dentry->d_lockref.count) {
1427 			d_shrink_add(dentry, &data->dispose);
1428 			data->found++;
1429 		}
1430 	}
1431 	/*
1432 	 * We can return to the caller if we have found some (this
1433 	 * ensures forward progress). We'll be coming back to find
1434 	 * the rest.
1435 	 */
1436 	if (!list_empty(&data->dispose))
1437 		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
1438 out:
1439 	return ret;
1440 }
1441 
1442 /**
1443  * shrink_dcache_parent - prune dcache
1444  * @parent: parent of entries to prune
1445  *
1446  * Prune the dcache to remove unused children of the parent dentry.
1447  */
1448 void shrink_dcache_parent(struct dentry *parent)
1449 {
1450 	for (;;) {
1451 		struct select_data data;
1452 
1453 		INIT_LIST_HEAD(&data.dispose);
1454 		data.start = parent;
1455 		data.found = 0;
1456 
1457 		d_walk(parent, &data, select_collect, NULL);
1458 		if (!data.found)
1459 			break;
1460 
1461 		shrink_dentry_list(&data.dispose);
1462 		cond_resched();
1463 	}
1464 }
1465 EXPORT_SYMBOL(shrink_dcache_parent);
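
/*
 * Illustrative sketch: roughly what the vfs_rmdir() path does - prune
 * unused children first, so only genuinely busy dentries can make the
 * directory look busy:
 */
#if 0
shrink_dcache_parent(dentry);
error = dir->i_op->rmdir(dir, dentry);
if (!error)
	d_delete(dentry);
#endif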
1466 
1467 static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
1468 {
1469 	/* it has busy descendants; complain about those instead */
1470 	if (!list_empty(&dentry->d_subdirs))
1471 		return D_WALK_CONTINUE;
1472 
1473 	/* root with refcount 1 is fine */
1474 	if (dentry == _data && dentry->d_lockref.count == 1)
1475 		return D_WALK_CONTINUE;
1476 
1477 	printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd}"
1478 			" still in use (%d) [unmount of %s %s]\n",
1479 		       dentry,
1480 		       dentry->d_inode ?
1481 		       dentry->d_inode->i_ino : 0UL,
1482 		       dentry,
1483 		       dentry->d_lockref.count,
1484 		       dentry->d_sb->s_type->name,
1485 		       dentry->d_sb->s_id);
1486 	WARN_ON(1);
1487 	return D_WALK_CONTINUE;
1488 }
1489 
1490 static void do_one_tree(struct dentry *dentry)
1491 {
1492 	shrink_dcache_parent(dentry);
1493 	d_walk(dentry, dentry, umount_check, NULL);
1494 	d_drop(dentry);
1495 	dput(dentry);
1496 }
1497 
1498 /*
1499  * destroy the dentries attached to a superblock on unmounting
1500  */
1501 void shrink_dcache_for_umount(struct super_block *sb)
1502 {
1503 	struct dentry *dentry;
1504 
1505 	WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");
1506 
1507 	dentry = sb->s_root;
1508 	sb->s_root = NULL;
1509 	do_one_tree(dentry);
1510 
1511 	while (!hlist_bl_empty(&sb->s_roots)) {
1512 		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_roots), struct dentry, d_hash));
1513 		do_one_tree(dentry);
1514 	}
1515 }
1516 
1517 struct detach_data {
1518 	struct select_data select;
1519 	struct dentry *mountpoint;
1520 };
1521 static enum d_walk_ret detach_and_collect(void *_data, struct dentry *dentry)
1522 {
1523 	struct detach_data *data = _data;
1524 
1525 	if (d_mountpoint(dentry)) {
1526 		__dget_dlock(dentry);
1527 		data->mountpoint = dentry;
1528 		return D_WALK_QUIT;
1529 	}
1530 
1531 	return select_collect(&data->select, dentry);
1532 }
1533 
1534 static void check_and_drop(void *_data)
1535 {
1536 	struct detach_data *data = _data;
1537 
1538 	if (!data->mountpoint && list_empty(&data->select.dispose))
1539 		__d_drop(data->select.start);
1540 }
1541 
1542 /**
1543  * d_invalidate - detach submounts, prune dcache, and drop
1544  * @dentry: dentry to invalidate (aka detach, prune and drop)
1545  *
1546  * no dcache lock.
1547  *
1548  * The final d_drop is done as an atomic operation relative to
1549  * rename_lock ensuring there are no races with d_set_mounted.  This
1550  * ensures there are no unhashed dentries on the path to a mountpoint.
1551  */
1552 void d_invalidate(struct dentry *dentry)
1553 {
1554 	/*
1555 	 * If it's already been dropped, return OK.
1556 	 */
1557 	spin_lock(&dentry->d_lock);
1558 	if (d_unhashed(dentry)) {
1559 		spin_unlock(&dentry->d_lock);
1560 		return;
1561 	}
1562 	spin_unlock(&dentry->d_lock);
1563 
1564 	/* Negative dentries can be dropped without further checks */
1565 	if (!dentry->d_inode) {
1566 		d_drop(dentry);
1567 		return;
1568 	}
1569 
1570 	for (;;) {
1571 		struct detach_data data;
1572 
1573 		data.mountpoint = NULL;
1574 		INIT_LIST_HEAD(&data.select.dispose);
1575 		data.select.start = dentry;
1576 		data.select.found = 0;
1577 
1578 		d_walk(dentry, &data, detach_and_collect, check_and_drop);
1579 
1580 		if (!list_empty(&data.select.dispose))
1581 			shrink_dentry_list(&data.select.dispose);
1582 		else if (!data.mountpoint)
1583 			return;
1584 
1585 		if (data.mountpoint) {
1586 			detach_mounts(data.mountpoint);
1587 			dput(data.mountpoint);
1588 		}
1589 		cond_resched();
1590 	}
1591 }
1592 EXPORT_SYMBOL(d_invalidate);
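
/*
 * Illustrative sketch: the classic caller is the lookup path when a
 * filesystem's ->d_revalidate() reports that a cached dentry is no
 * longer good (the condition below is schematic):
 */
#if 0
if (dentry->d_op->d_revalidate(dentry, flags) == 0)
	d_invalidate(dentry);	/* detach submounts, prune, unhash */
#endif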
1593 
1594 /**
1595  * __d_alloc	-	allocate a dcache entry
1596  * @sb: filesystem it will belong to
1597  * @name: qstr of the name
1598  *
1599  * Allocates a dentry. It returns %NULL if there is insufficient memory
1600  * available. On success the dentry is returned. The name passed in is
1601  * copied, so the caller's copy may be reused after this call.
1602  */
1603 
1604 struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
1605 {
1606 	struct dentry *dentry;
1607 	char *dname;
1608 	int err;
1609 
1610 	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
1611 	if (!dentry)
1612 		return NULL;
1613 
1614 	/*
1615 	 * We guarantee that the inline name is always NUL-terminated.
1616 	 * This way the memcpy() done by the name switching in rename
1617 	 * will still always have a NUL at the end, even if we might
1618 	 * be overwriting an internal NUL character
1619 	 */
1620 	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
1621 	if (unlikely(!name)) {
1622 		name = &slash_name;
1623 		dname = dentry->d_iname;
1624 	} else if (name->len > DNAME_INLINE_LEN-1) {
1625 		size_t size = offsetof(struct external_name, name[1]);
1626 		struct external_name *p = kmalloc(size + name->len,
1627 						  GFP_KERNEL_ACCOUNT);
1628 		if (!p) {
1629 			kmem_cache_free(dentry_cache, dentry);
1630 			return NULL;
1631 		}
1632 		atomic_set(&p->u.count, 1);
1633 		dname = p->name;
1634 	} else  {
1635 		dname = dentry->d_iname;
1636 	}
1637 
1638 	dentry->d_name.len = name->len;
1639 	dentry->d_name.hash = name->hash;
1640 	memcpy(dname, name->name, name->len);
1641 	dname[name->len] = 0;
1642 
1643 	/* Make sure we always see the terminating NUL character */
1644 	smp_store_release(&dentry->d_name.name, dname); /* ^^^ */
1645 
1646 	dentry->d_lockref.count = 1;
1647 	dentry->d_flags = 0;
1648 	spin_lock_init(&dentry->d_lock);
1649 	seqcount_init(&dentry->d_seq);
1650 	dentry->d_inode = NULL;
1651 	dentry->d_parent = dentry;
1652 	dentry->d_sb = sb;
1653 	dentry->d_op = NULL;
1654 	dentry->d_fsdata = NULL;
1655 	INIT_HLIST_BL_NODE(&dentry->d_hash);
1656 	INIT_LIST_HEAD(&dentry->d_lru);
1657 	INIT_LIST_HEAD(&dentry->d_subdirs);
1658 	INIT_HLIST_NODE(&dentry->d_u.d_alias);
1659 	INIT_LIST_HEAD(&dentry->d_child);
1660 	d_set_d_op(dentry, dentry->d_sb->s_d_op);
1661 
1662 	if (dentry->d_op && dentry->d_op->d_init) {
1663 		err = dentry->d_op->d_init(dentry);
1664 		if (err) {
1665 			if (dname_external(dentry))
1666 				kfree(external_name(dentry));
1667 			kmem_cache_free(dentry_cache, dentry);
1668 			return NULL;
1669 		}
1670 	}
1671 
1672 	this_cpu_inc(nr_dentry);
1673 
1674 	return dentry;
1675 }
1676 
1677 /**
1678  * d_alloc	-	allocate a dcache entry
1679  * @parent: parent of entry to allocate
1680  * @name: qstr of the name
1681  *
1682  * Allocates a dentry. It returns %NULL if there is insufficient memory
1683  * available. On success the dentry is returned. The name passed in is
1684  * copied, so the caller's copy may be reused after this call.
1685  */
1686 struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1687 {
1688 	struct dentry *dentry = __d_alloc(parent->d_sb, name);
1689 	if (!dentry)
1690 		return NULL;
1691 	dentry->d_flags |= DCACHE_RCUACCESS;
1692 	spin_lock(&parent->d_lock);
1693 	/*
1694 	 * don't need child lock because it is not subject
1695 	 * to concurrency here
1696 	 */
1697 	__dget_dlock(parent);
1698 	dentry->d_parent = parent;
1699 	list_add(&dentry->d_child, &parent->d_subdirs);
1700 	spin_unlock(&parent->d_lock);
1701 
1702 	return dentry;
1703 }
1704 EXPORT_SYMBOL(d_alloc);
1705 
1706 struct dentry *d_alloc_anon(struct super_block *sb)
1707 {
1708 	return __d_alloc(sb, NULL);
1709 }
1710 EXPORT_SYMBOL(d_alloc_anon);
1711 
1712 struct dentry *d_alloc_cursor(struct dentry * parent)
1713 {
1714 	struct dentry *dentry = d_alloc_anon(parent->d_sb);
1715 	if (dentry) {
1716 		dentry->d_flags |= DCACHE_RCUACCESS | DCACHE_DENTRY_CURSOR;
1717 		dentry->d_parent = dget(parent);
1718 	}
1719 	return dentry;
1720 }
1721 
1722 /**
1723  * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
1724  * @sb: the superblock
1725  * @name: qstr of the name
1726  *
1727  * For a filesystem that just pins its dentries in memory and never
1728  * performs lookups at all, return an unhashed IS_ROOT dentry.
1729  */
1730 struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
1731 {
1732 	return __d_alloc(sb, name);
1733 }
1734 EXPORT_SYMBOL(d_alloc_pseudo);
1735 
1736 struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1737 {
1738 	struct qstr q;
1739 
1740 	q.name = name;
1741 	q.hash_len = hashlen_string(parent, name);
1742 	return d_alloc(parent, &q);
1743 }
1744 EXPORT_SYMBOL(d_alloc_name);
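
/*
 * Illustrative sketch: pseudo-filesystems typically pair
 * d_alloc_name() with d_instantiate(); "example" and
 * example_make_inode() are hypothetical (error handling elided):
 */
#if 0
struct dentry *d = d_alloc_name(parent, "example");

if (d)
	d_instantiate(d, example_make_inode(parent->d_sb));
#endif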
1745 
1746 void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1747 {
1748 	WARN_ON_ONCE(dentry->d_op);
1749 	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
1750 				DCACHE_OP_COMPARE	|
1751 				DCACHE_OP_REVALIDATE	|
1752 				DCACHE_OP_WEAK_REVALIDATE	|
1753 				DCACHE_OP_DELETE	|
1754 				DCACHE_OP_REAL));
1755 	dentry->d_op = op;
1756 	if (!op)
1757 		return;
1758 	if (op->d_hash)
1759 		dentry->d_flags |= DCACHE_OP_HASH;
1760 	if (op->d_compare)
1761 		dentry->d_flags |= DCACHE_OP_COMPARE;
1762 	if (op->d_revalidate)
1763 		dentry->d_flags |= DCACHE_OP_REVALIDATE;
1764 	if (op->d_weak_revalidate)
1765 		dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
1766 	if (op->d_delete)
1767 		dentry->d_flags |= DCACHE_OP_DELETE;
1768 	if (op->d_prune)
1769 		dentry->d_flags |= DCACHE_OP_PRUNE;
1770 	if (op->d_real)
1771 		dentry->d_flags |= DCACHE_OP_REAL;
1773 }
1774 EXPORT_SYMBOL(d_set_d_op);
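
/*
 * Illustrative sketch: most filesystems set sb->s_d_op once at mount
 * time, and __d_alloc() then applies it to every new dentry via
 * d_set_d_op(). example_d_revalidate() is hypothetical;
 * always_delete_dentry() is the stock helper from fs/libfs.c:
 */
#if 0
static const struct dentry_operations example_dentry_ops = {
	.d_revalidate	= example_d_revalidate,
	.d_delete	= always_delete_dentry,	/* don't cache negatives */
};

/* in example_fill_super(): */
sb->s_d_op = &example_dentry_ops;
#endif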
1775 
1776 
1777 /**
1778  * d_set_fallthru - Mark a dentry as falling through to a lower layer
1779  * @dentry: The dentry to mark
1780  *
1781  * Mark a dentry as falling through to the lower layer (as set with
1782  * d_pin_lower()).  This flag may be recorded on the medium.
1783  */
1784 void d_set_fallthru(struct dentry *dentry)
1785 {
1786 	spin_lock(&dentry->d_lock);
1787 	dentry->d_flags |= DCACHE_FALLTHRU;
1788 	spin_unlock(&dentry->d_lock);
1789 }
1790 EXPORT_SYMBOL(d_set_fallthru);
1791 
1792 static unsigned d_flags_for_inode(struct inode *inode)
1793 {
1794 	unsigned add_flags = DCACHE_REGULAR_TYPE;
1795 
1796 	if (!inode)
1797 		return DCACHE_MISS_TYPE;
1798 
1799 	if (S_ISDIR(inode->i_mode)) {
1800 		add_flags = DCACHE_DIRECTORY_TYPE;
1801 		if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
1802 			if (unlikely(!inode->i_op->lookup))
1803 				add_flags = DCACHE_AUTODIR_TYPE;
1804 			else
1805 				inode->i_opflags |= IOP_LOOKUP;
1806 		}
1807 		goto type_determined;
1808 	}
1809 
1810 	if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
1811 		if (unlikely(inode->i_op->get_link)) {
1812 			add_flags = DCACHE_SYMLINK_TYPE;
1813 			goto type_determined;
1814 		}
1815 		inode->i_opflags |= IOP_NOFOLLOW;
1816 	}
1817 
1818 	if (unlikely(!S_ISREG(inode->i_mode)))
1819 		add_flags = DCACHE_SPECIAL_TYPE;
1820 
1821 type_determined:
1822 	if (unlikely(IS_AUTOMOUNT(inode)))
1823 		add_flags |= DCACHE_NEED_AUTOMOUNT;
1824 	return add_flags;
1825 }
1826 
1827 static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1828 {
1829 	unsigned add_flags = d_flags_for_inode(inode);
1830 	WARN_ON(d_in_lookup(dentry));
1831 
1832 	spin_lock(&dentry->d_lock);
1833 	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
1834 	raw_write_seqcount_begin(&dentry->d_seq);
1835 	__d_set_inode_and_type(dentry, inode, add_flags);
1836 	raw_write_seqcount_end(&dentry->d_seq);
1837 	fsnotify_update_flags(dentry);
1838 	spin_unlock(&dentry->d_lock);
1839 }
1840 
1841 /**
1842  * d_instantiate - fill in inode information for a dentry
1843  * @entry: dentry to complete
1844  * @inode: inode to attach to this dentry
1845  *
1846  * Fill in inode information in the entry.
1847  *
1848  * This turns negative dentries into productive full members
1849  * of society.
1850  *
1851  * NOTE! This assumes that the inode count has been incremented
1852  * (or otherwise set) by the caller to indicate that it is now
1853  * in use by the dcache.
1854  */
1855 
1856 void d_instantiate(struct dentry *entry, struct inode * inode)
1857 {
1858 	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1859 	if (inode) {
1860 		security_d_instantiate(entry, inode);
1861 		spin_lock(&inode->i_lock);
1862 		__d_instantiate(entry, inode);
1863 		spin_unlock(&inode->i_lock);
1864 	}
1865 }
1866 EXPORT_SYMBOL(d_instantiate);
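
/*
 * A minimal sketch of the usual caller pattern in a filesystem's
 * ->create() method; myfs_new_inode() is a hypothetical helper that
 * returns a new, referenced inode or an ERR_PTR:
 *
 *	static int myfs_create(struct inode *dir, struct dentry *dentry,
 *			       umode_t mode, bool excl)
 *	{
 *		struct inode *inode = myfs_new_inode(dir, mode);
 *
 *		if (IS_ERR(inode))
 *			return PTR_ERR(inode);
 *		d_instantiate(dentry, inode);	/* dentry takes over the ref */
 *		return 0;
 *	}
 */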
1867 
1868 /**
1869  * d_instantiate_no_diralias - instantiate a non-aliased dentry
1870  * @entry: dentry to complete
1871  * @inode: inode to attach to this dentry
1872  *
1873  * Fill in inode information in the entry.  If a directory alias is found, then
1874  * return an error (and drop inode).  Together with d_materialise_unique() this
1875  * guarantees that a directory inode may never have more than one alias.
1876  */
1877 int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
1878 {
1879 	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1880 
1881 	security_d_instantiate(entry, inode);
1882 	spin_lock(&inode->i_lock);
1883 	if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
1884 		spin_unlock(&inode->i_lock);
1885 		iput(inode);
1886 		return -EBUSY;
1887 	}
1888 	__d_instantiate(entry, inode);
1889 	spin_unlock(&inode->i_lock);
1890 
1891 	return 0;
1892 }
1893 EXPORT_SYMBOL(d_instantiate_no_diralias);
1894 
1895 struct dentry *d_make_root(struct inode *root_inode)
1896 {
1897 	struct dentry *res = NULL;
1898 
1899 	if (root_inode) {
1900 		res = d_alloc_anon(root_inode->i_sb);
1901 		if (res)
1902 			d_instantiate(res, root_inode);
1903 		else
1904 			iput(root_inode);
1905 	}
1906 	return res;
1907 }
1908 EXPORT_SYMBOL(d_make_root);
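
/*
 * A minimal fill_super sketch, assuming a hypothetical myfs_get_root_inode()
 * that returns a referenced root inode or NULL.  d_make_root() consumes the
 * inode reference either way: on failure the inode has already been iput(),
 * so the caller must not drop it again:
 *
 *	static int myfs_fill_super(struct super_block *sb, void *data, int silent)
 *	{
 *		struct inode *root = myfs_get_root_inode(sb);
 *
 *		sb->s_root = d_make_root(root);
 *		if (!sb->s_root)
 *			return -ENOMEM;
 *		return 0;
 *	}
 */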
1909 
1910 static struct dentry * __d_find_any_alias(struct inode *inode)
1911 {
1912 	struct dentry *alias;
1913 
1914 	if (hlist_empty(&inode->i_dentry))
1915 		return NULL;
1916 	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
1917 	__dget(alias);
1918 	return alias;
1919 }
1920 
1921 /**
1922  * d_find_any_alias - find any alias for a given inode
1923  * @inode: inode to find an alias for
1924  *
1925  * If any aliases exist for the given inode, take and return a
1926  * reference for one of them.  If no aliases exist, return %NULL.
1927  */
1928 struct dentry *d_find_any_alias(struct inode *inode)
1929 {
1930 	struct dentry *de;
1931 
1932 	spin_lock(&inode->i_lock);
1933 	de = __d_find_any_alias(inode);
1934 	spin_unlock(&inode->i_lock);
1935 	return de;
1936 }
1937 EXPORT_SYMBOL(d_find_any_alias);
1938 
1939 static struct dentry *__d_instantiate_anon(struct dentry *dentry,
1940 					   struct inode *inode,
1941 					   bool disconnected)
1942 {
1943 	struct dentry *res;
1944 	unsigned add_flags;
1945 
1946 	security_d_instantiate(dentry, inode);
1947 	spin_lock(&inode->i_lock);
1948 	res = __d_find_any_alias(inode);
1949 	if (res) {
1950 		spin_unlock(&inode->i_lock);
1951 		dput(dentry);
1952 		goto out_iput;
1953 	}
1954 
1955 	/* attach a disconnected dentry */
1956 	add_flags = d_flags_for_inode(inode);
1957 
1958 	if (disconnected)
1959 		add_flags |= DCACHE_DISCONNECTED;
1960 
1961 	spin_lock(&dentry->d_lock);
1962 	__d_set_inode_and_type(dentry, inode, add_flags);
1963 	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
1964 	if (!disconnected) {
1965 		hlist_bl_lock(&dentry->d_sb->s_roots);
1966 		hlist_bl_add_head(&dentry->d_hash, &dentry->d_sb->s_roots);
1967 		hlist_bl_unlock(&dentry->d_sb->s_roots);
1968 	}
1969 	spin_unlock(&dentry->d_lock);
1970 	spin_unlock(&inode->i_lock);
1971 
1972 	return dentry;
1973 
1974  out_iput:
1975 	iput(inode);
1976 	return res;
1977 }
1978 
1979 struct dentry *d_instantiate_anon(struct dentry *dentry, struct inode *inode)
1980 {
1981 	return __d_instantiate_anon(dentry, inode, true);
1982 }
1983 EXPORT_SYMBOL(d_instantiate_anon);
1984 
1985 static struct dentry *__d_obtain_alias(struct inode *inode, bool disconnected)
1986 {
1987 	struct dentry *tmp;
1988 	struct dentry *res;
1989 
1990 	if (!inode)
1991 		return ERR_PTR(-ESTALE);
1992 	if (IS_ERR(inode))
1993 		return ERR_CAST(inode);
1994 
1995 	res = d_find_any_alias(inode);
1996 	if (res)
1997 		goto out_iput;
1998 
1999 	tmp = d_alloc_anon(inode->i_sb);
2000 	if (!tmp) {
2001 		res = ERR_PTR(-ENOMEM);
2002 		goto out_iput;
2003 	}
2004 
2005 	return __d_instantiate_anon(tmp, inode, disconnected);
2006 
2007 out_iput:
2008 	iput(inode);
2009 	return res;
2010 }
2011 
2012 /**
2013  * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
2014  * @inode: inode to allocate the dentry for
2015  *
2016  * Obtain a dentry for an inode resulting from NFS filehandle conversion or
2017  * similar open by handle operations.  The returned dentry may be anonymous,
2018  * or may have a full name (if the inode was already in the cache).
2019  *
2020  * When called on a directory inode, we must ensure that the inode only ever
2021  * has one dentry.  If a dentry is found, that is returned instead of
2022  * allocating a new one.
2023  *
2024  * On successful return, the reference to the inode has been transferred
2025  * to the dentry.  In case of an error the reference on the inode is released.
2026  * To make it easier to use in export operations a %NULL or IS_ERR inode may
2027  * be passed in and the error will be propagated to the return value,
2028  * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
2029  */
2030 struct dentry *d_obtain_alias(struct inode *inode)
2031 {
2032 	return __d_obtain_alias(inode, true);
2033 }
2034 EXPORT_SYMBOL(d_obtain_alias);
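
/*
 * A minimal export_operations sketch showing the intended caller;
 * myfs_iget() is a hypothetical helper that may return an ERR_PTR.
 * Since d_obtain_alias() accepts NULL/IS_ERR inodes and consumes the
 * reference, no error unwinding is needed here:
 *
 *	static struct dentry *myfs_fh_to_dentry(struct super_block *sb,
 *			struct fid *fid, int fh_len, int fh_type)
 *	{
 *		struct inode *inode = myfs_iget(sb, fid->i32.ino);
 *
 *		return d_obtain_alias(inode);
 *	}
 */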
2035 
2036 /**
2037  * d_obtain_root - find or allocate a dentry for a given inode
2038  * @inode: inode to allocate the dentry for
2039  *
2040  * Obtain an IS_ROOT dentry for the root of a filesystem.
2041  *
2042  * We must ensure that directory inodes only ever have one dentry.  If a
2043  * dentry is found, that is returned instead of allocating a new one.
2044  *
2045  * On successful return, the reference to the inode has been transferred
2046  * to the dentry.  In case of an error the reference on the inode is
2047  * released.  A %NULL or IS_ERR inode may be passed in and the error
2048  * will be propagated to the return value, with a %NULL @inode
2049  * replaced by ERR_PTR(-ESTALE).
2050  */
2051 struct dentry *d_obtain_root(struct inode *inode)
2052 {
2053 	return __d_obtain_alias(inode, false);
2054 }
2055 EXPORT_SYMBOL(d_obtain_root);
2056 
2057 /**
2058  * d_add_ci - lookup or allocate new dentry with case-exact name
2059  * @inode:  the inode case-insensitive lookup has found
2060  * @dentry: the negative dentry that was passed to the parent's lookup func
2061  * @name:   the case-exact name to be associated with the returned dentry
2062  *
2063  * This is to avoid filling the dcache with case-insensitive names to the
2064  * same inode; only the actual correct case is stored in the dcache for
2065  * case-insensitive filesystems.
2066  *
2067  * For a case-insensitive lookup match, if the case-exact dentry
2068  * already exists in the dcache, use it and return it.
2069  *
2070  * If no entry exists with the exact case name, allocate new dentry with
2071  * the exact case, and return the spliced entry.
2072  */
2073 struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
2074 			struct qstr *name)
2075 {
2076 	struct dentry *found, *res;
2077 
2078 	/*
2079 	 * First check if a dentry matching the name already exists,
2080 	 * if not go ahead and create it now.
2081 	 */
2082 	found = d_hash_and_lookup(dentry->d_parent, name);
2083 	if (found) {
2084 		iput(inode);
2085 		return found;
2086 	}
2087 	if (d_in_lookup(dentry)) {
2088 		found = d_alloc_parallel(dentry->d_parent, name,
2089 					dentry->d_wait);
2090 		if (IS_ERR(found) || !d_in_lookup(found)) {
2091 			iput(inode);
2092 			return found;
2093 		}
2094 	} else {
2095 		found = d_alloc(dentry->d_parent, name);
2096 		if (!found) {
2097 			iput(inode);
2098 			return ERR_PTR(-ENOMEM);
2099 		}
2100 	}
2101 	res = d_splice_alias(inode, found);
2102 	if (res) {
2103 		dput(found);
2104 		return res;
2105 	}
2106 	return found;
2107 }
2108 EXPORT_SYMBOL(d_add_ci);
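
/*
 * A sketch of the expected use from a case-insensitive ->lookup(), once
 * the on-disk (case-exact) name and its inode have been found;
 * exact_name/exact_len come from the filesystem and are illustrative:
 *
 *	struct qstr ci_name = QSTR_INIT(exact_name, exact_len);
 *
 *	return d_add_ci(dentry, inode, &ci_name);
 *
 * d_add_ci() consumes the inode reference on all paths.
 */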
2109 
2110 
2111 static inline bool d_same_name(const struct dentry *dentry,
2112 				const struct dentry *parent,
2113 				const struct qstr *name)
2114 {
2115 	if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
2116 		if (dentry->d_name.len != name->len)
2117 			return false;
2118 		return dentry_cmp(dentry, name->name, name->len) == 0;
2119 	}
2120 	return parent->d_op->d_compare(dentry,
2121 				       dentry->d_name.len, dentry->d_name.name,
2122 				       name) == 0;
2123 }
2124 
2125 /**
2126  * __d_lookup_rcu - search for a dentry (racy, store-free)
2127  * @parent: parent dentry
2128  * @name: qstr of name we wish to find
2129  * @seqp: returns d_seq value at the point where the dentry was found
2130  * Returns: dentry, or NULL
2131  *
2132  * __d_lookup_rcu is the dcache lookup function for rcu-walk name
2133  * resolution (store-free path walking) design described in
2134  * Documentation/filesystems/path-lookup.txt.
2135  *
2136  * This is not to be used outside core vfs.
2137  *
2138  * __d_lookup_rcu must only be used in rcu-walk mode, i.e. with vfsmount lock
2139  * held, and rcu_read_lock held. The returned dentry must not be stored
2140  * anywhere without taking d_lock and checking the d_seq sequence count
2141  * against @seq returned here.
2142  *
2143  * A refcount may be taken on the found dentry with the d_rcu_to_refcount
2144  * function.
2145  *
2146  * Alternatively, __d_lookup_rcu may be called again to look up the child of
2147  * the returned dentry, so long as its parent's seqlock is checked after the
2148  * child is looked up. Thus, an interlocking stepping of sequence lock checks
2149  * is formed, giving integrity down the path walk.
2150  *
2151  * NOTE! The caller *has* to check the resulting dentry against the sequence
2152  * number we've returned before using any of the resulting dentry state!
2153  */
2154 struct dentry *__d_lookup_rcu(const struct dentry *parent,
2155 				const struct qstr *name,
2156 				unsigned *seqp)
2157 {
2158 	u64 hashlen = name->hash_len;
2159 	const unsigned char *str = name->name;
2160 	struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen));
2161 	struct hlist_bl_node *node;
2162 	struct dentry *dentry;
2163 
2164 	/*
2165 	 * Note: There is significant duplication with __d_lookup which is
2166 	 * required to prevent single threaded performance regressions
2167 	 * especially on architectures where smp_rmb (in seqcounts) are costly.
2168 	 * Keep the two functions in sync.
2169 	 */
2170 
2171 	/*
2172 	 * The hash list is protected using RCU.
2173 	 *
2174 	 * Carefully use d_seq when comparing a candidate dentry, to avoid
2175 	 * races with d_move().
2176 	 *
2177 	 * It is possible that concurrent renames can mess up our list
2178 	 * walk here and result in missing our dentry, resulting in the
2179 	 * false-negative result. d_lookup() protects against concurrent
2180 	 * renames using rename_lock seqlock.
2181 	 *
2182 	 * See Documentation/filesystems/path-lookup.txt for more details.
2183 	 */
2184 	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2185 		unsigned seq;
2186 
2187 seqretry:
2188 		/*
2189 		 * The dentry sequence count protects us from concurrent
2190 		 * renames, and thus protects parent and name fields.
2191 		 *
2192 		 * The caller must perform a seqcount check in order
2193 		 * to do anything useful with the returned dentry.
2194 		 *
2195 		 * NOTE! We do a "raw" seqcount_begin here. That means that
2196 		 * we don't wait for the sequence count to stabilize if it
2197 		 * is in the middle of a sequence change. If we do the slow
2198 		 * dentry compare, we will do seqretries until it is stable,
2199 		 * and if we end up with a successful lookup, we actually
2200 		 * want to exit RCU lookup anyway.
2201 		 *
2202 		 * Note that raw_seqcount_begin still *does* smp_rmb(), so
2203 		 * we are still guaranteed NUL-termination of ->d_name.name.
2204 		 */
2205 		seq = raw_seqcount_begin(&dentry->d_seq);
2206 		if (dentry->d_parent != parent)
2207 			continue;
2208 		if (d_unhashed(dentry))
2209 			continue;
2210 
2211 		if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
2212 			int tlen;
2213 			const char *tname;
2214 			if (dentry->d_name.hash != hashlen_hash(hashlen))
2215 				continue;
2216 			tlen = dentry->d_name.len;
2217 			tname = dentry->d_name.name;
2218 			/* we want a consistent (name,len) pair */
2219 			if (read_seqcount_retry(&dentry->d_seq, seq)) {
2220 				cpu_relax();
2221 				goto seqretry;
2222 			}
2223 			if (parent->d_op->d_compare(dentry,
2224 						    tlen, tname, name) != 0)
2225 				continue;
2226 		} else {
2227 			if (dentry->d_name.hash_len != hashlen)
2228 				continue;
2229 			if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
2230 				continue;
2231 		}
2232 		*seqp = seq;
2233 		return dentry;
2234 	}
2235 	return NULL;
2236 }
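
/*
 * A sketch of the validation pattern a core-VFS caller must follow
 * (simplified from the rcu-walk code; not for use outside the VFS):
 *
 *	unsigned seq;
 *	struct dentry *child;
 *
 *	child = __d_lookup_rcu(parent, &name, &seq);
 *	if (child) {
 *		struct inode *inode = child->d_inode;
 *		if (read_seqcount_retry(&child->d_seq, seq))
 *			goto unlazy;	-- raced with d_move(); fall back
 *		-- child and inode are consistent as of @seq
 *	}
 */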
2237 
2238 /**
2239  * d_lookup - search for a dentry
2240  * @parent: parent dentry
2241  * @name: qstr of name we wish to find
2242  * Returns: dentry, or NULL
2243  *
2244  * d_lookup searches the children of the parent dentry for the name in
2245  * question. If the dentry is found its reference count is incremented and the
2246  * dentry is returned. The caller must use dput to free the entry when it has
2247  * finished using it. %NULL is returned if the dentry does not exist.
2248  */
2249 struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
2250 {
2251 	struct dentry *dentry;
2252 	unsigned seq;
2253 
2254 	do {
2255 		seq = read_seqbegin(&rename_lock);
2256 		dentry = __d_lookup(parent, name);
2257 		if (dentry)
2258 			break;
2259 	} while (read_seqretry(&rename_lock, seq));
2260 	return dentry;
2261 }
2262 EXPORT_SYMBOL(d_lookup);
2263 
2264 /**
2265  * __d_lookup - search for a dentry (racy)
2266  * @parent: parent dentry
2267  * @name: qstr of name we wish to find
2268  * Returns: dentry, or NULL
2269  *
2270  * __d_lookup is like d_lookup, however it may (rarely) return a
2271  * false-negative result due to unrelated rename activity.
2272  *
2273  * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
2274  * however it must be used carefully, eg. with a following d_lookup in
2275  * the case of failure.
2276  *
2277  * __d_lookup callers must be commented.
2278  */
2279 struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
2280 {
2281 	unsigned int hash = name->hash;
2282 	struct hlist_bl_head *b = d_hash(hash);
2283 	struct hlist_bl_node *node;
2284 	struct dentry *found = NULL;
2285 	struct dentry *dentry;
2286 
2287 	/*
2288 	 * Note: There is significant duplication with __d_lookup_rcu which is
2289 	 * required to prevent single threaded performance regressions
2290 	 * especially on architectures where smp_rmb (in seqcounts) are costly.
2291 	 * Keep the two functions in sync.
2292 	 */
2293 
2294 	/*
2295 	 * The hash list is protected using RCU.
2296 	 *
2297 	 * Take d_lock when comparing a candidate dentry, to avoid races
2298 	 * with d_move().
2299 	 *
2300 	 * It is possible that concurrent renames can mess up our list
2301 	 * walk here and result in missing our dentry, resulting in the
2302 	 * false-negative result. d_lookup() protects against concurrent
2303 	 * renames using rename_lock seqlock.
2304 	 *
2305 	 * See Documentation/filesystems/path-lookup.txt for more details.
2306 	 */
2307 	rcu_read_lock();
2308 
2309 	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2310 
2311 		if (dentry->d_name.hash != hash)
2312 			continue;
2313 
2314 		spin_lock(&dentry->d_lock);
2315 		if (dentry->d_parent != parent)
2316 			goto next;
2317 		if (d_unhashed(dentry))
2318 			goto next;
2319 
2320 		if (!d_same_name(dentry, parent, name))
2321 			goto next;
2322 
2323 		dentry->d_lockref.count++;
2324 		found = dentry;
2325 		spin_unlock(&dentry->d_lock);
2326 		break;
2327 next:
2328 		spin_unlock(&dentry->d_lock);
2329 	}
2330 	rcu_read_unlock();
2331 
2332 	return found;
2333 }
2334 
2335 /**
2336  * d_hash_and_lookup - hash the qstr then search for a dentry
2337  * @dir: Directory to search in
2338  * @name: qstr of name we wish to find
2339  *
2340  * On lookup failure NULL is returned; on a bad name, ERR_PTR(-error).
2341  */
2342 struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
2343 {
2344 	/*
2345 	 * Check for a fs-specific hash function. Note that we must
2346 	 * calculate the standard hash first, as the d_op->d_hash()
2347 	 * routine may choose to leave the hash value unchanged.
2348 	 */
2349 	name->hash = full_name_hash(dir, name->name, name->len);
2350 	if (dir->d_flags & DCACHE_OP_HASH) {
2351 		int err = dir->d_op->d_hash(dir, name);
2352 		if (unlikely(err < 0))
2353 			return ERR_PTR(err);
2354 	}
2355 	return d_lookup(dir, name);
2356 }
2357 EXPORT_SYMBOL(d_hash_and_lookup);
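
/*
 * A short usage sketch (procfs does something similar when filling its
 * directory caches); the name here is illustrative:
 *
 *	struct qstr this = QSTR_INIT("foo", 3);
 *	struct dentry *child = d_hash_and_lookup(parent, &this);
 *
 *	if (!IS_ERR_OR_NULL(child)) {
 *		...
 *		dput(child);
 *	}
 */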
2358 
2359 /*
2360  * When a file is deleted, we have two options:
2361  * - turn this dentry into a negative dentry
2362  * - unhash this dentry and free it.
2363  *
2364  * Usually, we want to just turn this into
2365  * a negative dentry, but if anybody else is
2366  * currently using the dentry or the inode
2367  * we can't do that and we fall back on removing
2368  * it from the hash queues and waiting for
2369  * it to be deleted later when it has no users
2370  */
2371 
2372 /**
2373  * d_delete - delete a dentry
2374  * @dentry: The dentry to delete
2375  *
2376  * Turn the dentry into a negative dentry if possible, otherwise
2377  * remove it from the hash queues so it can be deleted later
2378  */
2379 
2380 void d_delete(struct dentry * dentry)
2381 {
2382 	struct inode *inode;
2383 	int isdir = 0;
2384 	/*
2385 	 * Are we the only user?
2386 	 */
2387 again:
2388 	spin_lock(&dentry->d_lock);
2389 	inode = dentry->d_inode;
2390 	isdir = S_ISDIR(inode->i_mode);
2391 	if (dentry->d_lockref.count == 1) {
2392 		if (!spin_trylock(&inode->i_lock)) {
2393 			spin_unlock(&dentry->d_lock);
2394 			cpu_relax();
2395 			goto again;
2396 		}
2397 		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
2398 		dentry_unlink_inode(dentry);
2399 		fsnotify_nameremove(dentry, isdir);
2400 		return;
2401 	}
2402 
2403 	if (!d_unhashed(dentry))
2404 		__d_drop(dentry);
2405 
2406 	spin_unlock(&dentry->d_lock);
2407 
2408 	fsnotify_nameremove(dentry, isdir);
2409 }
2410 EXPORT_SYMBOL(d_delete);
2411 
2412 static void __d_rehash(struct dentry *entry)
2413 {
2414 	struct hlist_bl_head *b = d_hash(entry->d_name.hash);
2415 
2416 	hlist_bl_lock(b);
2417 	hlist_bl_add_head_rcu(&entry->d_hash, b);
2418 	hlist_bl_unlock(b);
2419 }
2420 
2421 /**
2422  * d_rehash	- add an entry back to the hash
2423  * @entry: dentry to add to the hash
2424  *
2425  * Adds a dentry to the hash according to its name.
2426  */
2427 
2428 void d_rehash(struct dentry * entry)
2429 {
2430 	spin_lock(&entry->d_lock);
2431 	__d_rehash(entry);
2432 	spin_unlock(&entry->d_lock);
2433 }
2434 EXPORT_SYMBOL(d_rehash);
2435 
2436 static inline unsigned start_dir_add(struct inode *dir)
2437 {
2439 	for (;;) {
2440 		unsigned n = dir->i_dir_seq;
2441 		if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
2442 			return n;
2443 		cpu_relax();
2444 	}
2445 }
2446 
2447 static inline void end_dir_add(struct inode *dir, unsigned n)
2448 {
2449 	smp_store_release(&dir->i_dir_seq, n + 2);
2450 }
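
/*
 * ->i_dir_seq is a sequence count for directory insertions: start_dir_add()
 * makes it odd for the duration of an insertion and end_dir_add() releases
 * it back to the next even value.  Readers sample it with acquire semantics
 * and treat an odd value as "insertion in flight", as d_alloc_parallel()
 * below does:
 *
 *	seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
 *	...
 *	if (seq & 1)
 *		goto retry;
 */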
2451 
2452 static void d_wait_lookup(struct dentry *dentry)
2453 {
2454 	if (d_in_lookup(dentry)) {
2455 		DECLARE_WAITQUEUE(wait, current);
2456 		add_wait_queue(dentry->d_wait, &wait);
2457 		do {
2458 			set_current_state(TASK_UNINTERRUPTIBLE);
2459 			spin_unlock(&dentry->d_lock);
2460 			schedule();
2461 			spin_lock(&dentry->d_lock);
2462 		} while (d_in_lookup(dentry));
2463 	}
2464 }
2465 
2466 struct dentry *d_alloc_parallel(struct dentry *parent,
2467 				const struct qstr *name,
2468 				wait_queue_head_t *wq)
2469 {
2470 	unsigned int hash = name->hash;
2471 	struct hlist_bl_head *b = in_lookup_hash(parent, hash);
2472 	struct hlist_bl_node *node;
2473 	struct dentry *new = d_alloc(parent, name);
2474 	struct dentry *dentry;
2475 	unsigned seq, r_seq, d_seq;
2476 
2477 	if (unlikely(!new))
2478 		return ERR_PTR(-ENOMEM);
2479 
2480 retry:
2481 	rcu_read_lock();
2482 	seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
2483 	r_seq = read_seqbegin(&rename_lock);
2484 	dentry = __d_lookup_rcu(parent, name, &d_seq);
2485 	if (unlikely(dentry)) {
2486 		if (!lockref_get_not_dead(&dentry->d_lockref)) {
2487 			rcu_read_unlock();
2488 			goto retry;
2489 		}
2490 		if (read_seqcount_retry(&dentry->d_seq, d_seq)) {
2491 			rcu_read_unlock();
2492 			dput(dentry);
2493 			goto retry;
2494 		}
2495 		rcu_read_unlock();
2496 		dput(new);
2497 		return dentry;
2498 	}
2499 	if (unlikely(read_seqretry(&rename_lock, r_seq))) {
2500 		rcu_read_unlock();
2501 		goto retry;
2502 	}
2503 
2504 	if (unlikely(seq & 1)) {
2505 		rcu_read_unlock();
2506 		goto retry;
2507 	}
2508 
2509 	hlist_bl_lock(b);
2510 	if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) {
2511 		hlist_bl_unlock(b);
2512 		rcu_read_unlock();
2513 		goto retry;
2514 	}
2515 	/*
2516 	 * No changes for the parent since the beginning of d_lookup().
2517 	 * Since all removals from the chain happen with hlist_bl_lock(),
2518 	 * any potential in-lookup matches are going to stay here until
2519 	 * we unlock the chain.  All fields are stable in everything
2520 	 * we encounter.
2521 	 */
2522 	hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) {
2523 		if (dentry->d_name.hash != hash)
2524 			continue;
2525 		if (dentry->d_parent != parent)
2526 			continue;
2527 		if (!d_same_name(dentry, parent, name))
2528 			continue;
2529 		hlist_bl_unlock(b);
2530 		/* now we can try to grab a reference */
2531 		if (!lockref_get_not_dead(&dentry->d_lockref)) {
2532 			rcu_read_unlock();
2533 			goto retry;
2534 		}
2535 
2536 		rcu_read_unlock();
2537 		/*
2538 		 * somebody is likely still doing a lookup for it;
2539 		 * wait for them to finish
2540 		 */
2541 		spin_lock(&dentry->d_lock);
2542 		d_wait_lookup(dentry);
2543 		/*
2544 		 * it's not in-lookup anymore; in principle we should repeat
2545 		 * everything from dcache lookup, but it's likely to be what
2546 		 * d_lookup() would've found anyway.  If it is, just return it;
2547 		 * otherwise we really have to repeat the whole thing.
2548 		 */
2549 		if (unlikely(dentry->d_name.hash != hash))
2550 			goto mismatch;
2551 		if (unlikely(dentry->d_parent != parent))
2552 			goto mismatch;
2553 		if (unlikely(d_unhashed(dentry)))
2554 			goto mismatch;
2555 		if (unlikely(!d_same_name(dentry, parent, name)))
2556 			goto mismatch;
2557 		/* OK, it *is* a hashed match; return it */
2558 		spin_unlock(&dentry->d_lock);
2559 		dput(new);
2560 		return dentry;
2561 	}
2562 	rcu_read_unlock();
2563 	/* we can't take ->d_lock here; it's OK, though. */
2564 	new->d_flags |= DCACHE_PAR_LOOKUP;
2565 	new->d_wait = wq;
2566 	hlist_bl_add_head_rcu(&new->d_u.d_in_lookup_hash, b);
2567 	hlist_bl_unlock(b);
2568 	return new;
2569 mismatch:
2570 	spin_unlock(&dentry->d_lock);
2571 	dput(dentry);
2572 	goto retry;
2573 }
2574 EXPORT_SYMBOL(d_alloc_parallel);
2575 
2576 void __d_lookup_done(struct dentry *dentry)
2577 {
2578 	struct hlist_bl_head *b = in_lookup_hash(dentry->d_parent,
2579 						 dentry->d_name.hash);
2580 	hlist_bl_lock(b);
2581 	dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
2582 	__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
2583 	wake_up_all(dentry->d_wait);
2584 	dentry->d_wait = NULL;
2585 	hlist_bl_unlock(b);
2586 	INIT_HLIST_NODE(&dentry->d_u.d_alias);
2587 	INIT_LIST_HEAD(&dentry->d_lru);
2588 }
2589 EXPORT_SYMBOL(__d_lookup_done);
2590 
2591 /* inode->i_lock held if inode is non-NULL */
2592 
2593 static inline void __d_add(struct dentry *dentry, struct inode *inode)
2594 {
2595 	struct inode *dir = NULL;
2596 	unsigned n;
2597 	spin_lock(&dentry->d_lock);
2598 	if (unlikely(d_in_lookup(dentry))) {
2599 		dir = dentry->d_parent->d_inode;
2600 		n = start_dir_add(dir);
2601 		__d_lookup_done(dentry);
2602 	}
2603 	if (inode) {
2604 		unsigned add_flags = d_flags_for_inode(inode);
2605 		hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
2606 		raw_write_seqcount_begin(&dentry->d_seq);
2607 		__d_set_inode_and_type(dentry, inode, add_flags);
2608 		raw_write_seqcount_end(&dentry->d_seq);
2609 		fsnotify_update_flags(dentry);
2610 	}
2611 	__d_rehash(dentry);
2612 	if (dir)
2613 		end_dir_add(dir, n);
2614 	spin_unlock(&dentry->d_lock);
2615 	if (inode)
2616 		spin_unlock(&inode->i_lock);
2617 }
2618 
2619 /**
2620  * d_add - add dentry to hash queues
2621  * @entry: dentry to add
2622  * @inode: The inode to attach to this dentry
2623  *
2624  * This adds the entry to the hash queues and initializes @inode.
2625  * The entry was actually filled in earlier during d_alloc().
2626  */
2627 
2628 void d_add(struct dentry *entry, struct inode *inode)
2629 {
2630 	if (inode) {
2631 		security_d_instantiate(entry, inode);
2632 		spin_lock(&inode->i_lock);
2633 	}
2634 	__d_add(entry, inode);
2635 }
2636 EXPORT_SYMBOL(d_add);
2637 
2638 /**
2639  * d_exact_alias - find and hash an exact unhashed alias
2640  * @entry: dentry to add
2641  * @inode: The inode to go with this dentry
2642  *
2643  * If an unhashed dentry with the same name/parent and desired
2644  * inode already exists, hash and return it.  Otherwise, return
2645  * NULL.
2646  *
2647  * Parent directory should be locked.
2648  */
2649 struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
2650 {
2651 	struct dentry *alias;
2652 	unsigned int hash = entry->d_name.hash;
2653 
2654 	spin_lock(&inode->i_lock);
2655 	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
2656 		/*
2657 		 * Don't need alias->d_lock here, because aliases with
2658 		 * d_parent == entry->d_parent are not subject to name or
2659 		 * parent changes, since the parent inode i_mutex is held.
2660 		 */
2661 		if (alias->d_name.hash != hash)
2662 			continue;
2663 		if (alias->d_parent != entry->d_parent)
2664 			continue;
2665 		if (!d_same_name(alias, entry->d_parent, &entry->d_name))
2666 			continue;
2667 		spin_lock(&alias->d_lock);
2668 		if (!d_unhashed(alias)) {
2669 			spin_unlock(&alias->d_lock);
2670 			alias = NULL;
2671 		} else {
2672 			__dget_dlock(alias);
2673 			__d_rehash(alias);
2674 			spin_unlock(&alias->d_lock);
2675 		}
2676 		spin_unlock(&inode->i_lock);
2677 		return alias;
2678 	}
2679 	spin_unlock(&inode->i_lock);
2680 	return NULL;
2681 }
2682 EXPORT_SYMBOL(d_exact_alias);
2683 
2684 /**
2685  * dentry_update_name_case - update case insensitive dentry with a new name
2686  * @dentry: dentry to be updated
2687  * @name: new name
2688  *
2689  * Update a case insensitive dentry with new case of name.
2690  *
2691  * dentry must have been returned by d_lookup with name @name. Old and new
2692  * name lengths must match (ie. no d_compare which allows mismatched name
2693  * lengths).
2694  *
2695  * Parent inode i_mutex must be held over d_lookup and into this call (to
2696  * keep renames and concurrent inserts, and readdir(2) away).
2697  */
2698 void dentry_update_name_case(struct dentry *dentry, const struct qstr *name)
2699 {
2700 	BUG_ON(!inode_is_locked(dentry->d_parent->d_inode));
2701 	BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
2702 
2703 	spin_lock(&dentry->d_lock);
2704 	write_seqcount_begin(&dentry->d_seq);
2705 	memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
2706 	write_seqcount_end(&dentry->d_seq);
2707 	spin_unlock(&dentry->d_lock);
2708 }
2709 EXPORT_SYMBOL(dentry_update_name_case);
2710 
2711 static void swap_names(struct dentry *dentry, struct dentry *target)
2712 {
2713 	if (unlikely(dname_external(target))) {
2714 		if (unlikely(dname_external(dentry))) {
2715 			/*
2716 			 * Both external: swap the pointers
2717 			 */
2718 			swap(target->d_name.name, dentry->d_name.name);
2719 		} else {
2720 			/*
2721 			 * dentry:internal, target:external.  Steal target's
2722 			 * storage and make target internal.
2723 			 */
2724 			memcpy(target->d_iname, dentry->d_name.name,
2725 					dentry->d_name.len + 1);
2726 			dentry->d_name.name = target->d_name.name;
2727 			target->d_name.name = target->d_iname;
2728 		}
2729 	} else {
2730 		if (unlikely(dname_external(dentry))) {
2731 			/*
2732 			 * dentry:external, target:internal.  Give dentry's
2733 			 * storage to target and make dentry internal
2734 			 */
2735 			memcpy(dentry->d_iname, target->d_name.name,
2736 					target->d_name.len + 1);
2737 			target->d_name.name = dentry->d_name.name;
2738 			dentry->d_name.name = dentry->d_iname;
2739 		} else {
2740 			/*
2741 			 * Both are internal.
2742 			 */
2743 			unsigned int i;
2744 			BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
2745 			for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
2746 				swap(((long *) &dentry->d_iname)[i],
2747 				     ((long *) &target->d_iname)[i]);
2748 			}
2749 		}
2750 	}
2751 	swap(dentry->d_name.hash_len, target->d_name.hash_len);
2752 }
2753 
2754 static void copy_name(struct dentry *dentry, struct dentry *target)
2755 {
2756 	struct external_name *old_name = NULL;
2757 	if (unlikely(dname_external(dentry)))
2758 		old_name = external_name(dentry);
2759 	if (unlikely(dname_external(target))) {
2760 		atomic_inc(&external_name(target)->u.count);
2761 		dentry->d_name = target->d_name;
2762 	} else {
2763 		memcpy(dentry->d_iname, target->d_name.name,
2764 				target->d_name.len + 1);
2765 		dentry->d_name.name = dentry->d_iname;
2766 		dentry->d_name.hash_len = target->d_name.hash_len;
2767 	}
2768 	if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
2769 		kfree_rcu(old_name, u.head);
2770 }
2771 
2772 static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
2773 {
2774 	/*
2775 	 * XXXX: do we really need to take target->d_lock?
2776 	 */
2777 	if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
2778 		spin_lock(&target->d_parent->d_lock);
2779 	else {
2780 		if (d_ancestor(dentry->d_parent, target->d_parent)) {
2781 			spin_lock(&dentry->d_parent->d_lock);
2782 			spin_lock_nested(&target->d_parent->d_lock,
2783 						DENTRY_D_LOCK_NESTED);
2784 		} else {
2785 			spin_lock(&target->d_parent->d_lock);
2786 			spin_lock_nested(&dentry->d_parent->d_lock,
2787 						DENTRY_D_LOCK_NESTED);
2788 		}
2789 	}
2790 	if (target < dentry) {
2791 		spin_lock_nested(&target->d_lock, 2);
2792 		spin_lock_nested(&dentry->d_lock, 3);
2793 	} else {
2794 		spin_lock_nested(&dentry->d_lock, 2);
2795 		spin_lock_nested(&target->d_lock, 3);
2796 	}
2797 }
2798 
2799 static void dentry_unlock_for_move(struct dentry *dentry, struct dentry *target)
2800 {
2801 	if (target->d_parent != dentry->d_parent)
2802 		spin_unlock(&dentry->d_parent->d_lock);
2803 	if (target->d_parent != target)
2804 		spin_unlock(&target->d_parent->d_lock);
2805 	spin_unlock(&target->d_lock);
2806 	spin_unlock(&dentry->d_lock);
2807 }
2808 
2809 /*
2810  * When switching names, the actual string doesn't strictly have to
2811  * be preserved in the target - because we're dropping the target
2812  * anyway. As such, we can just do a simple memcpy() to copy over
2813  * the new name before we switch, unless we are going to rehash
2814  * it.  Note that if we *do* unhash the target, we are not allowed
2815  * to rehash it without giving it a new name/hash key - whether
2816  * we swap or overwrite the names here, the resulting name won't match
2817  * the reality in the filesystem; it's only there for d_path() purposes.
2818  * Note that all of this is happening under rename_lock, so any
2819  * hash lookup seeing it in the middle of manipulations will
2820  * be discarded anyway.  So we do not care what happens to the hash
2821  * key in that case.
2822  */
2823 /*
2824  * __d_move - move a dentry
2825  * @dentry: entry to move
2826  * @target: new dentry
2827  * @exchange: exchange the two dentries
2828  *
2829  * Update the dcache to reflect the move of a file name. Negative
2830  * dcache entries should not be moved in this way. Caller must hold
2831  * rename_lock, the i_mutex of the source and target directories,
2832  * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
2833  */
2834 static void __d_move(struct dentry *dentry, struct dentry *target,
2835 		     bool exchange)
2836 {
2837 	struct inode *dir = NULL;
2838 	unsigned n;
2839 	if (!dentry->d_inode)
2840 		printk(KERN_WARNING "VFS: moving negative dcache entry\n");
2841 
2842 	BUG_ON(d_ancestor(dentry, target));
2843 	BUG_ON(d_ancestor(target, dentry));
2844 
2845 	dentry_lock_for_move(dentry, target);
2846 	if (unlikely(d_in_lookup(target))) {
2847 		dir = target->d_parent->d_inode;
2848 		n = start_dir_add(dir);
2849 		__d_lookup_done(target);
2850 	}
2851 
2852 	write_seqcount_begin(&dentry->d_seq);
2853 	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
2854 
2855 	/* unhash both */
2856 	/* ___d_drop does write_seqcount_barrier, but they're OK to nest. */
2857 	___d_drop(dentry);
2858 	___d_drop(target);
2859 
2860 	/* Switch the names.. */
2861 	if (exchange)
2862 		swap_names(dentry, target);
2863 	else
2864 		copy_name(dentry, target);
2865 
2866 	/* rehash in new place(s) */
2867 	__d_rehash(dentry);
2868 	if (exchange)
2869 		__d_rehash(target);
2870 	else
2871 		target->d_hash.pprev = NULL;
2872 
2873 	/* ... and switch them in the tree */
2874 	if (IS_ROOT(dentry)) {
2875 		/* splicing a tree */
2876 		dentry->d_flags |= DCACHE_RCUACCESS;
2877 		dentry->d_parent = target->d_parent;
2878 		target->d_parent = target;
2879 		list_del_init(&target->d_child);
2880 		list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
2881 	} else {
2882 		/* swapping two dentries */
2883 		swap(dentry->d_parent, target->d_parent);
2884 		list_move(&target->d_child, &target->d_parent->d_subdirs);
2885 		list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
2886 		if (exchange)
2887 			fsnotify_update_flags(target);
2888 		fsnotify_update_flags(dentry);
2889 	}
2890 
2891 	write_seqcount_end(&target->d_seq);
2892 	write_seqcount_end(&dentry->d_seq);
2893 
2894 	if (dir)
2895 		end_dir_add(dir, n);
2896 	dentry_unlock_for_move(dentry, target);
2897 }
2898 
2899 /*
2900  * d_move - move a dentry
2901  * @dentry: entry to move
2902  * @target: new dentry
2903  *
2904  * Update the dcache to reflect the move of a file name. Negative
2905  * dcache entries should not be moved in this way. See the locking
2906  * requirements for __d_move.
2907  */
2908 void d_move(struct dentry *dentry, struct dentry *target)
2909 {
2910 	write_seqlock(&rename_lock);
2911 	__d_move(dentry, target, false);
2912 	write_sequnlock(&rename_lock);
2913 }
2914 EXPORT_SYMBOL(d_move);
2915 
2916 /*
2917  * d_exchange - exchange two dentries
2918  * @dentry1: first dentry
2919  * @dentry2: second dentry
2920  */
2921 void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
2922 {
2923 	write_seqlock(&rename_lock);
2924 
2925 	WARN_ON(!dentry1->d_inode);
2926 	WARN_ON(!dentry2->d_inode);
2927 	WARN_ON(IS_ROOT(dentry1));
2928 	WARN_ON(IS_ROOT(dentry2));
2929 
2930 	__d_move(dentry1, dentry2, true);
2931 
2932 	write_sequnlock(&rename_lock);
2933 }
2934 
2935 /**
2936  * d_ancestor - search for an ancestor
2937  * @p1: ancestor dentry
2938  * @p2: child dentry
2939  *
2940  * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
2941  * an ancestor of p2, else NULL.
2942  */
2943 struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2944 {
2945 	struct dentry *p;
2946 
2947 	for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2948 		if (p->d_parent == p1)
2949 			return p;
2950 	}
2951 	return NULL;
2952 }
2953 
2954 /*
2955  * This helper attempts to cope with remotely renamed directories
2956  *
2957  * It assumes that the caller is already holding
2958  * dentry->d_parent->d_inode->i_mutex, and rename_lock
2959  *
2960  * Note: If ever the locking in lock_rename() changes, then please
2961  * remember to update this too...
2962  */
2963 static int __d_unalias(struct inode *inode,
2964 		struct dentry *dentry, struct dentry *alias)
2965 {
2966 	struct mutex *m1 = NULL;
2967 	struct rw_semaphore *m2 = NULL;
2968 	int ret = -ESTALE;
2969 
2970 	/* If alias and dentry share a parent, then no extra locks required */
2971 	if (alias->d_parent == dentry->d_parent)
2972 		goto out_unalias;
2973 
2974 	/* See lock_rename() */
2975 	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
2976 		goto out_err;
2977 	m1 = &dentry->d_sb->s_vfs_rename_mutex;
2978 	if (!inode_trylock_shared(alias->d_parent->d_inode))
2979 		goto out_err;
2980 	m2 = &alias->d_parent->d_inode->i_rwsem;
2981 out_unalias:
2982 	__d_move(alias, dentry, false);
2983 	ret = 0;
2984 out_err:
2985 	if (m2)
2986 		up_read(m2);
2987 	if (m1)
2988 		mutex_unlock(m1);
2989 	return ret;
2990 }
2991 
2992 /**
2993  * d_splice_alias - splice a disconnected dentry into the tree if one exists
2994  * @inode:  the inode which may have a disconnected dentry
2995  * @dentry: a negative dentry which we want to point to the inode.
2996  *
2997  * If inode is a directory and has an IS_ROOT alias, then d_move that in
2998  * place of the given dentry and return it, else simply d_add the inode
2999  * to the dentry and return NULL.
3000  *
3001  * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
3002  * we should error out: directories can't have multiple aliases.
3003  *
3004  * This is needed in the lookup routine of any filesystem that is exportable
3005  * (via knfsd) so that we can build dcache paths to directories effectively.
3006  *
3007  * If a dentry was found and moved, then it is returned.  Otherwise NULL
3008  * is returned.  This matches the expected return value of ->lookup.
3009  *
3010  * Cluster filesystems may call this function with a negative, hashed dentry.
3011  * In that case, we know that the inode will be a regular file, and also this
3012  * will only occur during atomic_open. So we need to check for the dentry
3013  * being already hashed only in the final case.
3014  */
3015 struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
3016 {
3017 	if (IS_ERR(inode))
3018 		return ERR_CAST(inode);
3019 
3020 	BUG_ON(!d_unhashed(dentry));
3021 
3022 	if (!inode)
3023 		goto out;
3024 
3025 	security_d_instantiate(dentry, inode);
3026 	spin_lock(&inode->i_lock);
3027 	if (S_ISDIR(inode->i_mode)) {
3028 		struct dentry *new = __d_find_any_alias(inode);
3029 		if (unlikely(new)) {
3030 			/* The reference to new ensures it remains an alias */
3031 			spin_unlock(&inode->i_lock);
3032 			write_seqlock(&rename_lock);
3033 			if (unlikely(d_ancestor(new, dentry))) {
3034 				write_sequnlock(&rename_lock);
3035 				dput(new);
3036 				new = ERR_PTR(-ELOOP);
3037 				pr_warn_ratelimited(
3038 					"VFS: Lookup of '%s' in %s %s"
3039 					" would have caused loop\n",
3040 					dentry->d_name.name,
3041 					inode->i_sb->s_type->name,
3042 					inode->i_sb->s_id);
3043 			} else if (!IS_ROOT(new)) {
3044 				int err = __d_unalias(inode, dentry, new);
3045 				write_sequnlock(&rename_lock);
3046 				if (err) {
3047 					dput(new);
3048 					new = ERR_PTR(err);
3049 				}
3050 			} else {
3051 				__d_move(new, dentry, false);
3052 				write_sequnlock(&rename_lock);
3053 			}
3054 			iput(inode);
3055 			return new;
3056 		}
3057 	}
3058 out:
3059 	__d_add(dentry, inode);
3060 	return NULL;
3061 }
3062 EXPORT_SYMBOL(d_splice_alias);
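
/*
 * A minimal sketch of the canonical ->lookup() caller; myfs_inode_by_name()
 * and myfs_iget() are hypothetical helpers.  A NULL inode simply makes the
 * dentry negative, and ERR_PTR inodes are propagated, so no special-casing
 * is needed:
 *
 *	static struct dentry *myfs_lookup(struct inode *dir,
 *			struct dentry *dentry, unsigned int flags)
 *	{
 *		struct inode *inode = NULL;
 *		ino_t ino = myfs_inode_by_name(dir, &dentry->d_name);
 *
 *		if (ino)
 *			inode = myfs_iget(dir->i_sb, ino);
 *		return d_splice_alias(inode, dentry);
 *	}
 */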
3063 
3064 static int prepend(char **buffer, int *buflen, const char *str, int namelen)
3065 {
3066 	*buflen -= namelen;
3067 	if (*buflen < 0)
3068 		return -ENAMETOOLONG;
3069 	*buffer -= namelen;
3070 	memcpy(*buffer, str, namelen);
3071 	return 0;
3072 }
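
/*
 * prepend() builds strings back to front.  A worked example, with the
 * buffer pointer starting one past the end of the buffer:
 *
 *	char buf[8], *p = buf + sizeof(buf);
 *	int len = sizeof(buf);
 *
 *	prepend(&p, &len, "\0", 1);	now p == buf+7, holding ""
 *	prepend(&p, &len, "etc", 3);	now p == buf+4, holding "etc"
 *	prepend(&p, &len, "/", 1);	now p == buf+3, holding "/etc"
 *
 * A further prepend of more than three bytes would return -ENAMETOOLONG.
 */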
3073 
3074 /**
3075  * prepend_name - prepend a pathname in front of current buffer pointer
3076  * @buffer: buffer pointer
3077  * @buflen: allocated length of the buffer
3078  * @name:   name string and length qstr structure
3079  *
3080  * With RCU path tracing, it may race with d_move(). Use READ_ONCE() to
3081  * make sure that either the old or the new name pointer and length are
3082  * fetched. However, there may be a mismatch between length and pointer.
3083  * The length cannot be trusted; we need to copy the name byte by byte until
3084  * the length is reached or a null byte is found. It also prepends "/" at
3085  * the beginning of the name. The sequence number check at the caller will
3086  * retry it again when a d_move() does happen. So any garbage in the buffer
3087  * due to mismatched pointer and length will be discarded.
3088  *
3089  * Load acquire is needed to make sure that we see that terminating NUL.
3090  */
3091 static int prepend_name(char **buffer, int *buflen, const struct qstr *name)
3092 {
3093 	const char *dname = smp_load_acquire(&name->name); /* ^^^ */
3094 	u32 dlen = READ_ONCE(name->len);
3095 	char *p;
3096 
3097 	*buflen -= dlen + 1;
3098 	if (*buflen < 0)
3099 		return -ENAMETOOLONG;
3100 	p = *buffer -= dlen + 1;
3101 	*p++ = '/';
3102 	while (dlen--) {
3103 		char c = *dname++;
3104 		if (!c)
3105 			break;
3106 		*p++ = c;
3107 	}
3108 	return 0;
3109 }
3110 
3111 /**
3112  * prepend_path - Prepend path string to a buffer
3113  * @path: the dentry/vfsmount to report
3114  * @root: root vfsmnt/dentry
3115  * @buffer: pointer to the end of the buffer
3116  * @buflen: pointer to buffer length
3117  *
3118  * The function will first try to write out the pathname without taking any
3119  * lock other than the RCU read lock to make sure that dentries won't go away.
3120  * It only checks the sequence number of the global rename_lock as any change
3121  * in the dentry's d_seq will be preceded by changes in the rename_lock
3122  * sequence number. If the sequence number has changed, it will restart
3123  * the whole pathname back-tracing sequence by taking the rename_lock.
3124  * In this case, there is no need to take the RCU read lock as the recursive
3125  * parent pointer references will keep the dentry chain alive as long as no
3126  * rename operation is performed.
3127  */
3128 static int prepend_path(const struct path *path,
3129 			const struct path *root,
3130 			char **buffer, int *buflen)
3131 {
3132 	struct dentry *dentry;
3133 	struct vfsmount *vfsmnt;
3134 	struct mount *mnt;
3135 	int error = 0;
3136 	unsigned seq, m_seq = 0;
3137 	char *bptr;
3138 	int blen;
3139 
3140 	rcu_read_lock();
3141 restart_mnt:
3142 	read_seqbegin_or_lock(&mount_lock, &m_seq);
3143 	seq = 0;
3144 	rcu_read_lock();
3145 restart:
3146 	bptr = *buffer;
3147 	blen = *buflen;
3148 	error = 0;
3149 	dentry = path->dentry;
3150 	vfsmnt = path->mnt;
3151 	mnt = real_mount(vfsmnt);
3152 	read_seqbegin_or_lock(&rename_lock, &seq);
3153 	while (dentry != root->dentry || vfsmnt != root->mnt) {
3154 		struct dentry * parent;
3155 
3156 		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
3157 			struct mount *parent = READ_ONCE(mnt->mnt_parent);
3158 			/* Escaped? */
3159 			if (dentry != vfsmnt->mnt_root) {
3160 				bptr = *buffer;
3161 				blen = *buflen;
3162 				error = 3;
3163 				break;
3164 			}
3165 			/* Global root? */
3166 			if (mnt != parent) {
3167 				dentry = READ_ONCE(mnt->mnt_mountpoint);
3168 				mnt = parent;
3169 				vfsmnt = &mnt->mnt;
3170 				continue;
3171 			}
3172 			if (!error)
3173 				error = is_mounted(vfsmnt) ? 1 : 2;
3174 			break;
3175 		}
3176 		parent = dentry->d_parent;
3177 		prefetch(parent);
3178 		error = prepend_name(&bptr, &blen, &dentry->d_name);
3179 		if (error)
3180 			break;
3181 
3182 		dentry = parent;
3183 	}
3184 	if (!(seq & 1))
3185 		rcu_read_unlock();
3186 	if (need_seqretry(&rename_lock, seq)) {
3187 		seq = 1;
3188 		goto restart;
3189 	}
3190 	done_seqretry(&rename_lock, seq);
3191 
3192 	if (!(m_seq & 1))
3193 		rcu_read_unlock();
3194 	if (need_seqretry(&mount_lock, m_seq)) {
3195 		m_seq = 1;
3196 		goto restart_mnt;
3197 	}
3198 	done_seqretry(&mount_lock, m_seq);
3199 
3200 	if (error >= 0 && bptr == *buffer) {
3201 		if (--blen < 0)
3202 			error = -ENAMETOOLONG;
3203 		else
3204 			*--bptr = '/';
3205 	}
3206 	*buffer = bptr;
3207 	*buflen = blen;
3208 	return error;
3209 }
3210 
3211 /**
3212  * __d_path - return the path of a dentry
3213  * @path: the dentry/vfsmount to report
3214  * @root: root vfsmnt/dentry
3215  * @buf: buffer to return value in
3216  * @buflen: buffer length
3217  *
3218  * Convert a dentry into an ASCII path name.
3219  *
3220  * Returns a pointer into the buffer or an error code if the
3221  * path was too long.
3222  *
3223  * "buflen" should be positive.
3224  *
3225  * If the path is not reachable from the supplied root, return %NULL.
3226  */
3227 char *__d_path(const struct path *path,
3228 	       const struct path *root,
3229 	       char *buf, int buflen)
3230 {
3231 	char *res = buf + buflen;
3232 	int error;
3233 
3234 	prepend(&res, &buflen, "\0", 1);
3235 	error = prepend_path(path, root, &res, &buflen);
3236 
3237 	if (error < 0)
3238 		return ERR_PTR(error);
3239 	if (error > 0)
3240 		return NULL;
3241 	return res;
3242 }
3243 
3244 char *d_absolute_path(const struct path *path,
3245 	       char *buf, int buflen)
3246 {
3247 	struct path root = {};
3248 	char *res = buf + buflen;
3249 	int error;
3250 
3251 	prepend(&res, &buflen, "\0", 1);
3252 	error = prepend_path(path, &root, &res, &buflen);
3253 
3254 	if (error > 1)
3255 		error = -EINVAL;
3256 	if (error < 0)
3257 		return ERR_PTR(error);
3258 	return res;
3259 }
3260 
3261 /*
3262  * same as __d_path but appends "(deleted)" for unlinked files.
3263  */
3264 static int path_with_deleted(const struct path *path,
3265 			     const struct path *root,
3266 			     char **buf, int *buflen)
3267 {
3268 	prepend(buf, buflen, "\0", 1);
3269 	if (d_unlinked(path->dentry)) {
3270 		int error = prepend(buf, buflen, " (deleted)", 10);
3271 		if (error)
3272 			return error;
3273 	}
3274 
3275 	return prepend_path(path, root, buf, buflen);
3276 }
3277 
3278 static int prepend_unreachable(char **buffer, int *buflen)
3279 {
3280 	return prepend(buffer, buflen, "(unreachable)", 13);
3281 }
3282 
3283 static void get_fs_root_rcu(struct fs_struct *fs, struct path *root)
3284 {
3285 	unsigned seq;
3286 
3287 	do {
3288 		seq = read_seqcount_begin(&fs->seq);
3289 		*root = fs->root;
3290 	} while (read_seqcount_retry(&fs->seq, seq));
3291 }
3292 
3293 /**
3294  * d_path - return the path of a dentry
3295  * @path: path to report
3296  * @buf: buffer to return value in
3297  * @buflen: buffer length
3298  *
3299  * Convert a dentry into an ASCII path name. If the entry has been deleted
3300  * the string " (deleted)" is appended. Note that this is ambiguous.
3301  *
3302  * Returns a pointer into the buffer or an error code if the path was
3303  * too long. Note: Callers should use the returned pointer, not the passed
3304  * in buffer, to use the name! The implementation often starts at an offset
3305  * into the buffer, and may leave 0 bytes at the start.
3306  *
3307  * "buflen" should be positive.
3308  */
3309 char *d_path(const struct path *path, char *buf, int buflen)
3310 {
3311 	char *res = buf + buflen;
3312 	struct path root;
3313 	int error;
3314 
3315 	/*
3316 	 * We have various synthetic filesystems that never get mounted.  On
3317 	 * these filesystems dentries are never used for lookup purposes, and
3318 	 * thus don't need to be hashed.  They also don't need a name until a
3319 	 * user wants to identify the object in /proc/pid/fd/.  The little hack
3320 	 * below allows us to generate a name for these objects on demand:
3321 	 *
3322 	 * Some pseudo inodes are mountable.  When they are mounted
3323 	 * path->dentry == path->mnt->mnt_root.  In that case don't call d_dname
3324 	 * and instead have d_path return the mounted path.
3325 	 */
3326 	if (path->dentry->d_op && path->dentry->d_op->d_dname &&
3327 	    (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root))
3328 		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
3329 
3330 	rcu_read_lock();
3331 	get_fs_root_rcu(current->fs, &root);
3332 	error = path_with_deleted(path, &root, &res, &buflen);
3333 	rcu_read_unlock();
3334 
3335 	if (error < 0)
3336 		res = ERR_PTR(error);
3337 	return res;
3338 }
3339 EXPORT_SYMBOL(d_path);
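
/*
 * A short usage sketch: generating the path of an open file for a
 * diagnostic message.  The returned pointer points *into* buf, usually
 * at some offset, so it (and not buf) must be used:
 *
 *	char *buf = __getname();
 *
 *	if (buf) {
 *		char *p = d_path(&file->f_path, buf, PATH_MAX);
 *		if (!IS_ERR(p))
 *			pr_info("file: %s\n", p);
 *		__putname(buf);
 *	}
 */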
3340 
3341 /*
3342  * Helper function for dentry_operations.d_dname() members
3343  */
3344 char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
3345 			const char *fmt, ...)
3346 {
3347 	va_list args;
3348 	char temp[64];
3349 	int sz;
3350 
3351 	va_start(args, fmt);
3352 	sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
3353 	va_end(args);
3354 
3355 	if (sz > sizeof(temp) || sz > buflen)
3356 		return ERR_PTR(-ENAMETOOLONG);
3357 
3358 	buffer += buflen - sz;
3359 	return memcpy(buffer, temp, sz);
3360 }
3361 
3362 char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
3363 {
3364 	char *end = buffer + buflen;
3365 	/* these dentries are never renamed, so d_lock is not needed */
3366 	if (prepend(&end, &buflen, " (deleted)", 11) ||
3367 	    prepend(&end, &buflen, dentry->d_name.name, dentry->d_name.len) ||
3368 	    prepend(&end, &buflen, "/", 1))
3369 		end = ERR_PTR(-ENAMETOOLONG);
3370 	return end;
3371 }
3372 EXPORT_SYMBOL(simple_dname);
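
/*
 * A sketch of a d_dname implementation built on dynamic_dname(), in the
 * style of pipefs ("pipe:[N]"); the myfs name is illustrative:
 *
 *	static char *myfs_dname(struct dentry *dentry, char *buffer, int buflen)
 *	{
 *		return dynamic_dname(dentry, buffer, buflen, "myfs:[%lu]",
 *				d_inode(dentry)->i_ino);
 *	}
 */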
3373 
3374 /*
3375  * Write full pathname from the root of the filesystem into the buffer.
3376  */
3377 static char *__dentry_path(struct dentry *d, char *buf, int buflen)
3378 {
3379 	struct dentry *dentry;
3380 	char *end, *retval;
3381 	int len, seq = 0;
3382 	int error = 0;
3383 
3384 	if (buflen < 2)
3385 		goto Elong;
3386 
3387 	rcu_read_lock();
3388 restart:
3389 	dentry = d;
3390 	end = buf + buflen;
3391 	len = buflen;
3392 	prepend(&end, &len, "\0", 1);
3393 	/* Get '/' right */
3394 	retval = end-1;
3395 	*retval = '/';
3396 	read_seqbegin_or_lock(&rename_lock, &seq);
3397 	while (!IS_ROOT(dentry)) {
3398 		struct dentry *parent = dentry->d_parent;
3399 
3400 		prefetch(parent);
3401 		error = prepend_name(&end, &len, &dentry->d_name);
3402 		if (error)
3403 			break;
3404 
3405 		retval = end;
3406 		dentry = parent;
3407 	}
3408 	if (!(seq & 1))
3409 		rcu_read_unlock();
3410 	if (need_seqretry(&rename_lock, seq)) {
3411 		seq = 1;
3412 		goto restart;
3413 	}
3414 	done_seqretry(&rename_lock, seq);
3415 	if (error)
3416 		goto Elong;
3417 	return retval;
3418 Elong:
3419 	return ERR_PTR(-ENAMETOOLONG);
3420 }
3421 
3422 char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
3423 {
3424 	return __dentry_path(dentry, buf, buflen);
3425 }
3426 EXPORT_SYMBOL(dentry_path_raw);
3427 
3428 char *dentry_path(struct dentry *dentry, char *buf, int buflen)
3429 {
3430 	char *p = NULL;
3431 	char *retval;
3432 
3433 	if (d_unlinked(dentry)) {
3434 		p = buf + buflen;
3435 		if (prepend(&p, &buflen, "//deleted", 10) != 0)
3436 			goto Elong;
3437 		buflen++;
3438 	}
3439 	retval = __dentry_path(dentry, buf, buflen);
3440 	if (!IS_ERR(retval) && p)
3441 		*p = '/';	/* restore '/' overridden with '\0' */
3442 	return retval;
3443 Elong:
3444 	return ERR_PTR(-ENAMETOOLONG);
3445 }
3446 
3447 static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root,
3448 				    struct path *pwd)
3449 {
3450 	unsigned seq;
3451 
3452 	do {
3453 		seq = read_seqcount_begin(&fs->seq);
3454 		*root = fs->root;
3455 		*pwd = fs->pwd;
3456 	} while (read_seqcount_retry(&fs->seq, seq));
3457 }
3458 
3459 /*
3460  * NOTE! The user-level library version returns a
3461  * character pointer. The kernel system call just
3462  * returns the length of the buffer filled (which
3463  * includes the ending '\0' character), or a negative
3464  * error value. So libc would do something like
3465  *
3466  *	char *getcwd(char * buf, size_t size)
3467  *	{
3468  *		int retval;
3469  *
3470  *		retval = sys_getcwd(buf, size);
3471  *		if (retval >= 0)
3472  *			return buf;
3473  *		errno = -retval;
3474  *		return NULL;
3475  *	}
3476  */
3477 SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
3478 {
3479 	int error;
3480 	struct path pwd, root;
3481 	char *page = __getname();
3482 
3483 	if (!page)
3484 		return -ENOMEM;
3485 
3486 	rcu_read_lock();
3487 	get_fs_root_and_pwd_rcu(current->fs, &root, &pwd);
3488 
3489 	error = -ENOENT;
3490 	if (!d_unlinked(pwd.dentry)) {
3491 		unsigned long len;
3492 		char *cwd = page + PATH_MAX;
3493 		int buflen = PATH_MAX;
3494 
3495 		prepend(&cwd, &buflen, "\0", 1);
3496 		error = prepend_path(&pwd, &root, &cwd, &buflen);
3497 		rcu_read_unlock();
3498 
3499 		if (error < 0)
3500 			goto out;
3501 
3502 		/* Unreachable from current root */
3503 		if (error > 0) {
3504 			error = prepend_unreachable(&cwd, &buflen);
3505 			if (error)
3506 				goto out;
3507 		}
3508 
3509 		error = -ERANGE;
3510 		len = PATH_MAX + page - cwd;
3511 		if (len <= size) {
3512 			error = len;
3513 			if (copy_to_user(buf, cwd, len))
3514 				error = -EFAULT;
3515 		}
3516 	} else {
3517 		rcu_read_unlock();
3518 	}
3519 
3520 out:
3521 	__putname(page);
3522 	return error;
3523 }
3524 
3525 /*
3526  * Test whether new_dentry is a subdirectory of old_dentry.
3527  *
3528  * Trivially implemented using the dcache structure
3529  */
3530 
3531 /**
3532  * is_subdir - is new dentry a subdirectory of old_dentry
3533  * @new_dentry: new dentry
3534  * @old_dentry: old dentry
3535  *
3536  * Returns true if new_dentry is a subdirectory of old_dentry (at any depth).
3537  * Returns false otherwise.
3538  * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
3539  */
3540 
3541 bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
3542 {
3543 	bool result;
3544 	unsigned seq;
3545 
3546 	if (new_dentry == old_dentry)
3547 		return true;
3548 
3549 	do {
3550 		/* for restarting inner loop in case of seq retry */
3551 		seq = read_seqbegin(&rename_lock);
3552 		/*
3553 		 * Need rcu_read_lock() to protect against d_parent changing
3554 		 * due to d_move().
3555 		 */
3556 		rcu_read_lock();
3557 		result = d_ancestor(old_dentry, new_dentry) != NULL;
3561 		rcu_read_unlock();
3562 	} while (read_seqretry(&rename_lock, seq));
3563 
3564 	return result;
3565 }
3566 EXPORT_SYMBOL(is_subdir);
3567 
3568 static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
3569 {
3570 	struct dentry *root = data;
3571 	if (dentry != root) {
3572 		if (d_unhashed(dentry) || !dentry->d_inode)
3573 			return D_WALK_SKIP;
3574 
3575 		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
3576 			dentry->d_flags |= DCACHE_GENOCIDE;
3577 			dentry->d_lockref.count--;
3578 		}
3579 	}
3580 	return D_WALK_CONTINUE;
3581 }
3582 
3583 void d_genocide(struct dentry *parent)
3584 {
3585 	d_walk(parent, parent, d_genocide_kill, NULL);
3586 }
3587 
3588 void d_tmpfile(struct dentry *dentry, struct inode *inode)
3589 {
3590 	inode_dec_link_count(inode);
3591 	BUG_ON(dentry->d_name.name != dentry->d_iname ||
3592 		!hlist_unhashed(&dentry->d_u.d_alias) ||
3593 		!d_unlinked(dentry));
3594 	spin_lock(&dentry->d_parent->d_lock);
3595 	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
3596 	dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
3597 				(unsigned long long)inode->i_ino);
3598 	spin_unlock(&dentry->d_lock);
3599 	spin_unlock(&dentry->d_parent->d_lock);
3600 	d_instantiate(dentry, inode);
3601 }
3602 EXPORT_SYMBOL(d_tmpfile);
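
/*
 * A minimal ->tmpfile() sketch; myfs_new_inode() is a hypothetical helper
 * returning a referenced inode with i_nlink already set to 1, which
 * d_tmpfile() then drops to 0 (the file starts out unlinked):
 *
 *	static int myfs_tmpfile(struct inode *dir, struct dentry *dentry,
 *				umode_t mode)
 *	{
 *		struct inode *inode = myfs_new_inode(dir, mode);
 *
 *		if (IS_ERR(inode))
 *			return PTR_ERR(inode);
 *		d_tmpfile(dentry, inode);
 *		return 0;
 *	}
 */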
3603 
3604 static __initdata unsigned long dhash_entries;
3605 static int __init set_dhash_entries(char *str)
3606 {
3607 	if (!str)
3608 		return 0;
3609 	dhash_entries = simple_strtoul(str, &str, 0);
3610 	return 1;
3611 }
3612 __setup("dhash_entries=", set_dhash_entries);
3613 
3614 static void __init dcache_init_early(void)
3615 {
3616 	/* If hashes are distributed across NUMA nodes, defer
3617 	 * hash allocation until vmalloc space is available.
3618 	 */
3619 	if (hashdist)
3620 		return;
3621 
3622 	dentry_hashtable =
3623 		alloc_large_system_hash("Dentry cache",
3624 					sizeof(struct hlist_bl_head),
3625 					dhash_entries,
3626 					13,
3627 					HASH_EARLY | HASH_ZERO,
3628 					&d_hash_shift,
3629 					NULL,
3630 					0,
3631 					0);
3632 	d_hash_shift = 32 - d_hash_shift;
3633 }
3634 
3635 static void __init dcache_init(void)
3636 {
3637 	/*
3638 	 * A constructor could be added for stable state like the lists,
3639 	 * but it is probably not worth it because of the cache nature
3640 	 * of the dcache.
3641 	 */
3642 	dentry_cache = KMEM_CACHE_USERCOPY(dentry,
3643 		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT,
3644 		d_iname);
3645 
3646 	/* Hash may have been set up in dcache_init_early */
3647 	if (!hashdist)
3648 		return;
3649 
3650 	dentry_hashtable =
3651 		alloc_large_system_hash("Dentry cache",
3652 					sizeof(struct hlist_bl_head),
3653 					dhash_entries,
3654 					13,
3655 					HASH_ZERO,
3656 					&d_hash_shift,
3657 					NULL,
3658 					0,
3659 					0);
3660 	d_hash_shift = 32 - d_hash_shift;
3661 }
3662 
3663 /* SLAB cache for __getname() consumers */
3664 struct kmem_cache *names_cachep __read_mostly;
3665 EXPORT_SYMBOL(names_cachep);
3666 
3667 EXPORT_SYMBOL(d_genocide);
3668 
3669 void __init vfs_caches_init_early(void)
3670 {
3671 	int i;
3672 
3673 	for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++)
3674 		INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]);
3675 
3676 	dcache_init_early();
3677 	inode_init_early();
3678 }
3679 
3680 void __init vfs_caches_init(void)
3681 {
3682 	names_cachep = kmem_cache_create_usercopy("names_cache", PATH_MAX, 0,
3683 			SLAB_HWCACHE_ALIGN|SLAB_PANIC, 0, PATH_MAX, NULL);
3684 
3685 	dcache_init();
3686 	inode_init();
3687 	files_init();
3688 	files_maxfiles_init();
3689 	mnt_init();
3690 	bdev_cache_init();
3691 	chrdev_init();
3692 }
3693