// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/dcache.c
 *
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 */

/*
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */

#include <linux/ratelimit.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/fscrypt.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/memblock.h>
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include <linux/list_lru.h>
#include "internal.h"
#include "mount.h"

#include <asm/runtime-const.h>
/*
 * Usage:
 * dcache->d_inode->i_lock protects:
 *   - i_dentry, d_u.d_alias, d_inode of aliases
 * dcache_hash_bucket lock protects:
 *   - the dcache hash table
 * s_roots bl list spinlock protects:
 *   - the s_roots list (see __d_drop)
 * dentry->d_sb->s_dentry_lru_lock protects:
 *   - the dcache lru lists and counters
 * d_lock protects:
 *   - d_flags
 *   - d_name
 *   - d_lru
 *   - d_count
 *   - d_unhashed()
 *   - d_parent and d_children
 *   - children's d_sib and d_parent
 *   - d_u.d_alias, d_inode
 *
 * Ordering:
 * dentry->d_inode->i_lock
 *   dentry->d_lock
 *     dentry->d_sb->s_dentry_lru_lock
 *     dcache_hash_bucket lock
 *     s_roots lock
 *
 * If there is an ancestor relationship:
 * dentry->d_parent->...->d_parent->d_lock
 *   ...
 *     dentry->d_parent->d_lock
 *       dentry->d_lock
 *
 * If no ancestor relationship:
 * arbitrary, since it's serialized on rename_lock
 */
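
/*
 * Illustrative sketch of the ancestor ordering above (not used anywhere
 * in this file; __dentry_kill() and d_walk() below do the real thing):
 *
 *	spin_lock(&parent->d_lock);
 *	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
 *	...
 *	spin_unlock(&dentry->d_lock);
 *	spin_unlock(&parent->d_lock);
 */
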
static int sysctl_vfs_cache_pressure __read_mostly = 100;

unsigned long vfs_pressure_ratio(unsigned long val)
{
	return mult_frac(val, sysctl_vfs_cache_pressure, 100);
}
EXPORT_SYMBOL_GPL(vfs_pressure_ratio);
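
/*
 * Example of the intended use (a sketch along the lines of the superblock
 * shrinker's ->count_objects(), cf. super_cache_count() in fs/super.c):
 *
 *	total_objects = list_lru_shrink_count(&sb->s_dentry_lru, sc);
 *	total_objects = vfs_pressure_ratio(total_objects);
 */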

__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(rename_lock);

static struct kmem_cache *dentry_cache __ro_after_init;

const struct qstr empty_name = QSTR_INIT("", 0);
EXPORT_SYMBOL(empty_name);
const struct qstr slash_name = QSTR_INIT("/", 1);
EXPORT_SYMBOL(slash_name);
const struct qstr dotdot_name = QSTR_INIT("..", 2);
EXPORT_SYMBOL(dotdot_name);

/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.
 *
 * Marking the variables "used" ensures that the compiler doesn't
 * optimize them away completely on architectures with runtime
 * constant infrastructure; this allows debuggers to see their
 * values. But updating these values has no effect on those arches.
 */

static unsigned int d_hash_shift __ro_after_init __used;

static struct hlist_bl_head *dentry_hashtable __ro_after_init __used;

static inline struct hlist_bl_head *d_hash(unsigned long hashlen)
{
	return runtime_const_ptr(dentry_hashtable) +
		runtime_const_shift_right_32(hashlen, d_hash_shift);
}
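
/*
 * Note: the hashlen argument packs the 32-bit name hash in its low word
 * and the name length in its high word (see hashlen_create()), so the
 * 32-bit shift above indexes by the hash alone and the length is ignored.
 */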

#define IN_LOOKUP_SHIFT 10
static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];

static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
					unsigned int hash)
{
	hash += (unsigned long) parent / L1_CACHE_BYTES;
	return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
}

struct dentry_stat_t {
	long nr_dentry;
	long nr_unused;
	long age_limit;		/* age in seconds */
	long want_pages;	/* pages requested by system */
	long nr_negative;	/* # of unused negative dentries */
	long dummy;		/* Reserved for future use */
};

static DEFINE_PER_CPU(long, nr_dentry);
static DEFINE_PER_CPU(long, nr_dentry_unused);
static DEFINE_PER_CPU(long, nr_dentry_negative);
static int dentry_negative_policy;

#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
/* Statistics gathering. */
static struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

/*
 * Here we resort to our own counters instead of using generic per-cpu counters
 * for consistency with what the vfs inode code does. We are expected to harvest
 * better code and performance by having our own specialized counters.
 *
 * Please note that the loop is done over all possible CPUs, not over all online
 * CPUs. The reason for this is that we don't want to play games with CPUs going
 * on and off. If one of them goes off, we will just keep their counters.
 *
 * glommer: See cffbc8a for details, and if you ever intend to change this,
 * please update all vfs counters to match.
 */
static long get_nr_dentry(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry, i);
	return sum < 0 ? 0 : sum;
}

static long get_nr_dentry_unused(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry_unused, i);
	return sum < 0 ? 0 : sum;
}

static long get_nr_dentry_negative(void)
{
	int i;
	long sum = 0;

	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry_negative, i);
	return sum < 0 ? 0 : sum;
}

static int proc_nr_dentry(const struct ctl_table *table, int write, void *buffer,
			  size_t *lenp, loff_t *ppos)
{
	dentry_stat.nr_dentry = get_nr_dentry();
	dentry_stat.nr_unused = get_nr_dentry_unused();
	dentry_stat.nr_negative = get_nr_dentry_negative();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}

static const struct ctl_table fs_dcache_sysctls[] = {
	{
		.procname	= "dentry-state",
		.data		= &dentry_stat,
		.maxlen		= 6*sizeof(long),
		.mode		= 0444,
		.proc_handler	= proc_nr_dentry,
	},
	{
		.procname	= "dentry-negative",
		.data		= &dentry_negative_policy,
		.maxlen		= sizeof(dentry_negative_policy),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
};

static const struct ctl_table vm_dcache_sysctls[] = {
	{
		.procname	= "vfs_cache_pressure",
		.data		= &sysctl_vfs_cache_pressure,
		.maxlen		= sizeof(sysctl_vfs_cache_pressure),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
	},
};

static int __init init_fs_dcache_sysctls(void)
{
	register_sysctl_init("vm", vm_dcache_sysctls);
	register_sysctl_init("fs", fs_dcache_sysctls);
	return 0;
}
fs_initcall(init_fs_dcache_sysctls);
#endif
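
/*
 * Reading the statistics back (illustrative): "cat /proc/sys/fs/dentry-state"
 * prints the six fields of struct dentry_stat_t in declaration order, i.e.
 * nr_dentry, nr_unused, age_limit, want_pages, nr_negative and dummy.
 */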

/*
 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
 * The strings are both count bytes long, and count is non-zero.
 */
#ifdef CONFIG_DCACHE_WORD_ACCESS

#include <asm/word-at-a-time.h>
/*
 * NOTE! 'cs' and 'scount' come from a dentry, so it has an
 * aligned allocation for this particular component. We don't
 * strictly need the load_unaligned_zeropad() safety, but it
 * doesn't hurt either.
 *
 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
 * need the careful unaligned handling.
 */
static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	unsigned long a, b, mask;

	for (;;) {
		a = read_word_at_a_time(cs);
		b = load_unaligned_zeropad(ct);
		if (tcount < sizeof(unsigned long))
			break;
		if (unlikely(a != b))
			return 1;
		cs += sizeof(unsigned long);
		ct += sizeof(unsigned long);
		tcount -= sizeof(unsigned long);
		if (!tcount)
			return 0;
	}
	mask = bytemask_from_count(tcount);
	return unlikely(!!((a ^ b) & mask));
}
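
/*
 * Worked example for the tail masking above, assuming the usual
 * little-endian definition bytemask_from_count(cnt) == ~(~0ul << (cnt)*8):
 * with tcount == 3 on a 64-bit machine the mask is 0x0000000000ffffff, so
 * only the three remaining name bytes of the final words feed into the
 * (a ^ b) comparison.
 */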

#else

static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	do {
		if (*cs != *ct)
			return 1;
		cs++;
		ct++;
		tcount--;
	} while (tcount);
	return 0;
}

#endif

static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
{
	/*
	 * Be careful about RCU walk racing with rename:
	 * use 'READ_ONCE' to fetch the name pointer.
	 *
	 * NOTE! Even if a rename will mean that the length
	 * was not loaded atomically, we don't care. The
	 * RCU walk will check the sequence count eventually,
	 * and catch it. And we won't overrun the buffer,
	 * because we're reading the name pointer atomically,
	 * and a dentry name is guaranteed to be properly
	 * terminated with a NUL byte.
	 *
	 * End result: even if 'len' is wrong, we'll exit
	 * early because the data cannot match (there can
	 * be no NUL in the ct/tcount data)
	 */
	const unsigned char *cs = READ_ONCE(dentry->d_name.name);

	return dentry_string_cmp(cs, ct, tcount);
}

/*
 * long names are allocated separately from dentry and never modified.
 * Refcounted, freeing is RCU-delayed. See take_dentry_name_snapshot()
 * for the reason why ->count and ->head can't be combined into a union.
 * dentry_string_cmp() relies upon ->name[] being word-aligned.
 */
struct external_name {
	atomic_t count;
	struct rcu_head head;
	unsigned char name[] __aligned(sizeof(unsigned long));
};

static inline struct external_name *external_name(struct dentry *dentry)
{
	return container_of(dentry->d_name.name, struct external_name, name[0]);
}

static void __d_free(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	kmem_cache_free(dentry_cache, dentry);
}

static void __d_free_external(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
	kfree(external_name(dentry));
	kmem_cache_free(dentry_cache, dentry);
}

static inline int dname_external(const struct dentry *dentry)
{
	return dentry->d_name.name != dentry->d_shortname.string;
}

void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
{
	unsigned seq;
	const unsigned char *s;

	rcu_read_lock();
retry:
	seq = read_seqcount_begin(&dentry->d_seq);
	s = READ_ONCE(dentry->d_name.name);
	name->name.hash_len = dentry->d_name.hash_len;
	name->name.name = name->inline_name.string;
	if (likely(s == dentry->d_shortname.string)) {
		name->inline_name = dentry->d_shortname;
	} else {
		struct external_name *p;
		p = container_of(s, struct external_name, name[0]);
		// get a valid reference
		if (unlikely(!atomic_inc_not_zero(&p->count)))
			goto retry;
		name->name.name = s;
	}
	if (read_seqcount_retry(&dentry->d_seq, seq)) {
		release_dentry_name_snapshot(name);
		goto retry;
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(take_dentry_name_snapshot);

void release_dentry_name_snapshot(struct name_snapshot *name)
{
	if (unlikely(name->name.name != name->inline_name.string)) {
		struct external_name *p;
		p = container_of(name->name.name, struct external_name, name[0]);
		if (unlikely(atomic_dec_and_test(&p->count)))
			kfree_rcu(p, head);
	}
}
EXPORT_SYMBOL(release_dentry_name_snapshot);
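
/*
 * Typical pairing (illustrative; use_name() is a made-up stand-in for the
 * caller's own work): pin a stable copy of the name across something that
 * can race with rename, then drop the reference:
 *
 *	struct name_snapshot snap;
 *
 *	take_dentry_name_snapshot(&snap, dentry);
 *	use_name(snap.name.name, hashlen_len(snap.name.hash_len));
 *	release_dentry_name_snapshot(&snap);
 */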

static inline void __d_set_inode_and_type(struct dentry *dentry,
					  struct inode *inode,
					  unsigned type_flags)
{
	unsigned flags;

	dentry->d_inode = inode;
	flags = READ_ONCE(dentry->d_flags);
	flags &= ~DCACHE_ENTRY_TYPE;
	flags |= type_flags;
	smp_store_release(&dentry->d_flags, flags);
}

static inline void __d_clear_type_and_inode(struct dentry *dentry)
{
	unsigned flags = READ_ONCE(dentry->d_flags);

	flags &= ~DCACHE_ENTRY_TYPE;
	WRITE_ONCE(dentry->d_flags, flags);
	dentry->d_inode = NULL;
	/*
	 * The negative counter only tracks dentries on the LRU. Don't inc if
	 * d_lru is on another list.
	 */
	if ((flags & (DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST)
		this_cpu_inc(nr_dentry_negative);
}

static void dentry_free(struct dentry *dentry)
{
	WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
	if (unlikely(dname_external(dentry))) {
		struct external_name *p = external_name(dentry);
		if (likely(atomic_dec_and_test(&p->count))) {
			call_rcu(&dentry->d_u.d_rcu, __d_free_external);
			return;
		}
	}
	/* if dentry was never visible to RCU, immediate free is OK */
	if (dentry->d_flags & DCACHE_NORCU)
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}

/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined.
 */
static void dentry_unlink_inode(struct dentry *dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;

	raw_write_seqcount_begin(&dentry->d_seq);
	__d_clear_type_and_inode(dentry);
	hlist_del_init(&dentry->d_u.d_alias);
	raw_write_seqcount_end(&dentry->d_seq);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);
	if (!inode->i_nlink)
		fsnotify_inoderemove(inode);
	if (dentry->d_op && dentry->d_op->d_iput)
		dentry->d_op->d_iput(dentry, inode);
	else
		iput(inode);
}

/*
 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
 * is in use - which includes both the "real" per-superblock
 * LRU list _and_ the DCACHE_SHRINK_LIST use.
 *
 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
 * on the shrink list (ie not on the superblock LRU list).
 *
 * The per-cpu "nr_dentry_unused" counters are updated with
 * the DCACHE_LRU_LIST bit.
 *
 * The per-cpu "nr_dentry_negative" counters are only updated
 * when deleted from or added to the per-superblock LRU list, not
 * from/to the shrink list. That is to avoid an unneeded dec/inc
 * pair when moving from LRU to shrink list in select_collect().
 *
 * These helper functions make sure we always follow the
 * rules. d_lock must be held by the caller.
 */
#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
static void d_lru_add(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, 0);
	dentry->d_flags |= DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
	if (d_is_negative(dentry))
		this_cpu_inc(nr_dentry_negative);
	WARN_ON_ONCE(!list_lru_add_obj(
			&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

static void d_lru_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	if (d_is_negative(dentry))
		this_cpu_dec(nr_dentry_negative);
	WARN_ON_ONCE(!list_lru_del_obj(
			&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

static void d_shrink_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	list_del_init(&dentry->d_lru);
	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	this_cpu_dec(nr_dentry_unused);
}

static void d_shrink_add(struct dentry *dentry, struct list_head *list)
{
	D_FLAG_VERIFY(dentry, 0);
	list_add(&dentry->d_lru, list);
	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
}

/*
 * These can only be called under the global LRU lock, ie during the
 * callback for freeing the LRU list. "isolate" removes it from the
 * LRU lists entirely, while shrink_move moves it to the indicated
 * private list.
 */
static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	if (d_is_negative(dentry))
		this_cpu_dec(nr_dentry_negative);
	list_lru_isolate(lru, &dentry->d_lru);
}

static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
			      struct list_head *list)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags |= DCACHE_SHRINK_LIST;
	if (d_is_negative(dentry))
		this_cpu_dec(nr_dentry_negative);
	list_lru_isolate_move(lru, &dentry->d_lru, list);
}

static void ___d_drop(struct dentry *dentry)
{
	struct hlist_bl_head *b;
	/*
	 * Hashed dentries are normally on the dentry hashtable,
	 * with the exception of those newly allocated by
	 * d_obtain_root, which are always IS_ROOT:
	 */
	if (unlikely(IS_ROOT(dentry)))
		b = &dentry->d_sb->s_roots;
	else
		b = d_hash(dentry->d_name.hash);

	hlist_bl_lock(b);
	__hlist_bl_del(&dentry->d_hash);
	hlist_bl_unlock(b);
}

void __d_drop(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		___d_drop(dentry);
		dentry->d_hash.pprev = NULL;
		write_seqcount_invalidate(&dentry->d_seq);
	}
}
EXPORT_SYMBOL(__d_drop);

/**
 * d_drop - drop a dentry
 * @dentry: dentry to drop
 *
 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 * be found through a VFS lookup any more. Note that this is different from
 * deleting the dentry - d_delete will try to mark the dentry negative if
 * possible, giving a successful _negative_ lookup, while d_drop will
 * just make the cache lookup fail.
 *
 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 * reason (NFS timeouts or autofs deletes).
 *
 * __d_drop requires dentry->d_lock
 *
 * ___d_drop doesn't mark dentry as "unhashed"
 * (dentry->d_hash.pprev will be LIST_POISON2, not NULL).
 */
void d_drop(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);

static inline void dentry_unlist(struct dentry *dentry)
{
	struct dentry *next;
	/*
	 * Inform d_walk() and shrink_dentry_list() that we are no longer
	 * attached to the dentry tree
	 */
	dentry->d_flags |= DCACHE_DENTRY_KILLED;
	if (unlikely(hlist_unhashed(&dentry->d_sib)))
		return;
	__hlist_del(&dentry->d_sib);
	/*
	 * Cursors can move around the list of children. While we'd been
	 * a normal list member, it didn't matter - ->d_sib.next would've
	 * been updated. However, from now on it won't be and for the
	 * things like d_walk() it might end up with a nasty surprise.
	 * Normally d_walk() doesn't care about cursors moving around -
	 * ->d_lock on parent prevents that and since a cursor has no children
	 * of its own, we get through it without ever unlocking the parent.
	 * There is one exception, though - if we ascend from a child that
	 * gets killed as soon as we unlock it, the next sibling is found
	 * using the value left in its ->d_sib.next. And if _that_
	 * pointed to a cursor, and cursor got moved (e.g. by lseek())
	 * before d_walk() regains parent->d_lock, we'll end up skipping
	 * everything the cursor had been moved past.
	 *
	 * Solution: make sure that the pointer left behind in ->d_sib.next
	 * points to something that won't be moving around. I.e. skip the
	 * cursors.
	 */
	while (dentry->d_sib.next) {
		next = hlist_entry(dentry->d_sib.next, struct dentry, d_sib);
		if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
			break;
		dentry->d_sib.next = next->d_sib.next;
	}
}

static struct dentry *__dentry_kill(struct dentry *dentry)
{
	struct dentry *parent = NULL;
	bool can_free = true;

	/*
	 * The dentry is now unrecoverably dead to the world.
	 */
	lockref_mark_dead(&dentry->d_lockref);

	/*
	 * inform the fs via d_prune that this dentry is about to be
	 * unhashed and destroyed.
	 */
	if (dentry->d_flags & DCACHE_OP_PRUNE)
		dentry->d_op->d_prune(dentry);

	if (dentry->d_flags & DCACHE_LRU_LIST) {
		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
			d_lru_del(dentry);
	}
	/* if it was on the hash then remove it */
	__d_drop(dentry);
	if (dentry->d_inode)
		dentry_unlink_inode(dentry);
	else
		spin_unlock(&dentry->d_lock);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	cond_resched();
	/* now that it's negative, ->d_parent is stable */
	if (!IS_ROOT(dentry)) {
		parent = dentry->d_parent;
		spin_lock(&parent->d_lock);
	}
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	dentry_unlist(dentry);
	if (dentry->d_flags & DCACHE_SHRINK_LIST)
		can_free = false;
	spin_unlock(&dentry->d_lock);
	if (likely(can_free))
		dentry_free(dentry);
	if (parent && --parent->d_lockref.count) {
		spin_unlock(&parent->d_lock);
		return NULL;
	}
	return parent;
}

/*
 * Lock a dentry for feeding it to __dentry_kill().
 * Called under rcu_read_lock() and dentry->d_lock; the former
 * guarantees that nothing we access will be freed under us.
 * Note that dentry is *not* protected from concurrent dentry_kill(),
 * d_delete(), etc.
 *
 * Return false if dentry is busy.  Otherwise, return true and have
 * that dentry's inode locked.
 */

static bool lock_for_kill(struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (unlikely(dentry->d_lockref.count))
		return false;

	if (!inode || likely(spin_trylock(&inode->i_lock)))
		return true;

	do {
		spin_unlock(&dentry->d_lock);
		spin_lock(&inode->i_lock);
		spin_lock(&dentry->d_lock);
		if (likely(inode == dentry->d_inode))
			break;
		spin_unlock(&inode->i_lock);
		inode = dentry->d_inode;
	} while (inode);
	if (likely(!dentry->d_lockref.count))
		return true;
	if (inode)
		spin_unlock(&inode->i_lock);
	return false;
}

/*
 * Decide if dentry is worth retaining. Usually this is called with dentry
 * locked; if not locked, we are more limited and might not be able to tell
 * without a lock. False in this case means "punt to locked path and recheck".
 *
 * In case we aren't locked, these predicates are not "stable". However, it is
 * sufficient that at some point after we dropped the reference the dentry was
 * hashed and the flags had the proper value. Other dentry users may have
 * re-gotten a reference to the dentry and changed that, but our work is done -
 * we can leave the dentry around with a zero refcount.
 */
static inline bool retain_dentry(struct dentry *dentry, bool locked)
{
	unsigned int d_flags;

	smp_rmb();
	d_flags = READ_ONCE(dentry->d_flags);

	// Unreachable? Nobody would be able to look it up, no point retaining
	if (unlikely(d_unhashed(dentry)))
		return false;

	// Same if it's disconnected
	if (unlikely(d_flags & DCACHE_DISCONNECTED))
		return false;

	// ->d_delete() might tell us not to bother, but that requires
	// ->d_lock; can't decide without it
	if (unlikely(d_flags & DCACHE_OP_DELETE)) {
		if (!locked || dentry->d_op->d_delete(dentry))
			return false;
	}

	// Explicitly told not to bother
	if (unlikely(d_flags & DCACHE_DONTCACHE))
		return false;

	// At this point it looks like we ought to keep it. We also might
	// need to do something - put it on LRU if it wasn't there already
	// and mark it referenced if it was on LRU, but not marked yet.
	// Unfortunately, both actions require ->d_lock, so in lockless
	// case we'd have to punt rather than doing those.
	if (unlikely(!(d_flags & DCACHE_LRU_LIST))) {
		if (!locked)
			return false;
		d_lru_add(dentry);
	} else if (unlikely(!(d_flags & DCACHE_REFERENCED))) {
		if (!locked)
			return false;
		dentry->d_flags |= DCACHE_REFERENCED;
	}
	return true;
}

void d_mark_dontcache(struct inode *inode)
{
	struct dentry *de;

	spin_lock(&inode->i_lock);
	hlist_for_each_entry(de, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&de->d_lock);
		de->d_flags |= DCACHE_DONTCACHE;
		spin_unlock(&de->d_lock);
	}
	inode->i_state |= I_DONTCACHE;
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_mark_dontcache);

/*
 * Try to do a lockless dput(), and return whether that was successful.
 *
 * If unsuccessful, we return false, having already taken the dentry lock.
 * In that case refcount is guaranteed to be zero and we have already
 * decided that it's not worth keeping around.
 *
 * The caller needs to hold the RCU read lock, so that the dentry is
 * guaranteed to stay around even if the refcount goes down to zero!
 */
static inline bool fast_dput(struct dentry *dentry)
{
	int ret;

	/*
	 * try to decrement the lockref optimistically.
	 */
	ret = lockref_put_return(&dentry->d_lockref);

	/*
	 * If the lockref_put_return() failed due to the lock being held
	 * by somebody else, the fast path has failed. We will need to
	 * get the lock, and then check the count again.
	 */
	if (unlikely(ret < 0)) {
		spin_lock(&dentry->d_lock);
		if (WARN_ON_ONCE(dentry->d_lockref.count <= 0)) {
			spin_unlock(&dentry->d_lock);
			return true;
		}
		dentry->d_lockref.count--;
		goto locked;
	}

	/*
	 * If we weren't the last ref, we're done.
	 */
	if (ret)
		return true;

	/*
	 * Can we decide that decrement of refcount is all we needed without
	 * taking the lock? There's a very common case when it's all we need -
	 * dentry looks like it ought to be retained and there's nothing else
	 * to do.
	 */
	if (retain_dentry(dentry, false))
		return true;

	/*
	 * Either not worth retaining or we can't tell without the lock.
	 * Get the lock, then. We've already decremented the refcount to 0,
	 * but we'll need to re-check the situation after getting the lock.
	 */
	spin_lock(&dentry->d_lock);

	/*
	 * Did somebody else grab a reference to it in the meantime, and
	 * we're no longer the last user after all? Alternatively, somebody
	 * else could have killed it and marked it dead. Either way, we
	 * don't need to do anything else.
	 */
locked:
	if (dentry->d_lockref.count || retain_dentry(dentry, true)) {
		spin_unlock(&dentry->d_lock);
		return true;
	}
	return false;
}


/*
 * This is dput
 *
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */

/*
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 */
void dput(struct dentry *dentry)
{
	if (!dentry)
		return;
	might_sleep();
	rcu_read_lock();
	if (likely(fast_dput(dentry))) {
		rcu_read_unlock();
		return;
	}
	while (lock_for_kill(dentry)) {
		rcu_read_unlock();
		dentry = __dentry_kill(dentry);
		if (!dentry)
			return;
		if (retain_dentry(dentry, true)) {
			spin_unlock(&dentry->d_lock);
			return;
		}
		rcu_read_lock();
	}
	rcu_read_unlock();
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(dput);

static void to_shrink_list(struct dentry *dentry, struct list_head *list)
__must_hold(&dentry->d_lock)
{
	if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
		if (dentry->d_flags & DCACHE_LRU_LIST)
			d_lru_del(dentry);
		d_shrink_add(dentry, list);
	}
}

void dput_to_list(struct dentry *dentry, struct list_head *list)
{
	rcu_read_lock();
	if (likely(fast_dput(dentry))) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();
	to_shrink_list(dentry, list);
	spin_unlock(&dentry->d_lock);
}

struct dentry *dget_parent(struct dentry *dentry)
{
	int gotref;
	struct dentry *ret;
	unsigned seq;

	/*
	 * Do optimistic parent lookup without any
	 * locking.
	 */
	rcu_read_lock();
	seq = raw_seqcount_begin(&dentry->d_seq);
	ret = READ_ONCE(dentry->d_parent);
	gotref = lockref_get_not_zero(&ret->d_lockref);
	rcu_read_unlock();
	if (likely(gotref)) {
		if (!read_seqcount_retry(&dentry->d_seq, seq))
			return ret;
		dput(ret);
	}

repeat:
	/*
	 * Don't need rcu_dereference because we re-check it was correct under
	 * the lock.
	 */
	rcu_read_lock();
	ret = dentry->d_parent;
	spin_lock(&ret->d_lock);
	if (unlikely(ret != dentry->d_parent)) {
		spin_unlock(&ret->d_lock);
		rcu_read_unlock();
		goto repeat;
	}
	rcu_read_unlock();
	BUG_ON(!ret->d_lockref.count);
	ret->d_lockref.count++;
	spin_unlock(&ret->d_lock);
	return ret;
}
EXPORT_SYMBOL(dget_parent);
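
/*
 * Usage sketch: every successful dget_parent() must be balanced by one
 * dput() once the caller is done with the parent (inspect() here is a
 * hypothetical caller-side helper):
 *
 *	struct dentry *parent = dget_parent(dentry);
 *
 *	inspect(parent);
 *	dput(parent);
 */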

static struct dentry *__d_find_any_alias(struct inode *inode)
{
	struct dentry *alias;

	if (hlist_empty(&inode->i_dentry))
		return NULL;
	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
	lockref_get(&alias->d_lockref);
	return alias;
}

/**
 * d_find_any_alias - find any alias for a given inode
 * @inode: inode to find an alias for
 *
 * If any aliases exist for the given inode, take and return a
 * reference for one of them.  If no aliases exist, return %NULL.
 */
struct dentry *d_find_any_alias(struct inode *inode)
{
	struct dentry *de;

	spin_lock(&inode->i_lock);
	de = __d_find_any_alias(inode);
	spin_unlock(&inode->i_lock);
	return de;
}
EXPORT_SYMBOL(d_find_any_alias);

static struct dentry *__d_find_alias(struct inode *inode)
{
	struct dentry *alias;

	if (S_ISDIR(inode->i_mode))
		return __d_find_any_alias(inode);

	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&alias->d_lock);
		if (!d_unhashed(alias)) {
			dget_dlock(alias);
			spin_unlock(&alias->d_lock);
			return alias;
		}
		spin_unlock(&alias->d_lock);
	}
	return NULL;
}

/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 *
 * If inode has a hashed alias, or is a directory and has any alias,
 * acquire the reference to alias and return it. Otherwise return NULL.
 * Notice that if inode is a directory there can be only one alias and
 * it can be unhashed only if it has no children, or if it is the root
 * of a filesystem, or if the directory was renamed and d_revalidate
 * was the first vfs operation to notice.
 *
 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 * any other hashed alias over that one.
 */
struct dentry *d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!hlist_empty(&inode->i_dentry)) {
		spin_lock(&inode->i_lock);
		de = __d_find_alias(inode);
		spin_unlock(&inode->i_lock);
	}
	return de;
}
EXPORT_SYMBOL(d_find_alias);

/*
 * Caller MUST be holding rcu_read_lock() and be guaranteed
 * that inode won't get freed until rcu_read_unlock().
 */
struct dentry *d_find_alias_rcu(struct inode *inode)
{
	struct hlist_head *l = &inode->i_dentry;
	struct dentry *de = NULL;

	spin_lock(&inode->i_lock);
	// ->i_dentry and ->i_rcu are colocated, but the latter won't be
	// used without having I_FREEING set, which means no aliases left
	if (likely(!(inode->i_state & I_FREEING) && !hlist_empty(l))) {
		if (S_ISDIR(inode->i_mode)) {
			de = hlist_entry(l->first, struct dentry, d_u.d_alias);
		} else {
			hlist_for_each_entry(de, l, d_u.d_alias)
				if (!d_unhashed(de))
					break;
		}
	}
	spin_unlock(&inode->i_lock);
	return de;
}

/*
 * Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
	LIST_HEAD(dispose);
	struct dentry *dentry;

	spin_lock(&inode->i_lock);
	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&dentry->d_lock);
		if (!dentry->d_lockref.count)
			to_shrink_list(dentry, &dispose);
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&inode->i_lock);
	shrink_dentry_list(&dispose);
}
EXPORT_SYMBOL(d_prune_aliases);

static inline void shrink_kill(struct dentry *victim)
{
	do {
		rcu_read_unlock();
		victim = __dentry_kill(victim);
		rcu_read_lock();
	} while (victim && lock_for_kill(victim));
	rcu_read_unlock();
	if (victim)
		spin_unlock(&victim->d_lock);
}

void shrink_dentry_list(struct list_head *list)
{
	while (!list_empty(list)) {
		struct dentry *dentry;

		dentry = list_entry(list->prev, struct dentry, d_lru);
		spin_lock(&dentry->d_lock);
		rcu_read_lock();
		if (!lock_for_kill(dentry)) {
			bool can_free;
			rcu_read_unlock();
			d_shrink_del(dentry);
			can_free = dentry->d_flags & DCACHE_DENTRY_KILLED;
			spin_unlock(&dentry->d_lock);
			if (can_free)
				dentry_free(dentry);
			continue;
		}
		d_shrink_del(dentry);
		shrink_kill(dentry);
	}
}

static enum lru_status dentry_lru_isolate(struct list_head *item,
		struct list_lru_one *lru, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);


	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	/*
	 * Referenced dentries are still in use. If they have active
	 * counts, just remove them from the LRU. Otherwise give them
	 * another pass through the LRU.
	 */
	if (dentry->d_lockref.count) {
		d_lru_isolate(lru, dentry);
		spin_unlock(&dentry->d_lock);
		return LRU_REMOVED;
	}

	if (dentry->d_flags & DCACHE_REFERENCED) {
		dentry->d_flags &= ~DCACHE_REFERENCED;
		spin_unlock(&dentry->d_lock);

		/*
		 * The list move itself will be made by the common LRU code. At
		 * this point, we've dropped the dentry->d_lock but keep the
		 * lru lock. This is safe to do, since every list movement is
		 * protected by the lru lock even if both locks are held.
		 *
		 * This is guaranteed by the fact that all LRU management
		 * functions are intermediated by the LRU API calls like
		 * list_lru_add_obj and list_lru_del_obj. List movement in this
		 * file only ever occurs through these functions or through
		 * callbacks like this one, that are called from the LRU API.
		 *
		 * The only exceptions to this are functions like
		 * shrink_dentry_list, and code that first checks for the
		 * DCACHE_SHRINK_LIST flag. Those are guaranteed to be
		 * operating only with stack provided lists after they are
		 * properly isolated from the main list. It is thus always a
		 * local access.
		 */
		return LRU_ROTATE;
	}

	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}

/**
 * prune_dcache_sb - shrink the dcache
 * @sb: superblock
 * @sc: shrink control, passed to list_lru_shrink_walk()
 *
 * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
 * is done when we need more memory and called from the superblock shrinker
 * function.
 *
 * This function may fail to free any resources if all the dentries are in
 * use.
 */
long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
				     dentry_lru_isolate, &dispose);
	shrink_dentry_list(&dispose);
	return freed;
}
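
/*
 * Roughly what the generic superblock shrinker does with this (cf.
 * super_cache_scan() in fs/super.c):
 *
 *	freed = prune_dcache_sb(sb, sc);
 */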

static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
		struct list_lru_one *lru, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}


/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This is used to free
 * the dcache before unmounting a file system.
 */
void shrink_dcache_sb(struct super_block *sb)
{
	do {
		LIST_HEAD(dispose);

		list_lru_walk(&sb->s_dentry_lru,
			      dentry_lru_isolate_shrink, &dispose, 1024);
		shrink_dentry_list(&dispose);
	} while (list_lru_count(&sb->s_dentry_lru) > 0);
}
EXPORT_SYMBOL(shrink_dcache_sb);

/**
 * enum d_walk_ret - action to take during tree walk
 * @D_WALK_CONTINUE:	continue walk
 * @D_WALK_QUIT:	quit walk
 * @D_WALK_NORETRY:	quit when retry is needed
 * @D_WALK_SKIP:	skip this dentry and its children
 */
enum d_walk_ret {
	D_WALK_CONTINUE,
	D_WALK_QUIT,
	D_WALK_NORETRY,
	D_WALK_SKIP,
};

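/*
 * A minimal enter() callback for d_walk() (illustrative; path_check_mount()
 * further down is a real in-tree one):
 *
 *	static enum d_walk_ret count_one(void *data, struct dentry *dentry)
 *	{
 *		(*(unsigned long *)data)++;
 *		return D_WALK_CONTINUE;
 *	}
 */
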
/**
 * d_walk - walk the dentry tree
 * @parent:	start of walk
 * @data:	data passed to @enter()
 * @enter:	callback when first entering the dentry
 *
 * The @enter() callbacks are called with d_lock held.
 */
static void d_walk(struct dentry *parent, void *data,
		   enum d_walk_ret (*enter)(void *, struct dentry *))
{
	struct dentry *this_parent, *dentry;
	unsigned seq = 0;
	enum d_walk_ret ret;
	bool retry = true;

again:
	read_seqbegin_or_lock(&rename_lock, &seq);
	this_parent = parent;
	spin_lock(&this_parent->d_lock);

	ret = enter(data, this_parent);
	switch (ret) {
	case D_WALK_CONTINUE:
		break;
	case D_WALK_QUIT:
	case D_WALK_SKIP:
		goto out_unlock;
	case D_WALK_NORETRY:
		retry = false;
		break;
	}
repeat:
	dentry = d_first_child(this_parent);
resume:
	hlist_for_each_entry_from(dentry, d_sib) {
		if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
			continue;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

		ret = enter(data, dentry);
		switch (ret) {
		case D_WALK_CONTINUE:
			break;
		case D_WALK_QUIT:
			spin_unlock(&dentry->d_lock);
			goto out_unlock;
		case D_WALK_NORETRY:
			retry = false;
			break;
		case D_WALK_SKIP:
			spin_unlock(&dentry->d_lock);
			continue;
		}

		if (!hlist_empty(&dentry->d_children)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	rcu_read_lock();
ascend:
	if (this_parent != parent) {
		dentry = this_parent;
		this_parent = dentry->d_parent;

		spin_unlock(&dentry->d_lock);
		spin_lock(&this_parent->d_lock);

		/* might go back up the wrong parent if we have had a rename. */
		if (need_seqretry(&rename_lock, seq))
			goto rename_retry;
		/* go into the first sibling still alive */
		hlist_for_each_entry_continue(dentry, d_sib) {
			if (likely(!(dentry->d_flags & DCACHE_DENTRY_KILLED))) {
				rcu_read_unlock();
				goto resume;
			}
		}
		goto ascend;
	}
	if (need_seqretry(&rename_lock, seq))
		goto rename_retry;
	rcu_read_unlock();

out_unlock:
	spin_unlock(&this_parent->d_lock);
	done_seqretry(&rename_lock, seq);
	return;

rename_retry:
	spin_unlock(&this_parent->d_lock);
	rcu_read_unlock();
	BUG_ON(seq & 1);
	if (!retry)
		return;
	seq = 1;
	goto again;
}

struct check_mount {
	struct vfsmount *mnt;
	unsigned int mounted;
};

static enum d_walk_ret path_check_mount(void *data, struct dentry *dentry)
{
	struct check_mount *info = data;
	struct path path = { .mnt = info->mnt, .dentry = dentry };

	if (likely(!d_mountpoint(dentry)))
		return D_WALK_CONTINUE;
	if (__path_is_mountpoint(&path)) {
		info->mounted = 1;
		return D_WALK_QUIT;
	}
	return D_WALK_CONTINUE;
}

/**
 * path_has_submounts - check for mounts over a dentry in the
 *                      current namespace.
 * @parent: path to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point in the current namespace.
 */
int path_has_submounts(const struct path *parent)
{
	struct check_mount data = { .mnt = parent->mnt, .mounted = 0 };

	read_seqlock_excl(&mount_lock);
	d_walk(parent->dentry, &data, path_check_mount);
	read_sequnlock_excl(&mount_lock);

	return data.mounted;
}
EXPORT_SYMBOL(path_has_submounts);
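
/*
 * autofs, for one, relies on this in its expiry checks so that trees with
 * active mounts in the current namespace are not expired.
 */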

/*
 * Called by mount code to set a mountpoint and check if the mountpoint is
 * reachable (e.g. NFS can unhash a directory dentry and then the complete
 * subtree can become unreachable).
 *
 * Only one of d_invalidate() and d_set_mounted() must succeed.  For
 * this reason take rename_lock and d_lock on dentry and ancestors.
 */
int d_set_mounted(struct dentry *dentry)
{
	struct dentry *p;
	int ret = -ENOENT;
	write_seqlock(&rename_lock);
	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
		/* Need exclusion wrt. d_invalidate() */
		spin_lock(&p->d_lock);
		if (unlikely(d_unhashed(p))) {
			spin_unlock(&p->d_lock);
			goto out;
		}
		spin_unlock(&p->d_lock);
	}
	spin_lock(&dentry->d_lock);
	if (!d_unlinked(dentry)) {
		ret = -EBUSY;
		if (!d_mountpoint(dentry)) {
			dentry->d_flags |= DCACHE_MOUNTED;
			ret = 0;
		}
	}
	spin_unlock(&dentry->d_lock);
out:
	write_sequnlock(&rename_lock);
	return ret;
}

/*
 * Search the dentry child list of the specified parent,
 * and move any unused dentries to the end of the unused
 * list for prune_dcache(). We descend to the next level
 * whenever the d_children list is non-empty and continue
 * searching.
 *
 * It returns zero iff there are no unused children,
 * otherwise it returns the number of children moved to
 * the end of the unused list. This may not be the total
 * number of unused children, because select_parent can
 * drop the lock and return early due to latency
 * constraints.
 */

struct select_data {
	struct dentry *start;
	union {
		long found;
		struct dentry *victim;
	};
	struct list_head dispose;
};

static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
{
	struct select_data *data = _data;
	enum d_walk_ret ret = D_WALK_CONTINUE;

	if (data->start == dentry)
		goto out;

	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		data->found++;
	} else if (!dentry->d_lockref.count) {
		to_shrink_list(dentry, &data->dispose);
		data->found++;
	} else if (dentry->d_lockref.count < 0) {
		data->found++;
	}
	/*
	 * We can return to the caller if we have found some (this
	 * ensures forward progress). We'll be coming back to find
	 * the rest.
	 */
	if (!list_empty(&data->dispose))
		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out:
	return ret;
}

static enum d_walk_ret select_collect2(void *_data, struct dentry *dentry)
{
	struct select_data *data = _data;
	enum d_walk_ret ret = D_WALK_CONTINUE;

	if (data->start == dentry)
		goto out;

	if (!dentry->d_lockref.count) {
		if (dentry->d_flags & DCACHE_SHRINK_LIST) {
			rcu_read_lock();
			data->victim = dentry;
			return D_WALK_QUIT;
		}
		to_shrink_list(dentry, &data->dispose);
	}
	/*
	 * We can return to the caller if we have found some (this
	 * ensures forward progress). We'll be coming back to find
	 * the rest.
	 */
	if (!list_empty(&data->dispose))
		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out:
	return ret;
}

/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */
void shrink_dcache_parent(struct dentry *parent)
{
	for (;;) {
		struct select_data data = {.start = parent};

		INIT_LIST_HEAD(&data.dispose);
		d_walk(parent, &data, select_collect);

		if (!list_empty(&data.dispose)) {
			shrink_dentry_list(&data.dispose);
			continue;
		}

		cond_resched();
		if (!data.found)
			break;
		data.victim = NULL;
		d_walk(parent, &data, select_collect2);
		if (data.victim) {
			spin_lock(&data.victim->d_lock);
			if (!lock_for_kill(data.victim)) {
				spin_unlock(&data.victim->d_lock);
				rcu_read_unlock();
			} else {
				shrink_kill(data.victim);
			}
		}
		if (!list_empty(&data.dispose))
			shrink_dentry_list(&data.dispose);
	}
}
EXPORT_SYMBOL(shrink_dcache_parent);

static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
{
	/* it has busy descendants; complain about those instead */
	if (!hlist_empty(&dentry->d_children))
		return D_WALK_CONTINUE;

	/* root with refcount 1 is fine */
	if (dentry == _data && dentry->d_lockref.count == 1)
		return D_WALK_CONTINUE;

	WARN(1, "BUG: Dentry %p{i=%lx,n=%pd} "
			" still in use (%d) [unmount of %s %s]\n",
		       dentry,
		       dentry->d_inode ?
		       dentry->d_inode->i_ino : 0UL,
		       dentry,
		       dentry->d_lockref.count,
		       dentry->d_sb->s_type->name,
		       dentry->d_sb->s_id);
	return D_WALK_CONTINUE;
}

static void do_one_tree(struct dentry *dentry)
{
	shrink_dcache_parent(dentry);
	d_walk(dentry, dentry, umount_check);
	d_drop(dentry);
	dput(dentry);
}

/*
 * destroy the dentries attached to a superblock on unmounting
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	rwsem_assert_held_write(&sb->s_umount);

	dentry = sb->s_root;
	sb->s_root = NULL;
	do_one_tree(dentry);

	while (!hlist_bl_empty(&sb->s_roots)) {
		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_roots), struct dentry, d_hash));
		do_one_tree(dentry);
	}
}

static enum d_walk_ret find_submount(void *_data, struct dentry *dentry)
{
	struct dentry **victim = _data;
	if (d_mountpoint(dentry)) {
		*victim = dget_dlock(dentry);
		return D_WALK_QUIT;
	}
	return D_WALK_CONTINUE;
}

/**
 * d_invalidate - detach submounts, prune dcache, and drop
 * @dentry: dentry to invalidate (aka detach, prune and drop)
 */
void d_invalidate(struct dentry *dentry)
{
	bool had_submounts = false;
	spin_lock(&dentry->d_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dentry->d_lock);
		return;
	}
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);

	/* Negative dentries can be dropped without further checks */
	if (!dentry->d_inode)
		return;

	shrink_dcache_parent(dentry);
	for (;;) {
		struct dentry *victim = NULL;
		d_walk(dentry, &victim, find_submount);
		if (!victim) {
			if (had_submounts)
				shrink_dcache_parent(dentry);
			return;
		}
		had_submounts = true;
		detach_mounts(victim);
		dput(victim);
	}
}
EXPORT_SYMBOL(d_invalidate);

/**
 * __d_alloc - allocate a dcache entry
 * @sb: filesystem it will belong to
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */

static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;
	int err;

	dentry = kmem_cache_alloc_lru(dentry_cache, &sb->s_dentry_lru,
				      GFP_KERNEL);
	if (!dentry)
		return NULL;

	/*
	 * We guarantee that the inline name is always NUL-terminated.
	 * This way the memcpy() done by the name switching in rename
	 * will still always have a NUL at the end, even if we might
	 * be overwriting an internal NUL character
	 */
	dentry->d_shortname.string[DNAME_INLINE_LEN-1] = 0;
	if (unlikely(!name)) {
		name = &slash_name;
		dname = dentry->d_shortname.string;
	} else if (name->len > DNAME_INLINE_LEN-1) {
		size_t size = offsetof(struct external_name, name[1]);
		struct external_name *p = kmalloc(size + name->len,
						  GFP_KERNEL_ACCOUNT |
						  __GFP_RECLAIMABLE);
		if (!p) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
		atomic_set(&p->count, 1);
		dname = p->name;
	} else {
		dname = dentry->d_shortname.string;
	}

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	/* Make sure we always see the terminating NUL character */
	smp_store_release(&dentry->d_name.name, dname); /* ^^^ */

	dentry->d_flags = 0;
	lockref_init(&dentry->d_lockref);
	seqcount_spinlock_init(&dentry->d_seq, &dentry->d_lock);
	dentry->d_inode = NULL;
	dentry->d_parent = dentry;
	dentry->d_sb = sb;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	INIT_HLIST_BL_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_HLIST_HEAD(&dentry->d_children);
	INIT_HLIST_NODE(&dentry->d_u.d_alias);
	INIT_HLIST_NODE(&dentry->d_sib);
	d_set_d_op(dentry, dentry->d_sb->s_d_op);

	if (dentry->d_op && dentry->d_op->d_init) {
		err = dentry->d_op->d_init(dentry);
		if (err) {
			if (dname_external(dentry))
				kfree(external_name(dentry));
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	}

	this_cpu_inc(nr_dentry);

	return dentry;
}

/**
 * d_alloc - allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *d_alloc(struct dentry *parent, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(parent->d_sb, name);
	if (!dentry)
		return NULL;
	spin_lock(&parent->d_lock);
	/*
	 * don't need child lock because it is not subject
	 * to concurrency here
	 */
	dentry->d_parent = dget_dlock(parent);
	hlist_add_head(&dentry->d_sib, &parent->d_children);
	spin_unlock(&parent->d_lock);

	return dentry;
}
EXPORT_SYMBOL(d_alloc);
1775
d_alloc_anon(struct super_block * sb)1776 struct dentry *d_alloc_anon(struct super_block *sb)
1777 {
1778 return __d_alloc(sb, NULL);
1779 }
1780 EXPORT_SYMBOL(d_alloc_anon);
1781
d_alloc_cursor(struct dentry * parent)1782 struct dentry *d_alloc_cursor(struct dentry * parent)
1783 {
1784 struct dentry *dentry = d_alloc_anon(parent->d_sb);
1785 if (dentry) {
1786 dentry->d_flags |= DCACHE_DENTRY_CURSOR;
1787 dentry->d_parent = dget(parent);
1788 }
1789 return dentry;
1790 }
1791
1792 /**
1793 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
1794 * @sb: the superblock
1795 * @name: qstr of the name
1796 *
1797 * For a filesystem that just pins its dentries in memory and never
1798 * performs lookups at all, return an unhashed IS_ROOT dentry.
1799 * This is used for pipes, sockets et al. - the stuff that should
1800 * never be anyone's children or parents. Unlike all other
1801 * dentries, these will not have RCU delay between dropping the
1802 * last reference and freeing them.
1803 *
1804 * The only user is alloc_file_pseudo() and that's what should
1805 * be considered a public interface. Don't use directly.
1806 */
1807 struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
1808 {
1809 static const struct dentry_operations anon_ops = {
1810 .d_dname = simple_dname
1811 };
1812 struct dentry *dentry = __d_alloc(sb, name);
1813 if (likely(dentry)) {
1814 dentry->d_flags |= DCACHE_NORCU;
1815 if (!sb->s_d_op)
1816 d_set_d_op(dentry, &anon_ops);
1817 }
1818 return dentry;
1819 }
1820
1821 struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1822 {
1823 struct qstr q;
1824
1825 q.name = name;
1826 q.hash_len = hashlen_string(parent, name);
1827 return d_alloc(parent, &q);
1828 }
1829 EXPORT_SYMBOL(d_alloc_name);
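
/*
 * Editorial usage sketch: d_alloc_name() is the convenience wrapper used by
 * pseudo-filesystems that only have a C string; it hashes the name against
 * @parent and defers to d_alloc().  The inode is prepared by the caller.
 *
 *	struct dentry *dentry = d_alloc_name(parent, "stats");
 *
 *	if (!dentry)
 *		return -ENOMEM;
 *	d_add(dentry, inode);
 */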
1830
1831 void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1832 {
1833 WARN_ON_ONCE(dentry->d_op);
1834 WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH |
1835 DCACHE_OP_COMPARE |
1836 DCACHE_OP_REVALIDATE |
1837 DCACHE_OP_WEAK_REVALIDATE |
1838 DCACHE_OP_DELETE |
1839 DCACHE_OP_REAL));
1840 dentry->d_op = op;
1841 if (!op)
1842 return;
1843 if (op->d_hash)
1844 dentry->d_flags |= DCACHE_OP_HASH;
1845 if (op->d_compare)
1846 dentry->d_flags |= DCACHE_OP_COMPARE;
1847 if (op->d_revalidate)
1848 dentry->d_flags |= DCACHE_OP_REVALIDATE;
1849 if (op->d_weak_revalidate)
1850 dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
1851 if (op->d_delete)
1852 dentry->d_flags |= DCACHE_OP_DELETE;
1853 if (op->d_prune)
1854 dentry->d_flags |= DCACHE_OP_PRUNE;
1855 if (op->d_real)
1856 dentry->d_flags |= DCACHE_OP_REAL;
1858 }
1859 EXPORT_SYMBOL(d_set_d_op);
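
/*
 * Editorial sketch: the DCACHE_OP_* bits cached by d_set_d_op() let hot
 * paths test d_flags instead of dereferencing d_op.  The usual pattern is
 * to set the operations once per superblock, so every dentry inherits them
 * via __d_alloc() above ("examplefs_d_revalidate" is a hypothetical method;
 * always_delete_dentry() is the stock helper from fs/libfs.c):
 *
 *	static const struct dentry_operations examplefs_dentry_ops = {
 *		.d_revalidate	= examplefs_d_revalidate,
 *		.d_delete	= always_delete_dentry,
 *	};
 *
 *	sb->s_d_op = &examplefs_dentry_ops;	(in fill_super)
 */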
1860
1861 static unsigned d_flags_for_inode(struct inode *inode)
1862 {
1863 unsigned add_flags = DCACHE_REGULAR_TYPE;
1864
1865 if (!inode)
1866 return DCACHE_MISS_TYPE;
1867
1868 if (S_ISDIR(inode->i_mode)) {
1869 add_flags = DCACHE_DIRECTORY_TYPE;
1870 if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
1871 if (unlikely(!inode->i_op->lookup))
1872 add_flags = DCACHE_AUTODIR_TYPE;
1873 else
1874 inode->i_opflags |= IOP_LOOKUP;
1875 }
1876 goto type_determined;
1877 }
1878
1879 if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
1880 if (unlikely(inode->i_op->get_link)) {
1881 add_flags = DCACHE_SYMLINK_TYPE;
1882 goto type_determined;
1883 }
1884 inode->i_opflags |= IOP_NOFOLLOW;
1885 }
1886
1887 if (unlikely(!S_ISREG(inode->i_mode)))
1888 add_flags = DCACHE_SPECIAL_TYPE;
1889
1890 type_determined:
1891 if (unlikely(IS_AUTOMOUNT(inode)))
1892 add_flags |= DCACHE_NEED_AUTOMOUNT;
1893 return add_flags;
1894 }
1895
1896 static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1897 {
1898 unsigned add_flags = d_flags_for_inode(inode);
1899 WARN_ON(d_in_lookup(dentry));
1900
1901 spin_lock(&dentry->d_lock);
1902 /*
1903 * The negative counter only tracks dentries on the LRU. Don't dec if
1904 * d_lru is on another list.
1905 */
1906 if ((dentry->d_flags &
1907 (DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST)
1908 this_cpu_dec(nr_dentry_negative);
1909 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
1910 raw_write_seqcount_begin(&dentry->d_seq);
1911 __d_set_inode_and_type(dentry, inode, add_flags);
1912 raw_write_seqcount_end(&dentry->d_seq);
1913 fsnotify_update_flags(dentry);
1914 spin_unlock(&dentry->d_lock);
1915 }
1916
1917 /**
1918 * d_instantiate - fill in inode information for a dentry
1919 * @entry: dentry to complete
1920 * @inode: inode to attach to this dentry
1921 *
1922 * Fill in inode information in the entry.
1923 *
1924 * This turns negative dentries into productive full members
1925 * of society.
1926 *
1927 * NOTE! This assumes that the inode count has been incremented
1928 * (or otherwise set) by the caller to indicate that it is now
1929 * in use by the dcache.
1930 */
1931
1932 void d_instantiate(struct dentry *entry, struct inode * inode)
1933 {
1934 BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1935 if (inode) {
1936 security_d_instantiate(entry, inode);
1937 spin_lock(&inode->i_lock);
1938 __d_instantiate(entry, inode);
1939 spin_unlock(&inode->i_lock);
1940 }
1941 }
1942 EXPORT_SYMBOL(d_instantiate);
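
/*
 * Editorial sketch of the common create path: the filesystem allocates and
 * fills the inode, then attaches it to the (negative) dentry the VFS handed
 * to ->create().  "examplefs_new_inode" is hypothetical; note that the
 * inode reference is donated to the dcache, matching the NOTE above.
 *
 *	inode = examplefs_new_inode(dir->i_sb, mode);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	d_instantiate(dentry, inode);
 *	return 0;
 */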
1943
1944 /*
1945 * This should be equivalent to d_instantiate() + unlock_new_inode(),
1946 * with lockdep-related part of unlock_new_inode() done before
1947 * anything else. Use that instead of open-coding d_instantiate()/
1948 * unlock_new_inode() combinations.
1949 */
1950 void d_instantiate_new(struct dentry *entry, struct inode *inode)
1951 {
1952 BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1953 BUG_ON(!inode);
1954 lockdep_annotate_inode_mutex_key(inode);
1955 security_d_instantiate(entry, inode);
1956 spin_lock(&inode->i_lock);
1957 __d_instantiate(entry, inode);
1958 WARN_ON(!(inode->i_state & I_NEW));
1959 inode->i_state &= ~I_NEW & ~I_CREATING;
1960 /*
1961 * Pairs with the barrier in prepare_to_wait_event() to make sure
1962 * ___wait_var_event() either sees the bit cleared or
1963 * waitqueue_active() check in wake_up_var() sees the waiter.
1964 */
1965 smp_mb();
1966 inode_wake_up_bit(inode, __I_NEW);
1967 spin_unlock(&inode->i_lock);
1968 }
1969 EXPORT_SYMBOL(d_instantiate_new);
1970
1971 struct dentry *d_make_root(struct inode *root_inode)
1972 {
1973 struct dentry *res = NULL;
1974
1975 if (root_inode) {
1976 res = d_alloc_anon(root_inode->i_sb);
1977 if (res)
1978 d_instantiate(res, root_inode);
1979 else
1980 iput(root_inode);
1981 }
1982 return res;
1983 }
1984 EXPORT_SYMBOL(d_make_root);
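
/*
 * Editorial sketch of the usual fill_super tail.  d_make_root() consumes
 * the inode reference even on failure, so the error path needs no iput()
 * ("examplefs_root_inode" is hypothetical):
 *
 *	struct inode *root = examplefs_root_inode(sb);
 *
 *	sb->s_root = d_make_root(root);
 *	if (!sb->s_root)
 *		return -ENOMEM;
 *	return 0;
 */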
1985
1986 static struct dentry *__d_obtain_alias(struct inode *inode, bool disconnected)
1987 {
1988 struct super_block *sb;
1989 struct dentry *new, *res;
1990
1991 if (!inode)
1992 return ERR_PTR(-ESTALE);
1993 if (IS_ERR(inode))
1994 return ERR_CAST(inode);
1995
1996 sb = inode->i_sb;
1997
1998 res = d_find_any_alias(inode); /* existing alias? */
1999 if (res)
2000 goto out;
2001
2002 new = d_alloc_anon(sb);
2003 if (!new) {
2004 res = ERR_PTR(-ENOMEM);
2005 goto out;
2006 }
2007
2008 security_d_instantiate(new, inode);
2009 spin_lock(&inode->i_lock);
2010 res = __d_find_any_alias(inode); /* recheck under lock */
2011 if (likely(!res)) { /* still no alias, attach a disconnected dentry */
2012 unsigned add_flags = d_flags_for_inode(inode);
2013
2014 if (disconnected)
2015 add_flags |= DCACHE_DISCONNECTED;
2016
2017 spin_lock(&new->d_lock);
2018 __d_set_inode_and_type(new, inode, add_flags);
2019 hlist_add_head(&new->d_u.d_alias, &inode->i_dentry);
2020 if (!disconnected) {
2021 hlist_bl_lock(&sb->s_roots);
2022 hlist_bl_add_head(&new->d_hash, &sb->s_roots);
2023 hlist_bl_unlock(&sb->s_roots);
2024 }
2025 spin_unlock(&new->d_lock);
2026 spin_unlock(&inode->i_lock);
2027 inode = NULL; /* consumed by new->d_inode */
2028 res = new;
2029 } else {
2030 spin_unlock(&inode->i_lock);
2031 dput(new);
2032 }
2033
2034 out:
2035 iput(inode);
2036 return res;
2037 }
2038
2039 /**
2040 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
2041 * @inode: inode to allocate the dentry for
2042 *
2043 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
2044 * similar open by handle operations. The returned dentry may be anonymous,
2045 * or may have a full name (if the inode was already in the cache).
2046 *
2047 * When called on a directory inode, we must ensure that the inode only ever
2048 * has one dentry. If a dentry is found, that is returned instead of
2049 * allocating a new one.
2050 *
2051 * On successful return, the reference to the inode has been transferred
2052 * to the dentry. In case of an error the reference on the inode is released.
2053 * To make it easier to use in export operations a %NULL or IS_ERR inode may
2054 * be passed in and the error will be propagated to the return value,
2055 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
2056 */
2057 struct dentry *d_obtain_alias(struct inode *inode)
2058 {
2059 return __d_obtain_alias(inode, true);
2060 }
2061 EXPORT_SYMBOL(d_obtain_alias);
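
/*
 * Editorial sketch: the classic consumer is an export_operations
 * ->fh_to_dentry() method, where the inode comes from decoding a
 * filehandle ("examplefs_ilookup" is hypothetical).  A NULL or IS_ERR
 * inode is folded into the returned pointer, as documented above:
 *
 *	static struct dentry *examplefs_fh_to_dentry(struct super_block *sb,
 *				struct fid *fid, int fh_len, int fh_type)
 *	{
 *		return d_obtain_alias(examplefs_ilookup(sb, fid->i32.ino));
 *	}
 */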
2062
2063 /**
2064 * d_obtain_root - find or allocate a dentry for a given inode
2065 * @inode: inode to allocate the dentry for
2066 *
2067 * Obtain an IS_ROOT dentry for the root of a filesystem.
2068 *
2069 * We must ensure that directory inodes only ever have one dentry. If a
2070 * dentry is found, that is returned instead of allocating a new one.
2071 *
2072 * On successful return, the reference to the inode has been transferred
2073 * to the dentry. In case of an error the reference on the inode is
2074 * released. A %NULL or IS_ERR inode may be passed in and the error
2075 * will be propagated to the return value, with a %NULL @inode
2076 * replaced by ERR_PTR(-ESTALE).
2077 */
2078 struct dentry *d_obtain_root(struct inode *inode)
2079 {
2080 return __d_obtain_alias(inode, false);
2081 }
2082 EXPORT_SYMBOL(d_obtain_root);
2083
2084 /**
2085 * d_add_ci - lookup or allocate new dentry with case-exact name
2086 * @dentry: the negative dentry that was passed to the parent's lookup func
2087 * @inode: the inode case-insensitive lookup has found
2088 * @name: the case-exact name to be associated with the returned dentry
2089 *
2090 * This is to avoid filling the dcache with case-insensitive names for the
2091 * same inode; only the actual, correct case is stored in the dcache for
2092 * case-insensitive filesystems.
2093 *
2094 * For a case-insensitive lookup match and if the case-exact dentry
2095 * already exists in the dcache, use it and return it.
2096 *
2097 * If no entry exists with the exact-case name, allocate a new dentry with
2098 * the exact case, and return the spliced entry.
2099 */
2100 struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
2101 struct qstr *name)
2102 {
2103 struct dentry *found, *res;
2104
2105 /*
2106 * First check if a dentry matching the name already exists;
2107 * if not, go ahead and create it now.
2108 */
2109 found = d_hash_and_lookup(dentry->d_parent, name);
2110 if (found) {
2111 iput(inode);
2112 return found;
2113 }
2114 if (d_in_lookup(dentry)) {
2115 found = d_alloc_parallel(dentry->d_parent, name,
2116 dentry->d_wait);
2117 if (IS_ERR(found) || !d_in_lookup(found)) {
2118 iput(inode);
2119 return found;
2120 }
2121 } else {
2122 found = d_alloc(dentry->d_parent, name);
2123 if (!found) {
2124 iput(inode);
2125 return ERR_PTR(-ENOMEM);
2126 }
2127 }
2128 res = d_splice_alias(inode, found);
2129 if (res) {
2130 d_lookup_done(found);
2131 dput(found);
2132 return res;
2133 }
2134 return found;
2135 }
2136 EXPORT_SYMBOL(d_add_ci);
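
/*
 * Editorial sketch: a case-insensitive ->lookup() that matched "foo.txt"
 * against an on-disk entry spelled "FOO.TXT" would splice in the on-disk
 * spelling ("disk_name"/"disk_len" are assumed to come from the matched
 * directory entry):
 *
 *	struct qstr exact_name = QSTR_INIT(disk_name, disk_len);
 *
 *	return d_add_ci(dentry, inode, &exact_name);
 */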
2137
2138 /**
2139 * d_same_name - compare dentry name with case-exact name
2140 * @dentry: the dentry whose name to compare
2141 * @parent: parent dentry
2142 * @name: the case-exact name to compare against
2143 *
2144 * Return: true if the names are the same, false otherwise
2145 */
2146 bool d_same_name(const struct dentry *dentry, const struct dentry *parent,
2147 const struct qstr *name)
2148 {
2149 if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
2150 if (dentry->d_name.len != name->len)
2151 return false;
2152 return dentry_cmp(dentry, name->name, name->len) == 0;
2153 }
2154 return parent->d_op->d_compare(dentry,
2155 dentry->d_name.len, dentry->d_name.name,
2156 name) == 0;
2157 }
2158 EXPORT_SYMBOL_GPL(d_same_name);
2159
2160 /*
2161 * This is __d_lookup_rcu() when the parent dentry has
2162 * DCACHE_OP_COMPARE, which makes things much nastier.
2163 */
2164 static noinline struct dentry *__d_lookup_rcu_op_compare(
2165 const struct dentry *parent,
2166 const struct qstr *name,
2167 unsigned *seqp)
2168 {
2169 u64 hashlen = name->hash_len;
2170 struct hlist_bl_head *b = d_hash(hashlen);
2171 struct hlist_bl_node *node;
2172 struct dentry *dentry;
2173
2174 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2175 int tlen;
2176 const char *tname;
2177 unsigned seq;
2178
2179 seqretry:
2180 seq = raw_seqcount_begin(&dentry->d_seq);
2181 if (dentry->d_parent != parent)
2182 continue;
2183 if (d_unhashed(dentry))
2184 continue;
2185 if (dentry->d_name.hash != hashlen_hash(hashlen))
2186 continue;
2187 tlen = dentry->d_name.len;
2188 tname = dentry->d_name.name;
2189 /* we want a consistent (name,len) pair */
2190 if (read_seqcount_retry(&dentry->d_seq, seq)) {
2191 cpu_relax();
2192 goto seqretry;
2193 }
2194 if (parent->d_op->d_compare(dentry, tlen, tname, name) != 0)
2195 continue;
2196 *seqp = seq;
2197 return dentry;
2198 }
2199 return NULL;
2200 }
2201
2202 /**
2203 * __d_lookup_rcu - search for a dentry (racy, store-free)
2204 * @parent: parent dentry
2205 * @name: qstr of name we wish to find
2206 * @seqp: returns d_seq value at the point where the dentry was found
2207 * Returns: dentry, or NULL
2208 *
2209 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
2210 * resolution (store-free path walking) design described in
2211 * Documentation/filesystems/path-lookup.txt.
2212 *
2213 * This is not to be used outside core vfs.
2214 *
2215 * __d_lookup_rcu must only be used in rcu-walk mode, i.e. with vfsmount lock
2216 * held, and rcu_read_lock held. The returned dentry must not be stored or
2217 * used without taking d_lock and checking that the d_seq sequence count
2218 * still matches the @seq returned here.
2219 *
2220 * Alternatively, __d_lookup_rcu may be called again to look up the child of
2221 * the returned dentry, so long as its parent's seqlock is checked after the
2222 * child is looked up. Thus, an interlocking stepping of sequence lock checks
2223 * is formed, giving integrity down the path walk.
2224 *
2225 * NOTE! The caller *has* to check the resulting dentry against the sequence
2226 * number we've returned before using any of the resulting dentry state!
2227 */
2228 struct dentry *__d_lookup_rcu(const struct dentry *parent,
2229 const struct qstr *name,
2230 unsigned *seqp)
2231 {
2232 u64 hashlen = name->hash_len;
2233 const unsigned char *str = name->name;
2234 struct hlist_bl_head *b = d_hash(hashlen);
2235 struct hlist_bl_node *node;
2236 struct dentry *dentry;
2237
2238 /*
2239 * Note: There is significant duplication with __d_lookup which is
2240 * required to prevent single threaded performance regressions
2241 * especially on architectures where smp_rmb (in seqcounts) are costly.
2242 * Keep the two functions in sync.
2243 */
2244
2245 if (unlikely(parent->d_flags & DCACHE_OP_COMPARE))
2246 return __d_lookup_rcu_op_compare(parent, name, seqp);
2247
2248 /*
2249 * The hash list is protected using RCU.
2250 *
2251 * Carefully use d_seq when comparing a candidate dentry, to avoid
2252 * races with d_move().
2253 *
2254 * It is possible that concurrent renames can mess up our list
2255 * walk here and make us miss our dentry, giving a false-negative
2256 * result. d_lookup() protects against concurrent
2257 * renames using rename_lock seqlock.
2258 *
2259 * See Documentation/filesystems/path-lookup.txt for more details.
2260 */
2261 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2262 unsigned seq;
2263
2264 /*
2265 * The dentry sequence count protects us from concurrent
2266 * renames, and thus protects parent and name fields.
2267 *
2268 * The caller must perform a seqcount check in order
2269 * to do anything useful with the returned dentry.
2270 *
2271 * NOTE! We do a "raw" seqcount_begin here. That means that
2272 * we don't wait for the sequence count to stabilize if it
2273 * is in the middle of a sequence change. If we do the slow
2274 * dentry compare, we will do seqretries until it is stable,
2275 * and if we end up with a successful lookup, we actually
2276 * want to exit RCU lookup anyway.
2277 *
2278 * Note that raw_seqcount_begin still *does* smp_rmb(), so
2279 * we are still guaranteed NUL-termination of ->d_name.name.
2280 */
2281 seq = raw_seqcount_begin(&dentry->d_seq);
2282 if (dentry->d_parent != parent)
2283 continue;
2284 if (d_unhashed(dentry))
2285 continue;
2286 if (dentry->d_name.hash_len != hashlen)
2287 continue;
2288 if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
2289 continue;
2290 *seqp = seq;
2291 return dentry;
2292 }
2293 return NULL;
2294 }
2295
2296 /**
2297 * d_lookup - search for a dentry
2298 * @parent: parent dentry
2299 * @name: qstr of name we wish to find
2300 * Returns: dentry, or NULL
2301 *
2302 * d_lookup searches the children of the parent dentry for the name in
2303 * question. If the dentry is found, its reference count is incremented and the
2304 * dentry is returned. The caller must use dput to free the entry when it has
2305 * finished using it. %NULL is returned if the dentry does not exist.
2306 */
2307 struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
2308 {
2309 struct dentry *dentry;
2310 unsigned seq;
2311
2312 do {
2313 seq = read_seqbegin(&rename_lock);
2314 dentry = __d_lookup(parent, name);
2315 if (dentry)
2316 break;
2317 } while (read_seqretry(&rename_lock, seq));
2318 return dentry;
2319 }
2320 EXPORT_SYMBOL(d_lookup);
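
/*
 * Editorial sketch: d_lookup() hands out a counted reference, so the
 * caller pairs it with dput():
 *
 *	struct dentry *child = d_lookup(parent, &name);
 *
 *	if (child) {
 *		... use child ...
 *		dput(child);
 *	}
 */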
2321
2322 /**
2323 * __d_lookup - search for a dentry (racy)
2324 * @parent: parent dentry
2325 * @name: qstr of name we wish to find
2326 * Returns: dentry, or NULL
2327 *
2328 * __d_lookup is like d_lookup; however, it may (rarely) return a
2329 * false-negative result due to unrelated rename activity.
2330 *
2331 * __d_lookup is slightly faster because it avoids the rename_lock read
2332 * seqlock; however, it must be used carefully, e.g. with a following
2333 * d_lookup in the case of failure.
2334 *
2335 * __d_lookup callers must be commented.
2336 */
2337 struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
2338 {
2339 unsigned int hash = name->hash;
2340 struct hlist_bl_head *b = d_hash(hash);
2341 struct hlist_bl_node *node;
2342 struct dentry *found = NULL;
2343 struct dentry *dentry;
2344
2345 /*
2346 * Note: There is significant duplication with __d_lookup_rcu which is
2347 * required to prevent single threaded performance regressions
2348 * especially on architectures where smp_rmb (in seqcounts) are costly.
2349 * Keep the two functions in sync.
2350 */
2351
2352 /*
2353 * The hash list is protected using RCU.
2354 *
2355 * Take d_lock when comparing a candidate dentry, to avoid races
2356 * with d_move().
2357 *
2358 * It is possible that concurrent renames can mess up our list
2359 * walk here and make us miss our dentry, giving a false-negative
2360 * result. d_lookup() protects against concurrent
2361 * renames using rename_lock seqlock.
2362 *
2363 * See Documentation/filesystems/path-lookup.txt for more details.
2364 */
2365 rcu_read_lock();
2366
2367 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2368
2369 if (dentry->d_name.hash != hash)
2370 continue;
2371
2372 spin_lock(&dentry->d_lock);
2373 if (dentry->d_parent != parent)
2374 goto next;
2375 if (d_unhashed(dentry))
2376 goto next;
2377
2378 if (!d_same_name(dentry, parent, name))
2379 goto next;
2380
2381 dentry->d_lockref.count++;
2382 found = dentry;
2383 spin_unlock(&dentry->d_lock);
2384 break;
2385 next:
2386 spin_unlock(&dentry->d_lock);
2387 }
2388 rcu_read_unlock();
2389
2390 return found;
2391 }
2392
2393 /**
2394 * d_hash_and_lookup - hash the qstr then search for a dentry
2395 * @dir: Directory to search in
2396 * @name: qstr of name we wish to find
2397 *
2398 * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
2399 */
2400 struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
2401 {
2402 /*
2403 * Check for a fs-specific hash function. Note that we must
2404 * calculate the standard hash first, as the d_op->d_hash()
2405 * routine may choose to leave the hash value unchanged.
2406 */
2407 name->hash = full_name_hash(dir, name->name, name->len);
2408 if (dir->d_flags & DCACHE_OP_HASH) {
2409 int err = dir->d_op->d_hash(dir, name);
2410 if (unlikely(err < 0))
2411 return ERR_PTR(err);
2412 }
2413 return d_lookup(dir, name);
2414 }
2415 EXPORT_SYMBOL(d_hash_and_lookup);
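
/*
 * Editorial sketch: callers that start from a bare string (procfs-style
 * lookups, for instance) let this helper compute the hash and honour a
 * parent's ->d_hash() before searching.  An ERR_PTR() return means the
 * name was rejected by ->d_hash():
 *
 *	struct qstr q = QSTR_INIT(buf, len);
 *	struct dentry *res = d_hash_and_lookup(dir, &q);
 *
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 */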
2416
2417 /*
2418 * When a file is deleted, we have two options:
2419 * - turn this dentry into a negative dentry
2420 * - unhash this dentry and free it.
2421 *
2422 * Usually, we want to just turn this into
2423 * a negative dentry, but if anybody else is
2424 * currently using the dentry or the inode
2425 * we can't do that and we fall back on removing
2426 * it from the hash queues and waiting for
2427 * it to be deleted later when it has no users
2428 */
2429
2430 /**
2431 * d_delete - delete a dentry
2432 * @dentry: The dentry to delete
2433 *
2434 * Turn the dentry into a negative dentry if possible, otherwise
2435 * remove it from the hash queues so it can be deleted later
2436 */
2437
2438 void d_delete(struct dentry * dentry)
2439 {
2440 struct inode *inode = dentry->d_inode;
2441
2442 spin_lock(&inode->i_lock);
2443 spin_lock(&dentry->d_lock);
2444 /*
2445 * Are we the only user?
2446 */
2447 if (dentry->d_lockref.count == 1) {
2448 if (dentry_negative_policy)
2449 __d_drop(dentry);
2450 dentry->d_flags &= ~DCACHE_CANT_MOUNT;
2451 dentry_unlink_inode(dentry);
2452 } else {
2453 __d_drop(dentry);
2454 spin_unlock(&dentry->d_lock);
2455 spin_unlock(&inode->i_lock);
2456 }
2457 }
2458 EXPORT_SYMBOL(d_delete);
2459
2460 static void __d_rehash(struct dentry *entry)
2461 {
2462 struct hlist_bl_head *b = d_hash(entry->d_name.hash);
2463
2464 hlist_bl_lock(b);
2465 hlist_bl_add_head_rcu(&entry->d_hash, b);
2466 hlist_bl_unlock(b);
2467 }
2468
2469 /**
2470 * d_rehash - add an entry back to the hash
2471 * @entry: dentry to add to the hash
2472 *
2473 * Adds a dentry to the hash according to its name.
2474 */
2475
2476 void d_rehash(struct dentry * entry)
2477 {
2478 spin_lock(&entry->d_lock);
2479 __d_rehash(entry);
2480 spin_unlock(&entry->d_lock);
2481 }
2482 EXPORT_SYMBOL(d_rehash);
2483
2484 static inline unsigned start_dir_add(struct inode *dir)
2485 {
2486 preempt_disable_nested();
2487 for (;;) {
2488 unsigned n = dir->i_dir_seq;
2489 if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
2490 return n;
2491 cpu_relax();
2492 }
2493 }
2494
2495 static inline void end_dir_add(struct inode *dir, unsigned int n,
2496 wait_queue_head_t *d_wait)
2497 {
2498 smp_store_release(&dir->i_dir_seq, n + 2);
2499 preempt_enable_nested();
2500 if (wq_has_sleeper(d_wait))
2501 wake_up_all(d_wait);
2502 }
2503
2504 static void d_wait_lookup(struct dentry *dentry)
2505 {
2506 if (d_in_lookup(dentry)) {
2507 DECLARE_WAITQUEUE(wait, current);
2508 add_wait_queue(dentry->d_wait, &wait);
2509 do {
2510 set_current_state(TASK_UNINTERRUPTIBLE);
2511 spin_unlock(&dentry->d_lock);
2512 schedule();
2513 spin_lock(&dentry->d_lock);
2514 } while (d_in_lookup(dentry));
2515 }
2516 }
2517
2518 struct dentry *d_alloc_parallel(struct dentry *parent,
2519 const struct qstr *name,
2520 wait_queue_head_t *wq)
2521 {
2522 unsigned int hash = name->hash;
2523 struct hlist_bl_head *b = in_lookup_hash(parent, hash);
2524 struct hlist_bl_node *node;
2525 struct dentry *new = d_alloc(parent, name);
2526 struct dentry *dentry;
2527 unsigned seq, r_seq, d_seq;
2528
2529 if (unlikely(!new))
2530 return ERR_PTR(-ENOMEM);
2531
2532 retry:
2533 rcu_read_lock();
2534 seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
2535 r_seq = read_seqbegin(&rename_lock);
2536 dentry = __d_lookup_rcu(parent, name, &d_seq);
2537 if (unlikely(dentry)) {
2538 if (!lockref_get_not_dead(&dentry->d_lockref)) {
2539 rcu_read_unlock();
2540 goto retry;
2541 }
2542 if (read_seqcount_retry(&dentry->d_seq, d_seq)) {
2543 rcu_read_unlock();
2544 dput(dentry);
2545 goto retry;
2546 }
2547 rcu_read_unlock();
2548 dput(new);
2549 return dentry;
2550 }
2551 if (unlikely(read_seqretry(&rename_lock, r_seq))) {
2552 rcu_read_unlock();
2553 goto retry;
2554 }
2555
2556 if (unlikely(seq & 1)) {
2557 rcu_read_unlock();
2558 goto retry;
2559 }
2560
2561 hlist_bl_lock(b);
2562 if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) {
2563 hlist_bl_unlock(b);
2564 rcu_read_unlock();
2565 goto retry;
2566 }
2567 /*
2568 * No changes for the parent since the beginning of d_lookup().
2569 * Since all removals from the chain happen with hlist_bl_lock(),
2570 * any potential in-lookup matches are going to stay here until
2571 * we unlock the chain. All fields are stable in everything
2572 * we encounter.
2573 */
2574 hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) {
2575 if (dentry->d_name.hash != hash)
2576 continue;
2577 if (dentry->d_parent != parent)
2578 continue;
2579 if (!d_same_name(dentry, parent, name))
2580 continue;
2581 hlist_bl_unlock(b);
2582 /* now we can try to grab a reference */
2583 if (!lockref_get_not_dead(&dentry->d_lockref)) {
2584 rcu_read_unlock();
2585 goto retry;
2586 }
2587
2588 rcu_read_unlock();
2589 /*
2590 * somebody is likely to be still doing lookup for it;
2591 * wait for them to finish
2592 */
2593 spin_lock(&dentry->d_lock);
2594 d_wait_lookup(dentry);
2595 /*
2596 * it's not in-lookup anymore; in principle we should repeat
2597 * everything from dcache lookup, but it's likely to be what
2598 * d_lookup() would've found anyway. If it is, just return it;
2599 * otherwise we really have to repeat the whole thing.
2600 */
2601 if (unlikely(dentry->d_name.hash != hash))
2602 goto mismatch;
2603 if (unlikely(dentry->d_parent != parent))
2604 goto mismatch;
2605 if (unlikely(d_unhashed(dentry)))
2606 goto mismatch;
2607 if (unlikely(!d_same_name(dentry, parent, name)))
2608 goto mismatch;
2609 /* OK, it *is* a hashed match; return it */
2610 spin_unlock(&dentry->d_lock);
2611 dput(new);
2612 return dentry;
2613 }
2614 rcu_read_unlock();
2615 /* we can't take ->d_lock here; it's OK, though. */
2616 new->d_flags |= DCACHE_PAR_LOOKUP;
2617 new->d_wait = wq;
2618 hlist_bl_add_head(&new->d_u.d_in_lookup_hash, b);
2619 hlist_bl_unlock(b);
2620 return new;
2621 mismatch:
2622 spin_unlock(&dentry->d_lock);
2623 dput(dentry);
2624 goto retry;
2625 }
2626 EXPORT_SYMBOL(d_alloc_parallel);
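
/*
 * Editorial sketch of the in-lookup protocol from the caller's side
 * (lookup_slow() in fs/namei.c is the main user): whoever gets back a
 * dentry still marked in-lookup owns the actual filesystem lookup and
 * must end it with d_lookup_done():
 *
 *	struct dentry *res = d_alloc_parallel(parent, &name, &wq);
 *
 *	if (IS_ERR(res))
 *		return res;
 *	if (d_in_lookup(res)) {
 *		... call the filesystem's ->lookup() ...
 *		d_lookup_done(res);
 *	}
 */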
2627
2628 /*
2629 * - Unhash the dentry
2630 * - Retrieve and clear the waitqueue head in dentry
2631 * - Return the waitqueue head
2632 */
2633 static wait_queue_head_t *__d_lookup_unhash(struct dentry *dentry)
2634 {
2635 wait_queue_head_t *d_wait;
2636 struct hlist_bl_head *b;
2637
2638 lockdep_assert_held(&dentry->d_lock);
2639
2640 b = in_lookup_hash(dentry->d_parent, dentry->d_name.hash);
2641 hlist_bl_lock(b);
2642 dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
2643 __hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
2644 d_wait = dentry->d_wait;
2645 dentry->d_wait = NULL;
2646 hlist_bl_unlock(b);
2647 INIT_HLIST_NODE(&dentry->d_u.d_alias);
2648 INIT_LIST_HEAD(&dentry->d_lru);
2649 return d_wait;
2650 }
2651
2652 void __d_lookup_unhash_wake(struct dentry *dentry)
2653 {
2654 spin_lock(&dentry->d_lock);
2655 wake_up_all(__d_lookup_unhash(dentry));
2656 spin_unlock(&dentry->d_lock);
2657 }
2658 EXPORT_SYMBOL(__d_lookup_unhash_wake);
2659
2660 /* inode->i_lock held if inode is non-NULL */
2661
2662 static inline void __d_add(struct dentry *dentry, struct inode *inode)
2663 {
2664 wait_queue_head_t *d_wait;
2665 struct inode *dir = NULL;
2666 unsigned n;
2667 spin_lock(&dentry->d_lock);
2668 if (unlikely(d_in_lookup(dentry))) {
2669 dir = dentry->d_parent->d_inode;
2670 n = start_dir_add(dir);
2671 d_wait = __d_lookup_unhash(dentry);
2672 }
2673 if (inode) {
2674 unsigned add_flags = d_flags_for_inode(inode);
2675 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
2676 raw_write_seqcount_begin(&dentry->d_seq);
2677 __d_set_inode_and_type(dentry, inode, add_flags);
2678 raw_write_seqcount_end(&dentry->d_seq);
2679 fsnotify_update_flags(dentry);
2680 }
2681 __d_rehash(dentry);
2682 if (dir)
2683 end_dir_add(dir, n, d_wait);
2684 spin_unlock(&dentry->d_lock);
2685 if (inode)
2686 spin_unlock(&inode->i_lock);
2687 }
2688
2689 /**
2690 * d_add - add dentry to hash queues
2691 * @entry: dentry to add
2692 * @inode: The inode to attach to this dentry
2693 *
2694 * This adds the entry to the hash queues and attaches @inode to it.
2695 * The entry was actually filled in earlier during d_alloc().
2696 */
2697
2698 void d_add(struct dentry *entry, struct inode *inode)
2699 {
2700 if (inode) {
2701 security_d_instantiate(entry, inode);
2702 spin_lock(&inode->i_lock);
2703 }
2704 __d_add(entry, inode);
2705 }
2706 EXPORT_SYMBOL(d_add);
2707
2708 static void swap_names(struct dentry *dentry, struct dentry *target)
2709 {
2710 if (unlikely(dname_external(target))) {
2711 if (unlikely(dname_external(dentry))) {
2712 /*
2713 * Both external: swap the pointers
2714 */
2715 swap(target->d_name.name, dentry->d_name.name);
2716 } else {
2717 /*
2718 * dentry:internal, target:external. Steal target's
2719 * storage and make target internal.
2720 */
2721 dentry->d_name.name = target->d_name.name;
2722 target->d_shortname = dentry->d_shortname;
2723 target->d_name.name = target->d_shortname.string;
2724 }
2725 } else {
2726 if (unlikely(dname_external(dentry))) {
2727 /*
2728 * dentry:external, target:internal. Give dentry's
2729 * storage to target and make dentry internal
2730 */
2731 target->d_name.name = dentry->d_name.name;
2732 dentry->d_shortname = target->d_shortname;
2733 dentry->d_name.name = dentry->d_shortname.string;
2734 } else {
2735 /*
2736 * Both are internal.
2737 */
2738 for (int i = 0; i < DNAME_INLINE_WORDS; i++)
2739 swap(dentry->d_shortname.words[i],
2740 target->d_shortname.words[i]);
2741 }
2742 }
2743 swap(dentry->d_name.hash_len, target->d_name.hash_len);
2744 }
2745
2746 static void copy_name(struct dentry *dentry, struct dentry *target)
2747 {
2748 struct external_name *old_name = NULL;
2749 if (unlikely(dname_external(dentry)))
2750 old_name = external_name(dentry);
2751 if (unlikely(dname_external(target))) {
2752 atomic_inc(&external_name(target)->count);
2753 dentry->d_name = target->d_name;
2754 } else {
2755 dentry->d_shortname = target->d_shortname;
2756 dentry->d_name.name = dentry->d_shortname.string;
2757 dentry->d_name.hash_len = target->d_name.hash_len;
2758 }
2759 if (old_name && likely(atomic_dec_and_test(&old_name->count)))
2760 kfree_rcu(old_name, head);
2761 }
2762
2763 /*
2764 * __d_move - move a dentry
2765 * @dentry: entry to move
2766 * @target: new dentry
2767 * @exchange: exchange the two dentries
2768 *
2769 * Update the dcache to reflect the move of a file name. Negative
2770 * dcache entries should not be moved in this way. Caller must hold
2771 * rename_lock, the i_rwsem of the source and target directories,
2772 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
2773 */
2774 static void __d_move(struct dentry *dentry, struct dentry *target,
2775 bool exchange)
2776 {
2777 struct dentry *old_parent, *p;
2778 wait_queue_head_t *d_wait;
2779 struct inode *dir = NULL;
2780 unsigned n;
2781
2782 WARN_ON(!dentry->d_inode);
2783 if (WARN_ON(dentry == target))
2784 return;
2785
2786 BUG_ON(d_ancestor(target, dentry));
2787 old_parent = dentry->d_parent;
2788 p = d_ancestor(old_parent, target);
2789 if (IS_ROOT(dentry)) {
2790 BUG_ON(p);
2791 spin_lock(&target->d_parent->d_lock);
2792 } else if (!p) {
2793 /* target is not a descendant of dentry->d_parent */
2794 spin_lock(&target->d_parent->d_lock);
2795 spin_lock_nested(&old_parent->d_lock, DENTRY_D_LOCK_NESTED);
2796 } else {
2797 BUG_ON(p == dentry);
2798 spin_lock(&old_parent->d_lock);
2799 if (p != target)
2800 spin_lock_nested(&target->d_parent->d_lock,
2801 DENTRY_D_LOCK_NESTED);
2802 }
2803 spin_lock_nested(&dentry->d_lock, 2);
2804 spin_lock_nested(&target->d_lock, 3);
2805
2806 if (unlikely(d_in_lookup(target))) {
2807 dir = target->d_parent->d_inode;
2808 n = start_dir_add(dir);
2809 d_wait = __d_lookup_unhash(target);
2810 }
2811
2812 write_seqcount_begin(&dentry->d_seq);
2813 write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
2814
2815 /* unhash both */
2816 if (!d_unhashed(dentry))
2817 ___d_drop(dentry);
2818 if (!d_unhashed(target))
2819 ___d_drop(target);
2820
2821 /* ... and switch them in the tree */
2822 dentry->d_parent = target->d_parent;
2823 if (!exchange) {
2824 copy_name(dentry, target);
2825 target->d_hash.pprev = NULL;
2826 dentry->d_parent->d_lockref.count++;
2827 if (dentry != old_parent) /* wasn't IS_ROOT */
2828 WARN_ON(!--old_parent->d_lockref.count);
2829 } else {
2830 target->d_parent = old_parent;
2831 swap_names(dentry, target);
2832 if (!hlist_unhashed(&target->d_sib))
2833 __hlist_del(&target->d_sib);
2834 hlist_add_head(&target->d_sib, &target->d_parent->d_children);
2835 __d_rehash(target);
2836 fsnotify_update_flags(target);
2837 }
2838 if (!hlist_unhashed(&dentry->d_sib))
2839 __hlist_del(&dentry->d_sib);
2840 hlist_add_head(&dentry->d_sib, &dentry->d_parent->d_children);
2841 __d_rehash(dentry);
2842 fsnotify_update_flags(dentry);
2843 fscrypt_handle_d_move(dentry);
2844
2845 write_seqcount_end(&target->d_seq);
2846 write_seqcount_end(&dentry->d_seq);
2847
2848 if (dir)
2849 end_dir_add(dir, n, d_wait);
2850
2851 if (dentry->d_parent != old_parent)
2852 spin_unlock(&dentry->d_parent->d_lock);
2853 if (dentry != old_parent)
2854 spin_unlock(&old_parent->d_lock);
2855 spin_unlock(&target->d_lock);
2856 spin_unlock(&dentry->d_lock);
2857 }
2858
2859 /*
2860 * d_move - move a dentry
2861 * @dentry: entry to move
2862 * @target: new dentry
2863 *
2864 * Update the dcache to reflect the move of a file name. Negative
2865 * dcache entries should not be moved in this way. See the locking
2866 * requirements for __d_move.
2867 */
2868 void d_move(struct dentry *dentry, struct dentry *target)
2869 {
2870 write_seqlock(&rename_lock);
2871 __d_move(dentry, target, false);
2872 write_sequnlock(&rename_lock);
2873 }
2874 EXPORT_SYMBOL(d_move);
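
/*
 * Editorial note: most filesystems never call d_move() directly; once
 * ->rename() succeeds, vfs_rename() performs the d_move() for them
 * (unless the filesystem sets FS_RENAME_DOES_D_MOVE).  Filesystems that
 * learn about renames out of band (e.g. a network filesystem told by its
 * server) do the same fixup themselves:
 *
 *	d_move(old_dentry, new_dentry);
 */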
2875
2876 /*
2877 * d_exchange - exchange two dentries
2878 * @dentry1: first dentry
2879 * @dentry2: second dentry
2880 */
2881 void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
2882 {
2883 write_seqlock(&rename_lock);
2884
2885 WARN_ON(!dentry1->d_inode);
2886 WARN_ON(!dentry2->d_inode);
2887 WARN_ON(IS_ROOT(dentry1));
2888 WARN_ON(IS_ROOT(dentry2));
2889
2890 __d_move(dentry1, dentry2, true);
2891
2892 write_sequnlock(&rename_lock);
2893 }
2894
2895 /**
2896 * d_ancestor - search for an ancestor
2897 * @p1: ancestor dentry
2898 * @p2: child dentry
2899 *
2900 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
2901 * an ancestor of p2, else NULL.
2902 */
2903 struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2904 {
2905 struct dentry *p;
2906
2907 for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2908 if (p->d_parent == p1)
2909 return p;
2910 }
2911 return NULL;
2912 }
2913
2914 /*
2915 * This helper attempts to cope with remotely renamed directories
2916 *
2917 * It assumes that the caller is already holding
2918 * dentry->d_parent->d_inode->i_rwsem, and rename_lock
2919 *
2920 * Note: If ever the locking in lock_rename() changes, then please
2921 * remember to update this too...
2922 */
2923 static int __d_unalias(struct dentry *dentry, struct dentry *alias)
2924 {
2925 struct mutex *m1 = NULL;
2926 struct rw_semaphore *m2 = NULL;
2927 int ret = -ESTALE;
2928
2929 /* If alias and dentry share a parent, then no extra locks required */
2930 if (alias->d_parent == dentry->d_parent)
2931 goto out_unalias;
2932
2933 /* See lock_rename() */
2934 if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
2935 goto out_err;
2936 m1 = &dentry->d_sb->s_vfs_rename_mutex;
2937 if (!inode_trylock_shared(alias->d_parent->d_inode))
2938 goto out_err;
2939 m2 = &alias->d_parent->d_inode->i_rwsem;
2940 out_unalias:
2941 if (alias->d_op && alias->d_op->d_unalias_trylock &&
2942 !alias->d_op->d_unalias_trylock(alias))
2943 goto out_err;
2944 __d_move(alias, dentry, false);
2945 if (alias->d_op && alias->d_op->d_unalias_unlock)
2946 alias->d_op->d_unalias_unlock(alias);
2947 ret = 0;
2948 out_err:
2949 if (m2)
2950 up_read(m2);
2951 if (m1)
2952 mutex_unlock(m1);
2953 return ret;
2954 }
2955
2956 /**
2957 * d_splice_alias - splice a disconnected dentry into the tree if one exists
2958 * @inode: the inode which may have a disconnected dentry
2959 * @dentry: a negative dentry which we want to point to the inode.
2960 *
2961 * If inode is a directory and has an IS_ROOT alias, then d_move that in
2962 * place of the given dentry and return it, else simply d_add the inode
2963 * to the dentry and return NULL.
2964 *
2965 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
2966 * we should error out: directories can't have multiple aliases.
2967 *
2968 * This is needed in the lookup routine of any filesystem that is exportable
2969 * (via knfsd) so that we can build dcache paths to directories effectively.
2970 *
2971 * If a dentry was found and moved, then it is returned. Otherwise NULL
2972 * is returned. This matches the expected return value of ->lookup.
2973 *
2974 * Cluster filesystems may call this function with a negative, hashed dentry.
2975 * In that case, we know that the inode will be a regular file, and also this
2976 * will only occur during atomic_open. So we need to check for the dentry
2977 * being already hashed only in the final case.
2978 */
2979 struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
2980 {
2981 if (IS_ERR(inode))
2982 return ERR_CAST(inode);
2983
2984 BUG_ON(!d_unhashed(dentry));
2985
2986 if (!inode)
2987 goto out;
2988
2989 security_d_instantiate(dentry, inode);
2990 spin_lock(&inode->i_lock);
2991 if (S_ISDIR(inode->i_mode)) {
2992 struct dentry *new = __d_find_any_alias(inode);
2993 if (unlikely(new)) {
2994 /* The reference to new ensures it remains an alias */
2995 spin_unlock(&inode->i_lock);
2996 write_seqlock(&rename_lock);
2997 if (unlikely(d_ancestor(new, dentry))) {
2998 write_sequnlock(&rename_lock);
2999 dput(new);
3000 new = ERR_PTR(-ELOOP);
3001 pr_warn_ratelimited(
3002 "VFS: Lookup of '%s' in %s %s"
3003 " would have caused loop\n",
3004 dentry->d_name.name,
3005 inode->i_sb->s_type->name,
3006 inode->i_sb->s_id);
3007 } else if (!IS_ROOT(new)) {
3008 struct dentry *old_parent = dget(new->d_parent);
3009 int err = __d_unalias(dentry, new);
3010 write_sequnlock(&rename_lock);
3011 if (err) {
3012 dput(new);
3013 new = ERR_PTR(err);
3014 }
3015 dput(old_parent);
3016 } else {
3017 __d_move(new, dentry, false);
3018 write_sequnlock(&rename_lock);
3019 }
3020 iput(inode);
3021 return new;
3022 }
3023 }
3024 out:
3025 __d_add(dentry, inode);
3026 return NULL;
3027 }
3028 EXPORT_SYMBOL(d_splice_alias);
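
/*
 * Editorial sketch: the canonical ->lookup() for an exportable filesystem
 * funnels whatever the inode lookup produced - an inode, NULL, or an
 * ERR_PTR() - straight into d_splice_alias() ("examplefs_iget" is
 * hypothetical):
 *
 *	static struct dentry *examplefs_lookup(struct inode *dir,
 *				struct dentry *dentry, unsigned int flags)
 *	{
 *		struct inode *inode = examplefs_iget(dir, &dentry->d_name);
 *
 *		return d_splice_alias(inode, dentry);
 *	}
 */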
3029
3030 /*
3031 * Test whether new_dentry is a subdirectory of old_dentry.
3032 *
3033 * Trivially implemented using the dcache structure
3034 */
3035
3036 /**
3037 * is_subdir - is new dentry a subdirectory of old_dentry
3038 * @new_dentry: new dentry
3039 * @old_dentry: old dentry
3040 *
3041 * Returns true if new_dentry is a subdirectory of old_dentry (at any depth).
3042 * Returns false otherwise.
3043 * Caller must ensure that "new_dentry" is pinned before calling is_subdir().
3044 */
3045
3046 bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
3047 {
3048 bool subdir;
3049 unsigned seq;
3050
3051 if (new_dentry == old_dentry)
3052 return true;
3053
3054 /* Access d_parent under rcu as d_move() may change it. */
3055 rcu_read_lock();
3056 seq = read_seqbegin(&rename_lock);
3057 subdir = d_ancestor(old_dentry, new_dentry);
3058 /* Try lockless once... */
3059 if (read_seqretry(&rename_lock, seq)) {
3060 /* ...else acquire lock for progress even on deep chains. */
3061 read_seqlock_excl(&rename_lock);
3062 subdir = d_ancestor(old_dentry, new_dentry);
3063 read_sequnlock_excl(&rename_lock);
3064 }
3065 rcu_read_unlock();
3066 return subdir;
3067 }
3068 EXPORT_SYMBOL(is_subdir);
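
/*
 * Editorial sketch: the typical use is a containment check before an
 * operation that must not escape a subtree (both dentries pinned by the
 * caller, per the comment above):
 *
 *	if (!is_subdir(dentry, root))
 *		return -EXDEV;
 */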
3069
3070 static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
3071 {
3072 struct dentry *root = data;
3073 if (dentry != root) {
3074 if (d_unhashed(dentry) || !dentry->d_inode)
3075 return D_WALK_SKIP;
3076
3077 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
3078 dentry->d_flags |= DCACHE_GENOCIDE;
3079 dentry->d_lockref.count--;
3080 }
3081 }
3082 return D_WALK_CONTINUE;
3083 }
3084
3085 void d_genocide(struct dentry *parent)
3086 {
3087 d_walk(parent, parent, d_genocide_kill);
3088 }
3089
3090 void d_mark_tmpfile(struct file *file, struct inode *inode)
3091 {
3092 struct dentry *dentry = file->f_path.dentry;
3093
3094 BUG_ON(dname_external(dentry) ||
3095 !hlist_unhashed(&dentry->d_u.d_alias) ||
3096 !d_unlinked(dentry));
3097 spin_lock(&dentry->d_parent->d_lock);
3098 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
3099 dentry->d_name.len = sprintf(dentry->d_shortname.string, "#%llu",
3100 (unsigned long long)inode->i_ino);
3101 spin_unlock(&dentry->d_lock);
3102 spin_unlock(&dentry->d_parent->d_lock);
3103 }
3104 EXPORT_SYMBOL(d_mark_tmpfile);
3105
3106 void d_tmpfile(struct file *file, struct inode *inode)
3107 {
3108 struct dentry *dentry = file->f_path.dentry;
3109
3110 inode_dec_link_count(inode);
3111 d_mark_tmpfile(file, inode);
3112 d_instantiate(dentry, inode);
3113 }
3114 EXPORT_SYMBOL(d_tmpfile);
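
/*
 * Editorial sketch of a ->tmpfile() tail under the current convention
 * where the method receives the struct file: the freshly allocated inode
 * starts with a link count of 1, which d_tmpfile() drops to 0 before
 * marking and instantiating the dentry:
 *
 *	d_tmpfile(file, inode);
 *	return finish_open_simple(file, 0);
 */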
3115
3116 /*
3117 * Obtain inode number of the parent dentry.
3118 */
3119 ino_t d_parent_ino(struct dentry *dentry)
3120 {
3121 struct dentry *parent;
3122 struct inode *iparent;
3123 unsigned seq;
3124 ino_t ret;
3125
3126 scoped_guard(rcu) {
3127 seq = raw_seqcount_begin(&dentry->d_seq);
3128 parent = READ_ONCE(dentry->d_parent);
3129 iparent = d_inode_rcu(parent);
3130 if (likely(iparent)) {
3131 ret = iparent->i_ino;
3132 if (!read_seqcount_retry(&dentry->d_seq, seq))
3133 return ret;
3134 }
3135 }
3136
3137 spin_lock(&dentry->d_lock);
3138 ret = dentry->d_parent->d_inode->i_ino;
3139 spin_unlock(&dentry->d_lock);
3140 return ret;
3141 }
3142 EXPORT_SYMBOL(d_parent_ino);
3143
3144 static __initdata unsigned long dhash_entries;
3145 static int __init set_dhash_entries(char *str)
3146 {
3147 if (!str)
3148 return 0;
3149 dhash_entries = simple_strtoul(str, &str, 0);
3150 return 1;
3151 }
3152 __setup("dhash_entries=", set_dhash_entries);
3153
3154 static void __init dcache_init_early(void)
3155 {
3156 /* If hashes are distributed across NUMA nodes, defer
3157 * hash allocation until vmalloc space is available.
3158 */
3159 if (hashdist)
3160 return;
3161
3162 dentry_hashtable =
3163 alloc_large_system_hash("Dentry cache",
3164 sizeof(struct hlist_bl_head),
3165 dhash_entries,
3166 13,
3167 HASH_EARLY | HASH_ZERO,
3168 &d_hash_shift,
3169 NULL,
3170 0,
3171 0);
3172 d_hash_shift = 32 - d_hash_shift;
3173
3174 runtime_const_init(shift, d_hash_shift);
3175 runtime_const_init(ptr, dentry_hashtable);
3176 }
3177
3178 static void __init dcache_init(void)
3179 {
3180 /*
3181 * A constructor could be added for stable state like the lists,
3182 * but it is probably not worth it because of the cache nature
3183 * of the dcache.
3184 */
3185 dentry_cache = KMEM_CACHE_USERCOPY(dentry,
3186 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_ACCOUNT,
3187 d_shortname.string);
3188
3189 /* Hash may have been set up in dcache_init_early */
3190 if (!hashdist)
3191 return;
3192
3193 dentry_hashtable =
3194 alloc_large_system_hash("Dentry cache",
3195 sizeof(struct hlist_bl_head),
3196 dhash_entries,
3197 13,
3198 HASH_ZERO,
3199 &d_hash_shift,
3200 NULL,
3201 0,
3202 0);
3203 d_hash_shift = 32 - d_hash_shift;
3204
3205 runtime_const_init(shift, d_hash_shift);
3206 runtime_const_init(ptr, dentry_hashtable);
3207 }
3208
3209 /* SLAB cache for __getname() consumers */
3210 struct kmem_cache *names_cachep __ro_after_init;
3211 EXPORT_SYMBOL(names_cachep);
3212
3213 void __init vfs_caches_init_early(void)
3214 {
3215 int i;
3216
3217 for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++)
3218 INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]);
3219
3220 dcache_init_early();
3221 inode_init_early();
3222 }
3223
3224 void __init vfs_caches_init(void)
3225 {
3226 names_cachep = kmem_cache_create_usercopy("names_cache", PATH_MAX, 0,
3227 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 0, PATH_MAX, NULL);
3228
3229 dcache_init();
3230 inode_init();
3231 files_init();
3232 files_maxfiles_init();
3233 mnt_init();
3234 bdev_cache_init();
3235 chrdev_init();
3236 }
3237