xref: /linux/fs/smb/client/cached_dir.c (revision 7f4f3b14e8079ecde096bd734af10e30d40c27b7)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Functions to handle the cached directory entries
4  *
5  *  Copyright (c) 2022, Ronnie Sahlberg <lsahlber@redhat.com>
6  */
7 
8 #include <linux/namei.h>
9 #include "cifsglob.h"
10 #include "cifsproto.h"
11 #include "cifs_debug.h"
12 #include "smb2proto.h"
13 #include "cached_dir.h"
14 
15 static struct cached_fid *init_cached_dir(const char *path);
16 static void free_cached_dir(struct cached_fid *cfid);
17 static void smb2_close_cached_fid(struct kref *ref);
18 static void cfids_laundromat_worker(struct work_struct *work);
19 
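/*
 * Look up @path in @cfids.  Return a referenced cfid if a usable cached
 * entry already exists, or NULL if no entry can be used or created
 * (lookup only, limit reached, allocation failure, or an entry that is
 * not yet valid).  Otherwise allocate a new, not-yet-cached entry, put
 * it on the list and return it with an extra reference held.
 */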
20 static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
21 						    const char *path,
22 						    bool lookup_only,
23 						    __u32 max_cached_dirs)
24 {
25 	struct cached_fid *cfid;
26 
27 	spin_lock(&cfids->cfid_list_lock);
28 	list_for_each_entry(cfid, &cfids->entries, entry) {
29 		if (!strcmp(cfid->path, path)) {
30 			/*
31 			 * If it doesn't have a lease, it is either not yet
32 			 * fully cached or it is in the process of being
33 			 * deleted due to a lease break.
34 			 */
35 			if (!cfid->time || !cfid->has_lease) {
36 				spin_unlock(&cfids->cfid_list_lock);
37 				return NULL;
38 			}
39 			kref_get(&cfid->refcount);
40 			spin_unlock(&cfids->cfid_list_lock);
41 			return cfid;
42 		}
43 	}
44 	if (lookup_only) {
45 		spin_unlock(&cfids->cfid_list_lock);
46 		return NULL;
47 	}
48 	if (cfids->num_entries >= max_cached_dirs) {
49 		spin_unlock(&cfids->cfid_list_lock);
50 		return NULL;
51 	}
52 	cfid = init_cached_dir(path);
53 	if (cfid == NULL) {
54 		spin_unlock(&cfids->cfid_list_lock);
55 		return NULL;
56 	}
57 	cfid->cfids = cfids;
58 	cfids->num_entries++;
59 	list_add(&cfid->entry, &cfids->entries);
60 	cfid->on_list = true;
61 	kref_get(&cfid->refcount);
62 	/*
63 	 * Set @cfid->has_lease to true during construction so that the lease
64 	 * reference can be put in cached_dir_lease_break() due to a potential
65 	 * lease break right after the request is sent or while @cfid is still
66 	 * being cached, or if a reconnection is triggered during construction.
67 	 * Concurrent processes won't be able to use it yet due to @cfid->time being
68 	 * zero.
69 	 */
70 	cfid->has_lease = true;
71 
72 	spin_unlock(&cfids->cfid_list_lock);
73 	return cfid;
74 }
75 
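/*
 * Walk @path one component at a time, starting from cifs_sb->root, and
 * return the dentry for the final component, or an ERR_PTR on failure.
 */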
76 static struct dentry *
77 path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path)
78 {
79 	struct dentry *dentry;
80 	const char *s, *p;
81 	char sep;
82 
83 	sep = CIFS_DIR_SEP(cifs_sb);
84 	dentry = dget(cifs_sb->root);
85 	s = path;
86 
87 	do {
88 		struct inode *dir = d_inode(dentry);
89 		struct dentry *child;
90 
91 		if (!S_ISDIR(dir->i_mode)) {
92 			dput(dentry);
93 			dentry = ERR_PTR(-ENOTDIR);
94 			break;
95 		}
96 
97 		/* skip separators */
98 		while (*s == sep)
99 			s++;
100 		if (!*s)
101 			break;
102 		p = s++;
103 		/* next separator */
104 		while (*s && *s != sep)
105 			s++;
106 
107 		child = lookup_positive_unlocked(p, dentry, s - p);
108 		dput(dentry);
109 		dentry = child;
110 	} while (!IS_ERR(dentry));
111 	return dentry;
112 }
113 
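/*
 * Return @path with any mount prefix path (cifs_sb->prepath) stripped off,
 * or ERR_PTR(-EINVAL) if the prefix is longer than @path itself.
 */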
114 static const char *path_no_prefix(struct cifs_sb_info *cifs_sb,
115 				  const char *path)
116 {
117 	size_t len = 0;
118 
119 	if (!*path)
120 		return path;
121 
122 	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
123 	    cifs_sb->prepath) {
124 		len = strlen(cifs_sb->prepath) + 1;
125 		if (unlikely(len > strlen(path)))
126 			return ERR_PTR(-EINVAL);
127 	}
128 	return path + len;
129 }
130 
131 /*
132  * Open and cache a directory handle.
133  * On error, *cfid is not initialized.
134  */
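/*
 * Illustrative usage sketch (an assumption for documentation purposes, not
 * lifted from a specific caller):
 *
 *	struct cached_fid *cfid;
 *	int rc;
 *
 *	rc = open_cached_dir(xid, tcon, path, cifs_sb, false, &cfid);
 *	if (!rc) {
 *		... issue requests against the directory using cfid->fid ...
 *		close_cached_dir(cfid);
 *	}
 */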
135 int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
136 		    const char *path,
137 		    struct cifs_sb_info *cifs_sb,
138 		    bool lookup_only, struct cached_fid **ret_cfid)
139 {
140 	struct cifs_ses *ses;
141 	struct TCP_Server_Info *server;
142 	struct cifs_open_parms oparms;
143 	struct smb2_create_rsp *o_rsp = NULL;
144 	struct smb2_query_info_rsp *qi_rsp = NULL;
145 	int resp_buftype[2];
146 	struct smb_rqst rqst[2];
147 	struct kvec rsp_iov[2];
148 	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
149 	struct kvec qi_iov[1];
150 	int rc, flags = 0;
151 	__le16 *utf16_path = NULL;
152 	u8 oplock = SMB2_OPLOCK_LEVEL_II;
153 	struct cifs_fid *pfid;
154 	struct dentry *dentry = NULL;
155 	struct cached_fid *cfid;
156 	struct cached_fids *cfids;
157 	const char *npath;
158 	int retries = 0, cur_sleep = 1;
159 
160 	if (tcon == NULL || tcon->cfids == NULL || tcon->nohandlecache ||
161 	    is_smb1_server(tcon->ses->server) || (dir_cache_timeout == 0))
162 		return -EOPNOTSUPP;
163 
164 	ses = tcon->ses;
165 	cfids = tcon->cfids;
166 
167 	if (cifs_sb->root == NULL)
168 		return -ENOENT;
169 
170 replay_again:
171 	/* reinitialize for possible replay */
172 	flags = 0;
173 	oplock = SMB2_OPLOCK_LEVEL_II;
174 	server = cifs_pick_channel(ses);
175 
176 	if (!server->ops->new_lease_key)
177 		return -EIO;
178 
179 	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
180 	if (!utf16_path)
181 		return -ENOMEM;
182 
183 	cfid = find_or_create_cached_dir(cfids, path, lookup_only, tcon->max_cached_dirs);
184 	if (cfid == NULL) {
185 		kfree(utf16_path);
186 		return -ENOENT;
187 	}
188 	/*
189 	 * Return cached fid if it is valid (has a lease and has a time).
190 	 * Otherwise, it is either a new entry or the laundromat worker removed it
191 	 * from @cfids->entries; if the latter, the caller will put the last reference.
192 	 */
193 	spin_lock(&cfids->cfid_list_lock);
194 	if (cfid->has_lease && cfid->time) {
195 		spin_unlock(&cfids->cfid_list_lock);
196 		*ret_cfid = cfid;
197 		kfree(utf16_path);
198 		return 0;
199 	}
200 	spin_unlock(&cfids->cfid_list_lock);
201 
202 	/*
203 	 * Skip any prefix paths in @path as lookup_positive_unlocked() ends up
204 	 * calling ->lookup() which already adds those through
205 	 * build_path_from_dentry().  Also, do it earlier as we might reconnect
206 	 * below when trying to send the compounded request and then potentially
207 	 * end up with a different prefix path (e.g. after DFS failover).
208 	 */
209 	npath = path_no_prefix(cifs_sb, path);
210 	if (IS_ERR(npath)) {
211 		rc = PTR_ERR(npath);
212 		goto out;
213 	}
214 
215 	if (!npath[0]) {
216 		dentry = dget(cifs_sb->root);
217 	} else {
218 		dentry = path_to_dentry(cifs_sb, npath);
219 		if (IS_ERR(dentry)) {
220 			rc = -ENOENT;
221 			goto out;
222 		}
223 	}
224 	cfid->dentry = dentry;
225 
226 	/*
227 	 * We do not hold the lock across the open, because
228 	 * SMB2_open may need to reconnect.
229 	 * This is safe because no other thread will be able to get a ref
230 	 * to the cfid until we have finished opening the file and (possibly)
231 	 * acquired a lease.
232 	 */
233 	if (smb3_encryption_required(tcon))
234 		flags |= CIFS_TRANSFORM_REQ;
235 
236 	pfid = &cfid->fid;
237 	server->ops->new_lease_key(pfid);
238 
239 	memset(rqst, 0, sizeof(rqst));
240 	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
241 	memset(rsp_iov, 0, sizeof(rsp_iov));
242 
243 	/* Open */
244 	memset(&open_iov, 0, sizeof(open_iov));
245 	rqst[0].rq_iov = open_iov;
246 	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
247 
248 	oparms = (struct cifs_open_parms) {
249 		.tcon = tcon,
250 		.path = path,
251 		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
252 		.desired_access =  FILE_READ_DATA | FILE_READ_ATTRIBUTES |
253 				   FILE_READ_EA,
254 		.disposition = FILE_OPEN,
255 		.fid = pfid,
256 		.replay = !!(retries),
257 	};
258 
259 	rc = SMB2_open_init(tcon, server,
260 			    &rqst[0], &oplock, &oparms, utf16_path);
261 	if (rc)
262 		goto oshr_free;
263 	smb2_set_next_command(tcon, &rqst[0]);
264 
265 	memset(&qi_iov, 0, sizeof(qi_iov));
266 	rqst[1].rq_iov = qi_iov;
267 	rqst[1].rq_nvec = 1;
268 
269 	rc = SMB2_query_info_init(tcon, server,
270 				  &rqst[1], COMPOUND_FID,
271 				  COMPOUND_FID, FILE_ALL_INFORMATION,
272 				  SMB2_O_INFO_FILE, 0,
273 				  sizeof(struct smb2_file_all_info) +
274 				  PATH_MAX * 2, 0, NULL);
275 	if (rc)
276 		goto oshr_free;
277 
278 	smb2_set_related(&rqst[1]);
279 
280 	if (retries) {
281 		smb2_set_replay(server, &rqst[0]);
282 		smb2_set_replay(server, &rqst[1]);
283 	}
284 
285 	rc = compound_send_recv(xid, ses, server,
286 				flags, 2, rqst,
287 				resp_buftype, rsp_iov);
288 	if (rc) {
289 		if (rc == -EREMCHG) {
290 			tcon->need_reconnect = true;
291 			pr_warn_once("server share %s deleted\n",
292 				     tcon->tree_name);
293 		}
294 		goto oshr_free;
295 	}
296 	cfid->tcon = tcon;
297 	cfid->is_open = true;
298 
299 	spin_lock(&cfids->cfid_list_lock);
300 
301 	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
302 	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
303 	oparms.fid->volatile_fid = o_rsp->VolatileFileId;
304 #ifdef CONFIG_CIFS_DEBUG2
305 	oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
306 #endif /* CONFIG_CIFS_DEBUG2 */
307 
308 
309 	if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) {
310 		spin_unlock(&cfids->cfid_list_lock);
311 		rc = -EINVAL;
312 		goto oshr_free;
313 	}
314 
315 	rc = smb2_parse_contexts(server, rsp_iov,
316 				 &oparms.fid->epoch,
317 				 oparms.fid->lease_key,
318 				 &oplock, NULL, NULL);
319 	if (rc) {
320 		spin_unlock(&cfids->cfid_list_lock);
321 		goto oshr_free;
322 	}
323 
324 	rc = -EINVAL;
325 	if (!(oplock & SMB2_LEASE_READ_CACHING_HE)) {
326 		spin_unlock(&cfids->cfid_list_lock);
327 		goto oshr_free;
328 	}
329 	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
330 	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) {
331 		spin_unlock(&cfids->cfid_list_lock);
332 		goto oshr_free;
333 	}
334 	if (!smb2_validate_and_copy_iov(
335 				le16_to_cpu(qi_rsp->OutputBufferOffset),
336 				sizeof(struct smb2_file_all_info),
337 				&rsp_iov[1], sizeof(struct smb2_file_all_info),
338 				(char *)&cfid->file_all_info))
339 		cfid->file_all_info_is_valid = true;
340 
341 	cfid->time = jiffies;
342 	spin_unlock(&cfids->cfid_list_lock);
343 	/* At this point the directory handle is fully cached */
344 	rc = 0;
345 
346 oshr_free:
347 	SMB2_open_free(&rqst[0]);
348 	SMB2_query_info_free(&rqst[1]);
349 	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
350 	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
351 out:
352 	if (rc) {
353 		spin_lock(&cfids->cfid_list_lock);
354 		if (cfid->on_list) {
355 			list_del(&cfid->entry);
356 			cfid->on_list = false;
357 			cfids->num_entries--;
358 		}
359 		if (cfid->has_lease) {
360 			/*
361 			 * We are guaranteed to have two references at this
362 			 * point. One for the caller and one for a potential
363 			 * lease. Release one here, and the second below.
364 			 */
365 			cfid->has_lease = false;
366 			kref_put(&cfid->refcount, smb2_close_cached_fid);
367 		}
368 		spin_unlock(&cfids->cfid_list_lock);
369 
370 		kref_put(&cfid->refcount, smb2_close_cached_fid);
371 	} else {
372 		*ret_cfid = cfid;
373 		atomic_inc(&tcon->num_remote_opens);
374 	}
375 	kfree(utf16_path);
376 
377 	if (is_replayable_error(rc) &&
378 	    smb2_should_replay(tcon, &retries, &cur_sleep))
379 		goto replay_again;
380 
381 	return rc;
382 }
383 
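/*
 * Look up a cached directory handle by its dentry.  On success a reference
 * is taken, *ret_cfid is set and 0 is returned; otherwise -ENOENT.
 */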
384 int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
385 			      struct dentry *dentry,
386 			      struct cached_fid **ret_cfid)
387 {
388 	struct cached_fid *cfid;
389 	struct cached_fids *cfids = tcon->cfids;
390 
391 	if (cfids == NULL)
392 		return -ENOENT;
393 
394 	spin_lock(&cfids->cfid_list_lock);
395 	list_for_each_entry(cfid, &cfids->entries, entry) {
396 		if (dentry && cfid->dentry == dentry) {
397 			cifs_dbg(FYI, "found a cached file handle by dentry\n");
398 			kref_get(&cfid->refcount);
399 			*ret_cfid = cfid;
400 			spin_unlock(&cfids->cfid_list_lock);
401 			return 0;
402 		}
403 	}
404 	spin_unlock(&cfids->cfid_list_lock);
405 	return -ENOENT;
406 }
407 
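/*
 * kref release function for a cached_fid: unlink it from the list if it is
 * still on it, drop the dentry, close the handle on the server if it is
 * still open, and free the structure.
 */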
408 static void
409 smb2_close_cached_fid(struct kref *ref)
410 {
411 	struct cached_fid *cfid = container_of(ref, struct cached_fid,
412 					       refcount);
413 	int rc;
414 
415 	spin_lock(&cfid->cfids->cfid_list_lock);
416 	if (cfid->on_list) {
417 		list_del(&cfid->entry);
418 		cfid->on_list = false;
419 		cfid->cfids->num_entries--;
420 	}
421 	spin_unlock(&cfid->cfids->cfid_list_lock);
422 
423 	dput(cfid->dentry);
424 	cfid->dentry = NULL;
425 
426 	if (cfid->is_open) {
427 		rc = SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
428 			   cfid->fid.volatile_fid);
429 		if (rc) /* should we retry on -EBUSY or -EAGAIN? */
430 			cifs_dbg(VFS, "close cached dir rc %d\n", rc);
431 	}
432 
433 	free_cached_dir(cfid);
434 }
435 
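/*
 * Drop the cached directory handle for @name, if there is one, by releasing
 * its lease reference.  Used e.g. ahead of rmdir so that the cached handle
 * does not keep the directory open on the server.
 */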
436 void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon,
437 			     const char *name, struct cifs_sb_info *cifs_sb)
438 {
439 	struct cached_fid *cfid = NULL;
440 	int rc;
441 
442 	rc = open_cached_dir(xid, tcon, name, cifs_sb, true, &cfid);
443 	if (rc) {
444 		cifs_dbg(FYI, "no cached dir found for rmdir(%s)\n", name);
445 		return;
446 	}
447 	spin_lock(&cfid->cfids->cfid_list_lock);
448 	if (cfid->has_lease) {
449 		cfid->has_lease = false;
450 		kref_put(&cfid->refcount, smb2_close_cached_fid);
451 	}
452 	spin_unlock(&cfid->cfids->cfid_list_lock);
453 	close_cached_dir(cfid);
454 }
455 
456 
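/*
 * Drop the reference obtained from open_cached_dir() or
 * open_cached_dir_by_dentry().
 */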
457 void close_cached_dir(struct cached_fid *cfid)
458 {
459 	kref_put(&cfid->refcount, smb2_close_cached_fid);
460 }
461 
462 /*
463  * Called from cifs_kill_sb when we unmount a share
464  */
465 void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
466 {
467 	struct rb_root *root = &cifs_sb->tlink_tree;
468 	struct rb_node *node;
469 	struct cached_fid *cfid;
470 	struct cifs_tcon *tcon;
471 	struct tcon_link *tlink;
472 	struct cached_fids *cfids;
473 
474 	for (node = rb_first(root); node; node = rb_next(node)) {
475 		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
476 		tcon = tlink_tcon(tlink);
477 		if (IS_ERR(tcon))
478 			continue;
479 		cfids = tcon->cfids;
480 		if (cfids == NULL)
481 			continue;
482 		list_for_each_entry(cfid, &cfids->entries, entry) {
483 			dput(cfid->dentry);
484 			cfid->dentry = NULL;
485 		}
486 	}
487 }
488 
489 /*
490  * Invalidate all cached dirs when a TCON has been reset
491  * due to a session loss.
492  */
493 void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
494 {
495 	struct cached_fids *cfids = tcon->cfids;
496 	struct cached_fid *cfid, *q;
497 	LIST_HEAD(entry);
498 
499 	if (cfids == NULL)
500 		return;
501 
502 	spin_lock(&cfids->cfid_list_lock);
503 	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
504 		list_move(&cfid->entry, &entry);
505 		cfids->num_entries--;
506 		cfid->is_open = false;
507 		cfid->on_list = false;
508 		if (cfid->has_lease) {
509 			/*
510 			 * The lease was never cancelled from the server,
511 			 * so steal that reference.
512 			 */
513 			cfid->has_lease = false;
514 		} else
515 			kref_get(&cfid->refcount);
516 	}
517 	spin_unlock(&cfids->cfid_list_lock);
518 
519 	list_for_each_entry_safe(cfid, q, &entry, entry) {
520 		list_del(&cfid->entry);
521 		cancel_work_sync(&cfid->lease_break);
522 		/*
523 		 * Drop the ref-count from above, either the lease-ref (if there
524 		 * was one) or the extra one acquired.
525 		 */
526 		kref_put(&cfid->refcount, smb2_close_cached_fid);
527 	}
528 }
529 
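/*
 * Work handler queued by cached_dir_lease_break(); drops the lease
 * reference on the cached directory handle.
 */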
530 static void
531 smb2_cached_lease_break(struct work_struct *work)
532 {
533 	struct cached_fid *cfid = container_of(work,
534 				struct cached_fid, lease_break);
535 
536 	kref_put(&cfid->refcount, smb2_close_cached_fid);
537 }
538 
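/*
 * Called when a lease break is received for @lease_key.  If it matches a
 * cached directory handle, invalidate it, take it off the list and queue
 * work to drop the lease reference.  Returns true if the lease break was
 * for one of our cached directories.
 */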
539 int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
540 {
541 	struct cached_fids *cfids = tcon->cfids;
542 	struct cached_fid *cfid;
543 
544 	if (cfids == NULL)
545 		return false;
546 
547 	spin_lock(&cfids->cfid_list_lock);
548 	list_for_each_entry(cfid, &cfids->entries, entry) {
549 		if (cfid->has_lease &&
550 		    !memcmp(lease_key,
551 			    cfid->fid.lease_key,
552 			    SMB2_LEASE_KEY_SIZE)) {
553 			cfid->has_lease = false;
554 			cfid->time = 0;
555 			/*
556 			 * We found a lease; remove it from the list
557 			 * so no other threads can access it.
558 			 */
559 			list_del(&cfid->entry);
560 			cfid->on_list = false;
561 			cfids->num_entries--;
562 
563 			queue_work(cifsiod_wq,
564 				   &cfid->lease_break);
565 			spin_unlock(&cfids->cfid_list_lock);
566 			return true;
567 		}
568 	}
569 	spin_unlock(&cfids->cfid_list_lock);
570 	return false;
571 }
572 
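/*
 * Allocate and initialize a cached_fid for @path.  Uses GFP_ATOMIC since it
 * is called with cfid_list_lock held.
 */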
573 static struct cached_fid *init_cached_dir(const char *path)
574 {
575 	struct cached_fid *cfid;
576 
577 	cfid = kzalloc(sizeof(*cfid), GFP_ATOMIC);
578 	if (!cfid)
579 		return NULL;
580 	cfid->path = kstrdup(path, GFP_ATOMIC);
581 	if (!cfid->path) {
582 		kfree(cfid);
583 		return NULL;
584 	}
585 
586 	INIT_WORK(&cfid->lease_break, smb2_cached_lease_break);
587 	INIT_LIST_HEAD(&cfid->entry);
588 	INIT_LIST_HEAD(&cfid->dirents.entries);
589 	mutex_init(&cfid->dirents.de_mutex);
590 	spin_lock_init(&cfid->fid_lock);
591 	kref_init(&cfid->refcount);
592 	return cfid;
593 }
594 
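/*
 * Release the dentry, free any cached dirents and the path string, then
 * free the cached_fid itself.
 */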
595 static void free_cached_dir(struct cached_fid *cfid)
596 {
597 	struct cached_dirent *dirent, *q;
598 
599 	dput(cfid->dentry);
600 	cfid->dentry = NULL;
601 
602 	/*
603 	 * Delete all cached dirent names
604 	 */
605 	list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
606 		list_del(&dirent->entry);
607 		kfree(dirent->name);
608 		kfree(dirent);
609 	}
610 
611 	kfree(cfid->path);
612 	cfid->path = NULL;
613 	kfree(cfid);
614 }
615 
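/*
 * Delayed work that expires cached directory handles which have been cached
 * for more than dir_cache_timeout seconds, then requeues itself.
 */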
616 static void cfids_laundromat_worker(struct work_struct *work)
617 {
618 	struct cached_fids *cfids;
619 	struct cached_fid *cfid, *q;
620 	LIST_HEAD(entry);
621 
622 	cfids = container_of(work, struct cached_fids, laundromat_work.work);
623 
624 	spin_lock(&cfids->cfid_list_lock);
625 	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
626 		if (cfid->time &&
627 		    time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
628 			cfid->on_list = false;
629 			list_move(&cfid->entry, &entry);
630 			cfids->num_entries--;
631 			if (cfid->has_lease) {
632 				/*
633 				 * Our lease has not yet been cancelled from the
634 				 * server. Steal that reference.
635 				 */
636 				cfid->has_lease = false;
637 			} else
638 				kref_get(&cfid->refcount);
639 		}
640 	}
641 	spin_unlock(&cfids->cfid_list_lock);
642 
643 	list_for_each_entry_safe(cfid, q, &entry, entry) {
644 		list_del(&cfid->entry);
645 		/*
646 		 * Cancel and wait for the work to finish in case we are racing
647 		 * with it.
648 		 */
649 		cancel_work_sync(&cfid->lease_break);
650 		/*
651 		 * Drop the ref-count from above, either the lease-ref (if there
652 		 * was one) or the extra one acquired.
653 		 */
654 		kref_put(&cfid->refcount, smb2_close_cached_fid);
655 	}
656 	queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
657 			   dir_cache_timeout * HZ);
658 }
659 
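/*
 * Allocate the per-tcon cached_fids structure and kick off the laundromat
 * work that expires old entries.
 */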
660 struct cached_fids *init_cached_dirs(void)
661 {
662 	struct cached_fids *cfids;
663 
664 	cfids = kzalloc(sizeof(*cfids), GFP_KERNEL);
665 	if (!cfids)
666 		return NULL;
667 	spin_lock_init(&cfids->cfid_list_lock);
668 	INIT_LIST_HEAD(&cfids->entries);
669 
670 	INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
671 	queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
672 			   dir_cache_timeout * HZ);
673 
674 	return cfids;
675 }
676 
677 /*
678  * Called from tconInfoFree when we are tearing down the tcon.
679  * There are no active users or open files/directories at this point.
680  */
681 void free_cached_dirs(struct cached_fids *cfids)
682 {
683 	struct cached_fid *cfid, *q;
684 	LIST_HEAD(entry);
685 
686 	if (cfids == NULL)
687 		return;
688 
689 	cancel_delayed_work_sync(&cfids->laundromat_work);
690 
691 	spin_lock(&cfids->cfid_list_lock);
692 	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
693 		cfid->on_list = false;
694 		cfid->is_open = false;
695 		list_move(&cfid->entry, &entry);
696 	}
697 	spin_unlock(&cfids->cfid_list_lock);
698 
699 	list_for_each_entry_safe(cfid, q, &entry, entry) {
700 		list_del(&cfid->entry);
701 		free_cached_dir(cfid);
702 	}
703 
704 	kfree(cfids);
705 }
706