// SPDX-License-Identifier: GPL-2.0
/*
 *  Functions to handle the cached directory entries
 *
 *  Copyright (c) 2022, Ronnie Sahlberg <lsahlber@redhat.com>
 */

#include <linux/namei.h>
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "cached_dir.h"

static struct cached_fid *init_cached_dir(const char *path);
static void free_cached_dir(struct cached_fid *cfid);
static void smb2_close_cached_fid(struct kref *ref);
static void cfids_laundromat_worker(struct work_struct *work);

struct cached_dir_dentry {
	struct list_head entry;
	struct dentry *dentry;
};

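/*
 * Look up a cached handle for @path in @cfids, or allocate a new one.
 *
 * Returns NULL if the entry exists but is not yet usable (still being
 * constructed or undergoing a lease break), if @lookup_only is set and no
 * entry exists, or if @max_cached_dirs entries are already cached.  On
 * success the caller holds a reference to the returned cfid; a freshly
 * created entry additionally holds the lease reference that is dropped
 * later in cached_dir_lease_break() or on the error path of
 * open_cached_dir().
 */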
static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
						    const char *path,
						    bool lookup_only,
						    __u32 max_cached_dirs)
{
	struct cached_fid *cfid;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (!strcmp(cfid->path, path)) {
			/*
			 * If it doesn't have a lease it is either not yet
			 * fully cached or it may be in the process of
			 * being deleted due to a lease break.
			 */
			if (!cfid->time || !cfid->has_lease) {
				spin_unlock(&cfids->cfid_list_lock);
				return NULL;
			}
			kref_get(&cfid->refcount);
			spin_unlock(&cfids->cfid_list_lock);
			return cfid;
		}
	}
	if (lookup_only) {
		spin_unlock(&cfids->cfid_list_lock);
		return NULL;
	}
	if (cfids->num_entries >= max_cached_dirs) {
		spin_unlock(&cfids->cfid_list_lock);
		return NULL;
	}
	cfid = init_cached_dir(path);
	if (cfid == NULL) {
		spin_unlock(&cfids->cfid_list_lock);
		return NULL;
	}
	cfid->cfids = cfids;
	cfids->num_entries++;
	list_add(&cfid->entry, &cfids->entries);
	cfid->on_list = true;
	kref_get(&cfid->refcount);
	/*
	 * Set @cfid->has_lease to true during construction so that the lease
	 * reference can be put in cached_dir_lease_break() due to a potential
	 * lease break right after the request is sent or while @cfid is still
	 * being cached, or if a reconnection is triggered during construction.
	 * Concurrent processes won't be able to use it yet due to @cfid->time
	 * being zero.
	 */
	cfid->has_lease = true;

	spin_unlock(&cfids->cfid_list_lock);
	return cfid;
}

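/*
 * Walk @path component by component, starting from the superblock root,
 * and return the dentry of the final component (or an ERR_PTR on
 * failure).  Uses lookup_positive_unlocked(), which may end up calling
 * ->lookup() and only ever returns positive dentries.
 */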
static struct dentry *
path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path)
{
	struct dentry *dentry;
	const char *s, *p;
	char sep;

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(cifs_sb->root);
	s = path;

	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		child = lookup_positive_unlocked(p, dentry, s - p);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	return dentry;
}

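/*
 * Return @path with the mount's prefix path (cifs_sb->prepath) and its
 * trailing separator stripped, or ERR_PTR(-EINVAL) if the prefix is
 * longer than @path itself.
 */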
static const char *path_no_prefix(struct cifs_sb_info *cifs_sb,
				  const char *path)
{
	size_t len = 0;

	if (!*path)
		return path;

	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
	    cifs_sb->prepath) {
		len = strlen(cifs_sb->prepath) + 1;
		if (unlikely(len > strlen(path)))
			return ERR_PTR(-EINVAL);
	}
	return path + len;
}

/*
 * Open and cache a directory handle.
 * If error then *cfid is not initialized.
 */
int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
		    const char *path,
		    struct cifs_sb_info *cifs_sb,
		    bool lookup_only, struct cached_fid **ret_cfid)
{
	struct cifs_ses *ses;
	struct TCP_Server_Info *server;
	struct cifs_open_parms oparms;
	struct smb2_create_rsp *o_rsp = NULL;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	int resp_buftype[2];
	struct smb_rqst rqst[2];
	struct kvec rsp_iov[2];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	int rc, flags = 0;
	__le16 *utf16_path = NULL;
	u8 oplock = SMB2_OPLOCK_LEVEL_II;
	struct cifs_fid *pfid;
	struct dentry *dentry = NULL;
	struct cached_fid *cfid;
	struct cached_fids *cfids;
	const char *npath;
	int retries = 0, cur_sleep = 1;

	if (cifs_sb->root == NULL)
		return -ENOENT;

	if (tcon == NULL)
		return -EOPNOTSUPP;

	ses = tcon->ses;
	cfids = tcon->cfids;

	if (cfids == NULL)
		return -EOPNOTSUPP;

replay_again:
	/* reinitialize for possible replay */
	flags = 0;
	oplock = SMB2_OPLOCK_LEVEL_II;
	server = cifs_pick_channel(ses);

	if (!server->ops->new_lease_key)
		return -EIO;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	cfid = find_or_create_cached_dir(cfids, path, lookup_only, tcon->max_cached_dirs);
	if (cfid == NULL) {
		kfree(utf16_path);
		return -ENOENT;
	}
	/*
	 * Return cached fid if it is valid (has a lease and has a time).
	 * Otherwise, it is either a new entry or laundromat worker removed it
	 * from @cfids->entries.  Caller will put last reference if the latter.
	 */
	spin_lock(&cfids->cfid_list_lock);
	if (cfid->has_lease && cfid->time) {
		spin_unlock(&cfids->cfid_list_lock);
		*ret_cfid = cfid;
		kfree(utf16_path);
		return 0;
	}
	spin_unlock(&cfids->cfid_list_lock);

	/*
	 * Skip any prefix paths in @path as lookup_positive_unlocked() ends up
	 * calling ->lookup() which already adds those through
	 * build_path_from_dentry().  Also, do it earlier as we might reconnect
	 * below when trying to send compounded request and then potentially
	 * having a different prefix path (e.g. after DFS failover).
	 */
	npath = path_no_prefix(cifs_sb, path);
	if (IS_ERR(npath)) {
		rc = PTR_ERR(npath);
		goto out;
	}

	if (!npath[0]) {
		dentry = dget(cifs_sb->root);
	} else {
		dentry = path_to_dentry(cifs_sb, npath);
		if (IS_ERR(dentry)) {
			rc = -ENOENT;
			goto out;
		}
	}
	cfid->dentry = dentry;
	cfid->tcon = tcon;

	/*
	 * We do not hold the lock for the open because SMB2_open may need
	 * to reconnect.
	 * This is safe because no other thread will be able to get a ref
	 * to the cfid until we have finished opening the file and (possibly)
	 * acquired a lease.
	 */
	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	pfid = &cfid->fid;
	server->ops->new_lease_key(pfid);

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.path = path,
		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
		.desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES |
				  FILE_READ_EA,
		.disposition = FILE_OPEN,
		.fid = pfid,
		.replay = !!(retries),
	};

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto oshr_free;
	smb2_set_next_command(tcon, &rqst[0]);

	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, server,
				  &rqst[1], COMPOUND_FID,
				  COMPOUND_FID, FILE_ALL_INFORMATION,
				  SMB2_O_INFO_FILE, 0,
				  sizeof(struct smb2_file_all_info) +
				  PATH_MAX * 2, 0, NULL);
	if (rc)
		goto oshr_free;

	smb2_set_related(&rqst[1]);

	if (retries) {
		smb2_set_replay(server, &rqst[0]);
		smb2_set_replay(server, &rqst[1]);
	}

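	/*
	 * Send the create and the query-info as one compound request so the
	 * directory is opened and its metadata fetched in a single round
	 * trip.
	 */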
	rc = compound_send_recv(xid, ses, server,
				flags, 2, rqst,
				resp_buftype, rsp_iov);
	if (rc) {
		if (rc == -EREMCHG) {
			tcon->need_reconnect = true;
			pr_warn_once("server share %s deleted\n",
				     tcon->tree_name);
		}
		goto oshr_free;
	}
	cfid->is_open = true;

	spin_lock(&cfids->cfid_list_lock);

	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
	oparms.fid->volatile_fid = o_rsp->VolatileFileId;
#ifdef CONFIG_CIFS_DEBUG2
	oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
#endif /* CIFS_DEBUG2 */

	if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) {
		spin_unlock(&cfids->cfid_list_lock);
		rc = -EINVAL;
		goto oshr_free;
	}

	rc = smb2_parse_contexts(server, rsp_iov,
				 &oparms.fid->epoch,
				 oparms.fid->lease_key,
				 &oplock, NULL, NULL);
	if (rc) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}

	rc = -EINVAL;
	if (!(oplock & SMB2_LEASE_READ_CACHING_HE)) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}
	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}
	if (!smb2_validate_and_copy_iov(
				le16_to_cpu(qi_rsp->OutputBufferOffset),
				sizeof(struct smb2_file_all_info),
				&rsp_iov[1], sizeof(struct smb2_file_all_info),
				(char *)&cfid->file_all_info))
		cfid->file_all_info_is_valid = true;

	cfid->time = jiffies;
	spin_unlock(&cfids->cfid_list_lock);
	/* At this point the directory handle is fully cached */
	rc = 0;

oshr_free:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
out:
	if (rc) {
		spin_lock(&cfids->cfid_list_lock);
		if (cfid->on_list) {
			list_del(&cfid->entry);
			cfid->on_list = false;
			cfids->num_entries--;
		}
		if (cfid->has_lease) {
			/*
			 * We are guaranteed to have two references at this
			 * point.  One for the caller and one for a potential
			 * lease.  Release one here, and the second below.
			 */
			cfid->has_lease = false;
			kref_put(&cfid->refcount, smb2_close_cached_fid);
		}
		spin_unlock(&cfids->cfid_list_lock);

		kref_put(&cfid->refcount, smb2_close_cached_fid);
	} else {
		*ret_cfid = cfid;
		atomic_inc(&tcon->num_remote_opens);
	}
	kfree(utf16_path);

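	/* Retry the whole compound if the failure is replayable (e.g. after a reconnect). */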
	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}

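/*
 * Find an already-cached directory handle whose dentry matches @dentry
 * and take a reference to it.  Returns -ENOENT if there is none.
 */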
int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
			      struct dentry *dentry,
			      struct cached_fid **ret_cfid)
{
	struct cached_fid *cfid;
	struct cached_fids *cfids = tcon->cfids;

	if (cfids == NULL)
		return -EOPNOTSUPP;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (dentry && cfid->dentry == dentry) {
			cifs_dbg(FYI, "found a cached file handle by dentry\n");
			kref_get(&cfid->refcount);
			*ret_cfid = cfid;
			spin_unlock(&cfids->cfid_list_lock);
			return 0;
		}
	}
	spin_unlock(&cfids->cfid_list_lock);
	return -ENOENT;
}

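/*
 * kref release callback: unlink the cfid from its list, drop the cached
 * dentry, close the handle on the server if it is still open, and free
 * the structure.
 */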
static void
smb2_close_cached_fid(struct kref *ref)
{
	struct cached_fid *cfid = container_of(ref, struct cached_fid,
					       refcount);
	int rc;

	spin_lock(&cfid->cfids->cfid_list_lock);
	if (cfid->on_list) {
		list_del(&cfid->entry);
		cfid->on_list = false;
		cfid->cfids->num_entries--;
	}
	spin_unlock(&cfid->cfids->cfid_list_lock);

	dput(cfid->dentry);
	cfid->dentry = NULL;

	if (cfid->is_open) {
		rc = SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
				cfid->fid.volatile_fid);
		if (rc) /* should we retry on -EBUSY or -EAGAIN? */
			cifs_dbg(VFS, "close cached dir rc %d\n", rc);
	}

	free_cached_dir(cfid);
}

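/*
 * Drop the cached handle for @name, if there is one, so that e.g. a
 * following rmdir does not fail because we still hold a handle to the
 * directory.
 */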
void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon,
			     const char *name, struct cifs_sb_info *cifs_sb)
{
	struct cached_fid *cfid = NULL;
	int rc;

	rc = open_cached_dir(xid, tcon, name, cifs_sb, true, &cfid);
	if (rc) {
		cifs_dbg(FYI, "no cached dir found for rmdir(%s)\n", name);
		return;
	}
	spin_lock(&cfid->cfids->cfid_list_lock);
	if (cfid->has_lease) {
		cfid->has_lease = false;
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
	spin_unlock(&cfid->cfids->cfid_list_lock);
	close_cached_dir(cfid);
}

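/*
 * Drop the caller's reference to @cfid; the final put closes and frees it.
 */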
void close_cached_dir(struct cached_fid *cfid)
{
	kref_put(&cfid->refcount, smb2_close_cached_fid);
}

/*
 * Called from cifs_kill_sb when we unmount a share
 */
void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
{
	struct rb_root *root = &cifs_sb->tlink_tree;
	struct rb_node *node;
	struct cached_fid *cfid;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cached_fids *cfids;
	struct cached_dir_dentry *tmp_list, *q;
	LIST_HEAD(entry);

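	/*
	 * Steal each cfid's dentry onto a local list while holding the
	 * spinlocks; the dput() calls happen afterwards, since dput() may
	 * sleep and must not run under a spinlock.
	 */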
	spin_lock(&cifs_sb->tlink_tree_lock);
	for (node = rb_first(root); node; node = rb_next(node)) {
		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
		tcon = tlink_tcon(tlink);
		if (IS_ERR(tcon))
			continue;
		cfids = tcon->cfids;
		if (cfids == NULL)
			continue;
		spin_lock(&cfids->cfid_list_lock);
		list_for_each_entry(cfid, &cfids->entries, entry) {
			tmp_list = kmalloc(sizeof(*tmp_list), GFP_ATOMIC);
			if (tmp_list == NULL)
				break;
			spin_lock(&cfid->fid_lock);
			tmp_list->dentry = cfid->dentry;
			cfid->dentry = NULL;
			spin_unlock(&cfid->fid_lock);

			list_add_tail(&tmp_list->entry, &entry);
		}
		spin_unlock(&cfids->cfid_list_lock);
	}
	spin_unlock(&cifs_sb->tlink_tree_lock);

	list_for_each_entry_safe(tmp_list, q, &entry, entry) {
		list_del(&tmp_list->entry);
		dput(tmp_list->dentry);
		kfree(tmp_list);
	}

	/* Flush any pending work that will drop dentries */
	flush_workqueue(cfid_put_wq);
}

/*
 * Invalidate all cached dirs when a TCON has been reset
 * due to a session loss.
 */
void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
{
	struct cached_fids *cfids = tcon->cfids;
	struct cached_fid *cfid, *q;

	if (cfids == NULL)
		return;

	/*
	 * Mark all the cfids as closed, and move them to the cfids->dying list.
	 * They'll be cleaned up later by cfids_invalidation_worker.  Take
	 * a reference to each cfid during this process.
	 */
	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		list_move(&cfid->entry, &cfids->dying);
		cfids->num_entries--;
		cfid->is_open = false;
		cfid->on_list = false;
		if (cfid->has_lease) {
			/*
			 * The lease was never cancelled from the server,
			 * so steal that reference.
			 */
			cfid->has_lease = false;
		} else
			kref_get(&cfid->refcount);
	}
	/*
	 * Queue dropping of the dentries once locks have been dropped
	 */
	if (!list_empty(&cfids->dying))
		queue_work(cfid_put_wq, &cfids->invalidation_work);
	spin_unlock(&cfids->cfid_list_lock);
}

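/*
 * Work handler that drops the cfid reference (closing the handle on the
 * server if it is still open) and releases the tcon reference taken when
 * this work was queued.
 */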
static void
cached_dir_offload_close(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work,
					       struct cached_fid, close_work);
	struct cifs_tcon *tcon = cfid->tcon;

	WARN_ON(cfid->on_list);

	kref_put(&cfid->refcount, smb2_close_cached_fid);
	cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cached_close);
}

/*
 * Release the cached directory's dentry, and then queue work to drop the
 * cached directory itself (closing it on the server if needed).
 *
 * Must be called with a reference to the cached_fid and a reference to the
 * tcon.
 */
static void cached_dir_put_work(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work, struct cached_fid,
					       put_work);
	struct dentry *dentry;

	spin_lock(&cfid->fid_lock);
	dentry = cfid->dentry;
	cfid->dentry = NULL;
	spin_unlock(&cfid->fid_lock);

	dput(dentry);
	queue_work(serverclose_wq, &cfid->close_work);
}

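/*
 * Handle a lease break on a cached directory: if @lease_key matches a
 * cached handle, drop its lease, unlink it from the list and queue work
 * to put it.  Returns true if the break was for a cached handle.
 */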
int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
{
	struct cached_fids *cfids = tcon->cfids;
	struct cached_fid *cfid;

	if (cfids == NULL)
		return false;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (cfid->has_lease &&
		    !memcmp(lease_key,
			    cfid->fid.lease_key,
			    SMB2_LEASE_KEY_SIZE)) {
			cfid->has_lease = false;
			cfid->time = 0;
			/*
			 * We found a lease; remove it from the list
			 * so no threads can access it.
			 */
			list_del(&cfid->entry);
			cfid->on_list = false;
			cfids->num_entries--;

			++tcon->tc_count;
			trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
					    netfs_trace_tcon_ref_get_cached_lease_break);
			queue_work(cfid_put_wq, &cfid->put_work);
			spin_unlock(&cfids->cfid_list_lock);
			return true;
		}
	}
	spin_unlock(&cfids->cfid_list_lock);
	return false;
}

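/*
 * Allocate and initialize a cached_fid for @path.  Uses GFP_ATOMIC since
 * the caller holds cfids->cfid_list_lock.
 */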
static struct cached_fid *init_cached_dir(const char *path)
{
	struct cached_fid *cfid;

	cfid = kzalloc(sizeof(*cfid), GFP_ATOMIC);
	if (!cfid)
		return NULL;
	cfid->path = kstrdup(path, GFP_ATOMIC);
	if (!cfid->path) {
		kfree(cfid);
		return NULL;
	}

	INIT_WORK(&cfid->close_work, cached_dir_offload_close);
	INIT_WORK(&cfid->put_work, cached_dir_put_work);
	INIT_LIST_HEAD(&cfid->entry);
	INIT_LIST_HEAD(&cfid->dirents.entries);
	mutex_init(&cfid->dirents.de_mutex);
	spin_lock_init(&cfid->fid_lock);
	kref_init(&cfid->refcount);
	return cfid;
}

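/*
 * Final teardown of a cached_fid: drop the dentry, free all cached
 * dirents, the path and the structure itself.
 */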
static void free_cached_dir(struct cached_fid *cfid)
{
	struct cached_dirent *dirent, *q;

	WARN_ON(work_pending(&cfid->close_work));
	WARN_ON(work_pending(&cfid->put_work));

	dput(cfid->dentry);
	cfid->dentry = NULL;

	/*
	 * Delete all cached dirent names
	 */
	list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
		list_del(&dirent->entry);
		kfree(dirent->name);
		kfree(dirent);
	}

	kfree(cfid->path);
	cfid->path = NULL;
	kfree(cfid);
}

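/*
 * Work handler queued by invalidate_all_cached_dirs(): detach the dying
 * list under the lock, then drop the reference taken for each cfid.
 */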
static void cfids_invalidation_worker(struct work_struct *work)
{
	struct cached_fids *cfids = container_of(work, struct cached_fids,
						 invalidation_work);
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	spin_lock(&cfids->cfid_list_lock);
	/* move cfids->dying to the local list */
	list_cut_before(&entry, &cfids->dying, &cfids->dying);
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);
		/* Drop the ref-count acquired in invalidate_all_cached_dirs */
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
}

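/*
 * Periodic worker that expires cached handles older than
 * dir_cache_timeout seconds: it drops their dentries, closes any handles
 * still open on the server, and then re-arms itself.
 */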
static void cfids_laundromat_worker(struct work_struct *work)
{
	struct cached_fids *cfids;
	struct cached_fid *cfid, *q;
	struct dentry *dentry;
	LIST_HEAD(entry);

	cfids = container_of(work, struct cached_fids, laundromat_work.work);

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		if (cfid->time &&
		    time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
			cfid->on_list = false;
			list_move(&cfid->entry, &entry);
			cfids->num_entries--;
			if (cfid->has_lease) {
				/*
				 * Our lease has not yet been cancelled from the
				 * server.  Steal that reference.
				 */
				cfid->has_lease = false;
			} else
				kref_get(&cfid->refcount);
		}
	}
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);

		spin_lock(&cfid->fid_lock);
		dentry = cfid->dentry;
		cfid->dentry = NULL;
		spin_unlock(&cfid->fid_lock);

		dput(dentry);
		if (cfid->is_open) {
			spin_lock(&cifs_tcp_ses_lock);
			++cfid->tcon->tc_count;
			trace_smb3_tcon_ref(cfid->tcon->debug_id, cfid->tcon->tc_count,
					    netfs_trace_tcon_ref_get_cached_laundromat);
			spin_unlock(&cifs_tcp_ses_lock);
			queue_work(serverclose_wq, &cfid->close_work);
		} else
			/*
			 * Drop the ref-count from above, either the lease-ref
			 * (if there was one) or the extra one acquired.
			 */
			kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
	queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
			   dir_cache_timeout * HZ);
}

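/*
 * Allocate the per-tcon cached_fids container and arm the periodic
 * laundromat worker.
 */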
struct cached_fids *init_cached_dirs(void)
{
	struct cached_fids *cfids;

	cfids = kzalloc(sizeof(*cfids), GFP_KERNEL);
	if (!cfids)
		return NULL;
	spin_lock_init(&cfids->cfid_list_lock);
	INIT_LIST_HEAD(&cfids->entries);
	INIT_LIST_HEAD(&cfids->dying);

	INIT_WORK(&cfids->invalidation_work, cfids_invalidation_worker);
	INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
	queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
			   dir_cache_timeout * HZ);

	return cfids;
}

/*
 * Called from tconInfoFree when we are tearing down the tcon.
 * There are no active users or open files/directories at this point.
 */
void free_cached_dirs(struct cached_fids *cfids)
{
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	if (cfids == NULL)
		return;

	cancel_delayed_work_sync(&cfids->laundromat_work);
	cancel_work_sync(&cfids->invalidation_work);

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		cfid->on_list = false;
		cfid->is_open = false;
		list_move(&cfid->entry, &entry);
	}
	list_for_each_entry_safe(cfid, q, &cfids->dying, entry) {
		cfid->on_list = false;
		cfid->is_open = false;
		list_move(&cfid->entry, &entry);
	}
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);
		free_cached_dir(cfid);
	}

	kfree(cfids);
}