1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Functions to handle the cached directory entries
4 *
5 * Copyright (c) 2022, Ronnie Sahlberg <lsahlber@redhat.com>
6 */
7
8 #include <linux/namei.h>
9 #include "cifsglob.h"
10 #include "cifsproto.h"
11 #include "cifs_debug.h"
12 #include "smb2proto.h"
13 #include "cached_dir.h"
14
15 static struct cached_fid *init_cached_dir(const char *path);
16 static void free_cached_dir(struct cached_fid *cfid);
17 static void smb2_close_cached_fid(struct kref *ref);
18 static void cfids_laundromat_worker(struct work_struct *work);
19
20 struct cached_dir_dentry {
21 struct list_head entry;
22 struct dentry *dentry;
23 };
24
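/*
 * Find a cached directory entry for @path, or create and list a new one
 * if the cache is not yet full.  Called with cfids->cfid_list_lock held.
 * Returns the entry with an extra reference for the caller, or NULL.
 */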
25 static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
26 const char *path,
27 bool lookup_only,
28 __u32 max_cached_dirs)
29 {
30 struct cached_fid *cfid;
31
32 list_for_each_entry(cfid, &cfids->entries, entry) {
33 if (!strcmp(cfid->path, path)) {
34 /*
35 * If it doesn't have a lease, it is either not yet
36 * fully cached or it is in the process of
37 * being deleted due to a lease break.
38 */
39 if (!is_valid_cached_dir(cfid))
40 return NULL;
41 kref_get(&cfid->refcount);
42 return cfid;
43 }
44 }
45 if (lookup_only) {
46 return NULL;
47 }
48 if (cfids->num_entries >= max_cached_dirs) {
49 return NULL;
50 }
51 cfid = init_cached_dir(path);
52 if (cfid == NULL) {
53 return NULL;
54 }
55 cfid->cfids = cfids;
56 cfids->num_entries++;
57 list_add(&cfid->entry, &cfids->entries);
58 cfid->on_list = true;
59 kref_get(&cfid->refcount);
60 /*
61 * Set @cfid->has_lease to true during construction so that the lease
62 * reference can be put in cached_dir_lease_break() due to a potential
63 * lease break right after the request is sent or while @cfid is still
64 * being cached, or if a reconnection is triggered during construction.
65 * Concurrent processes won't be able to use it yet due to @cfid->time being
66 * zero.
67 */
68 cfid->has_lease = true;
69
70 return cfid;
71 }
72
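/*
 * Walk @path one component at a time, starting from the superblock's root
 * dentry, and return the dentry of the final component (or an ERR_PTR).
 */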
73 static struct dentry *
74 path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path)
75 {
76 struct dentry *dentry;
77 const char *s, *p;
78 char sep;
79
80 sep = CIFS_DIR_SEP(cifs_sb);
81 dentry = dget(cifs_sb->root);
82 s = path;
83
84 do {
85 struct inode *dir = d_inode(dentry);
86 struct dentry *child;
87
88 if (!S_ISDIR(dir->i_mode)) {
89 dput(dentry);
90 dentry = ERR_PTR(-ENOTDIR);
91 break;
92 }
93
94 /* skip separators */
95 while (*s == sep)
96 s++;
97 if (!*s)
98 break;
99 p = s++;
100 /* next separator */
101 while (*s && *s != sep)
102 s++;
103
104 child = lookup_noperm_positive_unlocked(&QSTR_LEN(p, s - p),
105 dentry);
106 dput(dentry);
107 dentry = child;
108 } while (!IS_ERR(dentry));
109 return dentry;
110 }
111
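/*
 * Return @path with the mount's prefix path (cifs_sb->prepath) stripped,
 * or ERR_PTR(-EINVAL) if @path is shorter than the prefix.
 */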
112 static const char *path_no_prefix(struct cifs_sb_info *cifs_sb,
113 const char *path)
114 {
115 size_t len = 0;
116
117 if (!*path)
118 return path;
119
120 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
121 cifs_sb->prepath) {
122 len = strlen(cifs_sb->prepath) + 1;
123 if (unlikely(len > strlen(path)))
124 return ERR_PTR(-EINVAL);
125 }
126 return path + len;
127 }
128
129 /*
130 * Open and cache a directory handle.
131 * On error, *ret_cfid is not initialized.
132 */
133 int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
134 const char *path,
135 struct cifs_sb_info *cifs_sb,
136 bool lookup_only, struct cached_fid **ret_cfid)
137 {
138 struct cifs_ses *ses;
139 struct TCP_Server_Info *server;
140 struct cifs_open_parms oparms;
141 struct smb2_create_rsp *o_rsp = NULL;
142 struct smb2_query_info_rsp *qi_rsp = NULL;
143 int resp_buftype[2];
144 struct smb_rqst rqst[2];
145 struct kvec rsp_iov[2];
146 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
147 struct kvec qi_iov[1];
148 int rc, flags = 0;
149 __le16 *utf16_path = NULL;
150 u8 oplock = SMB2_OPLOCK_LEVEL_II;
151 struct cifs_fid *pfid;
152 struct dentry *dentry = NULL;
153 struct cached_fid *cfid;
154 struct cached_fids *cfids;
155 const char *npath;
156 int retries = 0, cur_sleep = 1;
157 __le32 lease_flags = 0;
158
159 if (cifs_sb->root == NULL)
160 return -ENOENT;
161
162 if (tcon == NULL)
163 return -EOPNOTSUPP;
164
165 ses = tcon->ses;
166 cfids = tcon->cfids;
167
168 if (cfids == NULL)
169 return -EOPNOTSUPP;
170
171 replay_again:
172 /* reinitialize for possible replay */
173 flags = 0;
174 oplock = SMB2_OPLOCK_LEVEL_II;
175 server = cifs_pick_channel(ses);
176
177 if (!server->ops->new_lease_key)
178 return -EIO;
179
180 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
181 if (!utf16_path)
182 return -ENOMEM;
183
184 spin_lock(&cfids->cfid_list_lock);
185 cfid = find_or_create_cached_dir(cfids, path, lookup_only, tcon->max_cached_dirs);
186 if (cfid == NULL) {
187 spin_unlock(&cfids->cfid_list_lock);
188 kfree(utf16_path);
189 return -ENOENT;
190 }
191 /*
192 * Return the cached fid if it is valid (has a lease and has a time).
193 * Otherwise, it is either a new entry or the laundromat worker removed it
194 * from @cfids->entries. Caller will put the last reference if the latter.
195 */
196 if (is_valid_cached_dir(cfid)) {
197 cfid->last_access_time = jiffies;
198 spin_unlock(&cfids->cfid_list_lock);
199 *ret_cfid = cfid;
200 kfree(utf16_path);
201 return 0;
202 }
203 spin_unlock(&cfids->cfid_list_lock);
204
205 pfid = &cfid->fid;
206
207 /*
208 * Skip any prefix paths in @path as lookup_noperm_positive_unlocked() ends up
209 * calling ->lookup() which already adds those through
210 * build_path_from_dentry(). Also, do it earlier as we might reconnect
211 * below when trying to send the compounded request and then potentially
212 * having a different prefix path (e.g. after DFS failover).
213 */
214 npath = path_no_prefix(cifs_sb, path);
215 if (IS_ERR(npath)) {
216 rc = PTR_ERR(npath);
217 goto out;
218 }
219
220 if (!npath[0]) {
221 dentry = dget(cifs_sb->root);
222 } else {
223 dentry = path_to_dentry(cifs_sb, npath);
224 if (IS_ERR(dentry)) {
225 rc = -ENOENT;
226 goto out;
227 }
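		/*
		 * If the parent directory is already cached with a valid
		 * lease, advertise its lease key as the parent lease key
		 * for this open.
		 */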
228 if (dentry->d_parent && server->dialect >= SMB30_PROT_ID) {
229 struct cached_fid *parent_cfid;
230
231 spin_lock(&cfids->cfid_list_lock);
232 list_for_each_entry(parent_cfid, &cfids->entries, entry) {
233 if (parent_cfid->dentry == dentry->d_parent) {
234 cifs_dbg(FYI, "found a parent cached file handle\n");
235 if (is_valid_cached_dir(parent_cfid)) {
236 lease_flags
237 |= SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE;
238 memcpy(pfid->parent_lease_key,
239 parent_cfid->fid.lease_key,
240 SMB2_LEASE_KEY_SIZE);
241 }
242 break;
243 }
244 }
245 spin_unlock(&cfids->cfid_list_lock);
246 }
247 }
248 cfid->dentry = dentry;
249 cfid->tcon = tcon;
250
251 /*
252 * We do not hold the lock while sending the open, in case
253 * SMB2_open needs to reconnect.
254 * This is safe because no other thread will be able to get a ref
255 * to the cfid until we have finished opening the file and (possibly)
256 * acquired a lease.
257 */
258 if (smb3_encryption_required(tcon))
259 flags |= CIFS_TRANSFORM_REQ;
260
261 server->ops->new_lease_key(pfid);
262
263 memset(rqst, 0, sizeof(rqst));
264 resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
265 memset(rsp_iov, 0, sizeof(rsp_iov));
266
267 /* Open */
268 memset(&open_iov, 0, sizeof(open_iov));
269 rqst[0].rq_iov = open_iov;
270 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
271
272 oparms = (struct cifs_open_parms) {
273 .tcon = tcon,
274 .path = path,
275 .create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
276 .desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES |
277 FILE_READ_EA,
278 .disposition = FILE_OPEN,
279 .fid = pfid,
280 .lease_flags = lease_flags,
281 .replay = !!(retries),
282 };
283
284 rc = SMB2_open_init(tcon, server,
285 &rqst[0], &oplock, &oparms, utf16_path);
286 if (rc)
287 goto oshr_free;
288 smb2_set_next_command(tcon, &rqst[0]);
289
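	/* Query info request, compounded with the create above */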
290 memset(&qi_iov, 0, sizeof(qi_iov));
291 rqst[1].rq_iov = qi_iov;
292 rqst[1].rq_nvec = 1;
293
294 rc = SMB2_query_info_init(tcon, server,
295 &rqst[1], COMPOUND_FID,
296 COMPOUND_FID, FILE_ALL_INFORMATION,
297 SMB2_O_INFO_FILE, 0,
298 sizeof(struct smb2_file_all_info) +
299 PATH_MAX * 2, 0, NULL);
300 if (rc)
301 goto oshr_free;
302
303 smb2_set_related(&rqst[1]);
304
305 if (retries) {
306 smb2_set_replay(server, &rqst[0]);
307 smb2_set_replay(server, &rqst[1]);
308 }
309
310 rc = compound_send_recv(xid, ses, server,
311 flags, 2, rqst,
312 resp_buftype, rsp_iov);
313 if (rc) {
314 if (rc == -EREMCHG) {
315 tcon->need_reconnect = true;
316 pr_warn_once("server share %s deleted\n",
317 tcon->tree_name);
318 }
319 goto oshr_free;
320 }
321 cfid->is_open = true;
322
323 spin_lock(&cfids->cfid_list_lock);
324
325 o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
326 oparms.fid->persistent_fid = o_rsp->PersistentFileId;
327 oparms.fid->volatile_fid = o_rsp->VolatileFileId;
328 #ifdef CONFIG_CIFS_DEBUG2
329 oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
330 #endif /* CIFS_DEBUG2 */
331
332
333 if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) {
334 spin_unlock(&cfids->cfid_list_lock);
335 rc = -EINVAL;
336 goto oshr_free;
337 }
338
339 rc = smb2_parse_contexts(server, rsp_iov,
340 &oparms.fid->epoch,
341 oparms.fid->lease_key,
342 &oplock, NULL, NULL);
343 if (rc) {
344 spin_unlock(&cfids->cfid_list_lock);
345 goto oshr_free;
346 }
347
348 rc = -EINVAL;
349 if (!(oplock & SMB2_LEASE_READ_CACHING_HE)) {
350 spin_unlock(&cfids->cfid_list_lock);
351 goto oshr_free;
352 }
353 qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
354 if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) {
355 spin_unlock(&cfids->cfid_list_lock);
356 goto oshr_free;
357 }
358 if (!smb2_validate_and_copy_iov(
359 le16_to_cpu(qi_rsp->OutputBufferOffset),
360 sizeof(struct smb2_file_all_info),
361 &rsp_iov[1], sizeof(struct smb2_file_all_info),
362 (char *)&cfid->file_all_info))
363 cfid->file_all_info_is_valid = true;
364
365 cfid->time = jiffies;
366 cfid->last_access_time = jiffies;
367 spin_unlock(&cfids->cfid_list_lock);
368 /* At this point the directory handle is fully cached */
369 rc = 0;
370
371 oshr_free:
372 SMB2_open_free(&rqst[0]);
373 SMB2_query_info_free(&rqst[1]);
374 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
375 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
376 out:
377 if (rc) {
378 spin_lock(&cfids->cfid_list_lock);
379 if (cfid->on_list) {
380 list_del(&cfid->entry);
381 cfid->on_list = false;
382 cfids->num_entries--;
383 }
384 if (cfid->has_lease) {
385 /*
386 * We are guaranteed to have two references at this
387 * point. One for the caller and one for a potential
388 * lease. Release one here, and the second below.
389 */
390 cfid->has_lease = false;
391 close_cached_dir(cfid);
392 }
393 spin_unlock(&cfids->cfid_list_lock);
394
395 close_cached_dir(cfid);
396 } else {
397 *ret_cfid = cfid;
398 atomic_inc(&tcon->num_remote_opens);
399 }
400 kfree(utf16_path);
401
402 if (is_replayable_error(rc) &&
403 smb2_should_replay(tcon, &retries, &cur_sleep))
404 goto replay_again;
405
406 return rc;
407 }
408
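/*
 * Look up an already-cached directory handle by its dentry rather than by
 * path.  Takes an extra reference on the cached fid on success.
 */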
409 int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
410 struct dentry *dentry,
411 struct cached_fid **ret_cfid)
412 {
413 struct cached_fid *cfid;
414 struct cached_fids *cfids = tcon->cfids;
415
416 if (cfids == NULL)
417 return -EOPNOTSUPP;
418
419 if (!dentry)
420 return -ENOENT;
421
422 spin_lock(&cfids->cfid_list_lock);
423 list_for_each_entry(cfid, &cfids->entries, entry) {
424 if (cfid->dentry == dentry) {
425 if (!is_valid_cached_dir(cfid))
426 break;
427 cifs_dbg(FYI, "found a cached file handle by dentry\n");
428 kref_get(&cfid->refcount);
429 *ret_cfid = cfid;
430 cfid->last_access_time = jiffies;
431 spin_unlock(&cfids->cfid_list_lock);
432 return 0;
433 }
434 }
435 spin_unlock(&cfids->cfid_list_lock);
436 return -ENOENT;
437 }
438
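/*
 * kref release callback: unlist the cached fid, drop cfid_list_lock, close
 * the handle on the server if it is still open, and free the structure.
 */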
439 static void
440 smb2_close_cached_fid(struct kref *ref)
441 __releases(&cfid->cfids->cfid_list_lock)
442 {
443 struct cached_fid *cfid = container_of(ref, struct cached_fid,
444 refcount);
445 int rc;
446
447 lockdep_assert_held(&cfid->cfids->cfid_list_lock);
448
449 if (cfid->on_list) {
450 list_del(&cfid->entry);
451 cfid->on_list = false;
452 cfid->cfids->num_entries--;
453 }
454 spin_unlock(&cfid->cfids->cfid_list_lock);
455
456 dput(cfid->dentry);
457 cfid->dentry = NULL;
458
459 if (cfid->is_open) {
460 rc = SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
461 cfid->fid.volatile_fid);
462 if (rc) /* should we retry on -EBUSY or -EAGAIN? */
463 cifs_dbg(VFS, "close cached dir rc %d\n", rc);
464 }
465
466 free_cached_dir(cfid);
467 }
468
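/*
 * Drop any cached directory handle for @name (e.g. prior to rmdir),
 * releasing the lease reference if one is still held.
 */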
469 void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon,
470 const char *name, struct cifs_sb_info *cifs_sb)
471 {
472 struct cached_fid *cfid = NULL;
473 int rc;
474
475 rc = open_cached_dir(xid, tcon, name, cifs_sb, true, &cfid);
476 if (rc) {
477 cifs_dbg(FYI, "no cached dir found for rmdir(%s)\n", name);
478 return;
479 }
480 spin_lock(&cfid->cfids->cfid_list_lock);
481 if (cfid->has_lease) {
482 cfid->has_lease = false;
483 close_cached_dir(cfid);
484 }
485 spin_unlock(&cfid->cfids->cfid_list_lock);
486 close_cached_dir(cfid);
487 }
488
489
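/*
 * Drop a reference to the cached fid; the final reference is put under
 * cfid_list_lock and releases the handle via smb2_close_cached_fid().
 */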
490 void close_cached_dir(struct cached_fid *cfid)
491 {
492 kref_put_lock(&cfid->refcount, smb2_close_cached_fid, &cfid->cfids->cfid_list_lock);
493 }
494
495 /*
496 * Called from cifs_kill_sb when we unmount a share
497 */
498 void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
499 {
500 struct rb_root *root = &cifs_sb->tlink_tree;
501 struct rb_node *node;
502 struct cached_fid *cfid;
503 struct cifs_tcon *tcon;
504 struct tcon_link *tlink;
505 struct cached_fids *cfids;
506 struct cached_dir_dentry *tmp_list, *q;
507 LIST_HEAD(entry);
508
509 spin_lock(&cifs_sb->tlink_tree_lock);
510 for (node = rb_first(root); node; node = rb_next(node)) {
511 tlink = rb_entry(node, struct tcon_link, tl_rbnode);
512 tcon = tlink_tcon(tlink);
513 if (IS_ERR(tcon))
514 continue;
515 cfids = tcon->cfids;
516 if (cfids == NULL)
517 continue;
518 spin_lock(&cfids->cfid_list_lock);
519 list_for_each_entry(cfid, &cfids->entries, entry) {
520 tmp_list = kmalloc(sizeof(*tmp_list), GFP_ATOMIC);
521 if (tmp_list == NULL) {
522 /*
523 * If the malloc() fails, we won't drop all
524 * dentries, and unmounting is likely to trigger
525 * a 'Dentry still in use' error.
526 */
527 cifs_tcon_dbg(VFS, "Out of memory while dropping dentries\n");
528 spin_unlock(&cfids->cfid_list_lock);
529 spin_unlock(&cifs_sb->tlink_tree_lock);
530 goto done;
531 }
532
533 tmp_list->dentry = cfid->dentry;
534 cfid->dentry = NULL;
535
536 list_add_tail(&tmp_list->entry, &entry);
537 }
538 spin_unlock(&cfids->cfid_list_lock);
539 }
540 spin_unlock(&cifs_sb->tlink_tree_lock);
541
542 done:
543 list_for_each_entry_safe(tmp_list, q, &entry, entry) {
544 list_del(&tmp_list->entry);
545 dput(tmp_list->dentry);
546 kfree(tmp_list);
547 }
548
549 /* Flush any pending work that will drop dentries */
550 flush_workqueue(cfid_put_wq);
551 }
552
553 /*
554 * Invalidate all cached dirs when a TCON has been reset
555 * due to a session loss.
556 */
557 void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
558 {
559 struct cached_fids *cfids = tcon->cfids;
560 struct cached_fid *cfid, *q;
561
562 if (cfids == NULL)
563 return;
564
565 /*
566 * Mark all the cfids as closed, and move them to the cfids->dying list.
567 * They'll be cleaned up by the laundromat worker. Take a reference to each cfid
568 * during this process.
569 */
570 spin_lock(&cfids->cfid_list_lock);
571 list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
572 list_move(&cfid->entry, &cfids->dying);
573 cfids->num_entries--;
574 cfid->is_open = false;
575 cfid->on_list = false;
576 if (cfid->has_lease) {
577 /*
578 * The lease was never cancelled from the server,
579 * so steal that reference.
580 */
581 cfid->has_lease = false;
582 } else
583 kref_get(&cfid->refcount);
584 }
585 spin_unlock(&cfids->cfid_list_lock);
586
587 /* run laundromat unconditionally now as there might have been previously queued work */
588 mod_delayed_work(cfid_put_wq, &cfids->laundromat_work, 0);
589 flush_delayed_work(&cfids->laundromat_work);
590 }
591
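/*
 * Work item: drop the deferred reference to the cached fid (closing the
 * handle on the server if it was the last one) and put the tcon reference
 * taken when this work was queued.
 */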
592 static void
593 cached_dir_offload_close(struct work_struct *work)
594 {
595 struct cached_fid *cfid = container_of(work,
596 struct cached_fid, close_work);
597 struct cifs_tcon *tcon = cfid->tcon;
598
599 WARN_ON(cfid->on_list);
600
601 close_cached_dir(cfid);
602 cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cached_close);
603 }
604
605 /*
606 * Release the cached directory's dentry, and then queue work to drop the
607 * cached directory itself (closing it on the server if needed).
608 *
609 * Must be called with a reference to the cached_fid and a reference to the
610 * tcon.
611 */
612 static void cached_dir_put_work(struct work_struct *work)
613 {
614 struct cached_fid *cfid = container_of(work, struct cached_fid,
615 put_work);
616 dput(cfid->dentry);
617 cfid->dentry = NULL;
618
619 queue_work(serverclose_wq, &cfid->close_work);
620 }
621
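/*
 * Handle a lease break for a cached directory: if @lease_key matches a
 * cached fid that holds a lease, unlist it and queue work to drop the
 * lease reference.  Returns true if a matching entry was found.
 */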
622 bool cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
623 {
624 struct cached_fids *cfids = tcon->cfids;
625 struct cached_fid *cfid;
626
627 if (cfids == NULL)
628 return false;
629
630 spin_lock(&cfids->cfid_list_lock);
631 list_for_each_entry(cfid, &cfids->entries, entry) {
632 if (cfid->has_lease &&
633 !memcmp(lease_key,
634 cfid->fid.lease_key,
635 SMB2_LEASE_KEY_SIZE)) {
636 cfid->has_lease = false;
637 cfid->time = 0;
638 /*
639 * We found a matching lease; remove it from the list
640 * so that no other threads can access it.
641 */
642 list_del(&cfid->entry);
643 cfid->on_list = false;
644 cfids->num_entries--;
645
646 ++tcon->tc_count;
647 trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
648 netfs_trace_tcon_ref_get_cached_lease_break);
649 queue_work(cfid_put_wq, &cfid->put_work);
650 spin_unlock(&cfids->cfid_list_lock);
651 return true;
652 }
653 }
654 spin_unlock(&cfids->cfid_list_lock);
655 return false;
656 }
657
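/*
 * Allocate and initialize a cached_fid for @path.  Uses GFP_ATOMIC since
 * it is called with cfids->cfid_list_lock held.
 */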
658 static struct cached_fid *init_cached_dir(const char *path)
659 {
660 struct cached_fid *cfid;
661
662 cfid = kzalloc(sizeof(*cfid), GFP_ATOMIC);
663 if (!cfid)
664 return NULL;
665 cfid->path = kstrdup(path, GFP_ATOMIC);
666 if (!cfid->path) {
667 kfree(cfid);
668 return NULL;
669 }
670
671 INIT_WORK(&cfid->close_work, cached_dir_offload_close);
672 INIT_WORK(&cfid->put_work, cached_dir_put_work);
673 INIT_LIST_HEAD(&cfid->entry);
674 INIT_LIST_HEAD(&cfid->dirents.entries);
675 mutex_init(&cfid->dirents.de_mutex);
676 kref_init(&cfid->refcount);
677 return cfid;
678 }
679
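/*
 * Final teardown of a cached fid: release the dentry, free any cached
 * dirents and roll back the dirent accounting, then free the structure.
 */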
680 static void free_cached_dir(struct cached_fid *cfid)
681 {
682 struct cached_dirent *dirent, *q;
683
684 WARN_ON(work_pending(&cfid->close_work));
685 WARN_ON(work_pending(&cfid->put_work));
686
687 dput(cfid->dentry);
688 cfid->dentry = NULL;
689
690 /*
691 * Delete all cached dirent names
692 */
693 list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
694 list_del(&dirent->entry);
695 kfree(dirent->name);
696 kfree(dirent);
697 }
698
699 /* adjust tcon-level counters and reset per-dir accounting */
700 if (cfid->cfids) {
701 if (cfid->dirents.entries_count)
702 atomic_long_sub((long)cfid->dirents.entries_count,
703 &cfid->cfids->total_dirents_entries);
704 if (cfid->dirents.bytes_used) {
705 atomic64_sub((long long)cfid->dirents.bytes_used,
706 &cfid->cfids->total_dirents_bytes);
707 atomic64_sub((long long)cfid->dirents.bytes_used,
708 &cifs_dircache_bytes_used);
709 }
710 }
711 cfid->dirents.entries_count = 0;
712 cfid->dirents.bytes_used = 0;
713
714 kfree(cfid->path);
715 cfid->path = NULL;
716 kfree(cfid);
717 }
718
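/*
 * Periodic worker: drop cached directories that have not been accessed for
 * dir_cache_timeout seconds, and clean up entries previously moved to the
 * dying list.
 */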
719 static void cfids_laundromat_worker(struct work_struct *work)
720 {
721 struct cached_fids *cfids;
722 struct cached_fid *cfid, *q;
723 LIST_HEAD(entry);
724
725 cfids = container_of(work, struct cached_fids, laundromat_work.work);
726
727 spin_lock(&cfids->cfid_list_lock);
728 /* move cfids->dying to the local list */
729 list_cut_before(&entry, &cfids->dying, &cfids->dying);
730
731 list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
732 if (cfid->last_access_time &&
733 time_after(jiffies, cfid->last_access_time + HZ * dir_cache_timeout)) {
734 cfid->on_list = false;
735 list_move(&cfid->entry, &entry);
736 cfids->num_entries--;
737 if (cfid->has_lease) {
738 /*
739 * Our lease has not yet been cancelled from the
740 * server. Steal that reference.
741 */
742 cfid->has_lease = false;
743 } else
744 kref_get(&cfid->refcount);
745 }
746 }
747 spin_unlock(&cfids->cfid_list_lock);
748
749 list_for_each_entry_safe(cfid, q, &entry, entry) {
750 list_del(&cfid->entry);
751
752 dput(cfid->dentry);
753 cfid->dentry = NULL;
754
755 if (cfid->is_open) {
756 spin_lock(&cifs_tcp_ses_lock);
757 ++cfid->tcon->tc_count;
758 trace_smb3_tcon_ref(cfid->tcon->debug_id, cfid->tcon->tc_count,
759 netfs_trace_tcon_ref_get_cached_laundromat);
760 spin_unlock(&cifs_tcp_ses_lock);
761 queue_work(serverclose_wq, &cfid->close_work);
762 } else
763 /*
764 * Drop the ref-count from above, either the lease-ref (if there
765 * was one) or the extra one acquired.
766 */
767 close_cached_dir(cfid);
768 }
769 queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
770 dir_cache_timeout * HZ);
771 }
772
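/*
 * Allocate and initialize the per-tcon cached_fids structure and schedule
 * its laundromat work.
 */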
773 struct cached_fids *init_cached_dirs(void)
774 {
775 struct cached_fids *cfids;
776
777 cfids = kzalloc(sizeof(*cfids), GFP_KERNEL);
778 if (!cfids)
779 return NULL;
780 spin_lock_init(&cfids->cfid_list_lock);
781 INIT_LIST_HEAD(&cfids->entries);
782 INIT_LIST_HEAD(&cfids->dying);
783
784 INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
785 queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
786 dir_cache_timeout * HZ);
787
788 atomic_long_set(&cfids->total_dirents_entries, 0);
789 atomic64_set(&cfids->total_dirents_bytes, 0);
790
791 return cfids;
792 }
793
794 /*
795 * Called from tconInfoFree when we are tearing down the tcon.
796 * There are no active users or open files/directories at this point.
797 */
798 void free_cached_dirs(struct cached_fids *cfids)
799 {
800 struct cached_fid *cfid, *q;
801 LIST_HEAD(entry);
802
803 if (cfids == NULL)
804 return;
805
806 cancel_delayed_work_sync(&cfids->laundromat_work);
807
808 spin_lock(&cfids->cfid_list_lock);
809 list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
810 cfid->on_list = false;
811 cfid->is_open = false;
812 list_move(&cfid->entry, &entry);
813 }
814 list_for_each_entry_safe(cfid, q, &cfids->dying, entry) {
815 cfid->on_list = false;
816 cfid->is_open = false;
817 list_move(&cfid->entry, &entry);
818 }
819 spin_unlock(&cfids->cfid_list_lock);
820
821 list_for_each_entry_safe(cfid, q, &entry, entry) {
822 list_del(&cfid->entry);
823 free_cached_dir(cfid);
824 }
825
826 kfree(cfids);
827 }
828