// SPDX-License-Identifier: GPL-2.0
/*
 * Functions to handle the cached directory entries
 *
 * Copyright (c) 2022, Ronnie Sahlberg <lsahlber@redhat.com>
 */

#include <linux/namei.h>
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "cached_dir.h"

static struct cached_fid *init_cached_dir(const char *path);
static void free_cached_dir(struct cached_fid *cfid);
static void smb2_close_cached_fid(struct kref *ref);
static void cfids_laundromat_worker(struct work_struct *work);

struct cached_dir_dentry {
	struct list_head entry;
	struct dentry *dentry;
};

static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
						     const char *path,
						     bool lookup_only,
						     __u32 max_cached_dirs)
{
	struct cached_fid *cfid;

	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (!strcmp(cfid->path, path)) {
			/*
			 * If it doesn't have a lease it is either not yet
			 * fully cached or it may be in the process of
			 * being deleted due to a lease break.
			 */
			if (!is_valid_cached_dir(cfid))
				return NULL;
			kref_get(&cfid->refcount);
			return cfid;
		}
	}
	if (lookup_only) {
		return NULL;
	}
	if (cfids->num_entries >= max_cached_dirs) {
		return NULL;
	}
	cfid = init_cached_dir(path);
	if (cfid == NULL) {
		return NULL;
	}
	cfid->cfids = cfids;
	cfids->num_entries++;
	list_add(&cfid->entry, &cfids->entries);
	cfid->on_list = true;
	kref_get(&cfid->refcount);
	/*
	 * Set @cfid->has_lease to true during construction so that the lease
	 * reference can be put in cached_dir_lease_break() due to a potential
	 * lease break right after the request is sent or while @cfid is still
	 * being cached, or if a reconnection is triggered during construction.
	 * Concurrent processes won't be able to use it yet due to @cfid->time
	 * being zero.
	 */
	cfid->has_lease = true;

	return cfid;
}

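/*
 * Walk @path, component by component, from the mount root and return a
 * referenced dentry for the final component, or an ERR_PTR() on failure
 * (e.g. -ENOTDIR if an intermediate component is not a directory).  The
 * component separator is the mount's directory separator, CIFS_DIR_SEP().
 */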
static struct dentry *
path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path)
{
	struct dentry *dentry;
	const char *s, *p;
	char sep;

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(cifs_sb->root);
	s = path;

	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		child = lookup_noperm_positive_unlocked(&QSTR_LEN(p, s - p),
							dentry);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	return dentry;
}

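/*
 * Return a pointer into @path just past the mount prefix path (prepath),
 * if CIFS_MOUNT_USE_PREFIX_PATH is set; otherwise return @path unchanged.
 * Returns an ERR_PTR() if @path is shorter than the prefix.
 */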
static const char *path_no_prefix(struct cifs_sb_info *cifs_sb,
				  const char *path)
{
	size_t len = 0;

	if (!*path)
		return path;

	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
	    cifs_sb->prepath) {
		len = strlen(cifs_sb->prepath) + 1;
		if (unlikely(len > strlen(path)))
			return ERR_PTR(-EINVAL);
	}
	return path + len;
}

/*
 * Open and cache a directory handle.
 * If error then *cfid is not initialized.
 */
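/*
 * Typical caller pattern (illustrative sketch only, not taken from a
 * specific call site):
 *
 *	struct cached_fid *cfid;
 *	int rc;
 *
 *	rc = open_cached_dir(xid, tcon, path, cifs_sb, false, &cfid);
 *	if (!rc) {
 *		... use cfid->fid, cfid->file_all_info, cfid->dirents ...
 *		close_cached_dir(cfid);
 *	}
 */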
int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
		    const char *path,
		    struct cifs_sb_info *cifs_sb,
		    bool lookup_only, struct cached_fid **ret_cfid)
{
	struct cifs_ses *ses;
	struct TCP_Server_Info *server;
	struct cifs_open_parms oparms;
	struct smb2_create_rsp *o_rsp = NULL;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	int resp_buftype[2];
	struct smb_rqst rqst[2];
	struct kvec rsp_iov[2];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	int rc, flags = 0;
	__le16 *utf16_path = NULL;
	u8 oplock = SMB2_OPLOCK_LEVEL_II;
	struct cifs_fid *pfid;
	struct dentry *dentry = NULL;
	struct cached_fid *cfid;
	struct cached_fids *cfids;
	const char *npath;
	int retries = 0, cur_sleep = 1;
	__le32 lease_flags = 0;

	if (cifs_sb->root == NULL)
		return -ENOENT;

	if (tcon == NULL)
		return -EOPNOTSUPP;

	ses = tcon->ses;
	cfids = tcon->cfids;

	if (cfids == NULL)
		return -EOPNOTSUPP;

replay_again:
	/* reinitialize for possible replay */
	flags = 0;
	oplock = SMB2_OPLOCK_LEVEL_II;
	server = cifs_pick_channel(ses);

	if (!server->ops->new_lease_key)
		return -EIO;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	spin_lock(&cfids->cfid_list_lock);
	cfid = find_or_create_cached_dir(cfids, path, lookup_only, tcon->max_cached_dirs);
	if (cfid == NULL) {
		spin_unlock(&cfids->cfid_list_lock);
		kfree(utf16_path);
		return -ENOENT;
	}
	/*
	 * Return cached fid if it is valid (has a lease and has a time).
	 * Otherwise, it is either a new entry or laundromat worker removed it
	 * from @cfids->entries.  Caller will put last reference if the latter.
	 */
	if (is_valid_cached_dir(cfid)) {
		cfid->last_access_time = jiffies;
		spin_unlock(&cfids->cfid_list_lock);
		*ret_cfid = cfid;
		kfree(utf16_path);
		return 0;
	}
	spin_unlock(&cfids->cfid_list_lock);

	pfid = &cfid->fid;

	/*
	 * Skip any prefix paths in @path as lookup_noperm_positive_unlocked()
	 * ends up calling ->lookup() which already adds those through
	 * build_path_from_dentry().  Also, do it early as we might reconnect
	 * below when trying to send the compounded request and then end up
	 * with a different prefix path (e.g. after DFS failover).
	 */
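	/*
	 * For example (hypothetical mount), with a prepath of "base" a full
	 * path of "base\foo" is reduced to "foo" here before the dentry walk.
	 */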
	npath = path_no_prefix(cifs_sb, path);
	if (IS_ERR(npath)) {
		rc = PTR_ERR(npath);
		goto out;
	}

	if (!npath[0]) {
		dentry = dget(cifs_sb->root);
	} else {
		dentry = path_to_dentry(cifs_sb, npath);
		if (IS_ERR(dentry)) {
			rc = -ENOENT;
			goto out;
		}
		if (dentry->d_parent && server->dialect >= SMB30_PROT_ID) {
			struct cached_fid *parent_cfid;

			spin_lock(&cfids->cfid_list_lock);
			list_for_each_entry(parent_cfid, &cfids->entries, entry) {
				if (parent_cfid->dentry == dentry->d_parent) {
					cifs_dbg(FYI, "found a parent cached file handle\n");
					if (is_valid_cached_dir(parent_cfid)) {
						lease_flags
							|= SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE;
						memcpy(pfid->parent_lease_key,
						       parent_cfid->fid.lease_key,
						       SMB2_LEASE_KEY_SIZE);
					}
					break;
				}
			}
			spin_unlock(&cfids->cfid_list_lock);
		}
	}
	cfid->dentry = dentry;
	cfid->tcon = tcon;

	/*
	 * We do not hold the lock for the open in case SMB2_open needs
	 * to reconnect.
	 * This is safe because no other thread will be able to get a ref
	 * to the cfid until we have finished opening the file and (possibly)
	 * acquired a lease.
	 */
	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	server->ops->new_lease_key(pfid);

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

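	/*
	 * Build a two-op compound request: an SMB2_CREATE of the directory
	 * followed by an SMB2_QUERY_INFO (FILE_ALL_INFORMATION) on the
	 * just-opened handle (COMPOUND_FID), sent via compound_send_recv().
	 */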
	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.path = path,
		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
		.desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES |
				  FILE_READ_EA,
		.disposition = FILE_OPEN,
		.fid = pfid,
		.lease_flags = lease_flags,
		.replay = !!(retries),
	};

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto oshr_free;
	smb2_set_next_command(tcon, &rqst[0]);

	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, server,
				  &rqst[1], COMPOUND_FID,
				  COMPOUND_FID, FILE_ALL_INFORMATION,
				  SMB2_O_INFO_FILE, 0,
				  sizeof(struct smb2_file_all_info) +
				  PATH_MAX * 2, 0, NULL);
	if (rc)
		goto oshr_free;

	smb2_set_related(&rqst[1]);

	if (retries) {
		smb2_set_replay(server, &rqst[0]);
		smb2_set_replay(server, &rqst[1]);
	}

	rc = compound_send_recv(xid, ses, server,
				flags, 2, rqst,
				resp_buftype, rsp_iov);
	if (rc) {
		if (rc == -EREMCHG) {
			tcon->need_reconnect = true;
			pr_warn_once("server share %s deleted\n",
				     tcon->tree_name);
		}
		goto oshr_free;
	}
	cfid->is_open = true;

	spin_lock(&cfids->cfid_list_lock);

	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
	oparms.fid->volatile_fid = o_rsp->VolatileFileId;
#ifdef CONFIG_CIFS_DEBUG2
	oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
#endif /* CIFS_DEBUG2 */

	if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) {
		spin_unlock(&cfids->cfid_list_lock);
		rc = -EINVAL;
		goto oshr_free;
	}

	rc = smb2_parse_contexts(server, rsp_iov,
				 &oparms.fid->epoch,
				 oparms.fid->lease_key,
				 &oplock, NULL, NULL);
	if (rc) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}

	rc = -EINVAL;
	if (!(oplock & SMB2_LEASE_READ_CACHING_HE)) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}
	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}
	if (!smb2_validate_and_copy_iov(
				le16_to_cpu(qi_rsp->OutputBufferOffset),
				sizeof(struct smb2_file_all_info),
				&rsp_iov[1], sizeof(struct smb2_file_all_info),
				(char *)&cfid->file_all_info))
		cfid->file_all_info_is_valid = true;

	cfid->time = jiffies;
	cfid->last_access_time = jiffies;
	spin_unlock(&cfids->cfid_list_lock);
	/* At this point the directory handle is fully cached */
	rc = 0;

oshr_free:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
out:
	if (rc) {
		spin_lock(&cfids->cfid_list_lock);
		if (cfid->on_list) {
			list_del(&cfid->entry);
			cfid->on_list = false;
			cfids->num_entries--;
		}
		if (cfid->has_lease) {
			/*
			 * We are guaranteed to have two references at this
			 * point.  One for the caller and one for a potential
			 * lease.  Release one here, and the second below.
			 */
			cfid->has_lease = false;
			kref_put(&cfid->refcount, smb2_close_cached_fid);
		}
		spin_unlock(&cfids->cfid_list_lock);

		kref_put(&cfid->refcount, smb2_close_cached_fid);
	} else {
		*ret_cfid = cfid;
		atomic_inc(&tcon->num_remote_opens);
	}
	kfree(utf16_path);

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}

int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
			      struct dentry *dentry,
			      struct cached_fid **ret_cfid)
{
	struct cached_fid *cfid;
	struct cached_fids *cfids = tcon->cfids;

	if (cfids == NULL)
		return -EOPNOTSUPP;

	if (!dentry)
		return -ENOENT;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (cfid->dentry == dentry) {
			if (!is_valid_cached_dir(cfid))
				break;
			cifs_dbg(FYI, "found a cached file handle by dentry\n");
			kref_get(&cfid->refcount);
			*ret_cfid = cfid;
			cfid->last_access_time = jiffies;
			spin_unlock(&cfids->cfid_list_lock);
			return 0;
		}
	}
	spin_unlock(&cfids->cfid_list_lock);
	return -ENOENT;
}

static void
smb2_close_cached_fid(struct kref *ref)
{
	struct cached_fid *cfid = container_of(ref, struct cached_fid,
					       refcount);
	int rc;

	spin_lock(&cfid->cfids->cfid_list_lock);
	if (cfid->on_list) {
		list_del(&cfid->entry);
		cfid->on_list = false;
		cfid->cfids->num_entries--;
	}
	spin_unlock(&cfid->cfids->cfid_list_lock);

	dput(cfid->dentry);
	cfid->dentry = NULL;

	if (cfid->is_open) {
		rc = SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
				cfid->fid.volatile_fid);
		if (rc) /* should we retry on -EBUSY or -EAGAIN? */
			cifs_dbg(VFS, "close cached dir rc %d\n", rc);
	}

	free_cached_dir(cfid);
}

void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon,
			     const char *name, struct cifs_sb_info *cifs_sb)
{
	struct cached_fid *cfid = NULL;
	int rc;

	rc = open_cached_dir(xid, tcon, name, cifs_sb, true, &cfid);
	if (rc) {
		cifs_dbg(FYI, "no cached dir found for rmdir(%s)\n", name);
		return;
	}
	spin_lock(&cfid->cfids->cfid_list_lock);
	if (cfid->has_lease) {
		cfid->has_lease = false;
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
	spin_unlock(&cfid->cfids->cfid_list_lock);
	close_cached_dir(cfid);
}

void close_cached_dir(struct cached_fid *cfid)
{
	kref_put(&cfid->refcount, smb2_close_cached_fid);
}

/*
 * Called from cifs_kill_sb when we unmount a share
 */
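/*
 * Dentries are first collected onto a local list under the tlink-tree and
 * cfid list locks, and only dput() afterwards, so that dput(), which may
 * sleep, is never called while holding a spinlock.
 */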
void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
{
	struct rb_root *root = &cifs_sb->tlink_tree;
	struct rb_node *node;
	struct cached_fid *cfid;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cached_fids *cfids;
	struct cached_dir_dentry *tmp_list, *q;
	LIST_HEAD(entry);

	spin_lock(&cifs_sb->tlink_tree_lock);
	for (node = rb_first(root); node; node = rb_next(node)) {
		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
		tcon = tlink_tcon(tlink);
		if (IS_ERR(tcon))
			continue;
		cfids = tcon->cfids;
		if (cfids == NULL)
			continue;
		spin_lock(&cfids->cfid_list_lock);
		list_for_each_entry(cfid, &cfids->entries, entry) {
			tmp_list = kmalloc(sizeof(*tmp_list), GFP_ATOMIC);
			if (tmp_list == NULL) {
				/*
				 * If the malloc() fails, we won't drop all
				 * dentries, and unmounting is likely to trigger
				 * a 'Dentry still in use' error.
				 */
				cifs_tcon_dbg(VFS, "Out of memory while dropping dentries\n");
				spin_unlock(&cfids->cfid_list_lock);
				spin_unlock(&cifs_sb->tlink_tree_lock);
				goto done;
			}

			tmp_list->dentry = cfid->dentry;
			cfid->dentry = NULL;

			list_add_tail(&tmp_list->entry, &entry);
		}
		spin_unlock(&cfids->cfid_list_lock);
	}
	spin_unlock(&cifs_sb->tlink_tree_lock);

done:
	list_for_each_entry_safe(tmp_list, q, &entry, entry) {
		list_del(&tmp_list->entry);
		dput(tmp_list->dentry);
		kfree(tmp_list);
	}

	/* Flush any pending work that will drop dentries */
	flush_workqueue(cfid_put_wq);
}

/*
 * Invalidate all cached dirs when a TCON has been reset
 * due to a session loss.
 */
void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
{
	struct cached_fids *cfids = tcon->cfids;
	struct cached_fid *cfid, *q;

	if (cfids == NULL)
		return;

	/*
	 * Mark all the cfids as closed, and move them to the cfids->dying list.
	 * They'll be cleaned up later by cfids_invalidation_worker.  Take
	 * a reference to each cfid during this process.
	 */
	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		list_move(&cfid->entry, &cfids->dying);
		cfids->num_entries--;
		cfid->is_open = false;
		cfid->on_list = false;
		if (cfid->has_lease) {
			/*
			 * The lease was never cancelled from the server,
			 * so steal that reference.
			 */
			cfid->has_lease = false;
		} else
			kref_get(&cfid->refcount);
	}
	/*
	 * Queue dropping of the dentries once locks have been dropped
	 */
	if (!list_empty(&cfids->dying))
		queue_work(cfid_put_wq, &cfids->invalidation_work);
	spin_unlock(&cfids->cfid_list_lock);
}

static void
cached_dir_offload_close(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work,
					       struct cached_fid, close_work);
	struct cifs_tcon *tcon = cfid->tcon;

	WARN_ON(cfid->on_list);

	kref_put(&cfid->refcount, smb2_close_cached_fid);
	cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cached_close);
}

/*
 * Release the cached directory's dentry, and then queue work to drop the
 * cached directory itself (closing it on the server if needed).
 *
 * Must be called with a reference to the cached_fid and a reference to the
 * tcon.
 */
static void cached_dir_put_work(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work, struct cached_fid,
					       put_work);
	dput(cfid->dentry);
	cfid->dentry = NULL;

	queue_work(serverclose_wq, &cfid->close_work);
}

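/*
 * Called on receipt of a lease break for @lease_key.  If a matching cached
 * directory is found, drop its lease, take it off the list and queue work
 * (with an extra tcon reference) to put the lease reference and eventually
 * close the handle on the server.  Returns true if a match was found.
 */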
bool cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
{
	struct cached_fids *cfids = tcon->cfids;
	struct cached_fid *cfid;

	if (cfids == NULL)
		return false;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (cfid->has_lease &&
		    !memcmp(lease_key,
			    cfid->fid.lease_key,
			    SMB2_LEASE_KEY_SIZE)) {
			cfid->has_lease = false;
			cfid->time = 0;
			/*
			 * We found a lease; remove it from the list
			 * so no threads can access it.
			 */
			list_del(&cfid->entry);
			cfid->on_list = false;
			cfids->num_entries--;

			++tcon->tc_count;
			trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
					    netfs_trace_tcon_ref_get_cached_lease_break);
			queue_work(cfid_put_wq, &cfid->put_work);
			spin_unlock(&cfids->cfid_list_lock);
			return true;
		}
	}
	spin_unlock(&cfids->cfid_list_lock);
	return false;
}

static struct cached_fid *init_cached_dir(const char *path)
{
	struct cached_fid *cfid;

	cfid = kzalloc(sizeof(*cfid), GFP_ATOMIC);
	if (!cfid)
		return NULL;
	cfid->path = kstrdup(path, GFP_ATOMIC);
	if (!cfid->path) {
		kfree(cfid);
		return NULL;
	}

	INIT_WORK(&cfid->close_work, cached_dir_offload_close);
	INIT_WORK(&cfid->put_work, cached_dir_put_work);
	INIT_LIST_HEAD(&cfid->entry);
	INIT_LIST_HEAD(&cfid->dirents.entries);
	mutex_init(&cfid->dirents.de_mutex);
	kref_init(&cfid->refcount);
	return cfid;
}

static void free_cached_dir(struct cached_fid *cfid)
{
	struct cached_dirent *dirent, *q;

	WARN_ON(work_pending(&cfid->close_work));
	WARN_ON(work_pending(&cfid->put_work));

	dput(cfid->dentry);
	cfid->dentry = NULL;

	/*
	 * Delete all cached dirent names
	 */
	list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
		list_del(&dirent->entry);
		kfree(dirent->name);
		kfree(dirent);
	}

	/* adjust tcon-level counters and reset per-dir accounting */
	if (cfid->cfids) {
		if (cfid->dirents.entries_count)
			atomic_long_sub((long)cfid->dirents.entries_count,
					&cfid->cfids->total_dirents_entries);
		if (cfid->dirents.bytes_used) {
			atomic64_sub((long long)cfid->dirents.bytes_used,
				     &cfid->cfids->total_dirents_bytes);
			atomic64_sub((long long)cfid->dirents.bytes_used,
				     &cifs_dircache_bytes_used);
		}
	}
	cfid->dirents.entries_count = 0;
	cfid->dirents.bytes_used = 0;

	kfree(cfid->path);
	cfid->path = NULL;
	kfree(cfid);
}

static void cfids_invalidation_worker(struct work_struct *work)
{
	struct cached_fids *cfids = container_of(work, struct cached_fids,
						 invalidation_work);
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	spin_lock(&cfids->cfid_list_lock);
	/* move cfids->dying to the local list */
	list_cut_before(&entry, &cfids->dying, &cfids->dying);
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);
		/* Drop the ref-count acquired in invalidate_all_cached_dirs */
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
}

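/*
 * Periodic worker: expire cached directories that have not been accessed
 * for more than dir_cache_timeout seconds, then re-arm itself.
 */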
static void cfids_laundromat_worker(struct work_struct *work)
{
	struct cached_fids *cfids;
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	cfids = container_of(work, struct cached_fids, laundromat_work.work);

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		if (cfid->last_access_time &&
		    time_after(jiffies, cfid->last_access_time + HZ * dir_cache_timeout)) {
			cfid->on_list = false;
			list_move(&cfid->entry, &entry);
			cfids->num_entries--;
			if (cfid->has_lease) {
				/*
				 * Our lease has not yet been cancelled from the
				 * server.  Steal that reference.
				 */
				cfid->has_lease = false;
			} else
				kref_get(&cfid->refcount);
		}
	}
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);

		dput(cfid->dentry);
		cfid->dentry = NULL;

		if (cfid->is_open) {
			spin_lock(&cifs_tcp_ses_lock);
			++cfid->tcon->tc_count;
			trace_smb3_tcon_ref(cfid->tcon->debug_id, cfid->tcon->tc_count,
					    netfs_trace_tcon_ref_get_cached_laundromat);
			spin_unlock(&cifs_tcp_ses_lock);
			queue_work(serverclose_wq, &cfid->close_work);
		} else
			/*
			 * Drop the ref-count from above, either the lease-ref (if
			 * there was one) or the extra one acquired.
			 */
			kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
	queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
			   dir_cache_timeout * HZ);
}

struct cached_fids *init_cached_dirs(void)
{
	struct cached_fids *cfids;

	cfids = kzalloc(sizeof(*cfids), GFP_KERNEL);
	if (!cfids)
		return NULL;
	spin_lock_init(&cfids->cfid_list_lock);
	INIT_LIST_HEAD(&cfids->entries);
	INIT_LIST_HEAD(&cfids->dying);

	INIT_WORK(&cfids->invalidation_work, cfids_invalidation_worker);
	INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
	queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
			   dir_cache_timeout * HZ);

	atomic_long_set(&cfids->total_dirents_entries, 0);
	atomic64_set(&cfids->total_dirents_bytes, 0);

	return cfids;
}

/*
 * Called from tconInfoFree when we are tearing down the tcon.
 * There are no active users or open files/directories at this point.
 */
void free_cached_dirs(struct cached_fids *cfids)
{
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	if (cfids == NULL)
		return;

	cancel_delayed_work_sync(&cfids->laundromat_work);
	cancel_work_sync(&cfids->invalidation_work);

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		cfid->on_list = false;
		cfid->is_open = false;
		list_move(&cfid->entry, &entry);
	}
	list_for_each_entry_safe(cfid, q, &cfids->dying, entry) {
		cfid->on_list = false;
		cfid->is_open = false;
		list_move(&cfid->entry, &entry);
	}
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);
		free_cached_dir(cfid);
	}

	kfree(cfids);
}