// SPDX-License-Identifier: GPL-2.0
/*
 *  Functions to handle the cached directory entries
 *
 *  Copyright (c) 2022, Ronnie Sahlberg <lsahlber@redhat.com>
 */

#include <linux/namei.h>
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "cached_dir.h"

static struct cached_fid *init_cached_dir(const char *path);
static void free_cached_dir(struct cached_fid *cfid);
static void smb2_close_cached_fid(struct kref *ref);
static void cfids_laundromat_worker(struct work_struct *work);
static void close_cached_dir_locked(struct cached_fid *cfid);

struct cached_dir_dentry {
	struct list_head entry;
	struct dentry *dentry;
};

static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
						    const char *path,
						    bool lookup_only,
						    __u32 max_cached_dirs)
{
	struct cached_fid *cfid;

	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (!strcmp(cfid->path, path)) {
			/*
			 * If it doesn't have a lease it is either not yet
			 * fully cached or it may be in the process of
			 * being deleted due to a lease break.
			 */
			if (!is_valid_cached_dir(cfid))
				return NULL;
			kref_get(&cfid->refcount);
			return cfid;
		}
	}
	if (lookup_only)
		return NULL;
	if (cfids->num_entries >= max_cached_dirs)
		return NULL;
	cfid = init_cached_dir(path);
	if (cfid == NULL)
		return NULL;
	cfid->cfids = cfids;
	cfids->num_entries++;
	list_add(&cfid->entry, &cfids->entries);
	cfid->on_list = true;
	kref_get(&cfid->refcount);
	/*
	 * Set @cfid->has_lease to true during construction so that the lease
	 * reference can be put in cached_dir_lease_break() due to a potential
	 * lease break right after the request is sent or while @cfid is still
	 * being cached, or if a reconnection is triggered during construction.
	 * Concurrent processes won't be able to use it yet due to @cfid->time
	 * being zero.
	 */
	cfid->has_lease = true;

	return cfid;
}

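/*
 * Reference-count sketch for a freshly created entry (illustrative summary
 * of the code above, not a normative statement): kref_init() in
 * init_cached_dir() provides one reference, which becomes the lease/list
 * reference once @cfid->has_lease is set; the explicit kref_get() above is
 * the caller's reference.  A new cfid therefore leaves this function with a
 * refcount of two: one dropped by the lease/list teardown paths and one by
 * the caller's close_cached_dir().
 */
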
static struct dentry *
path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path)
{
	struct dentry *dentry;
	const char *s, *p;
	char sep;

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(cifs_sb->root);
	s = path;

	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		child = lookup_noperm_positive_unlocked(&QSTR_LEN(p, s - p),
							dentry);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	return dentry;
}

static const char *path_no_prefix(struct cifs_sb_info *cifs_sb,
				  const char *path)
{
	size_t len = 0;

	if (!*path)
		return path;

	if ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_USE_PREFIX_PATH) &&
	    cifs_sb->prepath) {
		len = strlen(cifs_sb->prepath) + 1;
		if (unlikely(len > strlen(path)))
			return ERR_PTR(-EINVAL);
	}
	return path + len;
}

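/*
 * Illustrative example (hypothetical values, not taken from this file):
 * with CIFS_MOUNT_USE_PREFIX_PATH set and cifs_sb->prepath of "dir1", a
 * full path of "dir1\\dir2" skips strlen("dir1") + 1 bytes (prefix plus
 * one separator) and yields "dir2".  A @path shorter than the prefix
 * cannot be valid, hence the -EINVAL above.
 */
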
/*
 * Open and cache a directory handle.
 * On error, *cfid is not initialized.
 */
int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
		    const char *path,
		    struct cifs_sb_info *cifs_sb,
		    bool lookup_only, struct cached_fid **ret_cfid)
{
	struct cifs_ses *ses;
	struct TCP_Server_Info *server;
	struct cifs_open_parms oparms;
	struct smb2_create_rsp *o_rsp = NULL;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	int resp_buftype[2];
	struct smb_rqst rqst[2];
	struct kvec rsp_iov[2];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	int rc, flags = 0;
	__le16 *utf16_path = NULL;
	u8 oplock = SMB2_OPLOCK_LEVEL_II;
	struct cifs_fid *pfid;
	struct dentry *dentry = NULL;
	struct cached_fid *cfid;
	struct cached_fids *cfids;
	const char *npath;
	int retries = 0, cur_sleep = 0;
	__le32 lease_flags = 0;

	if (cifs_sb->root == NULL)
		return -ENOENT;

	if (tcon == NULL)
		return -EOPNOTSUPP;

	ses = tcon->ses;
	cfids = tcon->cfids;

	if (cfids == NULL)
		return -EOPNOTSUPP;

replay_again:
	/* reinitialize for possible replay */
	flags = 0;
	oplock = SMB2_OPLOCK_LEVEL_II;
	server = cifs_pick_channel(ses);

	if (!server->ops->new_lease_key)
		return smb_EIO(smb_eio_trace_no_lease_key);

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	spin_lock(&cfids->cfid_list_lock);
	cfid = find_or_create_cached_dir(cfids, path, lookup_only, tcon->max_cached_dirs);
	if (cfid == NULL) {
		spin_unlock(&cfids->cfid_list_lock);
		kfree(utf16_path);
		return -ENOENT;
	}
	/*
	 * Return the cached fid if it is valid (has a lease and has a time).
	 * Otherwise, it is either a new entry or the laundromat worker
	 * removed it from @cfids->entries.  Caller will put the last
	 * reference if the latter.
	 */
	if (is_valid_cached_dir(cfid)) {
		cfid->last_access_time = jiffies;
		spin_unlock(&cfids->cfid_list_lock);
		*ret_cfid = cfid;
		kfree(utf16_path);
		return 0;
	}
	spin_unlock(&cfids->cfid_list_lock);

	pfid = &cfid->fid;

	/*
	 * Skip any prefix paths in @path as lookup_noperm_positive_unlocked()
	 * ends up calling ->lookup() which already adds those through
	 * build_path_from_dentry().  Also, do it earlier as we might reconnect
	 * below when trying to send the compounded request and then
	 * potentially have a different prefix path (e.g. after DFS failover).
	 */
	npath = path_no_prefix(cifs_sb, path);
	if (IS_ERR(npath)) {
		rc = PTR_ERR(npath);
		goto out;
	}

	if (!npath[0]) {
		dentry = dget(cifs_sb->root);
	} else {
		dentry = path_to_dentry(cifs_sb, npath);
		if (IS_ERR(dentry)) {
			rc = -ENOENT;
			goto out;
		}
		if (dentry->d_parent && server->dialect >= SMB30_PROT_ID) {
			struct cached_fid *parent_cfid;

			spin_lock(&cfids->cfid_list_lock);
			list_for_each_entry(parent_cfid, &cfids->entries, entry) {
				if (parent_cfid->dentry == dentry->d_parent) {
					cifs_dbg(FYI, "found a parent cached file handle\n");
					if (is_valid_cached_dir(parent_cfid)) {
						lease_flags
							|= SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE;
						memcpy(pfid->parent_lease_key,
						       parent_cfid->fid.lease_key,
						       SMB2_LEASE_KEY_SIZE);
					}
					break;
				}
			}
			spin_unlock(&cfids->cfid_list_lock);
		}
	}
	cfid->dentry = dentry;
	cfid->tcon = tcon;

	/*
	 * We do not hold the lock for the open because SMB2_open may need
	 * to reconnect.
	 * This is safe because no other thread will be able to get a ref
	 * to the cfid until we have finished opening the file and (possibly)
	 * acquired a lease.
	 */
	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	server->ops->new_lease_key(pfid);

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.path = path,
		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
		.desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES |
				  FILE_READ_EA,
		.disposition = FILE_OPEN,
		.fid = pfid,
		.lease_flags = lease_flags,
		.replay = !!(retries),
	};

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto oshr_free;

	if (oplock != SMB2_OPLOCK_LEVEL_II) {
		rc = -EINVAL;
		cifs_dbg(FYI, "%s: Oplock level %d not suitable for cached directory\n",
			 __func__, oplock);
		goto oshr_free;
	}

	smb2_set_next_command(tcon, &rqst[0]);

	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, server,
				  &rqst[1], COMPOUND_FID,
				  COMPOUND_FID, FILE_ALL_INFORMATION,
				  SMB2_O_INFO_FILE, 0,
				  sizeof(struct smb2_file_all_info) +
				  PATH_MAX * 2, 0, NULL);
	if (rc)
		goto oshr_free;

	smb2_set_related(&rqst[1]);

	if (retries) {
		/* Back off before retrying */
		if (cur_sleep)
			msleep(cur_sleep);

		smb2_set_replay(server, &rqst[0]);
		smb2_set_replay(server, &rqst[1]);
	}

	rc = compound_send_recv(xid, ses, server,
				flags, 2, rqst,
				resp_buftype, rsp_iov);
	if (rc) {
		if (rc == -EREMCHG) {
			tcon->need_reconnect = true;
			pr_warn_once("server share %s deleted\n",
				     tcon->tree_name);
		}
		goto oshr_free;
	}
	cfid->is_open = true;

	spin_lock(&cfids->cfid_list_lock);

	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
	oparms.fid->volatile_fid = o_rsp->VolatileFileId;
#ifdef CONFIG_CIFS_DEBUG2
	oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
#endif /* CONFIG_CIFS_DEBUG2 */

	if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) {
		spin_unlock(&cfids->cfid_list_lock);
		rc = -EINVAL;
		goto oshr_free;
	}

	rc = smb2_parse_contexts(server, rsp_iov,
				 &oparms.fid->epoch,
				 oparms.fid->lease_key,
				 &oplock, NULL, NULL);
	if (rc) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}

	rc = -EINVAL;
	if (!(oplock & SMB2_LEASE_READ_CACHING_HE)) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}
	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}
	if (!smb2_validate_and_copy_iov(
				le16_to_cpu(qi_rsp->OutputBufferOffset),
				sizeof(struct smb2_file_all_info),
				&rsp_iov[1], sizeof(struct smb2_file_all_info),
				(char *)&cfid->file_all_info))
		cfid->file_all_info_is_valid = true;

	cfid->time = jiffies;
	cfid->last_access_time = jiffies;
	spin_unlock(&cfids->cfid_list_lock);
	/* At this point the directory handle is fully cached */
	rc = 0;

oshr_free:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
out:
	if (rc) {
		spin_lock(&cfids->cfid_list_lock);
		if (cfid->on_list) {
			list_del(&cfid->entry);
			cfid->on_list = false;
			cfids->num_entries--;
		}
		if (cfid->has_lease) {
			/*
			 * We are guaranteed to have two references at this
			 * point. One for the caller and one for a potential
			 * lease. Release one here, and the second below.
			 */
			cfid->has_lease = false;
			close_cached_dir_locked(cfid);
		}
		spin_unlock(&cfids->cfid_list_lock);

		close_cached_dir(cfid);
	} else {
		*ret_cfid = cfid;
		atomic_inc(&tcon->num_remote_opens);
	}
	kfree(utf16_path);

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}

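/*
 * Typical caller pattern (a minimal sketch assuming the caller already
 * holds valid @xid, @tcon and @cifs_sb; not a verbatim copy of any
 * in-tree caller):
 *
 *	struct cached_fid *cfid = NULL;
 *
 *	if (!open_cached_dir(xid, tcon, path, cifs_sb, false, &cfid)) {
 *		// ... use cfid->fid for compounded requests on the dir ...
 *		close_cached_dir(cfid);
 *	}
 */
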
int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
			      struct dentry *dentry,
			      struct cached_fid **ret_cfid)
{
	struct cached_fid *cfid;
	struct cached_fids *cfids = tcon->cfids;

	if (cfids == NULL)
		return -EOPNOTSUPP;

	if (!dentry)
		return -ENOENT;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (cfid->dentry == dentry) {
			if (!is_valid_cached_dir(cfid))
				break;
			cifs_dbg(FYI, "found a cached file handle by dentry\n");
			kref_get(&cfid->refcount);
			*ret_cfid = cfid;
			cfid->last_access_time = jiffies;
			spin_unlock(&cfids->cfid_list_lock);
			return 0;
		}
	}
	spin_unlock(&cfids->cfid_list_lock);
	return -ENOENT;
}

static void
smb2_close_cached_fid(struct kref *ref)
__releases(&cfid->cfids->cfid_list_lock)
{
	struct cached_fid *cfid = container_of(ref, struct cached_fid,
					       refcount);
	int rc;

	lockdep_assert_held(&cfid->cfids->cfid_list_lock);

	if (cfid->on_list) {
		list_del(&cfid->entry);
		cfid->on_list = false;
		cfid->cfids->num_entries--;
	}
	spin_unlock(&cfid->cfids->cfid_list_lock);

	dput(cfid->dentry);
	cfid->dentry = NULL;

	if (cfid->is_open) {
		rc = SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
				cfid->fid.volatile_fid);
		if (rc) /* should we retry on -EBUSY or -EAGAIN? */
			cifs_dbg(VFS, "close cached dir rc %d\n", rc);
	}

	free_cached_dir(cfid);
}

void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon,
			     const char *name, struct cifs_sb_info *cifs_sb)
{
	struct cached_fid *cfid = NULL;
	int rc;

	rc = open_cached_dir(xid, tcon, name, cifs_sb, true, &cfid);
	if (rc) {
		cifs_dbg(FYI, "no cached dir found for rmdir(%s)\n", name);
		return;
	}
	spin_lock(&cfid->cfids->cfid_list_lock);
	if (cfid->has_lease) {
		cfid->has_lease = false;
		close_cached_dir_locked(cfid);
	}
	spin_unlock(&cfid->cfids->cfid_list_lock);
	close_cached_dir(cfid);
}

/**
 * close_cached_dir - drop a reference of a cached dir
 *
 * The release function will be called with cfid_list_lock held to remove the
 * cached dir from the list before any other thread can take another @cfid
 * ref. Must not be called with cfid_list_lock held; use
 * close_cached_dir_locked() instead.
 *
 * @cfid: cached dir
 */
void close_cached_dir(struct cached_fid *cfid)
{
	lockdep_assert_not_held(&cfid->cfids->cfid_list_lock);
	kref_put_lock(&cfid->refcount, smb2_close_cached_fid,
		      &cfid->cfids->cfid_list_lock);
}

/**
 * close_cached_dir_locked - put a reference of a cached dir with
 * cfid_list_lock held
 *
 * Calling close_cached_dir() with cfid_list_lock held could deadlock if the
 * invariant of refcount >= 2 does not hold.
 *
 * This function is used in paths that hold cfid_list_lock and expect at least
 * two references. If that invariant is violated, it WARNs and returns without
 * dropping a reference; the final put must still go through
 * close_cached_dir().
 *
 * @cfid: cached dir
 */
static void close_cached_dir_locked(struct cached_fid *cfid)
{
	lockdep_assert_held(&cfid->cfids->cfid_list_lock);

	if (WARN_ON(kref_read(&cfid->refcount) < 2))
		return;

	kref_put(&cfid->refcount, smb2_close_cached_fid);
}

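/*
 * Locking sketch (illustrative, derived from the two helpers above):
 * smb2_close_cached_fid() is the kref release function and expects
 * cfid_list_lock to be held on entry, which it then drops.  So
 * close_cached_dir() acquires the lock via kref_put_lock() on the final
 * put, while close_cached_dir_locked() relies on the refcount >= 2
 * invariant so the release function can never run (and thus never unlock)
 * underneath a caller that already holds the lock.
 */
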
/*
 * Called from cifs_kill_sb when we unmount a share
 */
void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
{
	struct rb_root *root = &cifs_sb->tlink_tree;
	struct rb_node *node;
	struct cached_fid *cfid;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cached_fids *cfids;
	struct cached_dir_dentry *tmp_list, *q;
	LIST_HEAD(entry);

	spin_lock(&cifs_sb->tlink_tree_lock);
	for (node = rb_first(root); node; node = rb_next(node)) {
		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
		tcon = tlink_tcon(tlink);
		if (IS_ERR(tcon))
			continue;
		cfids = tcon->cfids;
		if (cfids == NULL)
			continue;
		spin_lock(&cfids->cfid_list_lock);
		list_for_each_entry(cfid, &cfids->entries, entry) {
			tmp_list = kmalloc_obj(*tmp_list, GFP_ATOMIC);
			if (tmp_list == NULL) {
				/*
				 * If the malloc() fails, we won't drop all
				 * dentries, and unmounting is likely to trigger
				 * a 'Dentry still in use' error.
				 */
				cifs_tcon_dbg(VFS, "Out of memory while dropping dentries\n");
				spin_unlock(&cfids->cfid_list_lock);
				spin_unlock(&cifs_sb->tlink_tree_lock);
				goto done;
			}

			tmp_list->dentry = cfid->dentry;
			cfid->dentry = NULL;

			list_add_tail(&tmp_list->entry, &entry);
		}
		spin_unlock(&cfids->cfid_list_lock);
	}
	spin_unlock(&cifs_sb->tlink_tree_lock);

done:
	list_for_each_entry_safe(tmp_list, q, &entry, entry) {
		list_del(&tmp_list->entry);
		dput(tmp_list->dentry);
		kfree(tmp_list);
	}

	/* Flush any pending work that will drop dentries */
	flush_workqueue(cfid_put_wq);
}

/*
 * Invalidate all cached dirs when a TCON has been reset
 * due to a session loss.
 */
void invalidate_all_cached_dirs(struct cifs_tcon *tcon, bool sync)
{
	struct cached_fids *cfids = tcon->cfids;
	struct cached_fid *cfid, *q;

	if (cfids == NULL)
		return;

	/*
	 * Mark all the cfids as closed, and move them to the cfids->dying
	 * list.  They'll be cleaned up by the laundromat.  Take a reference
	 * to each cfid during this process.
	 */
	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		list_move(&cfid->entry, &cfids->dying);
		cfids->num_entries--;
		cfid->is_open = false;
		cfid->on_list = false;
		if (cfid->has_lease) {
			/*
			 * The lease was never cancelled from the server,
			 * so steal that reference.
			 */
			cfid->has_lease = false;
		} else {
			kref_get(&cfid->refcount);
		}
	}
	spin_unlock(&cfids->cfid_list_lock);

	/* run laundromat unconditionally now as there might have been previously queued work */
	mod_delayed_work(cfid_put_wq, &cfids->laundromat_work, 0);
	if (sync)
		flush_delayed_work(&cfids->laundromat_work);
}

static void
cached_dir_offload_close(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work,
				struct cached_fid, close_work);
	struct cifs_tcon *tcon = cfid->tcon;

	WARN_ON(cfid->on_list);

	close_cached_dir(cfid);
	cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cached_close);
}

/*
 * Release the cached directory's dentry, and then queue work to drop the
 * cached directory itself (closing it on the server if needed).
 *
 * Must be called with a reference to the cached_fid and a reference to the
 * tcon.
 */
static void cached_dir_put_work(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work, struct cached_fid,
					       put_work);
	dput(cfid->dentry);
	cfid->dentry = NULL;

	queue_work(serverclose_wq, &cfid->close_work);
}

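/*
 * Teardown pipeline on a lease break (a summary of the surrounding code,
 * not a normative description): cached_dir_lease_break() below unlists the
 * cfid, takes a tcon reference and queues put_work; cached_dir_put_work()
 * drops the dentry and queues close_work; cached_dir_offload_close() then
 * drops the last cfid reference (closing the handle on the server via
 * smb2_close_cached_fid()) and puts the tcon reference taken at
 * lease-break time.
 */
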
bool cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
{
	struct cached_fids *cfids = tcon->cfids;
	struct cached_fid *cfid;

	if (cfids == NULL)
		return false;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (cfid->has_lease &&
		    !memcmp(lease_key,
			    cfid->fid.lease_key,
			    SMB2_LEASE_KEY_SIZE)) {
			cfid->has_lease = false;
			cfid->time = 0;
			/*
			 * We found a lease; remove the entry from the list
			 * so no other threads can access it.
			 */
			list_del(&cfid->entry);
			cfid->on_list = false;
			cfids->num_entries--;

			++tcon->tc_count;
			trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
					    netfs_trace_tcon_ref_get_cached_lease_break);
			queue_work(cfid_put_wq, &cfid->put_work);
			spin_unlock(&cfids->cfid_list_lock);
			return true;
		}
	}
	spin_unlock(&cfids->cfid_list_lock);
	return false;
}

static struct cached_fid *init_cached_dir(const char *path)
{
	struct cached_fid *cfid;

	cfid = kzalloc_obj(*cfid, GFP_ATOMIC);
	if (!cfid)
		return NULL;
	cfid->path = kstrdup(path, GFP_ATOMIC);
	if (!cfid->path) {
		kfree(cfid);
		return NULL;
	}

	INIT_WORK(&cfid->close_work, cached_dir_offload_close);
	INIT_WORK(&cfid->put_work, cached_dir_put_work);
	INIT_LIST_HEAD(&cfid->entry);
	INIT_LIST_HEAD(&cfid->dirents.entries);
	mutex_init(&cfid->dirents.de_mutex);
	kref_init(&cfid->refcount);
	return cfid;
}

static void free_cached_dir(struct cached_fid *cfid)
{
	struct cached_dirent *dirent, *q;

	WARN_ON(work_pending(&cfid->close_work));
	WARN_ON(work_pending(&cfid->put_work));

	dput(cfid->dentry);
	cfid->dentry = NULL;

	/*
	 * Delete all cached dirent names
	 */
	list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
		list_del(&dirent->entry);
		kfree(dirent->name);
		kfree(dirent);
	}

	/* adjust tcon-level counters and reset per-dir accounting */
	if (cfid->cfids) {
		if (cfid->dirents.entries_count)
			atomic_long_sub((long)cfid->dirents.entries_count,
					&cfid->cfids->total_dirents_entries);
		if (cfid->dirents.bytes_used) {
			atomic64_sub((long long)cfid->dirents.bytes_used,
				     &cfid->cfids->total_dirents_bytes);
			atomic64_sub((long long)cfid->dirents.bytes_used,
				     &cifs_dircache_bytes_used);
		}
	}
	cfid->dirents.entries_count = 0;
	cfid->dirents.bytes_used = 0;

	kfree(cfid->path);
	cfid->path = NULL;
	kfree(cfid);
}

static void cfids_laundromat_worker(struct work_struct *work)
{
	struct cached_fids *cfids;
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	cfids = container_of(work, struct cached_fids, laundromat_work.work);

	spin_lock(&cfids->cfid_list_lock);
	/* move cfids->dying to the local list */
	list_cut_before(&entry, &cfids->dying, &cfids->dying);

	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		if (cfid->last_access_time &&
		    time_after(jiffies, cfid->last_access_time + HZ * dir_cache_timeout)) {
			cfid->on_list = false;
			list_move(&cfid->entry, &entry);
			cfids->num_entries--;
			if (cfid->has_lease) {
				/*
				 * Our lease has not yet been cancelled from
				 * the server, so steal that reference.
				 */
				cfid->has_lease = false;
			} else {
				kref_get(&cfid->refcount);
			}
		}
	}
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);

		dput(cfid->dentry);
		cfid->dentry = NULL;

		if (cfid->is_open) {
			spin_lock(&cfid->tcon->tc_lock);
			++cfid->tcon->tc_count;
			trace_smb3_tcon_ref(cfid->tcon->debug_id, cfid->tcon->tc_count,
					    netfs_trace_tcon_ref_get_cached_laundromat);
			spin_unlock(&cfid->tcon->tc_lock);
			queue_work(serverclose_wq, &cfid->close_work);
		} else {
			/*
			 * Drop the reference taken above: either the stolen
			 * lease reference (if there was one) or the extra
			 * one we acquired.
			 */
			close_cached_dir(cfid);
		}
	}
	queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
			   dir_cache_timeout * HZ);
}

struct cached_fids *init_cached_dirs(void)
{
	struct cached_fids *cfids;

	cfids = kzalloc_obj(*cfids, GFP_KERNEL);
	if (!cfids)
		return NULL;
	spin_lock_init(&cfids->cfid_list_lock);
	INIT_LIST_HEAD(&cfids->entries);
	INIT_LIST_HEAD(&cfids->dying);

	INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
	queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
			   dir_cache_timeout * HZ);

	atomic_long_set(&cfids->total_dirents_entries, 0);
	atomic64_set(&cfids->total_dirents_bytes, 0);

	return cfids;
}

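/*
 * Lifecycle sketch (assumed usage; per the comment below, the real
 * teardown caller is tconInfoFree(), and allocation happens in the tcon
 * setup path):
 *
 *	tcon->cfids = init_cached_dirs();   // may return NULL on ENOMEM
 *	...
 *	free_cached_dirs(tcon->cfids);      // cancels laundromat, frees all
 */
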
/*
 * Called from tconInfoFree when we are tearing down the tcon.
 * There are no active users or open files/directories at this point.
 */
void free_cached_dirs(struct cached_fids *cfids)
{
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	if (cfids == NULL)
		return;

	cancel_delayed_work_sync(&cfids->laundromat_work);

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		cfid->on_list = false;
		cfid->is_open = false;
		list_move(&cfid->entry, &entry);
	}
	list_for_each_entry_safe(cfid, q, &cfids->dying, entry) {
		cfid->on_list = false;
		cfid->is_open = false;
		list_move(&cfid->entry, &entry);
	}
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);
		free_cached_dir(cfid);
	}

	kfree(cfids);
}