// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
 * Copyright (C) 2019 Samsung Electronics Co., Ltd.
 */

#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/freezer.h>

#include "glob.h"
#include "vfs_cache.h"
#include "oplock.h"
#include "vfs.h"
#include "connection.h"
#include "misc.h"
#include "mgmt/tree_connect.h"
#include "mgmt/user_session.h"
#include "smb_common.h"
#include "server.h"
#include "smb2pdu.h"

#define S_DEL_PENDING			1
#define S_DEL_ON_CLS			2
#define S_DEL_ON_CLS_STREAM		8
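
/*
 * Note: these per-inode flags drive the delete-on-close handling below.
 * Roughly: S_DEL_PENDING is set while a client has requested deletion
 * that has not yet been carried out (ksmbd_set_inode_pending_delete()),
 * S_DEL_ON_CLS marks delete-on-close on the file itself, and
 * S_DEL_ON_CLS_STREAM marks a named stream whose backing xattr is
 * removed when the stream fd is closed (see __ksmbd_inode_close()).
 */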

static unsigned int inode_hash_mask __read_mostly;
static unsigned int inode_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static DEFINE_RWLOCK(inode_hash_lock);

static struct ksmbd_file_table global_ft;
static atomic_long_t fd_limit;
static struct kmem_cache *filp_cache;

#define OPLOCK_NONE      0
#define OPLOCK_EXCLUSIVE 1
#define OPLOCK_BATCH     2
#define OPLOCK_READ      3  /* level 2 oplock */

#ifdef CONFIG_PROC_FS

static const struct ksmbd_const_name ksmbd_lease_const_names[] = {
	{le32_to_cpu(SMB2_LEASE_NONE_LE), "LEASE_NONE"},
	{le32_to_cpu(SMB2_LEASE_READ_CACHING_LE), "LEASE_R"},
	{le32_to_cpu(SMB2_LEASE_HANDLE_CACHING_LE), "LEASE_H"},
	{le32_to_cpu(SMB2_LEASE_WRITE_CACHING_LE), "LEASE_W"},
	{le32_to_cpu(SMB2_LEASE_READ_CACHING_LE |
		     SMB2_LEASE_HANDLE_CACHING_LE), "LEASE_RH"},
	{le32_to_cpu(SMB2_LEASE_READ_CACHING_LE |
		     SMB2_LEASE_WRITE_CACHING_LE), "LEASE_RW"},
	{le32_to_cpu(SMB2_LEASE_HANDLE_CACHING_LE |
		     SMB2_LEASE_WRITE_CACHING_LE), "LEASE_WH"},
	{le32_to_cpu(SMB2_LEASE_READ_CACHING_LE |
		     SMB2_LEASE_HANDLE_CACHING_LE |
		     SMB2_LEASE_WRITE_CACHING_LE), "LEASE_RWH"},
};

static const struct ksmbd_const_name ksmbd_oplock_const_names[] = {
	{SMB2_OPLOCK_LEVEL_NONE, "OPLOCK_NONE"},
	{SMB2_OPLOCK_LEVEL_II, "OPLOCK_II"},
	{SMB2_OPLOCK_LEVEL_EXCLUSIVE, "OPLOCK_EXCL"},
	{SMB2_OPLOCK_LEVEL_BATCH, "OPLOCK_BATCH"},
};

static int proc_show_files(struct seq_file *m, void *v)
{
	struct ksmbd_file *fp = NULL;
	unsigned int id;
	struct oplock_info *opinfo;

	seq_printf(m, "#%-10s %-10s %-10s %-10s %-15s %-10s %-10s %s\n",
		   "<tree id>", "<pid>", "<vid>", "<refcnt>",
		   "<oplock>", "<daccess>", "<saccess>",
		   "<name>");

	read_lock(&global_ft.lock);
	idr_for_each_entry(global_ft.idr, fp, id) {
		seq_printf(m, "%#-10x %#-10llx %#-10llx %#-10x",
			   fp->tcon->id,
			   fp->persistent_id,
			   fp->volatile_id,
			   atomic_read(&fp->refcount));

		rcu_read_lock();
		opinfo = rcu_dereference(fp->f_opinfo);
		rcu_read_unlock();

		if (!opinfo) {
			seq_printf(m, " %-15s", " ");
		} else {
			const struct ksmbd_const_name *const_names;
			int count;
			unsigned int level;

			if (opinfo->is_lease) {
				const_names = ksmbd_lease_const_names;
				count = ARRAY_SIZE(ksmbd_lease_const_names);
				level = le32_to_cpu(opinfo->o_lease->state);
			} else {
				const_names = ksmbd_oplock_const_names;
				count = ARRAY_SIZE(ksmbd_oplock_const_names);
				level = opinfo->level;
			}
			ksmbd_proc_show_const_name(m, " %-15s",
						   const_names, count, level);
		}

		seq_printf(m, " %#010x %#010x %s\n",
			   le32_to_cpu(fp->daccess),
			   le32_to_cpu(fp->saccess),
			   fp->filp->f_path.dentry->d_name.name);
	}
	read_unlock(&global_ft.lock);
	return 0;
}

static int create_proc_files(void)
{
	ksmbd_proc_create("files", proc_show_files, NULL);
	return 0;
}
#else
static int create_proc_files(void) { return 0; }
#endif

static bool durable_scavenger_running;
static DEFINE_MUTEX(durable_scavenger_lock);
static wait_queue_head_t dh_wq;

void ksmbd_set_fd_limit(unsigned long limit)
{
	limit = min(limit, get_max_files());
	atomic_long_set(&fd_limit, limit);
}

static bool fd_limit_depleted(void)
{
	long v = atomic_long_dec_return(&fd_limit);

	if (v >= 0)
		return false;
	atomic_long_inc(&fd_limit);
	return true;
}

static void fd_limit_close(void)
{
	atomic_long_inc(&fd_limit);
}
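
/*
 * Note: fd_limit_depleted() speculatively decrements the budget and backs
 * the decrement out again if it went negative, so the counter is never
 * left skewed.  A hypothetical caller sketch (illustration only):
 *
 *	if (fd_limit_depleted())
 *		return -EMFILE;		(reject the open)
 *	...on success, fd_limit_close() returns the slot at close time...
 */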

/*
 * INODE hash
 */

static unsigned long inode_hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
		L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> inode_hash_shift);
	return tmp & inode_hash_mask;
}
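
/*
 * Note: the hash key is the dentry pointer itself, not an inode number,
 * mixed with the superblock pointer.  A plausible reading: entries are
 * per-dentry (see __ksmbd_inode_lookup() comparing ci->m_de), so
 * distinct names for the same on-disk inode land in their own buckets
 * while still spreading across inode_hash_mask + 1 hlist heads.
 */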

static struct ksmbd_inode *__ksmbd_inode_lookup(struct dentry *de)
{
	struct hlist_head *head = inode_hashtable +
		inode_hash(d_inode(de)->i_sb, (unsigned long)de);
	struct ksmbd_inode *ci = NULL, *ret_ci = NULL;

	hlist_for_each_entry(ci, head, m_hash) {
		if (ci->m_de == de) {
			if (atomic_inc_not_zero(&ci->m_count))
				ret_ci = ci;
			break;
		}
	}
	return ret_ci;
}

static struct ksmbd_inode *ksmbd_inode_lookup(struct ksmbd_file *fp)
{
	return __ksmbd_inode_lookup(fp->filp->f_path.dentry);
}

struct ksmbd_inode *ksmbd_inode_lookup_lock(struct dentry *d)
{
	struct ksmbd_inode *ci;

	read_lock(&inode_hash_lock);
	ci = __ksmbd_inode_lookup(d);
	read_unlock(&inode_hash_lock);

	return ci;
}

int ksmbd_query_inode_status(struct dentry *dentry)
{
	struct ksmbd_inode *ci;
	int ret = KSMBD_INODE_STATUS_UNKNOWN;

	read_lock(&inode_hash_lock);
	ci = __ksmbd_inode_lookup(dentry);
	read_unlock(&inode_hash_lock);
	if (!ci)
		return ret;

	down_read(&ci->m_lock);
	if (ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS))
		ret = KSMBD_INODE_STATUS_PENDING_DELETE;
	else
		ret = KSMBD_INODE_STATUS_OK;
	up_read(&ci->m_lock);

	atomic_dec(&ci->m_count);
	return ret;
}

bool ksmbd_inode_pending_delete(struct ksmbd_file *fp)
{
	struct ksmbd_inode *ci = fp->f_ci;
	int ret;

	down_read(&ci->m_lock);
	ret = (ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS));
	up_read(&ci->m_lock);

	return ret;
}

void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp)
{
	struct ksmbd_inode *ci = fp->f_ci;

	down_write(&ci->m_lock);
	ci->m_flags |= S_DEL_PENDING;
	up_write(&ci->m_lock);
}

void ksmbd_clear_inode_pending_delete(struct ksmbd_file *fp)
{
	struct ksmbd_inode *ci = fp->f_ci;

	down_write(&ci->m_lock);
	ci->m_flags &= ~S_DEL_PENDING;
	up_write(&ci->m_lock);
}

void ksmbd_fd_set_delete_on_close(struct ksmbd_file *fp,
				  int file_info)
{
	struct ksmbd_inode *ci = fp->f_ci;

	down_write(&ci->m_lock);
	if (ksmbd_stream_fd(fp))
		ci->m_flags |= S_DEL_ON_CLS_STREAM;
	else
		ci->m_flags |= S_DEL_ON_CLS;
	up_write(&ci->m_lock);
}

static void ksmbd_inode_hash(struct ksmbd_inode *ci)
{
	struct hlist_head *b = inode_hashtable +
		inode_hash(d_inode(ci->m_de)->i_sb, (unsigned long)ci->m_de);

	hlist_add_head(&ci->m_hash, b);
}

static void ksmbd_inode_unhash(struct ksmbd_inode *ci)
{
	write_lock(&inode_hash_lock);
	hlist_del_init(&ci->m_hash);
	write_unlock(&inode_hash_lock);
}

static int ksmbd_inode_init(struct ksmbd_inode *ci, struct ksmbd_file *fp)
{
	atomic_set(&ci->m_count, 1);
	atomic_set(&ci->op_count, 0);
	atomic_set(&ci->sop_count, 0);
	ci->m_flags = 0;
	ci->m_fattr = 0;
	INIT_LIST_HEAD(&ci->m_fp_list);
	INIT_LIST_HEAD(&ci->m_op_list);
	init_rwsem(&ci->m_lock);
	ci->m_de = fp->filp->f_path.dentry;
	return 0;
}

static struct ksmbd_inode *ksmbd_inode_get(struct ksmbd_file *fp)
{
	struct ksmbd_inode *ci, *tmpci;
	int rc;

	read_lock(&inode_hash_lock);
	ci = ksmbd_inode_lookup(fp);
	read_unlock(&inode_hash_lock);
	if (ci)
		return ci;

	ci = kmalloc(sizeof(struct ksmbd_inode), KSMBD_DEFAULT_GFP);
	if (!ci)
		return NULL;

	rc = ksmbd_inode_init(ci, fp);
	if (rc) {
		pr_err("inode initialization failed\n");
		kfree(ci);
		return NULL;
	}

	write_lock(&inode_hash_lock);
	tmpci = ksmbd_inode_lookup(fp);
	if (!tmpci) {
		ksmbd_inode_hash(ci);
	} else {
		kfree(ci);
		ci = tmpci;
	}
	write_unlock(&inode_hash_lock);
	return ci;
}
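
/*
 * Note: this is the usual optimistic lookup-or-insert pattern: probe
 * under the read lock, allocate with no lock held, then re-probe under
 * the write lock and discard the new node if a concurrent opener won
 * the race.  The loser's kfree() is safe because its node was never
 * hashed.
 */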

static void ksmbd_inode_free(struct ksmbd_inode *ci)
{
	ksmbd_inode_unhash(ci);
	kfree(ci);
}

void ksmbd_inode_put(struct ksmbd_inode *ci)
{
	if (atomic_dec_and_test(&ci->m_count))
		ksmbd_inode_free(ci);
}

int __init ksmbd_inode_hash_init(void)
{
	unsigned int loop;
	unsigned long numentries = 16384;
	unsigned long bucketsize = sizeof(struct hlist_head);
	unsigned long size;

	inode_hash_shift = ilog2(numentries);
	inode_hash_mask = (1 << inode_hash_shift) - 1;

	size = bucketsize << inode_hash_shift;

	/* init the ksmbd_inode hash table */
	inode_hashtable = vmalloc(size);
	if (!inode_hashtable)
		return -ENOMEM;

	for (loop = 0; loop < (1U << inode_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
	return 0;
}

void ksmbd_release_inode_hash(void)
{
	vfree(inode_hashtable);
}

static void __ksmbd_inode_close(struct ksmbd_file *fp)
{
	struct ksmbd_inode *ci = fp->f_ci;
	int err;
	struct file *filp;

	filp = fp->filp;

	if (ksmbd_stream_fd(fp)) {
		bool remove_stream_xattr = false;

		down_write(&ci->m_lock);
		if (ci->m_flags & S_DEL_ON_CLS_STREAM) {
			ci->m_flags &= ~S_DEL_ON_CLS_STREAM;
			remove_stream_xattr = true;
		}
		up_write(&ci->m_lock);

		if (remove_stream_xattr) {
			err = ksmbd_vfs_remove_xattr(file_mnt_idmap(filp),
						     &filp->f_path,
						     fp->stream.name,
						     true);
			if (err)
				pr_err("remove xattr failed : %s\n",
				       fp->stream.name);
		}
	}

	if (atomic_dec_and_test(&ci->m_count)) {
		bool do_unlink = false;

		down_write(&ci->m_lock);
		if (ci->m_flags & (S_DEL_ON_CLS | S_DEL_PENDING)) {
			ci->m_flags &= ~(S_DEL_ON_CLS | S_DEL_PENDING);
			do_unlink = true;
		}
		up_write(&ci->m_lock);

		if (do_unlink)
			ksmbd_vfs_unlink(filp);

		ksmbd_inode_free(ci);
	}
}
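
/*
 * Note: deferred deletion happens here, on the last ksmbd_inode
 * reference: a named stream only drops its backing xattr, while
 * S_DEL_ON_CLS/S_DEL_PENDING trigger ksmbd_vfs_unlink() of the file
 * itself.  Each flag is tested and cleared under m_lock before acting,
 * so the unlink runs at most once.
 */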

static void __ksmbd_remove_durable_fd(struct ksmbd_file *fp)
{
	if (!has_file_id(fp->persistent_id))
		return;

	idr_remove(global_ft.idr, fp->persistent_id);
}

static void ksmbd_remove_durable_fd(struct ksmbd_file *fp)
{
	write_lock(&global_ft.lock);
	__ksmbd_remove_durable_fd(fp);
	write_unlock(&global_ft.lock);
	if (waitqueue_active(&dh_wq))
		wake_up(&dh_wq);
}

static void __ksmbd_remove_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
{
	if (!has_file_id(fp->volatile_id))
		return;

	down_write(&fp->f_ci->m_lock);
	list_del_init(&fp->node);
	up_write(&fp->f_ci->m_lock);

	write_lock(&ft->lock);
	idr_remove(ft->idr, fp->volatile_id);
	write_unlock(&ft->lock);
}

static void __ksmbd_close_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
{
	struct file *filp;
	struct ksmbd_lock *smb_lock, *tmp_lock;

	fd_limit_close();
	ksmbd_remove_durable_fd(fp);
	if (ft)
		__ksmbd_remove_fd(ft, fp);

	close_id_del_oplock(fp);
	filp = fp->filp;

	__ksmbd_inode_close(fp);
	if (!IS_ERR_OR_NULL(filp))
		fput(filp);

	/* Because the reference count of fp is 0, it is guaranteed that
	 * there are no further accesses to fp->lock_list.
	 */
	list_for_each_entry_safe(smb_lock, tmp_lock, &fp->lock_list, flist) {
		spin_lock(&fp->conn->llist_lock);
		list_del(&smb_lock->clist);
		spin_unlock(&fp->conn->llist_lock);

		list_del(&smb_lock->flist);
		locks_free_lock(smb_lock->fl);
		kfree(smb_lock);
	}

	if (ksmbd_stream_fd(fp))
		kfree(fp->stream.name);
	kmem_cache_free(filp_cache, fp);
}

static struct ksmbd_file *ksmbd_fp_get(struct ksmbd_file *fp)
{
	if (fp->f_state != FP_INITED)
		return NULL;

	if (!atomic_inc_not_zero(&fp->refcount))
		return NULL;
	return fp;
}
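
/*
 * Note: a lookup succeeds only for an fp that is FP_INITED with a
 * non-zero refcount.  atomic_inc_not_zero() is what lets a concurrent
 * ksmbd_close_fd() (which moves the state to FP_CLOSED and drops the
 * table's reference under the same ft->lock) win cleanly instead of the
 * fp being resurrected mid-teardown.
 */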

static struct ksmbd_file *__ksmbd_lookup_fd(struct ksmbd_file_table *ft,
					    u64 id)
{
	struct ksmbd_file *fp;

	if (!has_file_id(id))
		return NULL;

	read_lock(&ft->lock);
	fp = idr_find(ft->idr, id);
	if (fp)
		fp = ksmbd_fp_get(fp);
	read_unlock(&ft->lock);
	return fp;
}

static void __put_fd_final(struct ksmbd_work *work, struct ksmbd_file *fp)
{
	__ksmbd_close_fd(&work->sess->file_table, fp);
	atomic_dec(&work->conn->stats.open_files_count);
}

static void set_close_state_blocked_works(struct ksmbd_file *fp)
{
	struct ksmbd_work *cancel_work;

	spin_lock(&fp->f_lock);
	list_for_each_entry(cancel_work, &fp->blocked_works,
				 fp_entry) {
		cancel_work->state = KSMBD_WORK_CLOSED;
		cancel_work->cancel_fn(cancel_work->cancel_argv);
	}
	spin_unlock(&fp->f_lock);
}

int ksmbd_close_fd(struct ksmbd_work *work, u64 id)
{
	struct ksmbd_file	*fp;
	struct ksmbd_file_table	*ft;

	if (!has_file_id(id))
		return 0;

	ft = &work->sess->file_table;
	write_lock(&ft->lock);
	fp = idr_find(ft->idr, id);
	if (fp) {
		set_close_state_blocked_works(fp);

		if (fp->f_state != FP_INITED) {
			fp = NULL;
		} else {
			fp->f_state = FP_CLOSED;
			if (!atomic_dec_and_test(&fp->refcount))
				fp = NULL;
		}
	}
	write_unlock(&ft->lock);

	if (!fp)
		return -EINVAL;

	__put_fd_final(work, fp);
	return 0;
}

void ksmbd_fd_put(struct ksmbd_work *work, struct ksmbd_file *fp)
{
	if (!fp)
		return;

	if (!atomic_dec_and_test(&fp->refcount))
		return;
	__put_fd_final(work, fp);
}

static bool __sanity_check(struct ksmbd_tree_connect *tcon, struct ksmbd_file *fp)
{
	if (!fp)
		return false;
	if (fp->tcon != tcon)
		return false;
	return true;
}

struct ksmbd_file *ksmbd_lookup_foreign_fd(struct ksmbd_work *work, u64 id)
{
	return __ksmbd_lookup_fd(&work->sess->file_table, id);
}

struct ksmbd_file *ksmbd_lookup_fd_fast(struct ksmbd_work *work, u64 id)
{
	struct ksmbd_file *fp = __ksmbd_lookup_fd(&work->sess->file_table, id);

	if (__sanity_check(work->tcon, fp))
		return fp;

	ksmbd_fd_put(work, fp);
	return NULL;
}

struct ksmbd_file *ksmbd_lookup_fd_slow(struct ksmbd_work *work, u64 id,
					u64 pid)
{
	struct ksmbd_file *fp;

	if (!has_file_id(id)) {
		id = work->compound_fid;
		pid = work->compound_pfid;
	}

	fp = __ksmbd_lookup_fd(&work->sess->file_table, id);
	if (!__sanity_check(work->tcon, fp)) {
		ksmbd_fd_put(work, fp);
		return NULL;
	}
	if (fp->persistent_id != pid) {
		ksmbd_fd_put(work, fp);
		return NULL;
	}
	return fp;
}
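
/*
 * Note: in a compound request that carries no explicit id, the ids
 * stashed from the preceding CREATE (work->compound_fid and
 * work->compound_pfid) are substituted.  Hypothetical caller sketch
 * (variable names illustrative, error handling elided):
 *
 *	fp = ksmbd_lookup_fd_slow(work, volatile_id, persistent_id);
 *	if (!fp)
 *		return -ENOENT;
 *	...use fp...
 *	ksmbd_fd_put(work, fp);
 */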

struct ksmbd_file *ksmbd_lookup_global_fd(unsigned long long id)
{
	return __ksmbd_lookup_fd(&global_ft, id);
}

struct ksmbd_file *ksmbd_lookup_durable_fd(unsigned long long id)
{
	struct ksmbd_file *fp;

	fp = __ksmbd_lookup_fd(&global_ft, id);
	if (fp && (fp->conn ||
		   (fp->durable_scavenger_timeout &&
		    (fp->durable_scavenger_timeout <
		     jiffies_to_msecs(jiffies))))) {
		ksmbd_put_durable_fd(fp);
		fp = NULL;
	}

	return fp;
}
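
/*
 * Note: a durable fd is handed back for reconnect only while it is
 * orphaned (fp->conn == NULL) and its scavenger deadline, kept roughly
 * as milliseconds since boot, has not yet passed; anything else is
 * released again via ksmbd_put_durable_fd().
 */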

void ksmbd_put_durable_fd(struct ksmbd_file *fp)
{
	if (!atomic_dec_and_test(&fp->refcount))
		return;

	__ksmbd_close_fd(NULL, fp);
}

struct ksmbd_file *ksmbd_lookup_fd_cguid(char *cguid)
{
	struct ksmbd_file	*fp = NULL;
	unsigned int		id;

	read_lock(&global_ft.lock);
	idr_for_each_entry(global_ft.idr, fp, id) {
		if (!memcmp(fp->create_guid,
			    cguid,
			    SMB2_CREATE_GUID_SIZE)) {
			fp = ksmbd_fp_get(fp);
			break;
		}
	}
	read_unlock(&global_ft.lock);

	return fp;
}

struct ksmbd_file *ksmbd_lookup_fd_inode(struct dentry *dentry)
{
	struct ksmbd_file	*lfp;
	struct ksmbd_inode	*ci;
	struct inode		*inode = d_inode(dentry);

	read_lock(&inode_hash_lock);
	ci = __ksmbd_inode_lookup(dentry);
	read_unlock(&inode_hash_lock);
	if (!ci)
		return NULL;

	down_read(&ci->m_lock);
	list_for_each_entry(lfp, &ci->m_fp_list, node) {
		if (inode == file_inode(lfp->filp)) {
			atomic_dec(&ci->m_count);
			lfp = ksmbd_fp_get(lfp);
			up_read(&ci->m_lock);
			return lfp;
		}
	}
	atomic_dec(&ci->m_count);
	up_read(&ci->m_lock);
	return NULL;
}

#define OPEN_ID_TYPE_VOLATILE_ID	(0)
#define OPEN_ID_TYPE_PERSISTENT_ID	(1)

static void __open_id_set(struct ksmbd_file *fp, u64 id, int type)
{
	if (type == OPEN_ID_TYPE_VOLATILE_ID)
		fp->volatile_id = id;
	if (type == OPEN_ID_TYPE_PERSISTENT_ID)
		fp->persistent_id = id;
}

static int __open_id(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
		     int type)
{
	u64			id = 0;
	int			ret;

	if (type == OPEN_ID_TYPE_VOLATILE_ID && fd_limit_depleted()) {
		__open_id_set(fp, KSMBD_NO_FID, type);
		return -EMFILE;
	}

	idr_preload(KSMBD_DEFAULT_GFP);
	write_lock(&ft->lock);
	ret = idr_alloc_cyclic(ft->idr, fp, 0, INT_MAX - 1, GFP_NOWAIT);
	if (ret >= 0) {
		id = ret;
		ret = 0;
	} else {
		id = KSMBD_NO_FID;
		fd_limit_close();
	}

	__open_id_set(fp, id, type);
	write_unlock(&ft->lock);
	idr_preload_end();
	return ret;
}
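
/*
 * Note: ft->lock is a non-sleeping rwlock, so the IDR node is
 * preallocated via idr_preload() outside the lock and the allocation
 * itself runs with GFP_NOWAIT.  idr_alloc_cyclic() avoids immediately
 * reusing a just-freed id, which keeps stale client handles from
 * accidentally matching a new open.
 */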

unsigned int ksmbd_open_durable_fd(struct ksmbd_file *fp)
{
	__open_id(&global_ft, fp, OPEN_ID_TYPE_PERSISTENT_ID);
	return fp->persistent_id;
}

struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work, struct file *filp)
{
	struct ksmbd_file *fp;
	int ret;

	fp = kmem_cache_zalloc(filp_cache, KSMBD_DEFAULT_GFP);
	if (!fp) {
		pr_err("Failed to allocate memory\n");
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&fp->blocked_works);
	INIT_LIST_HEAD(&fp->node);
	INIT_LIST_HEAD(&fp->lock_list);
	spin_lock_init(&fp->f_lock);
	atomic_set(&fp->refcount, 1);

	fp->filp		= filp;
	fp->conn		= work->conn;
	fp->tcon		= work->tcon;
	fp->volatile_id		= KSMBD_NO_FID;
	fp->persistent_id	= KSMBD_NO_FID;
	fp->f_state		= FP_NEW;
	fp->f_ci		= ksmbd_inode_get(fp);

	if (!fp->f_ci) {
		ret = -ENOMEM;
		goto err_out;
	}

	ret = __open_id(&work->sess->file_table, fp, OPEN_ID_TYPE_VOLATILE_ID);
	if (ret) {
		ksmbd_inode_put(fp->f_ci);
		goto err_out;
	}

	atomic_inc(&work->conn->stats.open_files_count);
	return fp;

err_out:
	kmem_cache_free(filp_cache, fp);
	return ERR_PTR(ret);
}

void ksmbd_update_fstate(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
			 unsigned int state)
{
	if (!fp)
		return;

	write_lock(&ft->lock);
	fp->f_state = state;
	write_unlock(&ft->lock);
}

static int
__close_file_table_ids(struct ksmbd_file_table *ft,
		       struct ksmbd_tree_connect *tcon,
		       bool (*skip)(struct ksmbd_tree_connect *tcon,
				    struct ksmbd_file *fp))
{
	struct ksmbd_file *fp;
	unsigned int id = 0;
	int num = 0;

	while (1) {
		write_lock(&ft->lock);
		fp = idr_get_next(ft->idr, &id);
		if (!fp) {
			write_unlock(&ft->lock);
			break;
		}

		if (skip(tcon, fp) ||
		    !atomic_dec_and_test(&fp->refcount)) {
			id++;
			write_unlock(&ft->lock);
			continue;
		}

		set_close_state_blocked_works(fp);
		idr_remove(ft->idr, fp->volatile_id);
		fp->volatile_id = KSMBD_NO_FID;
		write_unlock(&ft->lock);

		down_write(&fp->f_ci->m_lock);
		list_del_init(&fp->node);
		up_write(&fp->f_ci->m_lock);

		__ksmbd_close_fd(ft, fp);

		num++;
		id++;
	}

	return num;
}
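
/*
 * Note: the walk re-takes ft->lock on each iteration and resumes from
 * id + 1 because __ksmbd_close_fd() can sleep and must run unlocked.
 * The skip() callback decides ownership: tree_conn_fd_check() skips fds
 * of other tree connects, and session_fd_check() skips (and detaches)
 * reconnectable durable fds so they survive the disconnect.
 */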

static inline bool is_reconnectable(struct ksmbd_file *fp)
{
	struct oplock_info *opinfo = opinfo_get(fp);
	bool reconn = false;

	if (!opinfo)
		return false;

	if (opinfo->op_state != OPLOCK_STATE_NONE) {
		opinfo_put(opinfo);
		return false;
	}

	if (fp->is_resilient || fp->is_persistent)
		reconn = true;
	else if (fp->is_durable && opinfo->is_lease &&
		 opinfo->o_lease->state & SMB2_LEASE_HANDLE_CACHING_LE)
		reconn = true;
	else if (fp->is_durable && opinfo->level == SMB2_OPLOCK_LEVEL_BATCH)
		reconn = true;

	opinfo_put(opinfo);
	return reconn;
}
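
/*
 * Note: this encodes the SMB2 durable-handle rules as implemented here:
 * resilient and persistent handles always survive a disconnect, a plain
 * durable handle survives only while it holds a handle-caching lease or
 * a batch oplock, and nothing is reconnectable while a break is in
 * flight (op_state != OPLOCK_STATE_NONE).
 */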

static bool tree_conn_fd_check(struct ksmbd_tree_connect *tcon,
			       struct ksmbd_file *fp)
{
	return fp->tcon != tcon;
}

static bool ksmbd_durable_scavenger_alive(void)
{
	if (!durable_scavenger_running)
		return false;

	if (kthread_should_stop())
		return false;

	if (idr_is_empty(global_ft.idr))
		return false;

	return true;
}

static void ksmbd_scavenger_dispose_dh(struct list_head *head)
{
	while (!list_empty(head)) {
		struct ksmbd_file *fp;

		fp = list_first_entry(head, struct ksmbd_file, node);
		list_del_init(&fp->node);
		__ksmbd_close_fd(NULL, fp);
	}
}

static int ksmbd_durable_scavenger(void *dummy)
{
	struct ksmbd_file *fp = NULL;
	unsigned int id;
	unsigned int min_timeout = 1;
	bool found_fp_timeout;
	LIST_HEAD(scavenger_list);
	unsigned long remaining_jiffies;

	__module_get(THIS_MODULE);

	set_freezable();
	while (ksmbd_durable_scavenger_alive()) {
		if (try_to_freeze())
			continue;

		found_fp_timeout = false;

		remaining_jiffies = wait_event_timeout(dh_wq,
				   !ksmbd_durable_scavenger_alive(),
				   __msecs_to_jiffies(min_timeout));
		if (remaining_jiffies)
			min_timeout = jiffies_to_msecs(remaining_jiffies);
		else
			min_timeout = DURABLE_HANDLE_MAX_TIMEOUT;

		write_lock(&global_ft.lock);
		idr_for_each_entry(global_ft.idr, fp, id) {
			if (!fp->durable_timeout)
				continue;

			if (atomic_read(&fp->refcount) > 1 ||
			    fp->conn)
				continue;

			found_fp_timeout = true;
			if (fp->durable_scavenger_timeout <=
			    jiffies_to_msecs(jiffies)) {
				__ksmbd_remove_durable_fd(fp);
				list_add(&fp->node, &scavenger_list);
			} else {
				unsigned long durable_timeout;

				durable_timeout =
					fp->durable_scavenger_timeout -
						jiffies_to_msecs(jiffies);

				if (min_timeout > durable_timeout)
					min_timeout = durable_timeout;
			}
		}
		write_unlock(&global_ft.lock);

		ksmbd_scavenger_dispose_dh(&scavenger_list);

		if (!found_fp_timeout)
			break;
	}

	durable_scavenger_running = false;

	module_put(THIS_MODULE);

	return 0;
}
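
/*
 * Note: the scavenger sleeps on dh_wq for the shortest outstanding
 * timeout, then reaps every orphaned durable fd whose deadline (set in
 * session_fd_check()) has passed.  Expired entries are unhashed from
 * global_ft under the lock but closed from a private list afterwards,
 * since __ksmbd_close_fd() can sleep.
 */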

void ksmbd_launch_ksmbd_durable_scavenger(void)
{
	if (!(server_conf.flags & KSMBD_GLOBAL_FLAG_DURABLE_HANDLE))
		return;

	mutex_lock(&durable_scavenger_lock);
	if (durable_scavenger_running) {
		mutex_unlock(&durable_scavenger_lock);
		return;
	}

	durable_scavenger_running = true;

	server_conf.dh_task = kthread_run(ksmbd_durable_scavenger,
				     (void *)NULL, "ksmbd-durable-scavenger");
	if (IS_ERR(server_conf.dh_task))
		pr_err("cannot start durable scavenger thread, err: %ld\n",
		       PTR_ERR(server_conf.dh_task));
	mutex_unlock(&durable_scavenger_lock);
}

void ksmbd_stop_durable_scavenger(void)
{
	if (!(server_conf.flags & KSMBD_GLOBAL_FLAG_DURABLE_HANDLE))
		return;

	mutex_lock(&durable_scavenger_lock);
	if (!durable_scavenger_running) {
		mutex_unlock(&durable_scavenger_lock);
		return;
	}

	durable_scavenger_running = false;
	if (waitqueue_active(&dh_wq))
		wake_up(&dh_wq);
	mutex_unlock(&durable_scavenger_lock);
	kthread_stop(server_conf.dh_task);
}

static bool session_fd_check(struct ksmbd_tree_connect *tcon,
			     struct ksmbd_file *fp)
{
	struct ksmbd_inode *ci;
	struct oplock_info *op;
	struct ksmbd_conn *conn;

	if (!is_reconnectable(fp))
		return false;

	conn = fp->conn;
	ci = fp->f_ci;
	down_write(&ci->m_lock);
	list_for_each_entry_rcu(op, &ci->m_op_list, op_entry) {
		if (op->conn != conn)
			continue;
		if (op->conn && atomic_dec_and_test(&op->conn->refcnt))
			kfree(op->conn);
		op->conn = NULL;
	}
	up_write(&ci->m_lock);

	fp->conn = NULL;
	fp->tcon = NULL;
	fp->volatile_id = KSMBD_NO_FID;

	if (fp->durable_timeout)
		fp->durable_scavenger_timeout =
			jiffies_to_msecs(jiffies) + fp->durable_timeout;

	return true;
}
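
/*
 * Note: this is the disconnect half of durable-handle support: rather
 * than closing a reconnectable fd, it is detached from the dying
 * connection (fp->conn, fp->tcon and the volatile id are cleared, lease
 * owners are unbound) and armed with a scavenger deadline, after which
 * ksmbd_durable_scavenger() reaps it if no reconnect arrives.
 */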

void ksmbd_close_tree_conn_fds(struct ksmbd_work *work)
{
	int num = __close_file_table_ids(&work->sess->file_table,
					 work->tcon,
					 tree_conn_fd_check);

	atomic_sub(num, &work->conn->stats.open_files_count);
}

void ksmbd_close_session_fds(struct ksmbd_work *work)
{
	int num = __close_file_table_ids(&work->sess->file_table,
					 work->tcon,
					 session_fd_check);

	atomic_sub(num, &work->conn->stats.open_files_count);
}

int ksmbd_init_global_file_table(void)
{
	create_proc_files();
	return ksmbd_init_file_table(&global_ft);
}

void ksmbd_free_global_file_table(void)
{
	struct ksmbd_file	*fp = NULL;
	unsigned int		id;

	idr_for_each_entry(global_ft.idr, fp, id) {
		ksmbd_remove_durable_fd(fp);
		__ksmbd_close_fd(NULL, fp);
	}

	idr_destroy(global_ft.idr);
	kfree(global_ft.idr);
}

int ksmbd_validate_name_reconnect(struct ksmbd_share_config *share,
				  struct ksmbd_file *fp, char *name)
{
	char *pathname, *ab_pathname;
	int ret = 0;

	pathname = kmalloc(PATH_MAX, KSMBD_DEFAULT_GFP);
	if (!pathname)
		return -EACCES;

	ab_pathname = d_path(&fp->filp->f_path, pathname, PATH_MAX);
	if (IS_ERR(ab_pathname)) {
		kfree(pathname);
		return -EACCES;
	}

	if (name && strcmp(&ab_pathname[share->path_sz + 1], name)) {
		ksmbd_debug(SMB, "invalid name reconnect %s\n", name);
		ret = -EINVAL;
	}

	kfree(pathname);

	return ret;
}

int ksmbd_reopen_durable_fd(struct ksmbd_work *work, struct ksmbd_file *fp)
{
	struct ksmbd_inode *ci;
	struct oplock_info *op;

	if (!fp->is_durable || fp->conn || fp->tcon) {
		pr_err("Invalid durable fd [%p:%p]\n", fp->conn, fp->tcon);
		return -EBADF;
	}

	if (has_file_id(fp->volatile_id)) {
		pr_err("Still in use durable fd: %llu\n", fp->volatile_id);
		return -EBADF;
	}

	fp->conn = work->conn;
	fp->tcon = work->tcon;

	ci = fp->f_ci;
	down_write(&ci->m_lock);
	list_for_each_entry_rcu(op, &ci->m_op_list, op_entry) {
		if (op->conn)
			continue;
		op->conn = fp->conn;
		atomic_inc(&op->conn->refcnt);
	}
	up_write(&ci->m_lock);

	fp->f_state = FP_NEW;
	__open_id(&work->sess->file_table, fp, OPEN_ID_TYPE_VOLATILE_ID);
	if (!has_file_id(fp->volatile_id)) {
		fp->conn = NULL;
		fp->tcon = NULL;
		return -EBADF;
	}
	return 0;
}
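
/*
 * Note: reconnect is the mirror image of session_fd_check(): the fd
 * must still be orphaned (no conn, no tcon, no volatile id), it is
 * rebound to the new connection, lease owners are reattached, and a
 * fresh volatile id is allocated in the new session's file table.  A
 * failed id allocation rolls the binding back and returns -EBADF.
 */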

int ksmbd_init_file_table(struct ksmbd_file_table *ft)
{
	ft->idr = kzalloc(sizeof(struct idr), KSMBD_DEFAULT_GFP);
	if (!ft->idr)
		return -ENOMEM;

	idr_init(ft->idr);
	rwlock_init(&ft->lock);
	return 0;
}

void ksmbd_destroy_file_table(struct ksmbd_file_table *ft)
{
	if (!ft->idr)
		return;

	__close_file_table_ids(ft, NULL, session_fd_check);
	idr_destroy(ft->idr);
	kfree(ft->idr);
	ft->idr = NULL;
}

int ksmbd_init_file_cache(void)
{
	filp_cache = kmem_cache_create("ksmbd_file_cache",
				       sizeof(struct ksmbd_file), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!filp_cache)
		goto out;

	init_waitqueue_head(&dh_wq);

	return 0;

out:
	pr_err("failed to allocate file cache\n");
	return -ENOMEM;
}

void ksmbd_exit_file_cache(void)
{
	kmem_cache_destroy(filp_cache);
}