xref: /linux/fs/locks.c (revision 03f7c1d2a49acd30e38789cd809d3300721e9b0e)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/fs/locks.c
4  *
5  * We implement four types of file locks: BSD locks, posix locks, open
6  * file description locks, and leases.  For details about BSD locks,
7  * see the flock(2) man page; for details about the other three, see
8  * fcntl(2).
9  *
10  *
11  * Locking conflicts and dependencies:
12  * If multiple threads attempt to lock the same byte (or flock the same file)
13  * only one can be granted the lock, and other must wait their turn.
14  * The first lock has been "applied" or "granted", the others are "waiting"
15  * and are "blocked" by the "applied" lock..
16  *
17  * Waiting and applied locks are all kept in trees whose properties are:
18  *
19  *	- the root of a tree may be an applied or waiting lock.
20  *	- every other node in the tree is a waiting lock that
21  *	  conflicts with every ancestor of that node.
22  *
23  * Every such tree begins life as a waiting singleton which obviously
24  * satisfies the above properties.
25  *
26  * The only ways we modify trees preserve these properties:
27  *
28  *	1. We may add a new leaf node, but only after first verifying that it
29  *	   conflicts with all of its ancestors.
30  *	2. We may remove the root of a tree, creating a new singleton
31  *	   tree from the root and N new trees rooted in the immediate
32  *	   children.
33  *	3. If the root of a tree is not currently an applied lock, we may
34  *	   apply it (if possible).
35  *	4. We may upgrade the root of the tree (either extend its range,
36  *	   or upgrade its entire range from read to write).
37  *
38  * When an applied lock is modified in a way that reduces or downgrades any
39  * part of its range, we remove all its children (2 above).  This particularly
40  * happens when a lock is unlocked.
41  *
42  * For each of those child trees we "wake up" the thread which is
43  * waiting for the lock so it can continue handling as follows: if the
44  * root of the tree applies, we do so (3).  If it doesn't, it must
45  * conflict with some applied lock.  We remove (wake up) all of its children
46  * (2), and add it is a new leaf to the tree rooted in the applied
47  * lock (1).  We then repeat the process recursively with those
48  * children.
49  *
50  */
51 
52 #include <linux/capability.h>
53 #include <linux/file.h>
54 #include <linux/fdtable.h>
55 #include <linux/fs.h>
56 #include <linux/init.h>
57 #include <linux/security.h>
58 #include <linux/slab.h>
59 #include <linux/syscalls.h>
60 #include <linux/time.h>
61 #include <linux/rcupdate.h>
62 #include <linux/pid_namespace.h>
63 #include <linux/hashtable.h>
64 #include <linux/percpu.h>
65 #include <linux/sysctl.h>
66 
67 #define CREATE_TRACE_POINTS
68 #include <trace/events/filelock.h>
69 
70 #include <linux/uaccess.h>
71 
72 #define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
73 #define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
74 #define IS_LEASE(fl)	(fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
75 #define IS_OFDLCK(fl)	(fl->fl_flags & FL_OFDLCK)
76 #define IS_REMOTELCK(fl)	(fl->fl_pid <= 0)
77 
78 static bool lease_breaking(struct file_lock *fl)
79 {
80 	return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
81 }
82 
83 static int target_leasetype(struct file_lock *fl)
84 {
85 	if (fl->fl_flags & FL_UNLOCK_PENDING)
86 		return F_UNLCK;
87 	if (fl->fl_flags & FL_DOWNGRADE_PENDING)
88 		return F_RDLCK;
89 	return fl->fl_type;
90 }
91 
92 static int leases_enable = 1;
93 static int lease_break_time = 45;
94 
95 #ifdef CONFIG_SYSCTL
96 static struct ctl_table locks_sysctls[] = {
97 	{
98 		.procname	= "leases-enable",
99 		.data		= &leases_enable,
100 		.maxlen		= sizeof(int),
101 		.mode		= 0644,
102 		.proc_handler	= proc_dointvec,
103 	},
104 #ifdef CONFIG_MMU
105 	{
106 		.procname	= "lease-break-time",
107 		.data		= &lease_break_time,
108 		.maxlen		= sizeof(int),
109 		.mode		= 0644,
110 		.proc_handler	= proc_dointvec,
111 	},
112 #endif /* CONFIG_MMU */
113 	{}
114 };
115 
116 static int __init init_fs_locks_sysctls(void)
117 {
118 	register_sysctl_init("fs", locks_sysctls);
119 	return 0;
120 }
121 early_initcall(init_fs_locks_sysctls);
122 #endif /* CONFIG_SYSCTL */
123 
124 /*
125  * The global file_lock_list is only used for displaying /proc/locks, so we
126  * keep a list on each CPU, with each list protected by its own spinlock.
127  * Global serialization is done using file_rwsem.
128  *
129  * Note that alterations to the list also require that the relevant flc_lock is
130  * held.
131  */
132 struct file_lock_list_struct {
133 	spinlock_t		lock;
134 	struct hlist_head	hlist;
135 };
136 static DEFINE_PER_CPU(struct file_lock_list_struct, file_lock_list);
137 DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);
138 
139 
140 /*
141  * The blocked_hash is used to find POSIX lock loops for deadlock detection.
142  * It is protected by blocked_lock_lock.
143  *
144  * We hash locks by lockowner in order to optimize searching for the lock a
145  * particular lockowner is waiting on.
146  *
147  * FIXME: make this value scale via some heuristic? We generally will want more
148  * buckets when we have more lockowners holding locks, but that's a little
149  * difficult to determine without knowing what the workload will look like.
150  */
151 #define BLOCKED_HASH_BITS	7
152 static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
153 
154 /*
155  * This lock protects the blocked_hash. Generally, if you're accessing it, you
156  * want to be holding this lock.
157  *
158  * In addition, it also protects the fl->fl_blocked_requests list, and the
159  * fl->fl_blocker pointer for file_lock structures that are acting as lock
160  * requests (in contrast to those that are acting as records of acquired locks).
161  *
162  * Note that when we acquire this lock in order to change the above fields,
163  * we often hold the flc_lock as well. In certain cases, when reading the fields
164  * protected by this lock, we can skip acquiring it iff we already hold the
165  * flc_lock.
166  */
167 static DEFINE_SPINLOCK(blocked_lock_lock);
168 
169 static struct kmem_cache *flctx_cache __read_mostly;
170 static struct kmem_cache *filelock_cache __read_mostly;
171 
172 static struct file_lock_context *
173 locks_get_lock_context(struct inode *inode, int type)
174 {
175 	struct file_lock_context *ctx;
176 
177 	/* paired with cmpxchg() below */
178 	ctx = smp_load_acquire(&inode->i_flctx);
179 	if (likely(ctx) || type == F_UNLCK)
180 		goto out;
181 
182 	ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
183 	if (!ctx)
184 		goto out;
185 
186 	spin_lock_init(&ctx->flc_lock);
187 	INIT_LIST_HEAD(&ctx->flc_flock);
188 	INIT_LIST_HEAD(&ctx->flc_posix);
189 	INIT_LIST_HEAD(&ctx->flc_lease);
190 
191 	/*
192 	 * Assign the pointer if it's not already assigned. If it is, then
193 	 * free the context we just allocated.
194 	 */
195 	if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
196 		kmem_cache_free(flctx_cache, ctx);
197 		ctx = smp_load_acquire(&inode->i_flctx);
198 	}
199 out:
200 	trace_locks_get_lock_context(inode, type, ctx);
201 	return ctx;
202 }
203 
204 static void
205 locks_dump_ctx_list(struct list_head *list, char *list_type)
206 {
207 	struct file_lock *fl;
208 
209 	list_for_each_entry(fl, list, fl_list) {
210 		pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
211 	}
212 }
213 
214 static void
215 locks_check_ctx_lists(struct inode *inode)
216 {
217 	struct file_lock_context *ctx = inode->i_flctx;
218 
219 	if (unlikely(!list_empty(&ctx->flc_flock) ||
220 		     !list_empty(&ctx->flc_posix) ||
221 		     !list_empty(&ctx->flc_lease))) {
222 		pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
223 			MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
224 			inode->i_ino);
225 		locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
226 		locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
227 		locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
228 	}
229 }
230 
231 static void
232 locks_check_ctx_file_list(struct file *filp, struct list_head *list,
233 				char *list_type)
234 {
235 	struct file_lock *fl;
236 	struct inode *inode = locks_inode(filp);
237 
238 	list_for_each_entry(fl, list, fl_list)
239 		if (fl->fl_file == filp)
240 			pr_warn("Leaked %s lock on dev=0x%x:0x%x ino=0x%lx "
241 				" fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
242 				list_type, MAJOR(inode->i_sb->s_dev),
243 				MINOR(inode->i_sb->s_dev), inode->i_ino,
244 				fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
245 }
246 
247 void
248 locks_free_lock_context(struct inode *inode)
249 {
250 	struct file_lock_context *ctx = inode->i_flctx;
251 
252 	if (unlikely(ctx)) {
253 		locks_check_ctx_lists(inode);
254 		kmem_cache_free(flctx_cache, ctx);
255 	}
256 }
257 
258 static void locks_init_lock_heads(struct file_lock *fl)
259 {
260 	INIT_HLIST_NODE(&fl->fl_link);
261 	INIT_LIST_HEAD(&fl->fl_list);
262 	INIT_LIST_HEAD(&fl->fl_blocked_requests);
263 	INIT_LIST_HEAD(&fl->fl_blocked_member);
264 	init_waitqueue_head(&fl->fl_wait);
265 }
266 
267 /* Allocate an empty lock structure. */
268 struct file_lock *locks_alloc_lock(void)
269 {
270 	struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
271 
272 	if (fl)
273 		locks_init_lock_heads(fl);
274 
275 	return fl;
276 }
277 EXPORT_SYMBOL_GPL(locks_alloc_lock);
278 
279 void locks_release_private(struct file_lock *fl)
280 {
281 	BUG_ON(waitqueue_active(&fl->fl_wait));
282 	BUG_ON(!list_empty(&fl->fl_list));
283 	BUG_ON(!list_empty(&fl->fl_blocked_requests));
284 	BUG_ON(!list_empty(&fl->fl_blocked_member));
285 	BUG_ON(!hlist_unhashed(&fl->fl_link));
286 
287 	if (fl->fl_ops) {
288 		if (fl->fl_ops->fl_release_private)
289 			fl->fl_ops->fl_release_private(fl);
290 		fl->fl_ops = NULL;
291 	}
292 
293 	if (fl->fl_lmops) {
294 		if (fl->fl_lmops->lm_put_owner) {
295 			fl->fl_lmops->lm_put_owner(fl->fl_owner);
296 			fl->fl_owner = NULL;
297 		}
298 		fl->fl_lmops = NULL;
299 	}
300 }
301 EXPORT_SYMBOL_GPL(locks_release_private);
302 
303 /**
304  * locks_owner_has_blockers - Check for blocking lock requests
305  * @flctx: file lock context
306  * @owner: lock owner
307  *
308  * Return values:
309  *   %true: @owner has at least one blocker
310  *   %false: @owner has no blockers
311  */
312 bool locks_owner_has_blockers(struct file_lock_context *flctx,
313 		fl_owner_t owner)
314 {
315 	struct file_lock *fl;
316 
317 	spin_lock(&flctx->flc_lock);
318 	list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
319 		if (fl->fl_owner != owner)
320 			continue;
321 		if (!list_empty(&fl->fl_blocked_requests)) {
322 			spin_unlock(&flctx->flc_lock);
323 			return true;
324 		}
325 	}
326 	spin_unlock(&flctx->flc_lock);
327 	return false;
328 }
329 EXPORT_SYMBOL_GPL(locks_owner_has_blockers);
330 
331 /* Free a lock which is not in use. */
332 void locks_free_lock(struct file_lock *fl)
333 {
334 	locks_release_private(fl);
335 	kmem_cache_free(filelock_cache, fl);
336 }
337 EXPORT_SYMBOL(locks_free_lock);
338 
339 static void
340 locks_dispose_list(struct list_head *dispose)
341 {
342 	struct file_lock *fl;
343 
344 	while (!list_empty(dispose)) {
345 		fl = list_first_entry(dispose, struct file_lock, fl_list);
346 		list_del_init(&fl->fl_list);
347 		locks_free_lock(fl);
348 	}
349 }
350 
351 void locks_init_lock(struct file_lock *fl)
352 {
353 	memset(fl, 0, sizeof(struct file_lock));
354 	locks_init_lock_heads(fl);
355 }
356 EXPORT_SYMBOL(locks_init_lock);
357 
358 /*
359  * Initialize a new lock from an existing file_lock structure.
360  */
361 void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
362 {
363 	new->fl_owner = fl->fl_owner;
364 	new->fl_pid = fl->fl_pid;
365 	new->fl_file = NULL;
366 	new->fl_flags = fl->fl_flags;
367 	new->fl_type = fl->fl_type;
368 	new->fl_start = fl->fl_start;
369 	new->fl_end = fl->fl_end;
370 	new->fl_lmops = fl->fl_lmops;
371 	new->fl_ops = NULL;
372 
373 	if (fl->fl_lmops) {
374 		if (fl->fl_lmops->lm_get_owner)
375 			fl->fl_lmops->lm_get_owner(fl->fl_owner);
376 	}
377 }
378 EXPORT_SYMBOL(locks_copy_conflock);
379 
380 void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
381 {
382 	/* "new" must be a freshly-initialized lock */
383 	WARN_ON_ONCE(new->fl_ops);
384 
385 	locks_copy_conflock(new, fl);
386 
387 	new->fl_file = fl->fl_file;
388 	new->fl_ops = fl->fl_ops;
389 
390 	if (fl->fl_ops) {
391 		if (fl->fl_ops->fl_copy_lock)
392 			fl->fl_ops->fl_copy_lock(new, fl);
393 	}
394 }
395 EXPORT_SYMBOL(locks_copy_lock);
396 
397 static void locks_move_blocks(struct file_lock *new, struct file_lock *fl)
398 {
399 	struct file_lock *f;
400 
401 	/*
402 	 * As ctx->flc_lock is held, new requests cannot be added to
403 	 * ->fl_blocked_requests, so we don't need a lock to check if it
404 	 * is empty.
405 	 */
406 	if (list_empty(&fl->fl_blocked_requests))
407 		return;
408 	spin_lock(&blocked_lock_lock);
409 	list_splice_init(&fl->fl_blocked_requests, &new->fl_blocked_requests);
410 	list_for_each_entry(f, &new->fl_blocked_requests, fl_blocked_member)
411 		f->fl_blocker = new;
412 	spin_unlock(&blocked_lock_lock);
413 }
414 
415 static inline int flock_translate_cmd(int cmd) {
416 	switch (cmd) {
417 	case LOCK_SH:
418 		return F_RDLCK;
419 	case LOCK_EX:
420 		return F_WRLCK;
421 	case LOCK_UN:
422 		return F_UNLCK;
423 	}
424 	return -EINVAL;
425 }
426 
427 /* Fill in a file_lock structure with an appropriate FLOCK lock. */
428 static void flock_make_lock(struct file *filp, struct file_lock *fl, int type)
429 {
430 	locks_init_lock(fl);
431 
432 	fl->fl_file = filp;
433 	fl->fl_owner = filp;
434 	fl->fl_pid = current->tgid;
435 	fl->fl_flags = FL_FLOCK;
436 	fl->fl_type = type;
437 	fl->fl_end = OFFSET_MAX;
438 }
439 
440 static int assign_type(struct file_lock *fl, long type)
441 {
442 	switch (type) {
443 	case F_RDLCK:
444 	case F_WRLCK:
445 	case F_UNLCK:
446 		fl->fl_type = type;
447 		break;
448 	default:
449 		return -EINVAL;
450 	}
451 	return 0;
452 }
453 
454 static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
455 				 struct flock64 *l)
456 {
457 	switch (l->l_whence) {
458 	case SEEK_SET:
459 		fl->fl_start = 0;
460 		break;
461 	case SEEK_CUR:
462 		fl->fl_start = filp->f_pos;
463 		break;
464 	case SEEK_END:
465 		fl->fl_start = i_size_read(file_inode(filp));
466 		break;
467 	default:
468 		return -EINVAL;
469 	}
470 	if (l->l_start > OFFSET_MAX - fl->fl_start)
471 		return -EOVERFLOW;
472 	fl->fl_start += l->l_start;
473 	if (fl->fl_start < 0)
474 		return -EINVAL;
475 
476 	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
477 	   POSIX-2001 defines it. */
478 	if (l->l_len > 0) {
479 		if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
480 			return -EOVERFLOW;
481 		fl->fl_end = fl->fl_start + (l->l_len - 1);
482 
483 	} else if (l->l_len < 0) {
484 		if (fl->fl_start + l->l_len < 0)
485 			return -EINVAL;
486 		fl->fl_end = fl->fl_start - 1;
487 		fl->fl_start += l->l_len;
488 	} else
489 		fl->fl_end = OFFSET_MAX;
490 
491 	fl->fl_owner = current->files;
492 	fl->fl_pid = current->tgid;
493 	fl->fl_file = filp;
494 	fl->fl_flags = FL_POSIX;
495 	fl->fl_ops = NULL;
496 	fl->fl_lmops = NULL;
497 
498 	return assign_type(fl, l->l_type);
499 }
500 
501 /* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
502  * style lock.
503  */
504 static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
505 			       struct flock *l)
506 {
507 	struct flock64 ll = {
508 		.l_type = l->l_type,
509 		.l_whence = l->l_whence,
510 		.l_start = l->l_start,
511 		.l_len = l->l_len,
512 	};
513 
514 	return flock64_to_posix_lock(filp, fl, &ll);
515 }
516 
517 /* default lease lock manager operations */
518 static bool
519 lease_break_callback(struct file_lock *fl)
520 {
521 	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
522 	return false;
523 }
524 
525 static void
526 lease_setup(struct file_lock *fl, void **priv)
527 {
528 	struct file *filp = fl->fl_file;
529 	struct fasync_struct *fa = *priv;
530 
531 	/*
532 	 * fasync_insert_entry() returns the old entry if any. If there was no
533 	 * old entry, then it used "priv" and inserted it into the fasync list.
534 	 * Clear the pointer to indicate that it shouldn't be freed.
535 	 */
536 	if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
537 		*priv = NULL;
538 
539 	__f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);
540 }
541 
542 static const struct lock_manager_operations lease_manager_ops = {
543 	.lm_break = lease_break_callback,
544 	.lm_change = lease_modify,
545 	.lm_setup = lease_setup,
546 };
547 
548 /*
549  * Initialize a lease, use the default lock manager operations
550  */
551 static int lease_init(struct file *filp, long type, struct file_lock *fl)
552 {
553 	if (assign_type(fl, type) != 0)
554 		return -EINVAL;
555 
556 	fl->fl_owner = filp;
557 	fl->fl_pid = current->tgid;
558 
559 	fl->fl_file = filp;
560 	fl->fl_flags = FL_LEASE;
561 	fl->fl_start = 0;
562 	fl->fl_end = OFFSET_MAX;
563 	fl->fl_ops = NULL;
564 	fl->fl_lmops = &lease_manager_ops;
565 	return 0;
566 }
567 
568 /* Allocate a file_lock initialised to this type of lease */
569 static struct file_lock *lease_alloc(struct file *filp, long type)
570 {
571 	struct file_lock *fl = locks_alloc_lock();
572 	int error = -ENOMEM;
573 
574 	if (fl == NULL)
575 		return ERR_PTR(error);
576 
577 	error = lease_init(filp, type, fl);
578 	if (error) {
579 		locks_free_lock(fl);
580 		return ERR_PTR(error);
581 	}
582 	return fl;
583 }
584 
585 /* Check if two locks overlap each other.
586  */
587 static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
588 {
589 	return ((fl1->fl_end >= fl2->fl_start) &&
590 		(fl2->fl_end >= fl1->fl_start));
591 }
592 
593 /*
594  * Check whether two locks have the same owner.
595  */
596 static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
597 {
598 	return fl1->fl_owner == fl2->fl_owner;
599 }
600 
601 /* Must be called with the flc_lock held! */
602 static void locks_insert_global_locks(struct file_lock *fl)
603 {
604 	struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list);
605 
606 	percpu_rwsem_assert_held(&file_rwsem);
607 
608 	spin_lock(&fll->lock);
609 	fl->fl_link_cpu = smp_processor_id();
610 	hlist_add_head(&fl->fl_link, &fll->hlist);
611 	spin_unlock(&fll->lock);
612 }
613 
614 /* Must be called with the flc_lock held! */
615 static void locks_delete_global_locks(struct file_lock *fl)
616 {
617 	struct file_lock_list_struct *fll;
618 
619 	percpu_rwsem_assert_held(&file_rwsem);
620 
621 	/*
622 	 * Avoid taking lock if already unhashed. This is safe since this check
623 	 * is done while holding the flc_lock, and new insertions into the list
624 	 * also require that it be held.
625 	 */
626 	if (hlist_unhashed(&fl->fl_link))
627 		return;
628 
629 	fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu);
630 	spin_lock(&fll->lock);
631 	hlist_del_init(&fl->fl_link);
632 	spin_unlock(&fll->lock);
633 }
634 
635 static unsigned long
636 posix_owner_key(struct file_lock *fl)
637 {
638 	return (unsigned long)fl->fl_owner;
639 }
640 
641 static void locks_insert_global_blocked(struct file_lock *waiter)
642 {
643 	lockdep_assert_held(&blocked_lock_lock);
644 
645 	hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
646 }
647 
648 static void locks_delete_global_blocked(struct file_lock *waiter)
649 {
650 	lockdep_assert_held(&blocked_lock_lock);
651 
652 	hash_del(&waiter->fl_link);
653 }
654 
655 /* Remove waiter from blocker's block list.
656  * When blocker ends up pointing to itself then the list is empty.
657  *
658  * Must be called with blocked_lock_lock held.
659  */
660 static void __locks_delete_block(struct file_lock *waiter)
661 {
662 	locks_delete_global_blocked(waiter);
663 	list_del_init(&waiter->fl_blocked_member);
664 }
665 
666 static void __locks_wake_up_blocks(struct file_lock *blocker)
667 {
668 	while (!list_empty(&blocker->fl_blocked_requests)) {
669 		struct file_lock *waiter;
670 
671 		waiter = list_first_entry(&blocker->fl_blocked_requests,
672 					  struct file_lock, fl_blocked_member);
673 		__locks_delete_block(waiter);
674 		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
675 			waiter->fl_lmops->lm_notify(waiter);
676 		else
677 			wake_up(&waiter->fl_wait);
678 
679 		/*
680 		 * The setting of fl_blocker to NULL marks the "done"
681 		 * point in deleting a block. Paired with acquire at the top
682 		 * of locks_delete_block().
683 		 */
684 		smp_store_release(&waiter->fl_blocker, NULL);
685 	}
686 }
687 
688 /**
689  *	locks_delete_block - stop waiting for a file lock
690  *	@waiter: the lock which was waiting
691  *
692  *	lockd/nfsd need to disconnect the lock while working on it.
693  */
694 int locks_delete_block(struct file_lock *waiter)
695 {
696 	int status = -ENOENT;
697 
698 	/*
699 	 * If fl_blocker is NULL, it won't be set again as this thread "owns"
700 	 * the lock and is the only one that might try to claim the lock.
701 	 *
702 	 * We use acquire/release to manage fl_blocker so that we can
703 	 * optimize away taking the blocked_lock_lock in many cases.
704 	 *
705 	 * The smp_load_acquire guarantees two things:
706 	 *
707 	 * 1/ that fl_blocked_requests can be tested locklessly. If something
708 	 * was recently added to that list it must have been in a locked region
709 	 * *before* the locked region when fl_blocker was set to NULL.
710 	 *
711 	 * 2/ that no other thread is accessing 'waiter', so it is safe to free
712 	 * it.  __locks_wake_up_blocks is careful not to touch waiter after
713 	 * fl_blocker is released.
714 	 *
715 	 * If a lockless check of fl_blocker shows it to be NULL, we know that
716 	 * no new locks can be inserted into its fl_blocked_requests list, and
717 	 * can avoid doing anything further if the list is empty.
718 	 */
719 	if (!smp_load_acquire(&waiter->fl_blocker) &&
720 	    list_empty(&waiter->fl_blocked_requests))
721 		return status;
722 
723 	spin_lock(&blocked_lock_lock);
724 	if (waiter->fl_blocker)
725 		status = 0;
726 	__locks_wake_up_blocks(waiter);
727 	__locks_delete_block(waiter);
728 
729 	/*
730 	 * The setting of fl_blocker to NULL marks the "done" point in deleting
731 	 * a block. Paired with acquire at the top of this function.
732 	 */
733 	smp_store_release(&waiter->fl_blocker, NULL);
734 	spin_unlock(&blocked_lock_lock);
735 	return status;
736 }
737 EXPORT_SYMBOL(locks_delete_block);
738 
739 /* Insert waiter into blocker's block list.
740  * We use a circular list so that processes can be easily woken up in
741  * the order they blocked. The documentation doesn't require this but
742  * it seems like the reasonable thing to do.
743  *
744  * Must be called with both the flc_lock and blocked_lock_lock held. The
745  * fl_blocked_requests list itself is protected by the blocked_lock_lock,
746  * but by ensuring that the flc_lock is also held on insertions we can avoid
747  * taking the blocked_lock_lock in some cases when we see that the
748  * fl_blocked_requests list is empty.
749  *
750  * Rather than just adding to the list, we check for conflicts with any existing
751  * waiters, and add beneath any waiter that blocks the new waiter.
752  * Thus wakeups don't happen until needed.
753  */
754 static void __locks_insert_block(struct file_lock *blocker,
755 				 struct file_lock *waiter,
756 				 bool conflict(struct file_lock *,
757 					       struct file_lock *))
758 {
759 	struct file_lock *fl;
760 	BUG_ON(!list_empty(&waiter->fl_blocked_member));
761 
762 new_blocker:
763 	list_for_each_entry(fl, &blocker->fl_blocked_requests, fl_blocked_member)
764 		if (conflict(fl, waiter)) {
765 			blocker =  fl;
766 			goto new_blocker;
767 		}
768 	waiter->fl_blocker = blocker;
769 	list_add_tail(&waiter->fl_blocked_member, &blocker->fl_blocked_requests);
770 	if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
771 		locks_insert_global_blocked(waiter);
772 
773 	/* The requests in waiter->fl_blocked are known to conflict with
774 	 * waiter, but might not conflict with blocker, or the requests
775 	 * and lock which block it.  So they all need to be woken.
776 	 */
777 	__locks_wake_up_blocks(waiter);
778 }
779 
780 /* Must be called with flc_lock held. */
781 static void locks_insert_block(struct file_lock *blocker,
782 			       struct file_lock *waiter,
783 			       bool conflict(struct file_lock *,
784 					     struct file_lock *))
785 {
786 	spin_lock(&blocked_lock_lock);
787 	__locks_insert_block(blocker, waiter, conflict);
788 	spin_unlock(&blocked_lock_lock);
789 }
790 
791 /*
792  * Wake up processes blocked waiting for blocker.
793  *
794  * Must be called with the inode->flc_lock held!
795  */
796 static void locks_wake_up_blocks(struct file_lock *blocker)
797 {
798 	/*
799 	 * Avoid taking global lock if list is empty. This is safe since new
800 	 * blocked requests are only added to the list under the flc_lock, and
801 	 * the flc_lock is always held here. Note that removal from the
802 	 * fl_blocked_requests list does not require the flc_lock, so we must
803 	 * recheck list_empty() after acquiring the blocked_lock_lock.
804 	 */
805 	if (list_empty(&blocker->fl_blocked_requests))
806 		return;
807 
808 	spin_lock(&blocked_lock_lock);
809 	__locks_wake_up_blocks(blocker);
810 	spin_unlock(&blocked_lock_lock);
811 }
812 
813 static void
814 locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
815 {
816 	list_add_tail(&fl->fl_list, before);
817 	locks_insert_global_locks(fl);
818 }
819 
820 static void
821 locks_unlink_lock_ctx(struct file_lock *fl)
822 {
823 	locks_delete_global_locks(fl);
824 	list_del_init(&fl->fl_list);
825 	locks_wake_up_blocks(fl);
826 }
827 
828 static void
829 locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
830 {
831 	locks_unlink_lock_ctx(fl);
832 	if (dispose)
833 		list_add(&fl->fl_list, dispose);
834 	else
835 		locks_free_lock(fl);
836 }
837 
838 /* Determine if lock sys_fl blocks lock caller_fl. Common functionality
839  * checks for shared/exclusive status of overlapping locks.
840  */
841 static bool locks_conflict(struct file_lock *caller_fl,
842 			   struct file_lock *sys_fl)
843 {
844 	if (sys_fl->fl_type == F_WRLCK)
845 		return true;
846 	if (caller_fl->fl_type == F_WRLCK)
847 		return true;
848 	return false;
849 }
850 
851 /* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
852  * checking before calling the locks_conflict().
853  */
854 static bool posix_locks_conflict(struct file_lock *caller_fl,
855 				 struct file_lock *sys_fl)
856 {
857 	/* POSIX locks owned by the same process do not conflict with
858 	 * each other.
859 	 */
860 	if (posix_same_owner(caller_fl, sys_fl))
861 		return false;
862 
863 	/* Check whether they overlap */
864 	if (!locks_overlap(caller_fl, sys_fl))
865 		return false;
866 
867 	return locks_conflict(caller_fl, sys_fl);
868 }
869 
870 /* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
871  * checking before calling the locks_conflict().
872  */
873 static bool flock_locks_conflict(struct file_lock *caller_fl,
874 				 struct file_lock *sys_fl)
875 {
876 	/* FLOCK locks referring to the same filp do not conflict with
877 	 * each other.
878 	 */
879 	if (caller_fl->fl_file == sys_fl->fl_file)
880 		return false;
881 
882 	return locks_conflict(caller_fl, sys_fl);
883 }
884 
885 void
886 posix_test_lock(struct file *filp, struct file_lock *fl)
887 {
888 	struct file_lock *cfl;
889 	struct file_lock_context *ctx;
890 	struct inode *inode = locks_inode(filp);
891 	void *owner;
892 	void (*func)(void);
893 
894 	ctx = smp_load_acquire(&inode->i_flctx);
895 	if (!ctx || list_empty_careful(&ctx->flc_posix)) {
896 		fl->fl_type = F_UNLCK;
897 		return;
898 	}
899 
900 retry:
901 	spin_lock(&ctx->flc_lock);
902 	list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
903 		if (!posix_locks_conflict(fl, cfl))
904 			continue;
905 		if (cfl->fl_lmops && cfl->fl_lmops->lm_lock_expirable
906 			&& (*cfl->fl_lmops->lm_lock_expirable)(cfl)) {
907 			owner = cfl->fl_lmops->lm_mod_owner;
908 			func = cfl->fl_lmops->lm_expire_lock;
909 			__module_get(owner);
910 			spin_unlock(&ctx->flc_lock);
911 			(*func)();
912 			module_put(owner);
913 			goto retry;
914 		}
915 		locks_copy_conflock(fl, cfl);
916 		goto out;
917 	}
918 	fl->fl_type = F_UNLCK;
919 out:
920 	spin_unlock(&ctx->flc_lock);
921 	return;
922 }
923 EXPORT_SYMBOL(posix_test_lock);
924 
925 /*
926  * Deadlock detection:
927  *
928  * We attempt to detect deadlocks that are due purely to posix file
929  * locks.
930  *
931  * We assume that a task can be waiting for at most one lock at a time.
932  * So for any acquired lock, the process holding that lock may be
933  * waiting on at most one other lock.  That lock in turns may be held by
934  * someone waiting for at most one other lock.  Given a requested lock
935  * caller_fl which is about to wait for a conflicting lock block_fl, we
936  * follow this chain of waiters to ensure we are not about to create a
937  * cycle.
938  *
939  * Since we do this before we ever put a process to sleep on a lock, we
940  * are ensured that there is never a cycle; that is what guarantees that
941  * the while() loop in posix_locks_deadlock() eventually completes.
942  *
943  * Note: the above assumption may not be true when handling lock
944  * requests from a broken NFS client. It may also fail in the presence
945  * of tasks (such as posix threads) sharing the same open file table.
946  * To handle those cases, we just bail out after a few iterations.
947  *
948  * For FL_OFDLCK locks, the owner is the filp, not the files_struct.
949  * Because the owner is not even nominally tied to a thread of
950  * execution, the deadlock detection below can't reasonably work well. Just
951  * skip it for those.
952  *
953  * In principle, we could do a more limited deadlock detection on FL_OFDLCK
954  * locks that just checks for the case where two tasks are attempting to
955  * upgrade from read to write locks on the same inode.
956  */
957 
958 #define MAX_DEADLK_ITERATIONS 10
959 
960 /* Find a lock that the owner of the given block_fl is blocking on. */
961 static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
962 {
963 	struct file_lock *fl;
964 
965 	hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
966 		if (posix_same_owner(fl, block_fl)) {
967 			while (fl->fl_blocker)
968 				fl = fl->fl_blocker;
969 			return fl;
970 		}
971 	}
972 	return NULL;
973 }
974 
975 /* Must be called with the blocked_lock_lock held! */
976 static int posix_locks_deadlock(struct file_lock *caller_fl,
977 				struct file_lock *block_fl)
978 {
979 	int i = 0;
980 
981 	lockdep_assert_held(&blocked_lock_lock);
982 
983 	/*
984 	 * This deadlock detector can't reasonably detect deadlocks with
985 	 * FL_OFDLCK locks, since they aren't owned by a process, per-se.
986 	 */
987 	if (IS_OFDLCK(caller_fl))
988 		return 0;
989 
990 	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
991 		if (i++ > MAX_DEADLK_ITERATIONS)
992 			return 0;
993 		if (posix_same_owner(caller_fl, block_fl))
994 			return 1;
995 	}
996 	return 0;
997 }
998 
999 /* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
1000  * after any leases, but before any posix locks.
1001  *
1002  * Note that if called with an FL_EXISTS argument, the caller may determine
1003  * whether or not a lock was successfully freed by testing the return
1004  * value for -ENOENT.
1005  */
1006 static int flock_lock_inode(struct inode *inode, struct file_lock *request)
1007 {
1008 	struct file_lock *new_fl = NULL;
1009 	struct file_lock *fl;
1010 	struct file_lock_context *ctx;
1011 	int error = 0;
1012 	bool found = false;
1013 	LIST_HEAD(dispose);
1014 
1015 	ctx = locks_get_lock_context(inode, request->fl_type);
1016 	if (!ctx) {
1017 		if (request->fl_type != F_UNLCK)
1018 			return -ENOMEM;
1019 		return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0;
1020 	}
1021 
1022 	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
1023 		new_fl = locks_alloc_lock();
1024 		if (!new_fl)
1025 			return -ENOMEM;
1026 	}
1027 
1028 	percpu_down_read(&file_rwsem);
1029 	spin_lock(&ctx->flc_lock);
1030 	if (request->fl_flags & FL_ACCESS)
1031 		goto find_conflict;
1032 
1033 	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
1034 		if (request->fl_file != fl->fl_file)
1035 			continue;
1036 		if (request->fl_type == fl->fl_type)
1037 			goto out;
1038 		found = true;
1039 		locks_delete_lock_ctx(fl, &dispose);
1040 		break;
1041 	}
1042 
1043 	if (request->fl_type == F_UNLCK) {
1044 		if ((request->fl_flags & FL_EXISTS) && !found)
1045 			error = -ENOENT;
1046 		goto out;
1047 	}
1048 
1049 find_conflict:
1050 	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
1051 		if (!flock_locks_conflict(request, fl))
1052 			continue;
1053 		error = -EAGAIN;
1054 		if (!(request->fl_flags & FL_SLEEP))
1055 			goto out;
1056 		error = FILE_LOCK_DEFERRED;
1057 		locks_insert_block(fl, request, flock_locks_conflict);
1058 		goto out;
1059 	}
1060 	if (request->fl_flags & FL_ACCESS)
1061 		goto out;
1062 	locks_copy_lock(new_fl, request);
1063 	locks_move_blocks(new_fl, request);
1064 	locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
1065 	new_fl = NULL;
1066 	error = 0;
1067 
1068 out:
1069 	spin_unlock(&ctx->flc_lock);
1070 	percpu_up_read(&file_rwsem);
1071 	if (new_fl)
1072 		locks_free_lock(new_fl);
1073 	locks_dispose_list(&dispose);
1074 	trace_flock_lock_inode(inode, request, error);
1075 	return error;
1076 }
1077 
1078 static int posix_lock_inode(struct inode *inode, struct file_lock *request,
1079 			    struct file_lock *conflock)
1080 {
1081 	struct file_lock *fl, *tmp;
1082 	struct file_lock *new_fl = NULL;
1083 	struct file_lock *new_fl2 = NULL;
1084 	struct file_lock *left = NULL;
1085 	struct file_lock *right = NULL;
1086 	struct file_lock_context *ctx;
1087 	int error;
1088 	bool added = false;
1089 	LIST_HEAD(dispose);
1090 	void *owner;
1091 	void (*func)(void);
1092 
1093 	ctx = locks_get_lock_context(inode, request->fl_type);
1094 	if (!ctx)
1095 		return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM;
1096 
1097 	/*
1098 	 * We may need two file_lock structures for this operation,
1099 	 * so we get them in advance to avoid races.
1100 	 *
1101 	 * In some cases we can be sure, that no new locks will be needed
1102 	 */
1103 	if (!(request->fl_flags & FL_ACCESS) &&
1104 	    (request->fl_type != F_UNLCK ||
1105 	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
1106 		new_fl = locks_alloc_lock();
1107 		new_fl2 = locks_alloc_lock();
1108 	}
1109 
1110 retry:
1111 	percpu_down_read(&file_rwsem);
1112 	spin_lock(&ctx->flc_lock);
1113 	/*
1114 	 * New lock request. Walk all POSIX locks and look for conflicts. If
1115 	 * there are any, either return error or put the request on the
1116 	 * blocker's list of waiters and the global blocked_hash.
1117 	 */
1118 	if (request->fl_type != F_UNLCK) {
1119 		list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1120 			if (!posix_locks_conflict(request, fl))
1121 				continue;
1122 			if (fl->fl_lmops && fl->fl_lmops->lm_lock_expirable
1123 				&& (*fl->fl_lmops->lm_lock_expirable)(fl)) {
1124 				owner = fl->fl_lmops->lm_mod_owner;
1125 				func = fl->fl_lmops->lm_expire_lock;
1126 				__module_get(owner);
1127 				spin_unlock(&ctx->flc_lock);
1128 				percpu_up_read(&file_rwsem);
1129 				(*func)();
1130 				module_put(owner);
1131 				goto retry;
1132 			}
1133 			if (conflock)
1134 				locks_copy_conflock(conflock, fl);
1135 			error = -EAGAIN;
1136 			if (!(request->fl_flags & FL_SLEEP))
1137 				goto out;
1138 			/*
1139 			 * Deadlock detection and insertion into the blocked
1140 			 * locks list must be done while holding the same lock!
1141 			 */
1142 			error = -EDEADLK;
1143 			spin_lock(&blocked_lock_lock);
1144 			/*
1145 			 * Ensure that we don't find any locks blocked on this
1146 			 * request during deadlock detection.
1147 			 */
1148 			__locks_wake_up_blocks(request);
1149 			if (likely(!posix_locks_deadlock(request, fl))) {
1150 				error = FILE_LOCK_DEFERRED;
1151 				__locks_insert_block(fl, request,
1152 						     posix_locks_conflict);
1153 			}
1154 			spin_unlock(&blocked_lock_lock);
1155 			goto out;
1156 		}
1157 	}
1158 
1159 	/* If we're just looking for a conflict, we're done. */
1160 	error = 0;
1161 	if (request->fl_flags & FL_ACCESS)
1162 		goto out;
1163 
1164 	/* Find the first old lock with the same owner as the new lock */
1165 	list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1166 		if (posix_same_owner(request, fl))
1167 			break;
1168 	}
1169 
1170 	/* Process locks with this owner. */
1171 	list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
1172 		if (!posix_same_owner(request, fl))
1173 			break;
1174 
1175 		/* Detect adjacent or overlapping regions (if same lock type) */
1176 		if (request->fl_type == fl->fl_type) {
1177 			/* In all comparisons of start vs end, use
1178 			 * "start - 1" rather than "end + 1". If end
1179 			 * is OFFSET_MAX, end + 1 will become negative.
1180 			 */
1181 			if (fl->fl_end < request->fl_start - 1)
1182 				continue;
1183 			/* If the next lock in the list has entirely bigger
1184 			 * addresses than the new one, insert the lock here.
1185 			 */
1186 			if (fl->fl_start - 1 > request->fl_end)
1187 				break;
1188 
1189 			/* If we come here, the new and old lock are of the
1190 			 * same type and adjacent or overlapping. Make one
1191 			 * lock yielding from the lower start address of both
1192 			 * locks to the higher end address.
1193 			 */
1194 			if (fl->fl_start > request->fl_start)
1195 				fl->fl_start = request->fl_start;
1196 			else
1197 				request->fl_start = fl->fl_start;
1198 			if (fl->fl_end < request->fl_end)
1199 				fl->fl_end = request->fl_end;
1200 			else
1201 				request->fl_end = fl->fl_end;
1202 			if (added) {
1203 				locks_delete_lock_ctx(fl, &dispose);
1204 				continue;
1205 			}
1206 			request = fl;
1207 			added = true;
1208 		} else {
1209 			/* Processing for different lock types is a bit
1210 			 * more complex.
1211 			 */
1212 			if (fl->fl_end < request->fl_start)
1213 				continue;
1214 			if (fl->fl_start > request->fl_end)
1215 				break;
1216 			if (request->fl_type == F_UNLCK)
1217 				added = true;
1218 			if (fl->fl_start < request->fl_start)
1219 				left = fl;
1220 			/* If the next lock in the list has a higher end
1221 			 * address than the new one, insert the new one here.
1222 			 */
1223 			if (fl->fl_end > request->fl_end) {
1224 				right = fl;
1225 				break;
1226 			}
1227 			if (fl->fl_start >= request->fl_start) {
1228 				/* The new lock completely replaces an old
1229 				 * one (This may happen several times).
1230 				 */
1231 				if (added) {
1232 					locks_delete_lock_ctx(fl, &dispose);
1233 					continue;
1234 				}
1235 				/*
1236 				 * Replace the old lock with new_fl, and
1237 				 * remove the old one. It's safe to do the
1238 				 * insert here since we know that we won't be
1239 				 * using new_fl later, and that the lock is
1240 				 * just replacing an existing lock.
1241 				 */
1242 				error = -ENOLCK;
1243 				if (!new_fl)
1244 					goto out;
1245 				locks_copy_lock(new_fl, request);
1246 				locks_move_blocks(new_fl, request);
1247 				request = new_fl;
1248 				new_fl = NULL;
1249 				locks_insert_lock_ctx(request, &fl->fl_list);
1250 				locks_delete_lock_ctx(fl, &dispose);
1251 				added = true;
1252 			}
1253 		}
1254 	}
1255 
1256 	/*
1257 	 * The above code only modifies existing locks in case of merging or
1258 	 * replacing. If new lock(s) need to be inserted all modifications are
1259 	 * done below this, so it's safe yet to bail out.
1260 	 */
1261 	error = -ENOLCK; /* "no luck" */
1262 	if (right && left == right && !new_fl2)
1263 		goto out;
1264 
1265 	error = 0;
1266 	if (!added) {
1267 		if (request->fl_type == F_UNLCK) {
1268 			if (request->fl_flags & FL_EXISTS)
1269 				error = -ENOENT;
1270 			goto out;
1271 		}
1272 
1273 		if (!new_fl) {
1274 			error = -ENOLCK;
1275 			goto out;
1276 		}
1277 		locks_copy_lock(new_fl, request);
1278 		locks_move_blocks(new_fl, request);
1279 		locks_insert_lock_ctx(new_fl, &fl->fl_list);
1280 		fl = new_fl;
1281 		new_fl = NULL;
1282 	}
1283 	if (right) {
1284 		if (left == right) {
1285 			/* The new lock breaks the old one in two pieces,
1286 			 * so we have to use the second new lock.
1287 			 */
1288 			left = new_fl2;
1289 			new_fl2 = NULL;
1290 			locks_copy_lock(left, right);
1291 			locks_insert_lock_ctx(left, &fl->fl_list);
1292 		}
1293 		right->fl_start = request->fl_end + 1;
1294 		locks_wake_up_blocks(right);
1295 	}
1296 	if (left) {
1297 		left->fl_end = request->fl_start - 1;
1298 		locks_wake_up_blocks(left);
1299 	}
1300  out:
1301 	spin_unlock(&ctx->flc_lock);
1302 	percpu_up_read(&file_rwsem);
1303 	/*
1304 	 * Free any unused locks.
1305 	 */
1306 	if (new_fl)
1307 		locks_free_lock(new_fl);
1308 	if (new_fl2)
1309 		locks_free_lock(new_fl2);
1310 	locks_dispose_list(&dispose);
1311 	trace_posix_lock_inode(inode, request, error);
1312 
1313 	return error;
1314 }
1315 
1316 /**
1317  * posix_lock_file - Apply a POSIX-style lock to a file
1318  * @filp: The file to apply the lock to
1319  * @fl: The lock to be applied
1320  * @conflock: Place to return a copy of the conflicting lock, if found.
1321  *
1322  * Add a POSIX style lock to a file.
1323  * We merge adjacent & overlapping locks whenever possible.
1324  * POSIX locks are sorted by owner task, then by starting address
1325  *
1326  * Note that if called with an FL_EXISTS argument, the caller may determine
1327  * whether or not a lock was successfully freed by testing the return
1328  * value for -ENOENT.
1329  */
1330 int posix_lock_file(struct file *filp, struct file_lock *fl,
1331 			struct file_lock *conflock)
1332 {
1333 	return posix_lock_inode(locks_inode(filp), fl, conflock);
1334 }
1335 EXPORT_SYMBOL(posix_lock_file);
1336 
1337 /**
1338  * posix_lock_inode_wait - Apply a POSIX-style lock to a file
1339  * @inode: inode of file to which lock request should be applied
1340  * @fl: The lock to be applied
1341  *
1342  * Apply a POSIX style lock request to an inode.
1343  */
1344 static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1345 {
1346 	int error;
1347 	might_sleep ();
1348 	for (;;) {
1349 		error = posix_lock_inode(inode, fl, NULL);
1350 		if (error != FILE_LOCK_DEFERRED)
1351 			break;
1352 		error = wait_event_interruptible(fl->fl_wait,
1353 					list_empty(&fl->fl_blocked_member));
1354 		if (error)
1355 			break;
1356 	}
1357 	locks_delete_block(fl);
1358 	return error;
1359 }
1360 
1361 static void lease_clear_pending(struct file_lock *fl, int arg)
1362 {
1363 	switch (arg) {
1364 	case F_UNLCK:
1365 		fl->fl_flags &= ~FL_UNLOCK_PENDING;
1366 		fallthrough;
1367 	case F_RDLCK:
1368 		fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
1369 	}
1370 }
1371 
1372 /* We already had a lease on this file; just change its type */
1373 int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
1374 {
1375 	int error = assign_type(fl, arg);
1376 
1377 	if (error)
1378 		return error;
1379 	lease_clear_pending(fl, arg);
1380 	locks_wake_up_blocks(fl);
1381 	if (arg == F_UNLCK) {
1382 		struct file *filp = fl->fl_file;
1383 
1384 		f_delown(filp);
1385 		filp->f_owner.signum = 0;
1386 		fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
1387 		if (fl->fl_fasync != NULL) {
1388 			printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
1389 			fl->fl_fasync = NULL;
1390 		}
1391 		locks_delete_lock_ctx(fl, dispose);
1392 	}
1393 	return 0;
1394 }
1395 EXPORT_SYMBOL(lease_modify);
1396 
1397 static bool past_time(unsigned long then)
1398 {
1399 	if (!then)
1400 		/* 0 is a special value meaning "this never expires": */
1401 		return false;
1402 	return time_after(jiffies, then);
1403 }
1404 
1405 static void time_out_leases(struct inode *inode, struct list_head *dispose)
1406 {
1407 	struct file_lock_context *ctx = inode->i_flctx;
1408 	struct file_lock *fl, *tmp;
1409 
1410 	lockdep_assert_held(&ctx->flc_lock);
1411 
1412 	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1413 		trace_time_out_leases(inode, fl);
1414 		if (past_time(fl->fl_downgrade_time))
1415 			lease_modify(fl, F_RDLCK, dispose);
1416 		if (past_time(fl->fl_break_time))
1417 			lease_modify(fl, F_UNLCK, dispose);
1418 	}
1419 }
1420 
1421 static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
1422 {
1423 	bool rc;
1424 
1425 	if (lease->fl_lmops->lm_breaker_owns_lease
1426 			&& lease->fl_lmops->lm_breaker_owns_lease(lease))
1427 		return false;
1428 	if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT)) {
1429 		rc = false;
1430 		goto trace;
1431 	}
1432 	if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE)) {
1433 		rc = false;
1434 		goto trace;
1435 	}
1436 
1437 	rc = locks_conflict(breaker, lease);
1438 trace:
1439 	trace_leases_conflict(rc, lease, breaker);
1440 	return rc;
1441 }
1442 
1443 static bool
1444 any_leases_conflict(struct inode *inode, struct file_lock *breaker)
1445 {
1446 	struct file_lock_context *ctx = inode->i_flctx;
1447 	struct file_lock *fl;
1448 
1449 	lockdep_assert_held(&ctx->flc_lock);
1450 
1451 	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1452 		if (leases_conflict(fl, breaker))
1453 			return true;
1454 	}
1455 	return false;
1456 }
1457 
1458 /**
1459  *	__break_lease	-	revoke all outstanding leases on file
1460  *	@inode: the inode of the file to return
1461  *	@mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
1462  *	    break all leases
1463  *	@type: FL_LEASE: break leases and delegations; FL_DELEG: break
1464  *	    only delegations
1465  *
1466  *	break_lease (inlined for speed) has checked there already is at least
1467  *	some kind of lock (maybe a lease) on this file.  Leases are broken on
1468  *	a call to open() or truncate().  This function can sleep unless you
1469  *	specified %O_NONBLOCK to your open().
1470  */
1471 int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
1472 {
1473 	int error = 0;
1474 	struct file_lock_context *ctx;
1475 	struct file_lock *new_fl, *fl, *tmp;
1476 	unsigned long break_time;
1477 	int want_write = (mode & O_ACCMODE) != O_RDONLY;
1478 	LIST_HEAD(dispose);
1479 
1480 	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1481 	if (IS_ERR(new_fl))
1482 		return PTR_ERR(new_fl);
1483 	new_fl->fl_flags = type;
1484 
1485 	/* typically we will check that ctx is non-NULL before calling */
1486 	ctx = smp_load_acquire(&inode->i_flctx);
1487 	if (!ctx) {
1488 		WARN_ON_ONCE(1);
1489 		goto free_lock;
1490 	}
1491 
1492 	percpu_down_read(&file_rwsem);
1493 	spin_lock(&ctx->flc_lock);
1494 
1495 	time_out_leases(inode, &dispose);
1496 
1497 	if (!any_leases_conflict(inode, new_fl))
1498 		goto out;
1499 
1500 	break_time = 0;
1501 	if (lease_break_time > 0) {
1502 		break_time = jiffies + lease_break_time * HZ;
1503 		if (break_time == 0)
1504 			break_time++;	/* so that 0 means no break time */
1505 	}
1506 
1507 	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1508 		if (!leases_conflict(fl, new_fl))
1509 			continue;
1510 		if (want_write) {
1511 			if (fl->fl_flags & FL_UNLOCK_PENDING)
1512 				continue;
1513 			fl->fl_flags |= FL_UNLOCK_PENDING;
1514 			fl->fl_break_time = break_time;
1515 		} else {
1516 			if (lease_breaking(fl))
1517 				continue;
1518 			fl->fl_flags |= FL_DOWNGRADE_PENDING;
1519 			fl->fl_downgrade_time = break_time;
1520 		}
1521 		if (fl->fl_lmops->lm_break(fl))
1522 			locks_delete_lock_ctx(fl, &dispose);
1523 	}
1524 
1525 	if (list_empty(&ctx->flc_lease))
1526 		goto out;
1527 
1528 	if (mode & O_NONBLOCK) {
1529 		trace_break_lease_noblock(inode, new_fl);
1530 		error = -EWOULDBLOCK;
1531 		goto out;
1532 	}
1533 
1534 restart:
1535 	fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
1536 	break_time = fl->fl_break_time;
1537 	if (break_time != 0)
1538 		break_time -= jiffies;
1539 	if (break_time == 0)
1540 		break_time++;
1541 	locks_insert_block(fl, new_fl, leases_conflict);
1542 	trace_break_lease_block(inode, new_fl);
1543 	spin_unlock(&ctx->flc_lock);
1544 	percpu_up_read(&file_rwsem);
1545 
1546 	locks_dispose_list(&dispose);
1547 	error = wait_event_interruptible_timeout(new_fl->fl_wait,
1548 					list_empty(&new_fl->fl_blocked_member),
1549 					break_time);
1550 
1551 	percpu_down_read(&file_rwsem);
1552 	spin_lock(&ctx->flc_lock);
1553 	trace_break_lease_unblock(inode, new_fl);
1554 	locks_delete_block(new_fl);
1555 	if (error >= 0) {
1556 		/*
1557 		 * Wait for the next conflicting lease that has not been
1558 		 * broken yet
1559 		 */
1560 		if (error == 0)
1561 			time_out_leases(inode, &dispose);
1562 		if (any_leases_conflict(inode, new_fl))
1563 			goto restart;
1564 		error = 0;
1565 	}
1566 out:
1567 	spin_unlock(&ctx->flc_lock);
1568 	percpu_up_read(&file_rwsem);
1569 	locks_dispose_list(&dispose);
1570 free_lock:
1571 	locks_free_lock(new_fl);
1572 	return error;
1573 }
1574 EXPORT_SYMBOL(__break_lease);
1575 
1576 /**
1577  *	lease_get_mtime - update modified time of an inode with exclusive lease
1578  *	@inode: the inode
1579  *      @time:  pointer to a timespec which contains the last modified time
1580  *
1581  * This is to force NFS clients to flush their caches for files with
1582  * exclusive leases.  The justification is that if someone has an
1583  * exclusive lease, then they could be modifying it.
1584  */
1585 void lease_get_mtime(struct inode *inode, struct timespec64 *time)
1586 {
1587 	bool has_lease = false;
1588 	struct file_lock_context *ctx;
1589 	struct file_lock *fl;
1590 
1591 	ctx = smp_load_acquire(&inode->i_flctx);
1592 	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
1593 		spin_lock(&ctx->flc_lock);
1594 		fl = list_first_entry_or_null(&ctx->flc_lease,
1595 					      struct file_lock, fl_list);
1596 		if (fl && (fl->fl_type == F_WRLCK))
1597 			has_lease = true;
1598 		spin_unlock(&ctx->flc_lock);
1599 	}
1600 
1601 	if (has_lease)
1602 		*time = current_time(inode);
1603 }
1604 EXPORT_SYMBOL(lease_get_mtime);
1605 
1606 /**
1607  *	fcntl_getlease - Enquire what lease is currently active
1608  *	@filp: the file
1609  *
1610  *	The value returned by this function will be one of
1611  *	(if no lease break is pending):
1612  *
1613  *	%F_RDLCK to indicate a shared lease is held.
1614  *
1615  *	%F_WRLCK to indicate an exclusive lease is held.
1616  *
1617  *	%F_UNLCK to indicate no lease is held.
1618  *
1619  *	(if a lease break is pending):
1620  *
1621  *	%F_RDLCK to indicate an exclusive lease needs to be
1622  *		changed to a shared lease (or removed).
1623  *
1624  *	%F_UNLCK to indicate the lease needs to be removed.
1625  *
1626  *	XXX: sfr & willy disagree over whether F_INPROGRESS
1627  *	should be returned to userspace.
1628  */
1629 int fcntl_getlease(struct file *filp)
1630 {
1631 	struct file_lock *fl;
1632 	struct inode *inode = locks_inode(filp);
1633 	struct file_lock_context *ctx;
1634 	int type = F_UNLCK;
1635 	LIST_HEAD(dispose);
1636 
1637 	ctx = smp_load_acquire(&inode->i_flctx);
1638 	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
1639 		percpu_down_read(&file_rwsem);
1640 		spin_lock(&ctx->flc_lock);
1641 		time_out_leases(inode, &dispose);
1642 		list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1643 			if (fl->fl_file != filp)
1644 				continue;
1645 			type = target_leasetype(fl);
1646 			break;
1647 		}
1648 		spin_unlock(&ctx->flc_lock);
1649 		percpu_up_read(&file_rwsem);
1650 
1651 		locks_dispose_list(&dispose);
1652 	}
1653 	return type;
1654 }
1655 
1656 /**
1657  * check_conflicting_open - see if the given file points to an inode that has
1658  *			    an existing open that would conflict with the
1659  *			    desired lease.
1660  * @filp:	file to check
1661  * @arg:	type of lease that we're trying to acquire
1662  * @flags:	current lock flags
1663  *
1664  * Check to see if there's an existing open fd on this file that would
1665  * conflict with the lease we're trying to set.
1666  */
1667 static int
1668 check_conflicting_open(struct file *filp, const long arg, int flags)
1669 {
1670 	struct inode *inode = locks_inode(filp);
1671 	int self_wcount = 0, self_rcount = 0;
1672 
1673 	if (flags & FL_LAYOUT)
1674 		return 0;
1675 	if (flags & FL_DELEG)
1676 		/* We leave these checks to the caller */
1677 		return 0;
1678 
1679 	if (arg == F_RDLCK)
1680 		return inode_is_open_for_write(inode) ? -EAGAIN : 0;
1681 	else if (arg != F_WRLCK)
1682 		return 0;
1683 
1684 	/*
1685 	 * Make sure that only read/write count is from lease requestor.
1686 	 * Note that this will result in denying write leases when i_writecount
1687 	 * is negative, which is what we want.  (We shouldn't grant write leases
1688 	 * on files open for execution.)
1689 	 */
1690 	if (filp->f_mode & FMODE_WRITE)
1691 		self_wcount = 1;
1692 	else if (filp->f_mode & FMODE_READ)
1693 		self_rcount = 1;
1694 
1695 	if (atomic_read(&inode->i_writecount) != self_wcount ||
1696 	    atomic_read(&inode->i_readcount) != self_rcount)
1697 		return -EAGAIN;
1698 
1699 	return 0;
1700 }
1701 
1702 static int
1703 generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
1704 {
1705 	struct file_lock *fl, *my_fl = NULL, *lease;
1706 	struct inode *inode = locks_inode(filp);
1707 	struct file_lock_context *ctx;
1708 	bool is_deleg = (*flp)->fl_flags & FL_DELEG;
1709 	int error;
1710 	LIST_HEAD(dispose);
1711 
1712 	lease = *flp;
1713 	trace_generic_add_lease(inode, lease);
1714 
1715 	/* Note that arg is never F_UNLCK here */
1716 	ctx = locks_get_lock_context(inode, arg);
1717 	if (!ctx)
1718 		return -ENOMEM;
1719 
1720 	/*
1721 	 * In the delegation case we need mutual exclusion with
1722 	 * a number of operations that take the i_mutex.  We trylock
1723 	 * because delegations are an optional optimization, and if
1724 	 * there's some chance of a conflict--we'd rather not
1725 	 * bother, maybe that's a sign this just isn't a good file to
1726 	 * hand out a delegation on.
1727 	 */
1728 	if (is_deleg && !inode_trylock(inode))
1729 		return -EAGAIN;
1730 
1731 	if (is_deleg && arg == F_WRLCK) {
1732 		/* Write delegations are not currently supported: */
1733 		inode_unlock(inode);
1734 		WARN_ON_ONCE(1);
1735 		return -EINVAL;
1736 	}
1737 
1738 	percpu_down_read(&file_rwsem);
1739 	spin_lock(&ctx->flc_lock);
1740 	time_out_leases(inode, &dispose);
1741 	error = check_conflicting_open(filp, arg, lease->fl_flags);
1742 	if (error)
1743 		goto out;
1744 
1745 	/*
1746 	 * At this point, we know that if there is an exclusive
1747 	 * lease on this file, then we hold it on this filp
1748 	 * (otherwise our open of this file would have blocked).
1749 	 * And if we are trying to acquire an exclusive lease,
1750 	 * then the file is not open by anyone (including us)
1751 	 * except for this filp.
1752 	 */
1753 	error = -EAGAIN;
1754 	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1755 		if (fl->fl_file == filp &&
1756 		    fl->fl_owner == lease->fl_owner) {
1757 			my_fl = fl;
1758 			continue;
1759 		}
1760 
1761 		/*
1762 		 * No exclusive leases if someone else has a lease on
1763 		 * this file:
1764 		 */
1765 		if (arg == F_WRLCK)
1766 			goto out;
1767 		/*
1768 		 * Modifying our existing lease is OK, but no getting a
1769 		 * new lease if someone else is opening for write:
1770 		 */
1771 		if (fl->fl_flags & FL_UNLOCK_PENDING)
1772 			goto out;
1773 	}
1774 
1775 	if (my_fl != NULL) {
1776 		lease = my_fl;
1777 		error = lease->fl_lmops->lm_change(lease, arg, &dispose);
1778 		if (error)
1779 			goto out;
1780 		goto out_setup;
1781 	}
1782 
1783 	error = -EINVAL;
1784 	if (!leases_enable)
1785 		goto out;
1786 
1787 	locks_insert_lock_ctx(lease, &ctx->flc_lease);
1788 	/*
1789 	 * The check in break_lease() is lockless. It's possible for another
1790 	 * open to race in after we did the earlier check for a conflicting
1791 	 * open but before the lease was inserted. Check again for a
1792 	 * conflicting open and cancel the lease if there is one.
1793 	 *
1794 	 * We also add a barrier here to ensure that the insertion of the lock
1795 	 * precedes these checks.
1796 	 */
1797 	smp_mb();
1798 	error = check_conflicting_open(filp, arg, lease->fl_flags);
1799 	if (error) {
1800 		locks_unlink_lock_ctx(lease);
1801 		goto out;
1802 	}
1803 
1804 out_setup:
1805 	if (lease->fl_lmops->lm_setup)
1806 		lease->fl_lmops->lm_setup(lease, priv);
1807 out:
1808 	spin_unlock(&ctx->flc_lock);
1809 	percpu_up_read(&file_rwsem);
1810 	locks_dispose_list(&dispose);
1811 	if (is_deleg)
1812 		inode_unlock(inode);
1813 	if (!error && !my_fl)
1814 		*flp = NULL;
1815 	return error;
1816 }
1817 
1818 static int generic_delete_lease(struct file *filp, void *owner)
1819 {
1820 	int error = -EAGAIN;
1821 	struct file_lock *fl, *victim = NULL;
1822 	struct inode *inode = locks_inode(filp);
1823 	struct file_lock_context *ctx;
1824 	LIST_HEAD(dispose);
1825 
1826 	ctx = smp_load_acquire(&inode->i_flctx);
1827 	if (!ctx) {
1828 		trace_generic_delete_lease(inode, NULL);
1829 		return error;
1830 	}
1831 
1832 	percpu_down_read(&file_rwsem);
1833 	spin_lock(&ctx->flc_lock);
1834 	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1835 		if (fl->fl_file == filp &&
1836 		    fl->fl_owner == owner) {
1837 			victim = fl;
1838 			break;
1839 		}
1840 	}
1841 	trace_generic_delete_lease(inode, victim);
1842 	if (victim)
1843 		error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
1844 	spin_unlock(&ctx->flc_lock);
1845 	percpu_up_read(&file_rwsem);
1846 	locks_dispose_list(&dispose);
1847 	return error;
1848 }
1849 
1850 /**
1851  *	generic_setlease	-	sets a lease on an open file
1852  *	@filp:	file pointer
1853  *	@arg:	type of lease to obtain
1854  *	@flp:	input - file_lock to use, output - file_lock inserted
1855  *	@priv:	private data for lm_setup (may be NULL if lm_setup
1856  *		doesn't require it)
1857  *
1858  *	The (input) flp->fl_lmops->lm_break function is required
1859  *	by break_lease().
1860  */
1861 int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
1862 			void **priv)
1863 {
1864 	struct inode *inode = locks_inode(filp);
1865 	int error;
1866 
1867 	if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
1868 		return -EACCES;
1869 	if (!S_ISREG(inode->i_mode))
1870 		return -EINVAL;
1871 	error = security_file_lock(filp, arg);
1872 	if (error)
1873 		return error;
1874 
1875 	switch (arg) {
1876 	case F_UNLCK:
1877 		return generic_delete_lease(filp, *priv);
1878 	case F_RDLCK:
1879 	case F_WRLCK:
1880 		if (!(*flp)->fl_lmops->lm_break) {
1881 			WARN_ON_ONCE(1);
1882 			return -ENOLCK;
1883 		}
1884 
1885 		return generic_add_lease(filp, arg, flp, priv);
1886 	default:
1887 		return -EINVAL;
1888 	}
1889 }
1890 EXPORT_SYMBOL(generic_setlease);
1891 
1892 #if IS_ENABLED(CONFIG_SRCU)
1893 /*
1894  * Kernel subsystems can register to be notified on any attempt to set
1895  * a new lease with the lease_notifier_chain. This is used by (e.g.) nfsd
1896  * to close files that it may have cached when there is an attempt to set a
1897  * conflicting lease.
1898  */
1899 static struct srcu_notifier_head lease_notifier_chain;
1900 
1901 static inline void
1902 lease_notifier_chain_init(void)
1903 {
1904 	srcu_init_notifier_head(&lease_notifier_chain);
1905 }
1906 
1907 static inline void
1908 setlease_notifier(long arg, struct file_lock *lease)
1909 {
1910 	if (arg != F_UNLCK)
1911 		srcu_notifier_call_chain(&lease_notifier_chain, arg, lease);
1912 }
1913 
1914 int lease_register_notifier(struct notifier_block *nb)
1915 {
1916 	return srcu_notifier_chain_register(&lease_notifier_chain, nb);
1917 }
1918 EXPORT_SYMBOL_GPL(lease_register_notifier);
1919 
1920 void lease_unregister_notifier(struct notifier_block *nb)
1921 {
1922 	srcu_notifier_chain_unregister(&lease_notifier_chain, nb);
1923 }
1924 EXPORT_SYMBOL_GPL(lease_unregister_notifier);
1925 
1926 #else /* !IS_ENABLED(CONFIG_SRCU) */
1927 static inline void
1928 lease_notifier_chain_init(void)
1929 {
1930 }
1931 
1932 static inline void
1933 setlease_notifier(long arg, struct file_lock *lease)
1934 {
1935 }
1936 
1937 int lease_register_notifier(struct notifier_block *nb)
1938 {
1939 	return 0;
1940 }
1941 EXPORT_SYMBOL_GPL(lease_register_notifier);
1942 
1943 void lease_unregister_notifier(struct notifier_block *nb)
1944 {
1945 }
1946 EXPORT_SYMBOL_GPL(lease_unregister_notifier);
1947 
1948 #endif /* IS_ENABLED(CONFIG_SRCU) */
1949 
1950 /**
1951  * vfs_setlease        -       sets a lease on an open file
1952  * @filp:	file pointer
1953  * @arg:	type of lease to obtain
1954  * @lease:	file_lock to use when adding a lease
1955  * @priv:	private info for lm_setup when adding a lease (may be
1956  *		NULL if lm_setup doesn't require it)
1957  *
1958  * Call this to establish a lease on the file. The "lease" argument is not
1959  * used for F_UNLCK requests and may be NULL. For commands that set or alter
1960  * an existing lease, the ``(*lease)->fl_lmops->lm_break`` operation must be
1961  * set; if not, this function will return -ENOLCK (and generate a scary-looking
1962  * stack trace).
1963  *
1964  * The "priv" pointer is passed directly to the lm_setup function as-is. It
1965  * may be NULL if the lm_setup operation doesn't require it.
1966  */
1967 int
1968 vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
1969 {
1970 	if (lease)
1971 		setlease_notifier(arg, *lease);
1972 	if (filp->f_op->setlease)
1973 		return filp->f_op->setlease(filp, arg, lease, priv);
1974 	else
1975 		return generic_setlease(filp, arg, lease, priv);
1976 }
1977 EXPORT_SYMBOL_GPL(vfs_setlease);
1978 
1979 static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
1980 {
1981 	struct file_lock *fl;
1982 	struct fasync_struct *new;
1983 	int error;
1984 
1985 	fl = lease_alloc(filp, arg);
1986 	if (IS_ERR(fl))
1987 		return PTR_ERR(fl);
1988 
1989 	new = fasync_alloc();
1990 	if (!new) {
1991 		locks_free_lock(fl);
1992 		return -ENOMEM;
1993 	}
1994 	new->fa_fd = fd;
1995 
1996 	error = vfs_setlease(filp, arg, &fl, (void **)&new);
1997 	if (fl)
1998 		locks_free_lock(fl);
1999 	if (new)
2000 		fasync_free(new);
2001 	return error;
2002 }
2003 
2004 /**
2005  *	fcntl_setlease	-	sets a lease on an open file
2006  *	@fd: open file descriptor
2007  *	@filp: file pointer
2008  *	@arg: type of lease to obtain
2009  *
2010  *	Call this fcntl to establish a lease on the file.
2011  *	Note that you also need to call %F_SETSIG to
2012  *	receive a signal when the lease is broken.
2013  */
2014 int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
2015 {
2016 	if (arg == F_UNLCK)
2017 		return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
2018 	return do_fcntl_add_lease(fd, filp, arg);
2019 }
2020 
2021 /**
2022  * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
2023  * @inode: inode of the file to apply to
2024  * @fl: The lock to be applied
2025  *
2026  * Apply a FLOCK style lock request to an inode.
2027  */
2028 static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
2029 {
2030 	int error;
2031 	might_sleep();
2032 	for (;;) {
2033 		error = flock_lock_inode(inode, fl);
2034 		if (error != FILE_LOCK_DEFERRED)
2035 			break;
2036 		error = wait_event_interruptible(fl->fl_wait,
2037 				list_empty(&fl->fl_blocked_member));
2038 		if (error)
2039 			break;
2040 	}
2041 	locks_delete_block(fl);
2042 	return error;
2043 }
2044 
2045 /**
2046  * locks_lock_inode_wait - Apply a lock to an inode
2047  * @inode: inode of the file to apply to
2048  * @fl: The lock to be applied
2049  *
2050  * Apply a POSIX or FLOCK style lock request to an inode.
2051  */
2052 int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
2053 {
2054 	int res = 0;
2055 	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
2056 		case FL_POSIX:
2057 			res = posix_lock_inode_wait(inode, fl);
2058 			break;
2059 		case FL_FLOCK:
2060 			res = flock_lock_inode_wait(inode, fl);
2061 			break;
2062 		default:
2063 			BUG();
2064 	}
2065 	return res;
2066 }
2067 EXPORT_SYMBOL(locks_lock_inode_wait);
2068 
2069 /**
2070  *	sys_flock: - flock() system call.
2071  *	@fd: the file descriptor to lock.
2072  *	@cmd: the type of lock to apply.
2073  *
2074  *	Apply a %FL_FLOCK style lock to an open file descriptor.
2075  *	The @cmd can be one of:
2076  *
2077  *	- %LOCK_SH -- a shared lock.
2078  *	- %LOCK_EX -- an exclusive lock.
2079  *	- %LOCK_UN -- remove an existing lock.
2080  *	- %LOCK_MAND -- a 'mandatory' flock. (DEPRECATED)
2081  *
2082  *	%LOCK_MAND support has been removed from the kernel.
2083  */
2084 SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
2085 {
2086 	int can_sleep, error, type;
2087 	struct file_lock fl;
2088 	struct fd f;
2089 
2090 	/*
2091 	 * LOCK_MAND locks were broken for a long time in that they never
2092 	 * conflicted with one another and didn't prevent any sort of open,
2093 	 * read or write activity.
2094 	 *
2095 	 * Just ignore these requests now, to preserve legacy behavior, but
2096 	 * throw a warning to let people know that they don't actually work.
2097 	 */
2098 	if (cmd & LOCK_MAND) {
2099 		pr_warn_once("Attempt to set a LOCK_MAND lock via flock(2). This support has been removed and the request ignored.\n");
2100 		return 0;
2101 	}
2102 
2103 	type = flock_translate_cmd(cmd & ~LOCK_NB);
2104 	if (type < 0)
2105 		return type;
2106 
2107 	error = -EBADF;
2108 	f = fdget(fd);
2109 	if (!f.file)
2110 		return error;
2111 
2112 	if (type != F_UNLCK && !(f.file->f_mode & (FMODE_READ | FMODE_WRITE)))
2113 		goto out_putf;
2114 
2115 	flock_make_lock(f.file, &fl, type);
2116 
2117 	error = security_file_lock(f.file, fl.fl_type);
2118 	if (error)
2119 		goto out_putf;
2120 
2121 	can_sleep = !(cmd & LOCK_NB);
2122 	if (can_sleep)
2123 		fl.fl_flags |= FL_SLEEP;
2124 
2125 	if (f.file->f_op->flock)
2126 		error = f.file->f_op->flock(f.file,
2127 					    (can_sleep) ? F_SETLKW : F_SETLK,
2128 					    &fl);
2129 	else
2130 		error = locks_lock_file_wait(f.file, &fl);
2131 
2132 	locks_release_private(&fl);
2133  out_putf:
2134 	fdput(f);
2135 
2136 	return error;
2137 }
2138 
2139 /**
2140  * vfs_test_lock - test file byte range lock
2141  * @filp: The file to test lock for
2142  * @fl: The lock to test; also used to hold result
2143  *
2144  * Returns -ERRNO on failure.  Indicates presence of conflicting lock by
2145  * setting conf->fl_type to something other than F_UNLCK.
2146  */
2147 int vfs_test_lock(struct file *filp, struct file_lock *fl)
2148 {
2149 	if (filp->f_op->lock)
2150 		return filp->f_op->lock(filp, F_GETLK, fl);
2151 	posix_test_lock(filp, fl);
2152 	return 0;
2153 }
2154 EXPORT_SYMBOL_GPL(vfs_test_lock);
2155 
2156 /**
2157  * locks_translate_pid - translate a file_lock's fl_pid number into a namespace
2158  * @fl: The file_lock who's fl_pid should be translated
2159  * @ns: The namespace into which the pid should be translated
2160  *
2161  * Used to tranlate a fl_pid into a namespace virtual pid number
2162  */
2163 static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
2164 {
2165 	pid_t vnr;
2166 	struct pid *pid;
2167 
2168 	if (IS_OFDLCK(fl))
2169 		return -1;
2170 	if (IS_REMOTELCK(fl))
2171 		return fl->fl_pid;
2172 	/*
2173 	 * If the flock owner process is dead and its pid has been already
2174 	 * freed, the translation below won't work, but we still want to show
2175 	 * flock owner pid number in init pidns.
2176 	 */
2177 	if (ns == &init_pid_ns)
2178 		return (pid_t)fl->fl_pid;
2179 
2180 	rcu_read_lock();
2181 	pid = find_pid_ns(fl->fl_pid, &init_pid_ns);
2182 	vnr = pid_nr_ns(pid, ns);
2183 	rcu_read_unlock();
2184 	return vnr;
2185 }
2186 
2187 static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
2188 {
2189 	flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2190 #if BITS_PER_LONG == 32
2191 	/*
2192 	 * Make sure we can represent the posix lock via
2193 	 * legacy 32bit flock.
2194 	 */
2195 	if (fl->fl_start > OFFT_OFFSET_MAX)
2196 		return -EOVERFLOW;
2197 	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
2198 		return -EOVERFLOW;
2199 #endif
2200 	flock->l_start = fl->fl_start;
2201 	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2202 		fl->fl_end - fl->fl_start + 1;
2203 	flock->l_whence = 0;
2204 	flock->l_type = fl->fl_type;
2205 	return 0;
2206 }
2207 
2208 #if BITS_PER_LONG == 32
2209 static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
2210 {
2211 	flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2212 	flock->l_start = fl->fl_start;
2213 	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2214 		fl->fl_end - fl->fl_start + 1;
2215 	flock->l_whence = 0;
2216 	flock->l_type = fl->fl_type;
2217 }
2218 #endif
2219 
2220 /* Report the first existing lock that would conflict with l.
2221  * This implements the F_GETLK command of fcntl().
2222  */
2223 int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock)
2224 {
2225 	struct file_lock *fl;
2226 	int error;
2227 
2228 	fl = locks_alloc_lock();
2229 	if (fl == NULL)
2230 		return -ENOMEM;
2231 	error = -EINVAL;
2232 	if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
2233 		goto out;
2234 
2235 	error = flock_to_posix_lock(filp, fl, flock);
2236 	if (error)
2237 		goto out;
2238 
2239 	if (cmd == F_OFD_GETLK) {
2240 		error = -EINVAL;
2241 		if (flock->l_pid != 0)
2242 			goto out;
2243 
2244 		fl->fl_flags |= FL_OFDLCK;
2245 		fl->fl_owner = filp;
2246 	}
2247 
2248 	error = vfs_test_lock(filp, fl);
2249 	if (error)
2250 		goto out;
2251 
2252 	flock->l_type = fl->fl_type;
2253 	if (fl->fl_type != F_UNLCK) {
2254 		error = posix_lock_to_flock(flock, fl);
2255 		if (error)
2256 			goto out;
2257 	}
2258 out:
2259 	locks_free_lock(fl);
2260 	return error;
2261 }
2262 
2263 /**
2264  * vfs_lock_file - file byte range lock
2265  * @filp: The file to apply the lock to
2266  * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
2267  * @fl: The lock to be applied
2268  * @conf: Place to return a copy of the conflicting lock, if found.
2269  *
2270  * A caller that doesn't care about the conflicting lock may pass NULL
2271  * as the final argument.
2272  *
2273  * If the filesystem defines a private ->lock() method, then @conf will
2274  * be left unchanged; so a caller that cares should initialize it to
2275  * some acceptable default.
2276  *
2277  * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
2278  * locks, the ->lock() interface may return asynchronously, before the lock has
2279  * been granted or denied by the underlying filesystem, if (and only if)
2280  * lm_grant is set. Callers expecting ->lock() to return asynchronously
2281  * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
2282  * the request is for a blocking lock. When ->lock() does return asynchronously,
2283  * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
2284  * request completes.
2285  * If the request is for non-blocking lock the file system should return
2286  * FILE_LOCK_DEFERRED then try to get the lock and call the callback routine
2287  * with the result. If the request timed out the callback routine will return a
2288  * nonzero return code and the file system should release the lock. The file
2289  * system is also responsible to keep a corresponding posix lock when it
2290  * grants a lock so the VFS can find out which locks are locally held and do
2291  * the correct lock cleanup when required.
2292  * The underlying filesystem must not drop the kernel lock or call
2293  * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
2294  * return code.
2295  */
2296 int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
2297 {
2298 	if (filp->f_op->lock)
2299 		return filp->f_op->lock(filp, cmd, fl);
2300 	else
2301 		return posix_lock_file(filp, fl, conf);
2302 }
2303 EXPORT_SYMBOL_GPL(vfs_lock_file);
2304 
2305 static int do_lock_file_wait(struct file *filp, unsigned int cmd,
2306 			     struct file_lock *fl)
2307 {
2308 	int error;
2309 
2310 	error = security_file_lock(filp, fl->fl_type);
2311 	if (error)
2312 		return error;
2313 
2314 	for (;;) {
2315 		error = vfs_lock_file(filp, cmd, fl, NULL);
2316 		if (error != FILE_LOCK_DEFERRED)
2317 			break;
2318 		error = wait_event_interruptible(fl->fl_wait,
2319 					list_empty(&fl->fl_blocked_member));
2320 		if (error)
2321 			break;
2322 	}
2323 	locks_delete_block(fl);
2324 
2325 	return error;
2326 }
2327 
2328 /* Ensure that fl->fl_file has compatible f_mode for F_SETLK calls */
2329 static int
2330 check_fmode_for_setlk(struct file_lock *fl)
2331 {
2332 	switch (fl->fl_type) {
2333 	case F_RDLCK:
2334 		if (!(fl->fl_file->f_mode & FMODE_READ))
2335 			return -EBADF;
2336 		break;
2337 	case F_WRLCK:
2338 		if (!(fl->fl_file->f_mode & FMODE_WRITE))
2339 			return -EBADF;
2340 	}
2341 	return 0;
2342 }
2343 
2344 /* Apply the lock described by l to an open file descriptor.
2345  * This implements both the F_SETLK and F_SETLKW commands of fcntl().
2346  */
2347 int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
2348 		struct flock *flock)
2349 {
2350 	struct file_lock *file_lock = locks_alloc_lock();
2351 	struct inode *inode = locks_inode(filp);
2352 	struct file *f;
2353 	int error;
2354 
2355 	if (file_lock == NULL)
2356 		return -ENOLCK;
2357 
2358 	error = flock_to_posix_lock(filp, file_lock, flock);
2359 	if (error)
2360 		goto out;
2361 
2362 	error = check_fmode_for_setlk(file_lock);
2363 	if (error)
2364 		goto out;
2365 
2366 	/*
2367 	 * If the cmd is requesting file-private locks, then set the
2368 	 * FL_OFDLCK flag and override the owner.
2369 	 */
2370 	switch (cmd) {
2371 	case F_OFD_SETLK:
2372 		error = -EINVAL;
2373 		if (flock->l_pid != 0)
2374 			goto out;
2375 
2376 		cmd = F_SETLK;
2377 		file_lock->fl_flags |= FL_OFDLCK;
2378 		file_lock->fl_owner = filp;
2379 		break;
2380 	case F_OFD_SETLKW:
2381 		error = -EINVAL;
2382 		if (flock->l_pid != 0)
2383 			goto out;
2384 
2385 		cmd = F_SETLKW;
2386 		file_lock->fl_flags |= FL_OFDLCK;
2387 		file_lock->fl_owner = filp;
2388 		fallthrough;
2389 	case F_SETLKW:
2390 		file_lock->fl_flags |= FL_SLEEP;
2391 	}
2392 
2393 	error = do_lock_file_wait(filp, cmd, file_lock);
2394 
2395 	/*
2396 	 * Attempt to detect a close/fcntl race and recover by releasing the
2397 	 * lock that was just acquired. There is no need to do that when we're
2398 	 * unlocking though, or for OFD locks.
2399 	 */
2400 	if (!error && file_lock->fl_type != F_UNLCK &&
2401 	    !(file_lock->fl_flags & FL_OFDLCK)) {
2402 		struct files_struct *files = current->files;
2403 		/*
2404 		 * We need that spin_lock here - it prevents reordering between
2405 		 * update of i_flctx->flc_posix and check for it done in
2406 		 * close(). rcu_read_lock() wouldn't do.
2407 		 */
2408 		spin_lock(&files->file_lock);
2409 		f = files_lookup_fd_locked(files, fd);
2410 		spin_unlock(&files->file_lock);
2411 		if (f != filp) {
2412 			file_lock->fl_type = F_UNLCK;
2413 			error = do_lock_file_wait(filp, cmd, file_lock);
2414 			WARN_ON_ONCE(error);
2415 			error = -EBADF;
2416 		}
2417 	}
2418 out:
2419 	trace_fcntl_setlk(inode, file_lock, error);
2420 	locks_free_lock(file_lock);
2421 	return error;
2422 }
2423 
2424 #if BITS_PER_LONG == 32
2425 /* Report the first existing lock that would conflict with l.
2426  * This implements the F_GETLK command of fcntl().
2427  */
2428 int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock)
2429 {
2430 	struct file_lock *fl;
2431 	int error;
2432 
2433 	fl = locks_alloc_lock();
2434 	if (fl == NULL)
2435 		return -ENOMEM;
2436 
2437 	error = -EINVAL;
2438 	if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
2439 		goto out;
2440 
2441 	error = flock64_to_posix_lock(filp, fl, flock);
2442 	if (error)
2443 		goto out;
2444 
2445 	if (cmd == F_OFD_GETLK) {
2446 		error = -EINVAL;
2447 		if (flock->l_pid != 0)
2448 			goto out;
2449 
2450 		cmd = F_GETLK64;
2451 		fl->fl_flags |= FL_OFDLCK;
2452 		fl->fl_owner = filp;
2453 	}
2454 
2455 	error = vfs_test_lock(filp, fl);
2456 	if (error)
2457 		goto out;
2458 
2459 	flock->l_type = fl->fl_type;
2460 	if (fl->fl_type != F_UNLCK)
2461 		posix_lock_to_flock64(flock, fl);
2462 
2463 out:
2464 	locks_free_lock(fl);
2465 	return error;
2466 }
2467 
2468 /* Apply the lock described by l to an open file descriptor.
2469  * This implements both the F_SETLK and F_SETLKW commands of fcntl().
2470  */
2471 int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
2472 		struct flock64 *flock)
2473 {
2474 	struct file_lock *file_lock = locks_alloc_lock();
2475 	struct file *f;
2476 	int error;
2477 
2478 	if (file_lock == NULL)
2479 		return -ENOLCK;
2480 
2481 	error = flock64_to_posix_lock(filp, file_lock, flock);
2482 	if (error)
2483 		goto out;
2484 
2485 	error = check_fmode_for_setlk(file_lock);
2486 	if (error)
2487 		goto out;
2488 
2489 	/*
2490 	 * If the cmd is requesting file-private locks, then set the
2491 	 * FL_OFDLCK flag and override the owner.
2492 	 */
2493 	switch (cmd) {
2494 	case F_OFD_SETLK:
2495 		error = -EINVAL;
2496 		if (flock->l_pid != 0)
2497 			goto out;
2498 
2499 		cmd = F_SETLK64;
2500 		file_lock->fl_flags |= FL_OFDLCK;
2501 		file_lock->fl_owner = filp;
2502 		break;
2503 	case F_OFD_SETLKW:
2504 		error = -EINVAL;
2505 		if (flock->l_pid != 0)
2506 			goto out;
2507 
2508 		cmd = F_SETLKW64;
2509 		file_lock->fl_flags |= FL_OFDLCK;
2510 		file_lock->fl_owner = filp;
2511 		fallthrough;
2512 	case F_SETLKW64:
2513 		file_lock->fl_flags |= FL_SLEEP;
2514 	}
2515 
2516 	error = do_lock_file_wait(filp, cmd, file_lock);
2517 
2518 	/*
2519 	 * Attempt to detect a close/fcntl race and recover by releasing the
2520 	 * lock that was just acquired. There is no need to do that when we're
2521 	 * unlocking though, or for OFD locks.
2522 	 */
2523 	if (!error && file_lock->fl_type != F_UNLCK &&
2524 	    !(file_lock->fl_flags & FL_OFDLCK)) {
2525 		struct files_struct *files = current->files;
2526 		/*
2527 		 * We need that spin_lock here - it prevents reordering between
2528 		 * update of i_flctx->flc_posix and check for it done in
2529 		 * close(). rcu_read_lock() wouldn't do.
2530 		 */
2531 		spin_lock(&files->file_lock);
2532 		f = files_lookup_fd_locked(files, fd);
2533 		spin_unlock(&files->file_lock);
2534 		if (f != filp) {
2535 			file_lock->fl_type = F_UNLCK;
2536 			error = do_lock_file_wait(filp, cmd, file_lock);
2537 			WARN_ON_ONCE(error);
2538 			error = -EBADF;
2539 		}
2540 	}
2541 out:
2542 	locks_free_lock(file_lock);
2543 	return error;
2544 }
2545 #endif /* BITS_PER_LONG == 32 */
2546 
2547 /*
2548  * This function is called when the file is being removed
2549  * from the task's fd array.  POSIX locks belonging to this task
2550  * are deleted at this time.
2551  */
2552 void locks_remove_posix(struct file *filp, fl_owner_t owner)
2553 {
2554 	int error;
2555 	struct inode *inode = locks_inode(filp);
2556 	struct file_lock lock;
2557 	struct file_lock_context *ctx;
2558 
2559 	/*
2560 	 * If there are no locks held on this file, we don't need to call
2561 	 * posix_lock_file().  Another process could be setting a lock on this
2562 	 * file at the same time, but we wouldn't remove that lock anyway.
2563 	 */
2564 	ctx =  smp_load_acquire(&inode->i_flctx);
2565 	if (!ctx || list_empty(&ctx->flc_posix))
2566 		return;
2567 
2568 	locks_init_lock(&lock);
2569 	lock.fl_type = F_UNLCK;
2570 	lock.fl_flags = FL_POSIX | FL_CLOSE;
2571 	lock.fl_start = 0;
2572 	lock.fl_end = OFFSET_MAX;
2573 	lock.fl_owner = owner;
2574 	lock.fl_pid = current->tgid;
2575 	lock.fl_file = filp;
2576 	lock.fl_ops = NULL;
2577 	lock.fl_lmops = NULL;
2578 
2579 	error = vfs_lock_file(filp, F_SETLK, &lock, NULL);
2580 
2581 	if (lock.fl_ops && lock.fl_ops->fl_release_private)
2582 		lock.fl_ops->fl_release_private(&lock);
2583 	trace_locks_remove_posix(inode, &lock, error);
2584 }
2585 EXPORT_SYMBOL(locks_remove_posix);
2586 
2587 /* The i_flctx must be valid when calling into here */
2588 static void
2589 locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
2590 {
2591 	struct file_lock fl;
2592 	struct inode *inode = locks_inode(filp);
2593 
2594 	if (list_empty(&flctx->flc_flock))
2595 		return;
2596 
2597 	flock_make_lock(filp, &fl, F_UNLCK);
2598 	fl.fl_flags |= FL_CLOSE;
2599 
2600 	if (filp->f_op->flock)
2601 		filp->f_op->flock(filp, F_SETLKW, &fl);
2602 	else
2603 		flock_lock_inode(inode, &fl);
2604 
2605 	if (fl.fl_ops && fl.fl_ops->fl_release_private)
2606 		fl.fl_ops->fl_release_private(&fl);
2607 }
2608 
2609 /* The i_flctx must be valid when calling into here */
2610 static void
2611 locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
2612 {
2613 	struct file_lock *fl, *tmp;
2614 	LIST_HEAD(dispose);
2615 
2616 	if (list_empty(&ctx->flc_lease))
2617 		return;
2618 
2619 	percpu_down_read(&file_rwsem);
2620 	spin_lock(&ctx->flc_lock);
2621 	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
2622 		if (filp == fl->fl_file)
2623 			lease_modify(fl, F_UNLCK, &dispose);
2624 	spin_unlock(&ctx->flc_lock);
2625 	percpu_up_read(&file_rwsem);
2626 
2627 	locks_dispose_list(&dispose);
2628 }
2629 
2630 /*
2631  * This function is called on the last close of an open file.
2632  */
2633 void locks_remove_file(struct file *filp)
2634 {
2635 	struct file_lock_context *ctx;
2636 
2637 	ctx = smp_load_acquire(&locks_inode(filp)->i_flctx);
2638 	if (!ctx)
2639 		return;
2640 
2641 	/* remove any OFD locks */
2642 	locks_remove_posix(filp, filp);
2643 
2644 	/* remove flock locks */
2645 	locks_remove_flock(filp, ctx);
2646 
2647 	/* remove any leases */
2648 	locks_remove_lease(filp, ctx);
2649 
2650 	spin_lock(&ctx->flc_lock);
2651 	locks_check_ctx_file_list(filp, &ctx->flc_posix, "POSIX");
2652 	locks_check_ctx_file_list(filp, &ctx->flc_flock, "FLOCK");
2653 	locks_check_ctx_file_list(filp, &ctx->flc_lease, "LEASE");
2654 	spin_unlock(&ctx->flc_lock);
2655 }
2656 
2657 /**
2658  * vfs_cancel_lock - file byte range unblock lock
2659  * @filp: The file to apply the unblock to
2660  * @fl: The lock to be unblocked
2661  *
2662  * Used by lock managers to cancel blocked requests
2663  */
2664 int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2665 {
2666 	if (filp->f_op->lock)
2667 		return filp->f_op->lock(filp, F_CANCELLK, fl);
2668 	return 0;
2669 }
2670 EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2671 
2672 #ifdef CONFIG_PROC_FS
2673 #include <linux/proc_fs.h>
2674 #include <linux/seq_file.h>
2675 
2676 struct locks_iterator {
2677 	int	li_cpu;
2678 	loff_t	li_pos;
2679 };
2680 
2681 static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2682 			    loff_t id, char *pfx, int repeat)
2683 {
2684 	struct inode *inode = NULL;
2685 	unsigned int fl_pid;
2686 	struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
2687 	int type;
2688 
2689 	fl_pid = locks_translate_pid(fl, proc_pidns);
2690 	/*
2691 	 * If lock owner is dead (and pid is freed) or not visible in current
2692 	 * pidns, zero is shown as a pid value. Check lock info from
2693 	 * init_pid_ns to get saved lock pid value.
2694 	 */
2695 
2696 	if (fl->fl_file != NULL)
2697 		inode = locks_inode(fl->fl_file);
2698 
2699 	seq_printf(f, "%lld: ", id);
2700 
2701 	if (repeat)
2702 		seq_printf(f, "%*s", repeat - 1 + (int)strlen(pfx), pfx);
2703 
2704 	if (IS_POSIX(fl)) {
2705 		if (fl->fl_flags & FL_ACCESS)
2706 			seq_puts(f, "ACCESS");
2707 		else if (IS_OFDLCK(fl))
2708 			seq_puts(f, "OFDLCK");
2709 		else
2710 			seq_puts(f, "POSIX ");
2711 
2712 		seq_printf(f, " %s ",
2713 			     (inode == NULL) ? "*NOINODE*" : "ADVISORY ");
2714 	} else if (IS_FLOCK(fl)) {
2715 		seq_puts(f, "FLOCK  ADVISORY  ");
2716 	} else if (IS_LEASE(fl)) {
2717 		if (fl->fl_flags & FL_DELEG)
2718 			seq_puts(f, "DELEG  ");
2719 		else
2720 			seq_puts(f, "LEASE  ");
2721 
2722 		if (lease_breaking(fl))
2723 			seq_puts(f, "BREAKING  ");
2724 		else if (fl->fl_file)
2725 			seq_puts(f, "ACTIVE    ");
2726 		else
2727 			seq_puts(f, "BREAKER   ");
2728 	} else {
2729 		seq_puts(f, "UNKNOWN UNKNOWN  ");
2730 	}
2731 	type = IS_LEASE(fl) ? target_leasetype(fl) : fl->fl_type;
2732 
2733 	seq_printf(f, "%s ", (type == F_WRLCK) ? "WRITE" :
2734 			     (type == F_RDLCK) ? "READ" : "UNLCK");
2735 	if (inode) {
2736 		/* userspace relies on this representation of dev_t */
2737 		seq_printf(f, "%d %02x:%02x:%lu ", fl_pid,
2738 				MAJOR(inode->i_sb->s_dev),
2739 				MINOR(inode->i_sb->s_dev), inode->i_ino);
2740 	} else {
2741 		seq_printf(f, "%d <none>:0 ", fl_pid);
2742 	}
2743 	if (IS_POSIX(fl)) {
2744 		if (fl->fl_end == OFFSET_MAX)
2745 			seq_printf(f, "%Ld EOF\n", fl->fl_start);
2746 		else
2747 			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2748 	} else {
2749 		seq_puts(f, "0 EOF\n");
2750 	}
2751 }
2752 
2753 static struct file_lock *get_next_blocked_member(struct file_lock *node)
2754 {
2755 	struct file_lock *tmp;
2756 
2757 	/* NULL node or root node */
2758 	if (node == NULL || node->fl_blocker == NULL)
2759 		return NULL;
2760 
2761 	/* Next member in the linked list could be itself */
2762 	tmp = list_next_entry(node, fl_blocked_member);
2763 	if (list_entry_is_head(tmp, &node->fl_blocker->fl_blocked_requests, fl_blocked_member)
2764 		|| tmp == node) {
2765 		return NULL;
2766 	}
2767 
2768 	return tmp;
2769 }
2770 
2771 static int locks_show(struct seq_file *f, void *v)
2772 {
2773 	struct locks_iterator *iter = f->private;
2774 	struct file_lock *cur, *tmp;
2775 	struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
2776 	int level = 0;
2777 
2778 	cur = hlist_entry(v, struct file_lock, fl_link);
2779 
2780 	if (locks_translate_pid(cur, proc_pidns) == 0)
2781 		return 0;
2782 
2783 	/* View this crossed linked list as a binary tree, the first member of fl_blocked_requests
2784 	 * is the left child of current node, the next silibing in fl_blocked_member is the
2785 	 * right child, we can alse get the parent of current node from fl_blocker, so this
2786 	 * question becomes traversal of a binary tree
2787 	 */
2788 	while (cur != NULL) {
2789 		if (level)
2790 			lock_get_status(f, cur, iter->li_pos, "-> ", level);
2791 		else
2792 			lock_get_status(f, cur, iter->li_pos, "", level);
2793 
2794 		if (!list_empty(&cur->fl_blocked_requests)) {
2795 			/* Turn left */
2796 			cur = list_first_entry_or_null(&cur->fl_blocked_requests,
2797 				struct file_lock, fl_blocked_member);
2798 			level++;
2799 		} else {
2800 			/* Turn right */
2801 			tmp = get_next_blocked_member(cur);
2802 			/* Fall back to parent node */
2803 			while (tmp == NULL && cur->fl_blocker != NULL) {
2804 				cur = cur->fl_blocker;
2805 				level--;
2806 				tmp = get_next_blocked_member(cur);
2807 			}
2808 			cur = tmp;
2809 		}
2810 	}
2811 
2812 	return 0;
2813 }
2814 
2815 static void __show_fd_locks(struct seq_file *f,
2816 			struct list_head *head, int *id,
2817 			struct file *filp, struct files_struct *files)
2818 {
2819 	struct file_lock *fl;
2820 
2821 	list_for_each_entry(fl, head, fl_list) {
2822 
2823 		if (filp != fl->fl_file)
2824 			continue;
2825 		if (fl->fl_owner != files &&
2826 		    fl->fl_owner != filp)
2827 			continue;
2828 
2829 		(*id)++;
2830 		seq_puts(f, "lock:\t");
2831 		lock_get_status(f, fl, *id, "", 0);
2832 	}
2833 }
2834 
2835 void show_fd_locks(struct seq_file *f,
2836 		  struct file *filp, struct files_struct *files)
2837 {
2838 	struct inode *inode = locks_inode(filp);
2839 	struct file_lock_context *ctx;
2840 	int id = 0;
2841 
2842 	ctx = smp_load_acquire(&inode->i_flctx);
2843 	if (!ctx)
2844 		return;
2845 
2846 	spin_lock(&ctx->flc_lock);
2847 	__show_fd_locks(f, &ctx->flc_flock, &id, filp, files);
2848 	__show_fd_locks(f, &ctx->flc_posix, &id, filp, files);
2849 	__show_fd_locks(f, &ctx->flc_lease, &id, filp, files);
2850 	spin_unlock(&ctx->flc_lock);
2851 }
2852 
2853 static void *locks_start(struct seq_file *f, loff_t *pos)
2854 	__acquires(&blocked_lock_lock)
2855 {
2856 	struct locks_iterator *iter = f->private;
2857 
2858 	iter->li_pos = *pos + 1;
2859 	percpu_down_write(&file_rwsem);
2860 	spin_lock(&blocked_lock_lock);
2861 	return seq_hlist_start_percpu(&file_lock_list.hlist, &iter->li_cpu, *pos);
2862 }
2863 
2864 static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2865 {
2866 	struct locks_iterator *iter = f->private;
2867 
2868 	++iter->li_pos;
2869 	return seq_hlist_next_percpu(v, &file_lock_list.hlist, &iter->li_cpu, pos);
2870 }
2871 
2872 static void locks_stop(struct seq_file *f, void *v)
2873 	__releases(&blocked_lock_lock)
2874 {
2875 	spin_unlock(&blocked_lock_lock);
2876 	percpu_up_write(&file_rwsem);
2877 }
2878 
2879 static const struct seq_operations locks_seq_operations = {
2880 	.start	= locks_start,
2881 	.next	= locks_next,
2882 	.stop	= locks_stop,
2883 	.show	= locks_show,
2884 };
2885 
2886 static int __init proc_locks_init(void)
2887 {
2888 	proc_create_seq_private("locks", 0, NULL, &locks_seq_operations,
2889 			sizeof(struct locks_iterator), NULL);
2890 	return 0;
2891 }
2892 fs_initcall(proc_locks_init);
2893 #endif
2894 
2895 static int __init filelock_init(void)
2896 {
2897 	int i;
2898 
2899 	flctx_cache = kmem_cache_create("file_lock_ctx",
2900 			sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);
2901 
2902 	filelock_cache = kmem_cache_create("file_lock_cache",
2903 			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
2904 
2905 	for_each_possible_cpu(i) {
2906 		struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i);
2907 
2908 		spin_lock_init(&fll->lock);
2909 		INIT_HLIST_HEAD(&fll->hlist);
2910 	}
2911 
2912 	lease_notifier_chain_init();
2913 	return 0;
2914 }
2915 core_initcall(filelock_init);
2916