xref: /linux/fs/locks.c (revision 2504075d383fcefd746dac42a0cd1c3bdc006bd1)
1 /*
2  *  linux/fs/locks.c
3  *
4  *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
5  *  Doug Evans (dje@spiff.uucp), August 07, 1992
6  *
7  *  Deadlock detection added.
8  *  FIXME: one thing isn't handled yet:
9  *	- mandatory locks (requires lots of changes elsewhere)
10  *  Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
11  *
12  *  Miscellaneous edits, and a total rewrite of posix_lock_file() code.
13  *  Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
14  *
15  *  Converted file_lock_table to a linked list from an array, which eliminates
16  *  the limits on how many active file locks are open.
17  *  Chad Page (pageone@netcom.com), November 27, 1994
18  *
19  *  Removed dependency on file descriptors. dup()'ed file descriptors now
20  *  get the same locks as the original file descriptors, and a close() on
21  *  any file descriptor removes ALL the locks on the file for the current
22  *  process. Since locks still depend on the process id, locks are inherited
23  *  after an exec() but not after a fork(). This agrees with POSIX, and both
24  *  BSD and SVR4 practice.
25  *  Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
26  *
27  *  Scrapped free list which is redundant now that we allocate locks
28  *  dynamically with kmalloc()/kfree().
29  *  Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
30  *
31  *  Implemented two lock personalities - FL_FLOCK and FL_POSIX.
32  *
33  *  FL_POSIX locks are created with calls to fcntl() and lockf() through the
34  *  fcntl() system call. They have the semantics described above.
35  *
36  *  FL_FLOCK locks are created with calls to flock(), through the flock()
37  *  system call, which is new. Old C libraries implement flock() via fcntl()
38  *  and will continue to use the old, broken implementation.
39  *
40  *  FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
41  *  with a file pointer (filp). As a result they can be shared by a parent
42  *  process and its children after a fork(). They are removed when the last
43  *  file descriptor referring to the file pointer is closed (unless explicitly
44  *  unlocked).
45  *
46  *  FL_FLOCK locks never deadlock, an existing lock is always removed before
47  *  upgrading from shared to exclusive (or vice versa). When this happens
48  *  any processes blocked by the current lock are woken up and allowed to
49  *  run before the new lock is applied.
50  *  Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
51  *
52  *  Removed some race conditions in flock_lock_file(), marked other possible
53  *  races. Just grep for FIXME to see them.
54  *  Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
55  *
56  *  Addressed Dmitry's concerns. Deadlock checking no longer recursive.
57  *  Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
58  *  once we've checked for blocking and deadlocking.
59  *  Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
60  *
61  *  Initial implementation of mandatory locks. SunOS turned out to be
62  *  a rotten model, so I implemented the "obvious" semantics.
63  *  See 'Documentation/mandatory.txt' for details.
64  *  Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
65  *
66  *  Don't allow mandatory locks on mmap()'ed files. Added simple functions to
67  *  check if a file has mandatory locks, used by mmap(), open() and creat() to
68  *  see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
69  *  Manual, Section 2.
70  *  Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
71  *
72  *  Tidied up block list handling. Added '/proc/locks' interface.
73  *  Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
74  *
75  *  Fixed deadlock condition for pathological code that mixes calls to
76  *  flock() and fcntl().
77  *  Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
78  *
79  *  Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
80  *  for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
81  *  guarantee sensible behaviour in the case where file system modules might
82  *  be compiled with different options than the kernel itself.
83  *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
84  *
85  *  Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
86  *  (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
87  *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
88  *
89  *  Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
90  *  locks. Changed process synchronisation to avoid dereferencing locks that
91  *  have already been freed.
92  *  Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
93  *
94  *  Made the block list a circular list to minimise searching in the list.
95  *  Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
96  *
97  *  Made mandatory locking a mount option. Default is not to allow mandatory
98  *  locking.
99  *  Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
100  *
101  *  Some adaptations for NFS support.
102  *  Olaf Kirch (okir@monad.swb.de), Dec 1996,
103  *
104  *  Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
105  *  Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
106  *
107  *  Use slab allocator instead of kmalloc/kfree.
108  *  Use generic list implementation from <linux/list.h>.
109  *  Sped up posix_locks_deadlock by only considering blocked locks.
110  *  Matthew Wilcox <willy@debian.org>, March, 2000.
111  *
112  *  Leases and LOCK_MAND
113  *  Matthew Wilcox <willy@debian.org>, June, 2000.
114  *  Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
115  */
116 
117 #include <linux/capability.h>
118 #include <linux/file.h>
119 #include <linux/fdtable.h>
120 #include <linux/fs.h>
121 #include <linux/init.h>
122 #include <linux/module.h>
123 #include <linux/security.h>
124 #include <linux/slab.h>
125 #include <linux/smp_lock.h>
126 #include <linux/syscalls.h>
127 #include <linux/time.h>
128 #include <linux/rcupdate.h>
129 #include <linux/pid_namespace.h>
130 
131 #include <asm/uaccess.h>
132 
133 #define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
134 #define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
135 #define IS_LEASE(fl)	(fl->fl_flags & FL_LEASE)
136 
137 int leases_enable = 1;
138 int lease_break_time = 45;
139 
140 #define for_each_lock(inode, lockp) \
141 	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)
142 
143 static LIST_HEAD(file_lock_list);
144 static LIST_HEAD(blocked_list);
145 static DEFINE_SPINLOCK(file_lock_lock);
146 
147 /*
148  * Protects the two list heads above, plus the inode->i_flock list
149  * FIXME: should use a spinlock, once lockd and ceph are ready.
150  */
151 void lock_flocks(void)
152 {
153 	spin_lock(&file_lock_lock);
154 }
155 EXPORT_SYMBOL_GPL(lock_flocks);
156 
157 void unlock_flocks(void)
158 {
159 	spin_unlock(&file_lock_lock);
160 }
161 EXPORT_SYMBOL_GPL(unlock_flocks);
162 
163 static struct kmem_cache *filelock_cache __read_mostly;
164 
165 /* Allocate an empty lock structure. */
166 struct file_lock *locks_alloc_lock(void)
167 {
168 	return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
169 }
170 EXPORT_SYMBOL_GPL(locks_alloc_lock);
171 
172 void locks_release_private(struct file_lock *fl)
173 {
174 	if (fl->fl_ops) {
175 		if (fl->fl_ops->fl_release_private)
176 			fl->fl_ops->fl_release_private(fl);
177 		fl->fl_ops = NULL;
178 	}
179 	if (fl->fl_lmops) {
180 		if (fl->fl_lmops->fl_release_private)
181 			fl->fl_lmops->fl_release_private(fl);
182 		fl->fl_lmops = NULL;
183 	}
184 
185 }
186 EXPORT_SYMBOL_GPL(locks_release_private);
187 
188 /* Free a lock which is not in use. */
189 static void locks_free_lock(struct file_lock *fl)
190 {
191 	BUG_ON(waitqueue_active(&fl->fl_wait));
192 	BUG_ON(!list_empty(&fl->fl_block));
193 	BUG_ON(!list_empty(&fl->fl_link));
194 
195 	locks_release_private(fl);
196 	kmem_cache_free(filelock_cache, fl);
197 }
198 
199 void locks_init_lock(struct file_lock *fl)
200 {
201 	INIT_LIST_HEAD(&fl->fl_link);
202 	INIT_LIST_HEAD(&fl->fl_block);
203 	init_waitqueue_head(&fl->fl_wait);
204 	fl->fl_next = NULL;
205 	fl->fl_fasync = NULL;
206 	fl->fl_owner = NULL;
207 	fl->fl_pid = 0;
208 	fl->fl_nspid = NULL;
209 	fl->fl_file = NULL;
210 	fl->fl_flags = 0;
211 	fl->fl_type = 0;
212 	fl->fl_start = fl->fl_end = 0;
213 	fl->fl_ops = NULL;
214 	fl->fl_lmops = NULL;
215 }
216 
217 EXPORT_SYMBOL(locks_init_lock);
218 
219 /*
220  * Initialises the fields of the file lock which are invariant for
221  * free file_locks.
222  */
223 static void init_once(void *foo)
224 {
225 	struct file_lock *lock = (struct file_lock *) foo;
226 
227 	locks_init_lock(lock);
228 }
229 
230 static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
231 {
232 	if (fl->fl_ops) {
233 		if (fl->fl_ops->fl_copy_lock)
234 			fl->fl_ops->fl_copy_lock(new, fl);
235 		new->fl_ops = fl->fl_ops;
236 	}
237 	if (fl->fl_lmops) {
238 		if (fl->fl_lmops->fl_copy_lock)
239 			fl->fl_lmops->fl_copy_lock(new, fl);
240 		new->fl_lmops = fl->fl_lmops;
241 	}
242 }
243 
244 /*
245  * Initialize a new lock from an existing file_lock structure.
246  */
247 void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
248 {
249 	new->fl_owner = fl->fl_owner;
250 	new->fl_pid = fl->fl_pid;
251 	new->fl_file = NULL;
252 	new->fl_flags = fl->fl_flags;
253 	new->fl_type = fl->fl_type;
254 	new->fl_start = fl->fl_start;
255 	new->fl_end = fl->fl_end;
256 	new->fl_ops = NULL;
257 	new->fl_lmops = NULL;
258 }
259 EXPORT_SYMBOL(__locks_copy_lock);
260 
261 void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
262 {
263 	locks_release_private(new);
264 
265 	__locks_copy_lock(new, fl);
266 	new->fl_file = fl->fl_file;
267 	new->fl_ops = fl->fl_ops;
268 	new->fl_lmops = fl->fl_lmops;
269 
270 	locks_copy_private(new, fl);
271 }
272 
273 EXPORT_SYMBOL(locks_copy_lock);
274 
275 static inline int flock_translate_cmd(int cmd) {
276 	if (cmd & LOCK_MAND)
277 		return cmd & (LOCK_MAND | LOCK_RW);
278 	switch (cmd) {
279 	case LOCK_SH:
280 		return F_RDLCK;
281 	case LOCK_EX:
282 		return F_WRLCK;
283 	case LOCK_UN:
284 		return F_UNLCK;
285 	}
286 	return -EINVAL;
287 }
288 
289 /* Fill in a file_lock structure with an appropriate FLOCK lock. */
290 static int flock_make_lock(struct file *filp, struct file_lock **lock,
291 		unsigned int cmd)
292 {
293 	struct file_lock *fl;
294 	int type = flock_translate_cmd(cmd);
295 	if (type < 0)
296 		return type;
297 
298 	fl = locks_alloc_lock();
299 	if (fl == NULL)
300 		return -ENOMEM;
301 
302 	fl->fl_file = filp;
303 	fl->fl_pid = current->tgid;
304 	fl->fl_flags = FL_FLOCK;
305 	fl->fl_type = type;
306 	fl->fl_end = OFFSET_MAX;
307 
308 	*lock = fl;
309 	return 0;
310 }
311 
312 static int assign_type(struct file_lock *fl, int type)
313 {
314 	switch (type) {
315 	case F_RDLCK:
316 	case F_WRLCK:
317 	case F_UNLCK:
318 		fl->fl_type = type;
319 		break;
320 	default:
321 		return -EINVAL;
322 	}
323 	return 0;
324 }
325 
326 /* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
327  * style lock.
328  */
329 static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
330 			       struct flock *l)
331 {
332 	off_t start, end;
333 
334 	switch (l->l_whence) {
335 	case SEEK_SET:
336 		start = 0;
337 		break;
338 	case SEEK_CUR:
339 		start = filp->f_pos;
340 		break;
341 	case SEEK_END:
342 		start = i_size_read(filp->f_path.dentry->d_inode);
343 		break;
344 	default:
345 		return -EINVAL;
346 	}
347 
348 	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
349 	   POSIX-2001 defines it. */
350 	start += l->l_start;
351 	if (start < 0)
352 		return -EINVAL;
353 	fl->fl_end = OFFSET_MAX;
354 	if (l->l_len > 0) {
355 		end = start + l->l_len - 1;
356 		fl->fl_end = end;
357 	} else if (l->l_len < 0) {
358 		end = start - 1;
359 		fl->fl_end = end;
360 		start += l->l_len;
361 		if (start < 0)
362 			return -EINVAL;
363 	}
364 	fl->fl_start = start;	/* we record the absolute position */
365 	if (fl->fl_end < fl->fl_start)
366 		return -EOVERFLOW;
367 
368 	fl->fl_owner = current->files;
369 	fl->fl_pid = current->tgid;
370 	fl->fl_file = filp;
371 	fl->fl_flags = FL_POSIX;
372 	fl->fl_ops = NULL;
373 	fl->fl_lmops = NULL;
374 
375 	return assign_type(fl, l->l_type);
376 }
377 
378 #if BITS_PER_LONG == 32
379 static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
380 				 struct flock64 *l)
381 {
382 	loff_t start;
383 
384 	switch (l->l_whence) {
385 	case SEEK_SET:
386 		start = 0;
387 		break;
388 	case SEEK_CUR:
389 		start = filp->f_pos;
390 		break;
391 	case SEEK_END:
392 		start = i_size_read(filp->f_path.dentry->d_inode);
393 		break;
394 	default:
395 		return -EINVAL;
396 	}
397 
398 	start += l->l_start;
399 	if (start < 0)
400 		return -EINVAL;
401 	fl->fl_end = OFFSET_MAX;
402 	if (l->l_len > 0) {
403 		fl->fl_end = start + l->l_len - 1;
404 	} else if (l->l_len < 0) {
405 		fl->fl_end = start - 1;
406 		start += l->l_len;
407 		if (start < 0)
408 			return -EINVAL;
409 	}
410 	fl->fl_start = start;	/* we record the absolute position */
411 	if (fl->fl_end < fl->fl_start)
412 		return -EOVERFLOW;
413 
414 	fl->fl_owner = current->files;
415 	fl->fl_pid = current->tgid;
416 	fl->fl_file = filp;
417 	fl->fl_flags = FL_POSIX;
418 	fl->fl_ops = NULL;
419 	fl->fl_lmops = NULL;
420 
421 	switch (l->l_type) {
422 	case F_RDLCK:
423 	case F_WRLCK:
424 	case F_UNLCK:
425 		fl->fl_type = l->l_type;
426 		break;
427 	default:
428 		return -EINVAL;
429 	}
430 
431 	return (0);
432 }
433 #endif
434 
435 /* default lease lock manager operations */
436 static void lease_break_callback(struct file_lock *fl)
437 {
438 	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
439 }
440 
441 static void lease_release_private_callback(struct file_lock *fl)
442 {
443 	if (!fl->fl_file)
444 		return;
445 
446 	f_delown(fl->fl_file);
447 	fl->fl_file->f_owner.signum = 0;
448 }
449 
450 static int lease_mylease_callback(struct file_lock *fl, struct file_lock *try)
451 {
452 	return fl->fl_file == try->fl_file;
453 }
454 
455 static const struct lock_manager_operations lease_manager_ops = {
456 	.fl_break = lease_break_callback,
457 	.fl_release_private = lease_release_private_callback,
458 	.fl_mylease = lease_mylease_callback,
459 	.fl_change = lease_modify,
460 };
461 
462 /*
463  * Initialize a lease, use the default lock manager operations
464  */
465 static int lease_init(struct file *filp, int type, struct file_lock *fl)
466  {
467 	if (assign_type(fl, type) != 0)
468 		return -EINVAL;
469 
470 	fl->fl_owner = current->files;
471 	fl->fl_pid = current->tgid;
472 
473 	fl->fl_file = filp;
474 	fl->fl_flags = FL_LEASE;
475 	fl->fl_start = 0;
476 	fl->fl_end = OFFSET_MAX;
477 	fl->fl_ops = NULL;
478 	fl->fl_lmops = &lease_manager_ops;
479 	return 0;
480 }
481 
482 /* Allocate a file_lock initialised to this type of lease */
483 static struct file_lock *lease_alloc(struct file *filp, int type)
484 {
485 	struct file_lock *fl = locks_alloc_lock();
486 	int error = -ENOMEM;
487 
488 	if (fl == NULL)
489 		return ERR_PTR(error);
490 
491 	error = lease_init(filp, type, fl);
492 	if (error) {
493 		locks_free_lock(fl);
494 		return ERR_PTR(error);
495 	}
496 	return fl;
497 }
498 
499 /* Check if two locks overlap each other.
500  */
501 static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
502 {
503 	return ((fl1->fl_end >= fl2->fl_start) &&
504 		(fl2->fl_end >= fl1->fl_start));
505 }
506 
507 /*
508  * Check whether two locks have the same owner.
509  */
510 static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
511 {
512 	if (fl1->fl_lmops && fl1->fl_lmops->fl_compare_owner)
513 		return fl2->fl_lmops == fl1->fl_lmops &&
514 			fl1->fl_lmops->fl_compare_owner(fl1, fl2);
515 	return fl1->fl_owner == fl2->fl_owner;
516 }
517 
518 /* Remove waiter from blocker's block list.
519  * When blocker ends up pointing to itself then the list is empty.
520  */
521 static void __locks_delete_block(struct file_lock *waiter)
522 {
523 	list_del_init(&waiter->fl_block);
524 	list_del_init(&waiter->fl_link);
525 	waiter->fl_next = NULL;
526 }
527 
528 /*
529  */
530 static void locks_delete_block(struct file_lock *waiter)
531 {
532 	lock_flocks();
533 	__locks_delete_block(waiter);
534 	unlock_flocks();
535 }
536 
537 /* Insert waiter into blocker's block list.
538  * We use a circular list so that processes can be easily woken up in
539  * the order they blocked. The documentation doesn't require this but
540  * it seems like the reasonable thing to do.
541  */
542 static void locks_insert_block(struct file_lock *blocker,
543 			       struct file_lock *waiter)
544 {
545 	BUG_ON(!list_empty(&waiter->fl_block));
546 	list_add_tail(&waiter->fl_block, &blocker->fl_block);
547 	waiter->fl_next = blocker;
548 	if (IS_POSIX(blocker))
549 		list_add(&waiter->fl_link, &blocked_list);
550 }
551 
552 /* Wake up processes blocked waiting for blocker.
553  * If told to wait then schedule the processes until the block list
554  * is empty, otherwise empty the block list ourselves.
555  */
556 static void locks_wake_up_blocks(struct file_lock *blocker)
557 {
558 	while (!list_empty(&blocker->fl_block)) {
559 		struct file_lock *waiter;
560 
561 		waiter = list_first_entry(&blocker->fl_block,
562 				struct file_lock, fl_block);
563 		__locks_delete_block(waiter);
564 		if (waiter->fl_lmops && waiter->fl_lmops->fl_notify)
565 			waiter->fl_lmops->fl_notify(waiter);
566 		else
567 			wake_up(&waiter->fl_wait);
568 	}
569 }
570 
571 /* Insert file lock fl into an inode's lock list at the position indicated
572  * by pos. At the same time add the lock to the global file lock list.
573  */
574 static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
575 {
576 	list_add(&fl->fl_link, &file_lock_list);
577 
578 	fl->fl_nspid = get_pid(task_tgid(current));
579 
580 	/* insert into file's list */
581 	fl->fl_next = *pos;
582 	*pos = fl;
583 }
584 
585 /*
586  * Delete a lock and then free it.
587  * Wake up processes that are blocked waiting for this lock,
588  * notify the FS that the lock has been cleared and
589  * finally free the lock.
590  */
591 static void locks_delete_lock(struct file_lock **thisfl_p)
592 {
593 	struct file_lock *fl = *thisfl_p;
594 
595 	*thisfl_p = fl->fl_next;
596 	fl->fl_next = NULL;
597 	list_del_init(&fl->fl_link);
598 
599 	fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
600 	if (fl->fl_fasync != NULL) {
601 		printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
602 		fl->fl_fasync = NULL;
603 	}
604 
605 	if (fl->fl_nspid) {
606 		put_pid(fl->fl_nspid);
607 		fl->fl_nspid = NULL;
608 	}
609 
610 	locks_wake_up_blocks(fl);
611 	locks_free_lock(fl);
612 }
613 
614 /* Determine if lock sys_fl blocks lock caller_fl. Common functionality
615  * checks for shared/exclusive status of overlapping locks.
616  */
617 static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
618 {
619 	if (sys_fl->fl_type == F_WRLCK)
620 		return 1;
621 	if (caller_fl->fl_type == F_WRLCK)
622 		return 1;
623 	return 0;
624 }
625 
626 /* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
627  * checking before calling the locks_conflict().
628  */
629 static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
630 {
631 	/* POSIX locks owned by the same process do not conflict with
632 	 * each other.
633 	 */
634 	if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl))
635 		return (0);
636 
637 	/* Check whether they overlap */
638 	if (!locks_overlap(caller_fl, sys_fl))
639 		return 0;
640 
641 	return (locks_conflict(caller_fl, sys_fl));
642 }
643 
644 /* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
645  * checking before calling the locks_conflict().
646  */
647 static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
648 {
649 	/* FLOCK locks referring to the same filp do not conflict with
650 	 * each other.
651 	 */
652 	if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
653 		return (0);
654 	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
655 		return 0;
656 
657 	return (locks_conflict(caller_fl, sys_fl));
658 }
659 
660 void
661 posix_test_lock(struct file *filp, struct file_lock *fl)
662 {
663 	struct file_lock *cfl;
664 
665 	lock_flocks();
666 	for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
667 		if (!IS_POSIX(cfl))
668 			continue;
669 		if (posix_locks_conflict(fl, cfl))
670 			break;
671 	}
672 	if (cfl) {
673 		__locks_copy_lock(fl, cfl);
674 		if (cfl->fl_nspid)
675 			fl->fl_pid = pid_vnr(cfl->fl_nspid);
676 	} else
677 		fl->fl_type = F_UNLCK;
678 	unlock_flocks();
679 	return;
680 }
681 EXPORT_SYMBOL(posix_test_lock);
682 
683 /*
684  * Deadlock detection:
685  *
686  * We attempt to detect deadlocks that are due purely to posix file
687  * locks.
688  *
689  * We assume that a task can be waiting for at most one lock at a time.
690  * So for any acquired lock, the process holding that lock may be
691  * waiting on at most one other lock.  That lock in turns may be held by
692  * someone waiting for at most one other lock.  Given a requested lock
693  * caller_fl which is about to wait for a conflicting lock block_fl, we
694  * follow this chain of waiters to ensure we are not about to create a
695  * cycle.
696  *
697  * Since we do this before we ever put a process to sleep on a lock, we
698  * are ensured that there is never a cycle; that is what guarantees that
699  * the while() loop in posix_locks_deadlock() eventually completes.
700  *
701  * Note: the above assumption may not be true when handling lock
702  * requests from a broken NFS client. It may also fail in the presence
703  * of tasks (such as posix threads) sharing the same open file table.
704  *
705  * To handle those cases, we just bail out after a few iterations.
706  */
707 
708 #define MAX_DEADLK_ITERATIONS 10
709 
710 /* Find a lock that the owner of the given block_fl is blocking on. */
711 static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
712 {
713 	struct file_lock *fl;
714 
715 	list_for_each_entry(fl, &blocked_list, fl_link) {
716 		if (posix_same_owner(fl, block_fl))
717 			return fl->fl_next;
718 	}
719 	return NULL;
720 }
721 
722 static int posix_locks_deadlock(struct file_lock *caller_fl,
723 				struct file_lock *block_fl)
724 {
725 	int i = 0;
726 
727 	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
728 		if (i++ > MAX_DEADLK_ITERATIONS)
729 			return 0;
730 		if (posix_same_owner(caller_fl, block_fl))
731 			return 1;
732 	}
733 	return 0;
734 }
735 
736 /* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
737  * after any leases, but before any posix locks.
738  *
739  * Note that if called with an FL_EXISTS argument, the caller may determine
740  * whether or not a lock was successfully freed by testing the return
741  * value for -ENOENT.
742  */
743 static int flock_lock_file(struct file *filp, struct file_lock *request)
744 {
745 	struct file_lock *new_fl = NULL;
746 	struct file_lock **before;
747 	struct inode * inode = filp->f_path.dentry->d_inode;
748 	int error = 0;
749 	int found = 0;
750 
751 	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
752 		new_fl = locks_alloc_lock();
753 		if (!new_fl)
754 			return -ENOMEM;
755 	}
756 
757 	lock_flocks();
758 	if (request->fl_flags & FL_ACCESS)
759 		goto find_conflict;
760 
761 	for_each_lock(inode, before) {
762 		struct file_lock *fl = *before;
763 		if (IS_POSIX(fl))
764 			break;
765 		if (IS_LEASE(fl))
766 			continue;
767 		if (filp != fl->fl_file)
768 			continue;
769 		if (request->fl_type == fl->fl_type)
770 			goto out;
771 		found = 1;
772 		locks_delete_lock(before);
773 		break;
774 	}
775 
776 	if (request->fl_type == F_UNLCK) {
777 		if ((request->fl_flags & FL_EXISTS) && !found)
778 			error = -ENOENT;
779 		goto out;
780 	}
781 
782 	/*
783 	 * If a higher-priority process was blocked on the old file lock,
784 	 * give it the opportunity to lock the file.
785 	 */
786 	if (found) {
787 		unlock_flocks();
788 		cond_resched();
789 		lock_flocks();
790 	}
791 
792 find_conflict:
793 	for_each_lock(inode, before) {
794 		struct file_lock *fl = *before;
795 		if (IS_POSIX(fl))
796 			break;
797 		if (IS_LEASE(fl))
798 			continue;
799 		if (!flock_locks_conflict(request, fl))
800 			continue;
801 		error = -EAGAIN;
802 		if (!(request->fl_flags & FL_SLEEP))
803 			goto out;
804 		error = FILE_LOCK_DEFERRED;
805 		locks_insert_block(fl, request);
806 		goto out;
807 	}
808 	if (request->fl_flags & FL_ACCESS)
809 		goto out;
810 	locks_copy_lock(new_fl, request);
811 	locks_insert_lock(before, new_fl);
812 	new_fl = NULL;
813 	error = 0;
814 
815 out:
816 	unlock_flocks();
817 	if (new_fl)
818 		locks_free_lock(new_fl);
819 	return error;
820 }
821 
822 static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
823 {
824 	struct file_lock *fl;
825 	struct file_lock *new_fl = NULL;
826 	struct file_lock *new_fl2 = NULL;
827 	struct file_lock *left = NULL;
828 	struct file_lock *right = NULL;
829 	struct file_lock **before;
830 	int error, added = 0;
831 
832 	/*
833 	 * We may need two file_lock structures for this operation,
834 	 * so we get them in advance to avoid races.
835 	 *
836 	 * In some cases we can be sure, that no new locks will be needed
837 	 */
838 	if (!(request->fl_flags & FL_ACCESS) &&
839 	    (request->fl_type != F_UNLCK ||
840 	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
841 		new_fl = locks_alloc_lock();
842 		new_fl2 = locks_alloc_lock();
843 	}
844 
845 	lock_flocks();
846 	if (request->fl_type != F_UNLCK) {
847 		for_each_lock(inode, before) {
848 			fl = *before;
849 			if (!IS_POSIX(fl))
850 				continue;
851 			if (!posix_locks_conflict(request, fl))
852 				continue;
853 			if (conflock)
854 				__locks_copy_lock(conflock, fl);
855 			error = -EAGAIN;
856 			if (!(request->fl_flags & FL_SLEEP))
857 				goto out;
858 			error = -EDEADLK;
859 			if (posix_locks_deadlock(request, fl))
860 				goto out;
861 			error = FILE_LOCK_DEFERRED;
862 			locks_insert_block(fl, request);
863 			goto out;
864   		}
865   	}
866 
867 	/* If we're just looking for a conflict, we're done. */
868 	error = 0;
869 	if (request->fl_flags & FL_ACCESS)
870 		goto out;
871 
872 	/*
873 	 * Find the first old lock with the same owner as the new lock.
874 	 */
875 
876 	before = &inode->i_flock;
877 
878 	/* First skip locks owned by other processes.  */
879 	while ((fl = *before) && (!IS_POSIX(fl) ||
880 				  !posix_same_owner(request, fl))) {
881 		before = &fl->fl_next;
882 	}
883 
884 	/* Process locks with this owner.  */
885 	while ((fl = *before) && posix_same_owner(request, fl)) {
886 		/* Detect adjacent or overlapping regions (if same lock type)
887 		 */
888 		if (request->fl_type == fl->fl_type) {
889 			/* In all comparisons of start vs end, use
890 			 * "start - 1" rather than "end + 1". If end
891 			 * is OFFSET_MAX, end + 1 will become negative.
892 			 */
893 			if (fl->fl_end < request->fl_start - 1)
894 				goto next_lock;
895 			/* If the next lock in the list has entirely bigger
896 			 * addresses than the new one, insert the lock here.
897 			 */
898 			if (fl->fl_start - 1 > request->fl_end)
899 				break;
900 
901 			/* If we come here, the new and old lock are of the
902 			 * same type and adjacent or overlapping. Make one
903 			 * lock yielding from the lower start address of both
904 			 * locks to the higher end address.
905 			 */
906 			if (fl->fl_start > request->fl_start)
907 				fl->fl_start = request->fl_start;
908 			else
909 				request->fl_start = fl->fl_start;
910 			if (fl->fl_end < request->fl_end)
911 				fl->fl_end = request->fl_end;
912 			else
913 				request->fl_end = fl->fl_end;
914 			if (added) {
915 				locks_delete_lock(before);
916 				continue;
917 			}
918 			request = fl;
919 			added = 1;
920 		}
921 		else {
922 			/* Processing for different lock types is a bit
923 			 * more complex.
924 			 */
925 			if (fl->fl_end < request->fl_start)
926 				goto next_lock;
927 			if (fl->fl_start > request->fl_end)
928 				break;
929 			if (request->fl_type == F_UNLCK)
930 				added = 1;
931 			if (fl->fl_start < request->fl_start)
932 				left = fl;
933 			/* If the next lock in the list has a higher end
934 			 * address than the new one, insert the new one here.
935 			 */
936 			if (fl->fl_end > request->fl_end) {
937 				right = fl;
938 				break;
939 			}
940 			if (fl->fl_start >= request->fl_start) {
941 				/* The new lock completely replaces an old
942 				 * one (This may happen several times).
943 				 */
944 				if (added) {
945 					locks_delete_lock(before);
946 					continue;
947 				}
948 				/* Replace the old lock with the new one.
949 				 * Wake up anybody waiting for the old one,
950 				 * as the change in lock type might satisfy
951 				 * their needs.
952 				 */
953 				locks_wake_up_blocks(fl);
954 				fl->fl_start = request->fl_start;
955 				fl->fl_end = request->fl_end;
956 				fl->fl_type = request->fl_type;
957 				locks_release_private(fl);
958 				locks_copy_private(fl, request);
959 				request = fl;
960 				added = 1;
961 			}
962 		}
963 		/* Go on to next lock.
964 		 */
965 	next_lock:
966 		before = &fl->fl_next;
967 	}
968 
969 	/*
970 	 * The above code only modifies existing locks in case of
971 	 * merging or replacing.  If new lock(s) need to be inserted
972 	 * all modifications are done bellow this, so it's safe yet to
973 	 * bail out.
974 	 */
975 	error = -ENOLCK; /* "no luck" */
976 	if (right && left == right && !new_fl2)
977 		goto out;
978 
979 	error = 0;
980 	if (!added) {
981 		if (request->fl_type == F_UNLCK) {
982 			if (request->fl_flags & FL_EXISTS)
983 				error = -ENOENT;
984 			goto out;
985 		}
986 
987 		if (!new_fl) {
988 			error = -ENOLCK;
989 			goto out;
990 		}
991 		locks_copy_lock(new_fl, request);
992 		locks_insert_lock(before, new_fl);
993 		new_fl = NULL;
994 	}
995 	if (right) {
996 		if (left == right) {
997 			/* The new lock breaks the old one in two pieces,
998 			 * so we have to use the second new lock.
999 			 */
1000 			left = new_fl2;
1001 			new_fl2 = NULL;
1002 			locks_copy_lock(left, right);
1003 			locks_insert_lock(before, left);
1004 		}
1005 		right->fl_start = request->fl_end + 1;
1006 		locks_wake_up_blocks(right);
1007 	}
1008 	if (left) {
1009 		left->fl_end = request->fl_start - 1;
1010 		locks_wake_up_blocks(left);
1011 	}
1012  out:
1013 	unlock_flocks();
1014 	/*
1015 	 * Free any unused locks.
1016 	 */
1017 	if (new_fl)
1018 		locks_free_lock(new_fl);
1019 	if (new_fl2)
1020 		locks_free_lock(new_fl2);
1021 	return error;
1022 }
1023 
1024 /**
1025  * posix_lock_file - Apply a POSIX-style lock to a file
1026  * @filp: The file to apply the lock to
1027  * @fl: The lock to be applied
1028  * @conflock: Place to return a copy of the conflicting lock, if found.
1029  *
1030  * Add a POSIX style lock to a file.
1031  * We merge adjacent & overlapping locks whenever possible.
1032  * POSIX locks are sorted by owner task, then by starting address
1033  *
1034  * Note that if called with an FL_EXISTS argument, the caller may determine
1035  * whether or not a lock was successfully freed by testing the return
1036  * value for -ENOENT.
1037  */
1038 int posix_lock_file(struct file *filp, struct file_lock *fl,
1039 			struct file_lock *conflock)
1040 {
1041 	return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock);
1042 }
1043 EXPORT_SYMBOL(posix_lock_file);
1044 
1045 /**
1046  * posix_lock_file_wait - Apply a POSIX-style lock to a file
1047  * @filp: The file to apply the lock to
1048  * @fl: The lock to be applied
1049  *
1050  * Add a POSIX style lock to a file.
1051  * We merge adjacent & overlapping locks whenever possible.
1052  * POSIX locks are sorted by owner task, then by starting address
1053  */
1054 int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
1055 {
1056 	int error;
1057 	might_sleep ();
1058 	for (;;) {
1059 		error = posix_lock_file(filp, fl, NULL);
1060 		if (error != FILE_LOCK_DEFERRED)
1061 			break;
1062 		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1063 		if (!error)
1064 			continue;
1065 
1066 		locks_delete_block(fl);
1067 		break;
1068 	}
1069 	return error;
1070 }
1071 EXPORT_SYMBOL(posix_lock_file_wait);
1072 
1073 /**
1074  * locks_mandatory_locked - Check for an active lock
1075  * @inode: the file to check
1076  *
1077  * Searches the inode's list of locks to find any POSIX locks which conflict.
1078  * This function is called from locks_verify_locked() only.
1079  */
1080 int locks_mandatory_locked(struct inode *inode)
1081 {
1082 	fl_owner_t owner = current->files;
1083 	struct file_lock *fl;
1084 
1085 	/*
1086 	 * Search the lock list for this inode for any POSIX locks.
1087 	 */
1088 	lock_flocks();
1089 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
1090 		if (!IS_POSIX(fl))
1091 			continue;
1092 		if (fl->fl_owner != owner)
1093 			break;
1094 	}
1095 	unlock_flocks();
1096 	return fl ? -EAGAIN : 0;
1097 }
1098 
1099 /**
1100  * locks_mandatory_area - Check for a conflicting lock
1101  * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ
1102  *		for shared
1103  * @inode:      the file to check
1104  * @filp:       how the file was opened (if it was)
1105  * @offset:     start of area to check
1106  * @count:      length of area to check
1107  *
1108  * Searches the inode's list of locks to find any POSIX locks which conflict.
1109  * This function is called from rw_verify_area() and
1110  * locks_verify_truncate().
1111  */
1112 int locks_mandatory_area(int read_write, struct inode *inode,
1113 			 struct file *filp, loff_t offset,
1114 			 size_t count)
1115 {
1116 	struct file_lock fl;
1117 	int error;
1118 
1119 	locks_init_lock(&fl);
1120 	fl.fl_owner = current->files;
1121 	fl.fl_pid = current->tgid;
1122 	fl.fl_file = filp;
1123 	fl.fl_flags = FL_POSIX | FL_ACCESS;
1124 	if (filp && !(filp->f_flags & O_NONBLOCK))
1125 		fl.fl_flags |= FL_SLEEP;
1126 	fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
1127 	fl.fl_start = offset;
1128 	fl.fl_end = offset + count - 1;
1129 
1130 	for (;;) {
1131 		error = __posix_lock_file(inode, &fl, NULL);
1132 		if (error != FILE_LOCK_DEFERRED)
1133 			break;
1134 		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
1135 		if (!error) {
1136 			/*
1137 			 * If we've been sleeping someone might have
1138 			 * changed the permissions behind our back.
1139 			 */
1140 			if (__mandatory_lock(inode))
1141 				continue;
1142 		}
1143 
1144 		locks_delete_block(&fl);
1145 		break;
1146 	}
1147 
1148 	return error;
1149 }
1150 
1151 EXPORT_SYMBOL(locks_mandatory_area);
1152 
1153 /* We already had a lease on this file; just change its type */
1154 int lease_modify(struct file_lock **before, int arg)
1155 {
1156 	struct file_lock *fl = *before;
1157 	int error = assign_type(fl, arg);
1158 
1159 	if (error)
1160 		return error;
1161 	locks_wake_up_blocks(fl);
1162 	if (arg == F_UNLCK)
1163 		locks_delete_lock(before);
1164 	return 0;
1165 }
1166 
1167 EXPORT_SYMBOL(lease_modify);
1168 
1169 static void time_out_leases(struct inode *inode)
1170 {
1171 	struct file_lock **before;
1172 	struct file_lock *fl;
1173 
1174 	before = &inode->i_flock;
1175 	while ((fl = *before) && IS_LEASE(fl) && (fl->fl_type & F_INPROGRESS)) {
1176 		if ((fl->fl_break_time == 0)
1177 				|| time_before(jiffies, fl->fl_break_time)) {
1178 			before = &fl->fl_next;
1179 			continue;
1180 		}
1181 		lease_modify(before, fl->fl_type & ~F_INPROGRESS);
1182 		if (fl == *before)	/* lease_modify may have freed fl */
1183 			before = &fl->fl_next;
1184 	}
1185 }
1186 
1187 /**
1188  *	__break_lease	-	revoke all outstanding leases on file
1189  *	@inode: the inode of the file to return
1190  *	@mode: the open mode (read or write)
1191  *
1192  *	break_lease (inlined for speed) has checked there already is at least
1193  *	some kind of lock (maybe a lease) on this file.  Leases are broken on
1194  *	a call to open() or truncate().  This function can sleep unless you
1195  *	specified %O_NONBLOCK to your open().
1196  */
1197 int __break_lease(struct inode *inode, unsigned int mode)
1198 {
1199 	int error = 0, future;
1200 	struct file_lock *new_fl, *flock;
1201 	struct file_lock *fl;
1202 	unsigned long break_time;
1203 	int i_have_this_lease = 0;
1204 	int want_write = (mode & O_ACCMODE) != O_RDONLY;
1205 
1206 	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1207 
1208 	lock_flocks();
1209 
1210 	time_out_leases(inode);
1211 
1212 	flock = inode->i_flock;
1213 	if ((flock == NULL) || !IS_LEASE(flock))
1214 		goto out;
1215 
1216 	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
1217 		if (fl->fl_owner == current->files)
1218 			i_have_this_lease = 1;
1219 
1220 	if (want_write) {
1221 		/* If we want write access, we have to revoke any lease. */
1222 		future = F_UNLCK | F_INPROGRESS;
1223 	} else if (flock->fl_type & F_INPROGRESS) {
1224 		/* If the lease is already being broken, we just leave it */
1225 		future = flock->fl_type;
1226 	} else if (flock->fl_type & F_WRLCK) {
1227 		/* Downgrade the exclusive lease to a read-only lease. */
1228 		future = F_RDLCK | F_INPROGRESS;
1229 	} else {
1230 		/* the existing lease was read-only, so we can read too. */
1231 		goto out;
1232 	}
1233 
1234 	if (IS_ERR(new_fl) && !i_have_this_lease
1235 			&& ((mode & O_NONBLOCK) == 0)) {
1236 		error = PTR_ERR(new_fl);
1237 		goto out;
1238 	}
1239 
1240 	break_time = 0;
1241 	if (lease_break_time > 0) {
1242 		break_time = jiffies + lease_break_time * HZ;
1243 		if (break_time == 0)
1244 			break_time++;	/* so that 0 means no break time */
1245 	}
1246 
1247 	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
1248 		if (fl->fl_type != future) {
1249 			fl->fl_type = future;
1250 			fl->fl_break_time = break_time;
1251 			/* lease must have lmops break callback */
1252 			fl->fl_lmops->fl_break(fl);
1253 		}
1254 	}
1255 
1256 	if (i_have_this_lease || (mode & O_NONBLOCK)) {
1257 		error = -EWOULDBLOCK;
1258 		goto out;
1259 	}
1260 
1261 restart:
1262 	break_time = flock->fl_break_time;
1263 	if (break_time != 0) {
1264 		break_time -= jiffies;
1265 		if (break_time == 0)
1266 			break_time++;
1267 	}
1268 	locks_insert_block(flock, new_fl);
1269 	unlock_flocks();
1270 	error = wait_event_interruptible_timeout(new_fl->fl_wait,
1271 						!new_fl->fl_next, break_time);
1272 	lock_flocks();
1273 	__locks_delete_block(new_fl);
1274 	if (error >= 0) {
1275 		if (error == 0)
1276 			time_out_leases(inode);
1277 		/* Wait for the next lease that has not been broken yet */
1278 		for (flock = inode->i_flock; flock && IS_LEASE(flock);
1279 				flock = flock->fl_next) {
1280 			if (flock->fl_type & F_INPROGRESS)
1281 				goto restart;
1282 		}
1283 		error = 0;
1284 	}
1285 
1286 out:
1287 	unlock_flocks();
1288 	if (!IS_ERR(new_fl))
1289 		locks_free_lock(new_fl);
1290 	return error;
1291 }
1292 
1293 EXPORT_SYMBOL(__break_lease);
1294 
1295 /**
1296  *	lease_get_mtime - get the last modified time of an inode
1297  *	@inode: the inode
1298  *      @time:  pointer to a timespec which will contain the last modified time
1299  *
1300  * This is to force NFS clients to flush their caches for files with
1301  * exclusive leases.  The justification is that if someone has an
1302  * exclusive lease, then they could be modifying it.
1303  */
1304 void lease_get_mtime(struct inode *inode, struct timespec *time)
1305 {
1306 	struct file_lock *flock = inode->i_flock;
1307 	if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK))
1308 		*time = current_fs_time(inode->i_sb);
1309 	else
1310 		*time = inode->i_mtime;
1311 }
1312 
1313 EXPORT_SYMBOL(lease_get_mtime);
1314 
1315 /**
1316  *	fcntl_getlease - Enquire what lease is currently active
1317  *	@filp: the file
1318  *
1319  *	The value returned by this function will be one of
1320  *	(if no lease break is pending):
1321  *
1322  *	%F_RDLCK to indicate a shared lease is held.
1323  *
1324  *	%F_WRLCK to indicate an exclusive lease is held.
1325  *
1326  *	%F_UNLCK to indicate no lease is held.
1327  *
1328  *	(if a lease break is pending):
1329  *
1330  *	%F_RDLCK to indicate an exclusive lease needs to be
1331  *		changed to a shared lease (or removed).
1332  *
1333  *	%F_UNLCK to indicate the lease needs to be removed.
1334  *
1335  *	XXX: sfr & willy disagree over whether F_INPROGRESS
1336  *	should be returned to userspace.
1337  */
1338 int fcntl_getlease(struct file *filp)
1339 {
1340 	struct file_lock *fl;
1341 	int type = F_UNLCK;
1342 
1343 	lock_flocks();
1344 	time_out_leases(filp->f_path.dentry->d_inode);
1345 	for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl);
1346 			fl = fl->fl_next) {
1347 		if (fl->fl_file == filp) {
1348 			type = fl->fl_type & ~F_INPROGRESS;
1349 			break;
1350 		}
1351 	}
1352 	unlock_flocks();
1353 	return type;
1354 }
1355 
1356 /**
1357  *	generic_setlease	-	sets a lease on an open file
1358  *	@filp: file pointer
1359  *	@arg: type of lease to obtain
1360  *	@flp: input - file_lock to use, output - file_lock inserted
1361  *
1362  *	The (input) flp->fl_lmops->fl_break function is required
1363  *	by break_lease().
1364  *
1365  *	Called with file_lock_lock held.
1366  */
1367 int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
1368 {
1369 	struct file_lock *fl, **before, **my_before = NULL, *lease;
1370 	struct dentry *dentry = filp->f_path.dentry;
1371 	struct inode *inode = dentry->d_inode;
1372 	int error, rdlease_count = 0, wrlease_count = 0;
1373 
1374 	if ((current_fsuid() != inode->i_uid) && !capable(CAP_LEASE))
1375 		return -EACCES;
1376 	if (!S_ISREG(inode->i_mode))
1377 		return -EINVAL;
1378 	error = security_file_lock(filp, arg);
1379 	if (error)
1380 		return error;
1381 
1382 	time_out_leases(inode);
1383 
1384 	BUG_ON(!(*flp)->fl_lmops->fl_break);
1385 
1386 	lease = *flp;
1387 
1388 	if (arg != F_UNLCK) {
1389 		error = -EAGAIN;
1390 		if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
1391 			goto out;
1392 		if ((arg == F_WRLCK)
1393 		    && ((atomic_read(&dentry->d_count) > 1)
1394 			|| (atomic_read(&inode->i_count) > 1)))
1395 			goto out;
1396 	}
1397 
1398 	/*
1399 	 * At this point, we know that if there is an exclusive
1400 	 * lease on this file, then we hold it on this filp
1401 	 * (otherwise our open of this file would have blocked).
1402 	 * And if we are trying to acquire an exclusive lease,
1403 	 * then the file is not open by anyone (including us)
1404 	 * except for this filp.
1405 	 */
1406 	for (before = &inode->i_flock;
1407 			((fl = *before) != NULL) && IS_LEASE(fl);
1408 			before = &fl->fl_next) {
1409 		if (lease->fl_lmops->fl_mylease(fl, lease))
1410 			my_before = before;
1411 		else if (fl->fl_type == (F_INPROGRESS | F_UNLCK))
1412 			/*
1413 			 * Someone is in the process of opening this
1414 			 * file for writing so we may not take an
1415 			 * exclusive lease on it.
1416 			 */
1417 			wrlease_count++;
1418 		else
1419 			rdlease_count++;
1420 	}
1421 
1422 	error = -EAGAIN;
1423 	if ((arg == F_RDLCK && (wrlease_count > 0)) ||
1424 	    (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0)))
1425 		goto out;
1426 
1427 	if (my_before != NULL) {
1428 		*flp = *my_before;
1429 		error = lease->fl_lmops->fl_change(my_before, arg);
1430 		goto out;
1431 	}
1432 
1433 	if (arg == F_UNLCK)
1434 		goto out;
1435 
1436 	error = -EINVAL;
1437 	if (!leases_enable)
1438 		goto out;
1439 
1440 	locks_insert_lock(before, lease);
1441 	return 0;
1442 
1443 out:
1444 	locks_free_lock(lease);
1445 	return error;
1446 }
1447 EXPORT_SYMBOL(generic_setlease);
1448 
1449 static int __vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
1450 {
1451 	if (filp->f_op && filp->f_op->setlease)
1452 		return filp->f_op->setlease(filp, arg, lease);
1453 	else
1454 		return generic_setlease(filp, arg, lease);
1455 }
1456 
1457 /**
1458  *	vfs_setlease        -       sets a lease on an open file
1459  *	@filp: file pointer
1460  *	@arg: type of lease to obtain
1461  *	@lease: file_lock to use
1462  *
1463  *	Call this to establish a lease on the file.
1464  *	The (*lease)->fl_lmops->fl_break operation must be set; if not,
1465  *	break_lease will oops!
1466  *
1467  *	This will call the filesystem's setlease file method, if
1468  *	defined.  Note that there is no getlease method; instead, the
1469  *	filesystem setlease method should call back to setlease() to
1470  *	add a lease to the inode's lease list, where fcntl_getlease() can
1471  *	find it.  Since fcntl_getlease() only reports whether the current
1472  *	task holds a lease, a cluster filesystem need only do this for
1473  *	leases held by processes on this node.
1474  *
1475  *	There is also no break_lease method; filesystems that
1476  *	handle their own leases should break leases themselves from the
1477  *	filesystem's open, create, and (on truncate) setattr methods.
1478  *
1479  *	Warning: the only current setlease methods exist only to disable
1480  *	leases in certain cases.  More vfs changes may be required to
1481  *	allow a full filesystem lease implementation.
1482  */
1483 
1484 int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
1485 {
1486 	int error;
1487 
1488 	lock_flocks();
1489 	error = __vfs_setlease(filp, arg, lease);
1490 	unlock_flocks();
1491 
1492 	return error;
1493 }
1494 EXPORT_SYMBOL_GPL(vfs_setlease);
1495 
1496 /**
1497  *	fcntl_setlease	-	sets a lease on an open file
1498  *	@fd: open file descriptor
1499  *	@filp: file pointer
1500  *	@arg: type of lease to obtain
1501  *
1502  *	Call this fcntl to establish a lease on the file.
1503  *	Note that you also need to call %F_SETSIG to
1504  *	receive a signal when the lease is broken.
1505  */
1506 int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1507 {
1508 	struct file_lock *fl;
1509 	struct fasync_struct *new;
1510 	struct inode *inode = filp->f_path.dentry->d_inode;
1511 	int error;
1512 
1513 	fl = lease_alloc(filp, arg);
1514 	if (IS_ERR(fl))
1515 		return PTR_ERR(fl);
1516 
1517 	new = fasync_alloc();
1518 	if (!new) {
1519 		locks_free_lock(fl);
1520 		return -ENOMEM;
1521 	}
1522 	lock_flocks();
1523 	error = __vfs_setlease(filp, arg, &fl);
1524 	if (error || arg == F_UNLCK)
1525 		goto out_unlock;
1526 
1527 	/*
1528 	 * fasync_insert_entry() returns the old entry if any.
1529 	 * If there was no old entry, then it used 'new' and
1530 	 * inserted it into the fasync list. Clear new so that
1531 	 * we don't release it here.
1532 	 */
1533 	if (!fasync_insert_entry(fd, filp, &fl->fl_fasync, new))
1534 		new = NULL;
1535 
1536 	if (error < 0) {
1537 		/* remove lease just inserted by setlease */
1538 		fl->fl_type = F_UNLCK | F_INPROGRESS;
1539 		fl->fl_break_time = jiffies - 10;
1540 		time_out_leases(inode);
1541 		goto out_unlock;
1542 	}
1543 
1544 	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
1545 out_unlock:
1546 	unlock_flocks();
1547 	if (new)
1548 		fasync_free(new);
1549 	return error;
1550 }
1551 
1552 /**
1553  * flock_lock_file_wait - Apply a FLOCK-style lock to a file
1554  * @filp: The file to apply the lock to
1555  * @fl: The lock to be applied
1556  *
1557  * Add a FLOCK style lock to a file.
1558  */
1559 int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
1560 {
1561 	int error;
1562 	might_sleep();
1563 	for (;;) {
1564 		error = flock_lock_file(filp, fl);
1565 		if (error != FILE_LOCK_DEFERRED)
1566 			break;
1567 		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1568 		if (!error)
1569 			continue;
1570 
1571 		locks_delete_block(fl);
1572 		break;
1573 	}
1574 	return error;
1575 }
1576 
1577 EXPORT_SYMBOL(flock_lock_file_wait);
1578 
1579 /**
1580  *	sys_flock: - flock() system call.
1581  *	@fd: the file descriptor to lock.
1582  *	@cmd: the type of lock to apply.
1583  *
1584  *	Apply a %FL_FLOCK style lock to an open file descriptor.
1585  *	The @cmd can be one of
1586  *
1587  *	%LOCK_SH -- a shared lock.
1588  *
1589  *	%LOCK_EX -- an exclusive lock.
1590  *
1591  *	%LOCK_UN -- remove an existing lock.
1592  *
1593  *	%LOCK_MAND -- a `mandatory' flock.  This exists to emulate Windows Share Modes.
1594  *
1595  *	%LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
1596  *	processes read and write access respectively.
1597  */
1598 SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
1599 {
1600 	struct file *filp;
1601 	struct file_lock *lock;
1602 	int can_sleep, unlock;
1603 	int error;
1604 
1605 	error = -EBADF;
1606 	filp = fget(fd);
1607 	if (!filp)
1608 		goto out;
1609 
1610 	can_sleep = !(cmd & LOCK_NB);
1611 	cmd &= ~LOCK_NB;
1612 	unlock = (cmd == LOCK_UN);
1613 
1614 	if (!unlock && !(cmd & LOCK_MAND) &&
1615 	    !(filp->f_mode & (FMODE_READ|FMODE_WRITE)))
1616 		goto out_putf;
1617 
1618 	error = flock_make_lock(filp, &lock, cmd);
1619 	if (error)
1620 		goto out_putf;
1621 	if (can_sleep)
1622 		lock->fl_flags |= FL_SLEEP;
1623 
1624 	error = security_file_lock(filp, lock->fl_type);
1625 	if (error)
1626 		goto out_free;
1627 
1628 	if (filp->f_op && filp->f_op->flock)
1629 		error = filp->f_op->flock(filp,
1630 					  (can_sleep) ? F_SETLKW : F_SETLK,
1631 					  lock);
1632 	else
1633 		error = flock_lock_file_wait(filp, lock);
1634 
1635  out_free:
1636 	locks_free_lock(lock);
1637 
1638  out_putf:
1639 	fput(filp);
1640  out:
1641 	return error;
1642 }
1643 
1644 /**
1645  * vfs_test_lock - test file byte range lock
1646  * @filp: The file to test lock for
1647  * @fl: The lock to test; also used to hold result
1648  *
1649  * Returns -ERRNO on failure.  Indicates presence of conflicting lock by
1650  * setting conf->fl_type to something other than F_UNLCK.
1651  */
1652 int vfs_test_lock(struct file *filp, struct file_lock *fl)
1653 {
1654 	if (filp->f_op && filp->f_op->lock)
1655 		return filp->f_op->lock(filp, F_GETLK, fl);
1656 	posix_test_lock(filp, fl);
1657 	return 0;
1658 }
1659 EXPORT_SYMBOL_GPL(vfs_test_lock);
1660 
1661 static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
1662 {
1663 	flock->l_pid = fl->fl_pid;
1664 #if BITS_PER_LONG == 32
1665 	/*
1666 	 * Make sure we can represent the posix lock via
1667 	 * legacy 32bit flock.
1668 	 */
1669 	if (fl->fl_start > OFFT_OFFSET_MAX)
1670 		return -EOVERFLOW;
1671 	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
1672 		return -EOVERFLOW;
1673 #endif
1674 	flock->l_start = fl->fl_start;
1675 	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1676 		fl->fl_end - fl->fl_start + 1;
1677 	flock->l_whence = 0;
1678 	flock->l_type = fl->fl_type;
1679 	return 0;
1680 }
1681 
1682 #if BITS_PER_LONG == 32
1683 static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
1684 {
1685 	flock->l_pid = fl->fl_pid;
1686 	flock->l_start = fl->fl_start;
1687 	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1688 		fl->fl_end - fl->fl_start + 1;
1689 	flock->l_whence = 0;
1690 	flock->l_type = fl->fl_type;
1691 }
1692 #endif
1693 
1694 /* Report the first existing lock that would conflict with l.
1695  * This implements the F_GETLK command of fcntl().
1696  */
1697 int fcntl_getlk(struct file *filp, struct flock __user *l)
1698 {
1699 	struct file_lock file_lock;
1700 	struct flock flock;
1701 	int error;
1702 
1703 	error = -EFAULT;
1704 	if (copy_from_user(&flock, l, sizeof(flock)))
1705 		goto out;
1706 	error = -EINVAL;
1707 	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1708 		goto out;
1709 
1710 	error = flock_to_posix_lock(filp, &file_lock, &flock);
1711 	if (error)
1712 		goto out;
1713 
1714 	error = vfs_test_lock(filp, &file_lock);
1715 	if (error)
1716 		goto out;
1717 
1718 	flock.l_type = file_lock.fl_type;
1719 	if (file_lock.fl_type != F_UNLCK) {
1720 		error = posix_lock_to_flock(&flock, &file_lock);
1721 		if (error)
1722 			goto out;
1723 	}
1724 	error = -EFAULT;
1725 	if (!copy_to_user(l, &flock, sizeof(flock)))
1726 		error = 0;
1727 out:
1728 	return error;
1729 }
1730 
1731 /**
1732  * vfs_lock_file - file byte range lock
1733  * @filp: The file to apply the lock to
1734  * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
1735  * @fl: The lock to be applied
1736  * @conf: Place to return a copy of the conflicting lock, if found.
1737  *
1738  * A caller that doesn't care about the conflicting lock may pass NULL
1739  * as the final argument.
1740  *
1741  * If the filesystem defines a private ->lock() method, then @conf will
1742  * be left unchanged; so a caller that cares should initialize it to
1743  * some acceptable default.
1744  *
1745  * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
1746  * locks, the ->lock() interface may return asynchronously, before the lock has
1747  * been granted or denied by the underlying filesystem, if (and only if)
1748  * fl_grant is set. Callers expecting ->lock() to return asynchronously
1749  * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
1750  * the request is for a blocking lock. When ->lock() does return asynchronously,
1751  * it must return FILE_LOCK_DEFERRED, and call ->fl_grant() when the lock
1752  * request completes.
1753  * If the request is for non-blocking lock the file system should return
1754  * FILE_LOCK_DEFERRED then try to get the lock and call the callback routine
1755  * with the result. If the request timed out the callback routine will return a
1756  * nonzero return code and the file system should release the lock. The file
1757  * system is also responsible to keep a corresponding posix lock when it
1758  * grants a lock so the VFS can find out which locks are locally held and do
1759  * the correct lock cleanup when required.
1760  * The underlying filesystem must not drop the kernel lock or call
1761  * ->fl_grant() before returning to the caller with a FILE_LOCK_DEFERRED
1762  * return code.
1763  */
1764 int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
1765 {
1766 	if (filp->f_op && filp->f_op->lock)
1767 		return filp->f_op->lock(filp, cmd, fl);
1768 	else
1769 		return posix_lock_file(filp, fl, conf);
1770 }
1771 EXPORT_SYMBOL_GPL(vfs_lock_file);
1772 
1773 static int do_lock_file_wait(struct file *filp, unsigned int cmd,
1774 			     struct file_lock *fl)
1775 {
1776 	int error;
1777 
1778 	error = security_file_lock(filp, fl->fl_type);
1779 	if (error)
1780 		return error;
1781 
1782 	for (;;) {
1783 		error = vfs_lock_file(filp, cmd, fl, NULL);
1784 		if (error != FILE_LOCK_DEFERRED)
1785 			break;
1786 		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1787 		if (!error)
1788 			continue;
1789 
1790 		locks_delete_block(fl);
1791 		break;
1792 	}
1793 
1794 	return error;
1795 }
1796 
1797 /* Apply the lock described by l to an open file descriptor.
1798  * This implements both the F_SETLK and F_SETLKW commands of fcntl().
1799  */
1800 int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
1801 		struct flock __user *l)
1802 {
1803 	struct file_lock *file_lock = locks_alloc_lock();
1804 	struct flock flock;
1805 	struct inode *inode;
1806 	struct file *f;
1807 	int error;
1808 
1809 	if (file_lock == NULL)
1810 		return -ENOLCK;
1811 
1812 	/*
1813 	 * This might block, so we do it before checking the inode.
1814 	 */
1815 	error = -EFAULT;
1816 	if (copy_from_user(&flock, l, sizeof(flock)))
1817 		goto out;
1818 
1819 	inode = filp->f_path.dentry->d_inode;
1820 
1821 	/* Don't allow mandatory locks on files that may be memory mapped
1822 	 * and shared.
1823 	 */
1824 	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1825 		error = -EAGAIN;
1826 		goto out;
1827 	}
1828 
1829 again:
1830 	error = flock_to_posix_lock(filp, file_lock, &flock);
1831 	if (error)
1832 		goto out;
1833 	if (cmd == F_SETLKW) {
1834 		file_lock->fl_flags |= FL_SLEEP;
1835 	}
1836 
1837 	error = -EBADF;
1838 	switch (flock.l_type) {
1839 	case F_RDLCK:
1840 		if (!(filp->f_mode & FMODE_READ))
1841 			goto out;
1842 		break;
1843 	case F_WRLCK:
1844 		if (!(filp->f_mode & FMODE_WRITE))
1845 			goto out;
1846 		break;
1847 	case F_UNLCK:
1848 		break;
1849 	default:
1850 		error = -EINVAL;
1851 		goto out;
1852 	}
1853 
1854 	error = do_lock_file_wait(filp, cmd, file_lock);
1855 
1856 	/*
1857 	 * Attempt to detect a close/fcntl race and recover by
1858 	 * releasing the lock that was just acquired.
1859 	 */
1860 	/*
1861 	 * we need that spin_lock here - it prevents reordering between
1862 	 * update of inode->i_flock and check for it done in close().
1863 	 * rcu_read_lock() wouldn't do.
1864 	 */
1865 	spin_lock(&current->files->file_lock);
1866 	f = fcheck(fd);
1867 	spin_unlock(&current->files->file_lock);
1868 	if (!error && f != filp && flock.l_type != F_UNLCK) {
1869 		flock.l_type = F_UNLCK;
1870 		goto again;
1871 	}
1872 
1873 out:
1874 	locks_free_lock(file_lock);
1875 	return error;
1876 }
1877 
1878 #if BITS_PER_LONG == 32
1879 /* Report the first existing lock that would conflict with l.
1880  * This implements the F_GETLK command of fcntl().
1881  */
1882 int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
1883 {
1884 	struct file_lock file_lock;
1885 	struct flock64 flock;
1886 	int error;
1887 
1888 	error = -EFAULT;
1889 	if (copy_from_user(&flock, l, sizeof(flock)))
1890 		goto out;
1891 	error = -EINVAL;
1892 	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1893 		goto out;
1894 
1895 	error = flock64_to_posix_lock(filp, &file_lock, &flock);
1896 	if (error)
1897 		goto out;
1898 
1899 	error = vfs_test_lock(filp, &file_lock);
1900 	if (error)
1901 		goto out;
1902 
1903 	flock.l_type = file_lock.fl_type;
1904 	if (file_lock.fl_type != F_UNLCK)
1905 		posix_lock_to_flock64(&flock, &file_lock);
1906 
1907 	error = -EFAULT;
1908 	if (!copy_to_user(l, &flock, sizeof(flock)))
1909 		error = 0;
1910 
1911 out:
1912 	return error;
1913 }
1914 
1915 /* Apply the lock described by l to an open file descriptor.
1916  * This implements both the F_SETLK and F_SETLKW commands of fcntl().
1917  */
1918 int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
1919 		struct flock64 __user *l)
1920 {
1921 	struct file_lock *file_lock = locks_alloc_lock();
1922 	struct flock64 flock;
1923 	struct inode *inode;
1924 	struct file *f;
1925 	int error;
1926 
1927 	if (file_lock == NULL)
1928 		return -ENOLCK;
1929 
1930 	/*
1931 	 * This might block, so we do it before checking the inode.
1932 	 */
1933 	error = -EFAULT;
1934 	if (copy_from_user(&flock, l, sizeof(flock)))
1935 		goto out;
1936 
1937 	inode = filp->f_path.dentry->d_inode;
1938 
1939 	/* Don't allow mandatory locks on files that may be memory mapped
1940 	 * and shared.
1941 	 */
1942 	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1943 		error = -EAGAIN;
1944 		goto out;
1945 	}
1946 
1947 again:
1948 	error = flock64_to_posix_lock(filp, file_lock, &flock);
1949 	if (error)
1950 		goto out;
1951 	if (cmd == F_SETLKW64) {
1952 		file_lock->fl_flags |= FL_SLEEP;
1953 	}
1954 
1955 	error = -EBADF;
1956 	switch (flock.l_type) {
1957 	case F_RDLCK:
1958 		if (!(filp->f_mode & FMODE_READ))
1959 			goto out;
1960 		break;
1961 	case F_WRLCK:
1962 		if (!(filp->f_mode & FMODE_WRITE))
1963 			goto out;
1964 		break;
1965 	case F_UNLCK:
1966 		break;
1967 	default:
1968 		error = -EINVAL;
1969 		goto out;
1970 	}
1971 
1972 	error = do_lock_file_wait(filp, cmd, file_lock);
1973 
1974 	/*
1975 	 * Attempt to detect a close/fcntl race and recover by
1976 	 * releasing the lock that was just acquired.
1977 	 */
1978 	spin_lock(&current->files->file_lock);
1979 	f = fcheck(fd);
1980 	spin_unlock(&current->files->file_lock);
1981 	if (!error && f != filp && flock.l_type != F_UNLCK) {
1982 		flock.l_type = F_UNLCK;
1983 		goto again;
1984 	}
1985 
1986 out:
1987 	locks_free_lock(file_lock);
1988 	return error;
1989 }
1990 #endif /* BITS_PER_LONG == 32 */
1991 
1992 /*
1993  * This function is called when the file is being removed
1994  * from the task's fd array.  POSIX locks belonging to this task
1995  * are deleted at this time.
1996  */
1997 void locks_remove_posix(struct file *filp, fl_owner_t owner)
1998 {
1999 	struct file_lock lock;
2000 
2001 	/*
2002 	 * If there are no locks held on this file, we don't need to call
2003 	 * posix_lock_file().  Another process could be setting a lock on this
2004 	 * file at the same time, but we wouldn't remove that lock anyway.
2005 	 */
2006 	if (!filp->f_path.dentry->d_inode->i_flock)
2007 		return;
2008 
2009 	lock.fl_type = F_UNLCK;
2010 	lock.fl_flags = FL_POSIX | FL_CLOSE;
2011 	lock.fl_start = 0;
2012 	lock.fl_end = OFFSET_MAX;
2013 	lock.fl_owner = owner;
2014 	lock.fl_pid = current->tgid;
2015 	lock.fl_file = filp;
2016 	lock.fl_ops = NULL;
2017 	lock.fl_lmops = NULL;
2018 
2019 	vfs_lock_file(filp, F_SETLK, &lock, NULL);
2020 
2021 	if (lock.fl_ops && lock.fl_ops->fl_release_private)
2022 		lock.fl_ops->fl_release_private(&lock);
2023 }
2024 
2025 EXPORT_SYMBOL(locks_remove_posix);
2026 
2027 /*
2028  * This function is called on the last close of an open file.
2029  */
2030 void locks_remove_flock(struct file *filp)
2031 {
2032 	struct inode * inode = filp->f_path.dentry->d_inode;
2033 	struct file_lock *fl;
2034 	struct file_lock **before;
2035 
2036 	if (!inode->i_flock)
2037 		return;
2038 
2039 	if (filp->f_op && filp->f_op->flock) {
2040 		struct file_lock fl = {
2041 			.fl_pid = current->tgid,
2042 			.fl_file = filp,
2043 			.fl_flags = FL_FLOCK,
2044 			.fl_type = F_UNLCK,
2045 			.fl_end = OFFSET_MAX,
2046 		};
2047 		filp->f_op->flock(filp, F_SETLKW, &fl);
2048 		if (fl.fl_ops && fl.fl_ops->fl_release_private)
2049 			fl.fl_ops->fl_release_private(&fl);
2050 	}
2051 
2052 	lock_flocks();
2053 	before = &inode->i_flock;
2054 
2055 	while ((fl = *before) != NULL) {
2056 		if (fl->fl_file == filp) {
2057 			if (IS_FLOCK(fl)) {
2058 				locks_delete_lock(before);
2059 				continue;
2060 			}
2061 			if (IS_LEASE(fl)) {
2062 				lease_modify(before, F_UNLCK);
2063 				continue;
2064 			}
2065 			/* What? */
2066 			BUG();
2067  		}
2068 		before = &fl->fl_next;
2069 	}
2070 	unlock_flocks();
2071 }
2072 
2073 /**
2074  *	posix_unblock_lock - stop waiting for a file lock
2075  *      @filp:   how the file was opened
2076  *	@waiter: the lock which was waiting
2077  *
2078  *	lockd needs to block waiting for locks.
2079  */
2080 int
2081 posix_unblock_lock(struct file *filp, struct file_lock *waiter)
2082 {
2083 	int status = 0;
2084 
2085 	lock_flocks();
2086 	if (waiter->fl_next)
2087 		__locks_delete_block(waiter);
2088 	else
2089 		status = -ENOENT;
2090 	unlock_flocks();
2091 	return status;
2092 }
2093 
2094 EXPORT_SYMBOL(posix_unblock_lock);
2095 
2096 /**
2097  * vfs_cancel_lock - file byte range unblock lock
2098  * @filp: The file to apply the unblock to
2099  * @fl: The lock to be unblocked
2100  *
2101  * Used by lock managers to cancel blocked requests
2102  */
2103 int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2104 {
2105 	if (filp->f_op && filp->f_op->lock)
2106 		return filp->f_op->lock(filp, F_CANCELLK, fl);
2107 	return 0;
2108 }
2109 
2110 EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2111 
2112 #ifdef CONFIG_PROC_FS
2113 #include <linux/proc_fs.h>
2114 #include <linux/seq_file.h>
2115 
2116 static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2117 			    loff_t id, char *pfx)
2118 {
2119 	struct inode *inode = NULL;
2120 	unsigned int fl_pid;
2121 
2122 	if (fl->fl_nspid)
2123 		fl_pid = pid_vnr(fl->fl_nspid);
2124 	else
2125 		fl_pid = fl->fl_pid;
2126 
2127 	if (fl->fl_file != NULL)
2128 		inode = fl->fl_file->f_path.dentry->d_inode;
2129 
2130 	seq_printf(f, "%lld:%s ", id, pfx);
2131 	if (IS_POSIX(fl)) {
2132 		seq_printf(f, "%6s %s ",
2133 			     (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
2134 			     (inode == NULL) ? "*NOINODE*" :
2135 			     mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
2136 	} else if (IS_FLOCK(fl)) {
2137 		if (fl->fl_type & LOCK_MAND) {
2138 			seq_printf(f, "FLOCK  MSNFS     ");
2139 		} else {
2140 			seq_printf(f, "FLOCK  ADVISORY  ");
2141 		}
2142 	} else if (IS_LEASE(fl)) {
2143 		seq_printf(f, "LEASE  ");
2144 		if (fl->fl_type & F_INPROGRESS)
2145 			seq_printf(f, "BREAKING  ");
2146 		else if (fl->fl_file)
2147 			seq_printf(f, "ACTIVE    ");
2148 		else
2149 			seq_printf(f, "BREAKER   ");
2150 	} else {
2151 		seq_printf(f, "UNKNOWN UNKNOWN  ");
2152 	}
2153 	if (fl->fl_type & LOCK_MAND) {
2154 		seq_printf(f, "%s ",
2155 			       (fl->fl_type & LOCK_READ)
2156 			       ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
2157 			       : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
2158 	} else {
2159 		seq_printf(f, "%s ",
2160 			       (fl->fl_type & F_INPROGRESS)
2161 			       ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ "
2162 			       : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ ");
2163 	}
2164 	if (inode) {
2165 #ifdef WE_CAN_BREAK_LSLK_NOW
2166 		seq_printf(f, "%d %s:%ld ", fl_pid,
2167 				inode->i_sb->s_id, inode->i_ino);
2168 #else
2169 		/* userspace relies on this representation of dev_t ;-( */
2170 		seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
2171 				MAJOR(inode->i_sb->s_dev),
2172 				MINOR(inode->i_sb->s_dev), inode->i_ino);
2173 #endif
2174 	} else {
2175 		seq_printf(f, "%d <none>:0 ", fl_pid);
2176 	}
2177 	if (IS_POSIX(fl)) {
2178 		if (fl->fl_end == OFFSET_MAX)
2179 			seq_printf(f, "%Ld EOF\n", fl->fl_start);
2180 		else
2181 			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2182 	} else {
2183 		seq_printf(f, "0 EOF\n");
2184 	}
2185 }
2186 
2187 static int locks_show(struct seq_file *f, void *v)
2188 {
2189 	struct file_lock *fl, *bfl;
2190 
2191 	fl = list_entry(v, struct file_lock, fl_link);
2192 
2193 	lock_get_status(f, fl, *((loff_t *)f->private), "");
2194 
2195 	list_for_each_entry(bfl, &fl->fl_block, fl_block)
2196 		lock_get_status(f, bfl, *((loff_t *)f->private), " ->");
2197 
2198 	return 0;
2199 }
2200 
2201 static void *locks_start(struct seq_file *f, loff_t *pos)
2202 {
2203 	loff_t *p = f->private;
2204 
2205 	lock_flocks();
2206 	*p = (*pos + 1);
2207 	return seq_list_start(&file_lock_list, *pos);
2208 }
2209 
2210 static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2211 {
2212 	loff_t *p = f->private;
2213 	++*p;
2214 	return seq_list_next(v, &file_lock_list, pos);
2215 }
2216 
2217 static void locks_stop(struct seq_file *f, void *v)
2218 {
2219 	unlock_flocks();
2220 }
2221 
2222 static const struct seq_operations locks_seq_operations = {
2223 	.start	= locks_start,
2224 	.next	= locks_next,
2225 	.stop	= locks_stop,
2226 	.show	= locks_show,
2227 };
2228 
2229 static int locks_open(struct inode *inode, struct file *filp)
2230 {
2231 	return seq_open_private(filp, &locks_seq_operations, sizeof(loff_t));
2232 }
2233 
2234 static const struct file_operations proc_locks_operations = {
2235 	.open		= locks_open,
2236 	.read		= seq_read,
2237 	.llseek		= seq_lseek,
2238 	.release	= seq_release_private,
2239 };
2240 
2241 static int __init proc_locks_init(void)
2242 {
2243 	proc_create("locks", 0, NULL, &proc_locks_operations);
2244 	return 0;
2245 }
2246 module_init(proc_locks_init);
2247 #endif
2248 
2249 /**
2250  *	lock_may_read - checks that the region is free of locks
2251  *	@inode: the inode that is being read
2252  *	@start: the first byte to read
2253  *	@len: the number of bytes to read
2254  *
2255  *	Emulates Windows locking requirements.  Whole-file
2256  *	mandatory locks (share modes) can prohibit a read and
2257  *	byte-range POSIX locks can prohibit a read if they overlap.
2258  *
2259  *	N.B. this function is only ever called
2260  *	from knfsd and ownership of locks is never checked.
2261  */
2262 int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
2263 {
2264 	struct file_lock *fl;
2265 	int result = 1;
2266 	lock_flocks();
2267 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2268 		if (IS_POSIX(fl)) {
2269 			if (fl->fl_type == F_RDLCK)
2270 				continue;
2271 			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2272 				continue;
2273 		} else if (IS_FLOCK(fl)) {
2274 			if (!(fl->fl_type & LOCK_MAND))
2275 				continue;
2276 			if (fl->fl_type & LOCK_READ)
2277 				continue;
2278 		} else
2279 			continue;
2280 		result = 0;
2281 		break;
2282 	}
2283 	unlock_flocks();
2284 	return result;
2285 }
2286 
2287 EXPORT_SYMBOL(lock_may_read);
2288 
2289 /**
2290  *	lock_may_write - checks that the region is free of locks
2291  *	@inode: the inode that is being written
2292  *	@start: the first byte to write
2293  *	@len: the number of bytes to write
2294  *
2295  *	Emulates Windows locking requirements.  Whole-file
2296  *	mandatory locks (share modes) can prohibit a write and
2297  *	byte-range POSIX locks can prohibit a write if they overlap.
2298  *
2299  *	N.B. this function is only ever called
2300  *	from knfsd and ownership of locks is never checked.
2301  */
2302 int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
2303 {
2304 	struct file_lock *fl;
2305 	int result = 1;
2306 	lock_flocks();
2307 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2308 		if (IS_POSIX(fl)) {
2309 			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2310 				continue;
2311 		} else if (IS_FLOCK(fl)) {
2312 			if (!(fl->fl_type & LOCK_MAND))
2313 				continue;
2314 			if (fl->fl_type & LOCK_WRITE)
2315 				continue;
2316 		} else
2317 			continue;
2318 		result = 0;
2319 		break;
2320 	}
2321 	unlock_flocks();
2322 	return result;
2323 }
2324 
2325 EXPORT_SYMBOL(lock_may_write);
2326 
2327 static int __init filelock_init(void)
2328 {
2329 	filelock_cache = kmem_cache_create("file_lock_cache",
2330 			sizeof(struct file_lock), 0, SLAB_PANIC,
2331 			init_once);
2332 	return 0;
2333 }
2334 
2335 core_initcall(filelock_init);
2336