xref: /linux/fs/locks.c (revision d91958815d214ea365b98cbff6215383897edcb6)
1 /*
2  *  linux/fs/locks.c
3  *
4  *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
5  *  Doug Evans (dje@spiff.uucp), August 07, 1992
6  *
7  *  Deadlock detection added.
8  *  FIXME: one thing isn't handled yet:
9  *	- mandatory locks (requires lots of changes elsewhere)
10  *  Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
11  *
12  *  Miscellaneous edits, and a total rewrite of posix_lock_file() code.
13  *  Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
14  *
15  *  Converted file_lock_table to a linked list from an array, which eliminates
16  *  the limits on how many active file locks are open.
17  *  Chad Page (pageone@netcom.com), November 27, 1994
18  *
19  *  Removed dependency on file descriptors. dup()'ed file descriptors now
20  *  get the same locks as the original file descriptors, and a close() on
21  *  any file descriptor removes ALL the locks on the file for the current
22  *  process. Since locks still depend on the process id, locks are inherited
23  *  after an exec() but not after a fork(). This agrees with POSIX, and both
24  *  BSD and SVR4 practice.
25  *  Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
26  *
27  *  Scrapped free list which is redundant now that we allocate locks
28  *  dynamically with kmalloc()/kfree().
29  *  Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
30  *
31  *  Implemented two lock personalities - FL_FLOCK and FL_POSIX.
32  *
33  *  FL_POSIX locks are created with calls to fcntl() and to lockf(), which
34  *  is implemented via fcntl(). They have the semantics described above.
35  *
36  *  FL_FLOCK locks are created with calls to flock(), through the flock()
37  *  system call, which is new. Old C libraries implement flock() via fcntl()
38  *  and will continue to use the old, broken implementation.
39  *
40  *  FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
41  *  with a file pointer (filp). As a result they can be shared by a parent
42  *  process and its children after a fork(). They are removed when the last
43  *  file descriptor referring to the file pointer is closed (unless explicitly
44  *  unlocked).
45  *
46  *  FL_FLOCK locks never deadlock; an existing lock is always removed before
47  *  upgrading from shared to exclusive (or vice versa). When this happens
48  *  any processes blocked by the current lock are woken up and allowed to
49  *  run before the new lock is applied.
50  *  Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
51  *
52  *  Removed some race conditions in flock_lock_file(), marked other possible
53  *  races. Just grep for FIXME to see them.
54  *  Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
55  *
56  *  Addressed Dmitry's concerns. Deadlock checking no longer recursive.
57  *  Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
58  *  once we've checked for blocking and deadlocking.
59  *  Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
60  *
61  *  Initial implementation of mandatory locks. SunOS turned out to be
62  *  a rotten model, so I implemented the "obvious" semantics.
63  *  See 'Documentation/mandatory.txt' for details.
64  *  Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
65  *
66  *  Don't allow mandatory locks on mmap()'ed files. Added simple functions to
67  *  check if a file has mandatory locks, used by mmap(), open() and creat() to
68  *  see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
69  *  Manual, Section 2.
70  *  Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
71  *
72  *  Tidied up block list handling. Added '/proc/locks' interface.
73  *  Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
74  *
75  *  Fixed deadlock condition for pathological code that mixes calls to
76  *  flock() and fcntl().
77  *  Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
78  *
79  *  Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
80  *  for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
81  *  guarantee sensible behaviour in the case where file system modules might
82  *  be compiled with different options than the kernel itself.
83  *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
84  *
85  *  Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
86  *  (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
87  *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
88  *
89  *  Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
90  *  locks. Changed process synchronisation to avoid dereferencing locks that
91  *  have already been freed.
92  *  Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
93  *
94  *  Made the block list a circular list to minimise searching in the list.
95  *  Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
96  *
97  *  Made mandatory locking a mount option. Default is not to allow mandatory
98  *  locking.
99  *  Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
100  *
101  *  Some adaptations for NFS support.
102  *  Olaf Kirch (okir@monad.swb.de), Dec 1996.
103  *
104  *  Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
105  *  Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
106  *
107  *  Use slab allocator instead of kmalloc/kfree.
108  *  Use generic list implementation from <linux/list.h>.
109  *  Sped up posix_locks_deadlock by only considering blocked locks.
110  *  Matthew Wilcox <willy@debian.org>, March, 2000.
111  *
112  *  Leases and LOCK_MAND
113  *  Matthew Wilcox <willy@debian.org>, June, 2000.
114  *  Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
115  */
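/*
 * Illustrative userspace sketch (not part of this file), showing how the
 * FL_POSIX personality described above is requested from user code: an
 * fcntl() record lock covering an explicit byte range.  Error handling is
 * minimal and the fragment is compiled out via #if 0.
 */
#if 0
#include <fcntl.h>

/* Write-lock bytes [0,99]; F_SETLKW sleeps until the lock is granted,
 * failing with errno set to EINTR or EDEADLK. */
static int lock_first_100_bytes(int fd)
{
	struct flock fl = {
		.l_type   = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 100,	/* l_len == 0 would mean "to EOF" */
	};
	return fcntl(fd, F_SETLKW, &fl);
}
#endif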
116 
117 #include <linux/capability.h>
118 #include <linux/file.h>
119 #include <linux/fdtable.h>
120 #include <linux/fs.h>
121 #include <linux/init.h>
122 #include <linux/module.h>
123 #include <linux/security.h>
124 #include <linux/slab.h>
125 #include <linux/smp_lock.h>
126 #include <linux/syscalls.h>
127 #include <linux/time.h>
128 #include <linux/rcupdate.h>
129 #include <linux/pid_namespace.h>
130 
131 #include <asm/uaccess.h>
132 
133 #define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
134 #define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
135 #define IS_LEASE(fl)	(fl->fl_flags & FL_LEASE)
136 
137 int leases_enable = 1;
138 int lease_break_time = 45;
139 
140 #define for_each_lock(inode, lockp) \
141 	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)
142 
143 static LIST_HEAD(file_lock_list);
144 static LIST_HEAD(blocked_list);
145 
146 static struct kmem_cache *filelock_cache __read_mostly;
147 
148 /* Allocate an empty lock structure. */
149 static struct file_lock *locks_alloc_lock(void)
150 {
151 	return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
152 }
153 
154 static void locks_release_private(struct file_lock *fl)
155 {
156 	if (fl->fl_ops) {
157 		if (fl->fl_ops->fl_release_private)
158 			fl->fl_ops->fl_release_private(fl);
159 		fl->fl_ops = NULL;
160 	}
161 	if (fl->fl_lmops) {
162 		if (fl->fl_lmops->fl_release_private)
163 			fl->fl_lmops->fl_release_private(fl);
164 		fl->fl_lmops = NULL;
165 	}
166 
167 }
168 
169 /* Free a lock which is not in use. */
170 static void locks_free_lock(struct file_lock *fl)
171 {
172 	BUG_ON(waitqueue_active(&fl->fl_wait));
173 	BUG_ON(!list_empty(&fl->fl_block));
174 	BUG_ON(!list_empty(&fl->fl_link));
175 
176 	locks_release_private(fl);
177 	kmem_cache_free(filelock_cache, fl);
178 }
179 
180 void locks_init_lock(struct file_lock *fl)
181 {
182 	INIT_LIST_HEAD(&fl->fl_link);
183 	INIT_LIST_HEAD(&fl->fl_block);
184 	init_waitqueue_head(&fl->fl_wait);
185 	fl->fl_next = NULL;
186 	fl->fl_fasync = NULL;
187 	fl->fl_owner = NULL;
188 	fl->fl_pid = 0;
189 	fl->fl_nspid = NULL;
190 	fl->fl_file = NULL;
191 	fl->fl_flags = 0;
192 	fl->fl_type = 0;
193 	fl->fl_start = fl->fl_end = 0;
194 	fl->fl_ops = NULL;
195 	fl->fl_lmops = NULL;
196 }
197 
198 EXPORT_SYMBOL(locks_init_lock);
199 
200 /*
201  * Initialises the fields of the file lock which are invariant for
202  * free file_locks.
203  */
204 static void init_once(struct kmem_cache *cache, void *foo)
205 {
206 	struct file_lock *lock = (struct file_lock *) foo;
207 
208 	locks_init_lock(lock);
209 }
210 
211 static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
212 {
213 	if (fl->fl_ops) {
214 		if (fl->fl_ops->fl_copy_lock)
215 			fl->fl_ops->fl_copy_lock(new, fl);
216 		new->fl_ops = fl->fl_ops;
217 	}
218 	if (fl->fl_lmops) {
219 		if (fl->fl_lmops->fl_copy_lock)
220 			fl->fl_lmops->fl_copy_lock(new, fl);
221 		new->fl_lmops = fl->fl_lmops;
222 	}
223 }
224 
225 /*
226  * Initialize a new lock from an existing file_lock structure.
227  */
228 void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
229 {
230 	new->fl_owner = fl->fl_owner;
231 	new->fl_pid = fl->fl_pid;
232 	new->fl_file = NULL;
233 	new->fl_flags = fl->fl_flags;
234 	new->fl_type = fl->fl_type;
235 	new->fl_start = fl->fl_start;
236 	new->fl_end = fl->fl_end;
237 	new->fl_ops = NULL;
238 	new->fl_lmops = NULL;
239 }
240 EXPORT_SYMBOL(__locks_copy_lock);
241 
242 void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
243 {
244 	locks_release_private(new);
245 
246 	__locks_copy_lock(new, fl);
247 	new->fl_file = fl->fl_file;
248 	new->fl_ops = fl->fl_ops;
249 	new->fl_lmops = fl->fl_lmops;
250 
251 	locks_copy_private(new, fl);
252 }
253 
254 EXPORT_SYMBOL(locks_copy_lock);
255 
256 static inline int flock_translate_cmd(int cmd) {
257 	if (cmd & LOCK_MAND)
258 		return cmd & (LOCK_MAND | LOCK_RW);
259 	switch (cmd) {
260 	case LOCK_SH:
261 		return F_RDLCK;
262 	case LOCK_EX:
263 		return F_WRLCK;
264 	case LOCK_UN:
265 		return F_UNLCK;
266 	}
267 	return -EINVAL;
268 }
269 
270 /* Fill in a file_lock structure with an appropriate FLOCK lock. */
271 static int flock_make_lock(struct file *filp, struct file_lock **lock,
272 		unsigned int cmd)
273 {
274 	struct file_lock *fl;
275 	int type = flock_translate_cmd(cmd);
276 	if (type < 0)
277 		return type;
278 
279 	fl = locks_alloc_lock();
280 	if (fl == NULL)
281 		return -ENOMEM;
282 
283 	fl->fl_file = filp;
284 	fl->fl_pid = current->tgid;
285 	fl->fl_flags = FL_FLOCK;
286 	fl->fl_type = type;
287 	fl->fl_end = OFFSET_MAX;
288 
289 	*lock = fl;
290 	return 0;
291 }
292 
293 static int assign_type(struct file_lock *fl, int type)
294 {
295 	switch (type) {
296 	case F_RDLCK:
297 	case F_WRLCK:
298 	case F_UNLCK:
299 		fl->fl_type = type;
300 		break;
301 	default:
302 		return -EINVAL;
303 	}
304 	return 0;
305 }
306 
307 /* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
308  * style lock.
309  */
310 static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
311 			       struct flock *l)
312 {
313 	off_t start, end;
314 
315 	switch (l->l_whence) {
316 	case SEEK_SET:
317 		start = 0;
318 		break;
319 	case SEEK_CUR:
320 		start = filp->f_pos;
321 		break;
322 	case SEEK_END:
323 		start = i_size_read(filp->f_path.dentry->d_inode);
324 		break;
325 	default:
326 		return -EINVAL;
327 	}
328 
329 	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
330 	   POSIX-2001 defines it. */
331 	start += l->l_start;
332 	if (start < 0)
333 		return -EINVAL;
334 	fl->fl_end = OFFSET_MAX;
335 	if (l->l_len > 0) {
336 		end = start + l->l_len - 1;
337 		fl->fl_end = end;
338 	} else if (l->l_len < 0) {
339 		end = start - 1;
340 		fl->fl_end = end;
341 		start += l->l_len;
342 		if (start < 0)
343 			return -EINVAL;
344 	}
345 	fl->fl_start = start;	/* we record the absolute position */
346 	if (fl->fl_end < fl->fl_start)
347 		return -EOVERFLOW;
348 
349 	fl->fl_owner = current->files;
350 	fl->fl_pid = current->tgid;
351 	fl->fl_file = filp;
352 	fl->fl_flags = FL_POSIX;
353 	fl->fl_ops = NULL;
354 	fl->fl_lmops = NULL;
355 
356 	return assign_type(fl, l->l_type);
357 }
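/*
 * Worked example for the conversion above, assuming l_whence == SEEK_SET
 * and l_start == 100: l_len == 10 yields the absolute range [100,109]
 * (end = start + len - 1); l_len == 0 yields [100,OFFSET_MAX], i.e. to
 * end-of-file; l_len == -10 first sets end = start - 1 = 99 and then
 * moves start back by 10, yielding [90,99].
 */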
358 
359 #if BITS_PER_LONG == 32
360 static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
361 				 struct flock64 *l)
362 {
363 	loff_t start;
364 
365 	switch (l->l_whence) {
366 	case SEEK_SET:
367 		start = 0;
368 		break;
369 	case SEEK_CUR:
370 		start = filp->f_pos;
371 		break;
372 	case SEEK_END:
373 		start = i_size_read(filp->f_path.dentry->d_inode);
374 		break;
375 	default:
376 		return -EINVAL;
377 	}
378 
379 	start += l->l_start;
380 	if (start < 0)
381 		return -EINVAL;
382 	fl->fl_end = OFFSET_MAX;
383 	if (l->l_len > 0) {
384 		fl->fl_end = start + l->l_len - 1;
385 	} else if (l->l_len < 0) {
386 		fl->fl_end = start - 1;
387 		start += l->l_len;
388 		if (start < 0)
389 			return -EINVAL;
390 	}
391 	fl->fl_start = start;	/* we record the absolute position */
392 	if (fl->fl_end < fl->fl_start)
393 		return -EOVERFLOW;
394 
395 	fl->fl_owner = current->files;
396 	fl->fl_pid = current->tgid;
397 	fl->fl_file = filp;
398 	fl->fl_flags = FL_POSIX;
399 	fl->fl_ops = NULL;
400 	fl->fl_lmops = NULL;
401 
402 	switch (l->l_type) {
403 	case F_RDLCK:
404 	case F_WRLCK:
405 	case F_UNLCK:
406 		fl->fl_type = l->l_type;
407 		break;
408 	default:
409 		return -EINVAL;
410 	}
411 
412 	return 0;
413 }
414 #endif
415 
416 /* default lease lock manager operations */
417 static void lease_break_callback(struct file_lock *fl)
418 {
419 	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
420 }
421 
422 static void lease_release_private_callback(struct file_lock *fl)
423 {
424 	if (!fl->fl_file)
425 		return;
426 
427 	f_delown(fl->fl_file);
428 	fl->fl_file->f_owner.signum = 0;
429 }
430 
431 static int lease_mylease_callback(struct file_lock *fl, struct file_lock *try)
432 {
433 	return fl->fl_file == try->fl_file;
434 }
435 
436 static struct lock_manager_operations lease_manager_ops = {
437 	.fl_break = lease_break_callback,
438 	.fl_release_private = lease_release_private_callback,
439 	.fl_mylease = lease_mylease_callback,
440 	.fl_change = lease_modify,
441 };
442 
443 /*
444  * Initialize a lease, use the default lock manager operations
445  */
446 static int lease_init(struct file *filp, int type, struct file_lock *fl)
447 {
448 	if (assign_type(fl, type) != 0)
449 		return -EINVAL;
450 
451 	fl->fl_owner = current->files;
452 	fl->fl_pid = current->tgid;
453 
454 	fl->fl_file = filp;
455 	fl->fl_flags = FL_LEASE;
456 	fl->fl_start = 0;
457 	fl->fl_end = OFFSET_MAX;
458 	fl->fl_ops = NULL;
459 	fl->fl_lmops = &lease_manager_ops;
460 	return 0;
461 }
462 
463 /* Allocate a file_lock initialised to this type of lease */
464 static struct file_lock *lease_alloc(struct file *filp, int type)
465 {
466 	struct file_lock *fl = locks_alloc_lock();
467 	int error = -ENOMEM;
468 
469 	if (fl == NULL)
470 		return ERR_PTR(error);
471 
472 	error = lease_init(filp, type, fl);
473 	if (error) {
474 		locks_free_lock(fl);
475 		return ERR_PTR(error);
476 	}
477 	return fl;
478 }
479 
480 /* Check if two locks overlap each other.
481  */
482 static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
483 {
484 	return ((fl1->fl_end >= fl2->fl_start) &&
485 		(fl2->fl_end >= fl1->fl_start));
486 }
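/*
 * Example: [5,10] and [10,20] overlap (10 >= 10 and 20 >= 5), sharing the
 * single byte 10, while [5,9] and [10,20] do not (9 >= 10 fails).  Mere
 * adjacency is not overlap.
 */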
487 
488 /*
489  * Check whether two locks have the same owner.
490  */
491 static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
492 {
493 	if (fl1->fl_lmops && fl1->fl_lmops->fl_compare_owner)
494 		return fl2->fl_lmops == fl1->fl_lmops &&
495 			fl1->fl_lmops->fl_compare_owner(fl1, fl2);
496 	return fl1->fl_owner == fl2->fl_owner;
497 }
498 
499 /* Remove waiter from blocker's block list.
500  * When blocker ends up pointing to itself then the list is empty.
501  */
502 static void __locks_delete_block(struct file_lock *waiter)
503 {
504 	list_del_init(&waiter->fl_block);
505 	list_del_init(&waiter->fl_link);
506 	waiter->fl_next = NULL;
507 }
508 
509 /* Remove waiter from its blocker's block list, taking the BKL around
510  * the removal. */
511 static void locks_delete_block(struct file_lock *waiter)
512 {
513 	lock_kernel();
514 	__locks_delete_block(waiter);
515 	unlock_kernel();
516 }
517 
518 /* Insert waiter into blocker's block list.
519  * We use a circular list so that processes can be easily woken up in
520  * the order they blocked. The documentation doesn't require this but
521  * it seems like the reasonable thing to do.
522  */
523 static void locks_insert_block(struct file_lock *blocker,
524 			       struct file_lock *waiter)
525 {
526 	BUG_ON(!list_empty(&waiter->fl_block));
527 	list_add_tail(&waiter->fl_block, &blocker->fl_block);
528 	waiter->fl_next = blocker;
529 	if (IS_POSIX(blocker))
530 		list_add(&waiter->fl_link, &blocked_list);
531 }
532 
533 /* Wake up processes blocked waiting for blocker.
534  * If told to wait then schedule the processes until the block list
535  * is empty, otherwise empty the block list ourselves.
536  */
537 static void locks_wake_up_blocks(struct file_lock *blocker)
538 {
539 	while (!list_empty(&blocker->fl_block)) {
540 		struct file_lock *waiter;
541 
542 		waiter = list_first_entry(&blocker->fl_block,
543 				struct file_lock, fl_block);
544 		__locks_delete_block(waiter);
545 		if (waiter->fl_lmops && waiter->fl_lmops->fl_notify)
546 			waiter->fl_lmops->fl_notify(waiter);
547 		else
548 			wake_up(&waiter->fl_wait);
549 	}
550 }
551 
552 /* Insert file lock fl into an inode's lock list at the position indicated
553  * by pos. At the same time add the lock to the global file lock list.
554  */
555 static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
556 {
557 	list_add(&fl->fl_link, &file_lock_list);
558 
559 	fl->fl_nspid = get_pid(task_tgid(current));
560 
561 	/* insert into file's list */
562 	fl->fl_next = *pos;
563 	*pos = fl;
564 }
565 
566 /*
567  * Delete a lock and then free it.
568  * Wake up processes that are blocked waiting for this lock,
569  * notify the FS that the lock has been cleared and
570  * finally free the lock.
571  */
572 static void locks_delete_lock(struct file_lock **thisfl_p)
573 {
574 	struct file_lock *fl = *thisfl_p;
575 
576 	*thisfl_p = fl->fl_next;
577 	fl->fl_next = NULL;
578 	list_del_init(&fl->fl_link);
579 
580 	fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
581 	if (fl->fl_fasync != NULL) {
582 		printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
583 		fl->fl_fasync = NULL;
584 	}
585 
586 	if (fl->fl_nspid) {
587 		put_pid(fl->fl_nspid);
588 		fl->fl_nspid = NULL;
589 	}
590 
591 	locks_wake_up_blocks(fl);
592 	locks_free_lock(fl);
593 }
594 
595 /* Determine if lock sys_fl blocks lock caller_fl. Common functionality
596  * checks for shared/exclusive status of overlapping locks.
597  */
598 static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
599 {
600 	if (sys_fl->fl_type == F_WRLCK)
601 		return 1;
602 	if (caller_fl->fl_type == F_WRLCK)
603 		return 1;
604 	return 0;
605 }
606 
607 /* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
608  * checking before calling the locks_conflict().
609  */
610 static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
611 {
612 	/* POSIX locks owned by the same process do not conflict with
613 	 * each other.
614 	 */
615 	if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl))
616 		return 0;
617 
618 	/* Check whether they overlap */
619 	if (!locks_overlap(caller_fl, sys_fl))
620 		return 0;
621 
622 	return locks_conflict(caller_fl, sys_fl);
623 }
624 
625 /* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
626  * checking before calling the locks_conflict().
627  */
628 static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
629 {
630 	/* FLOCK locks referring to the same filp do not conflict with
631 	 * each other.
632 	 */
633 	if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
634 		return 0;
635 	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
636 		return 0;
637 
638 	return locks_conflict(caller_fl, sys_fl);
639 }
640 
641 void
642 posix_test_lock(struct file *filp, struct file_lock *fl)
643 {
644 	struct file_lock *cfl;
645 
646 	lock_kernel();
647 	for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
648 		if (!IS_POSIX(cfl))
649 			continue;
650 		if (posix_locks_conflict(fl, cfl))
651 			break;
652 	}
653 	if (cfl) {
654 		__locks_copy_lock(fl, cfl);
655 		if (cfl->fl_nspid)
656 			fl->fl_pid = pid_vnr(cfl->fl_nspid);
657 	} else
658 		fl->fl_type = F_UNLCK;
659 	unlock_kernel();
660 	return;
661 }
662 EXPORT_SYMBOL(posix_test_lock);
663 
664 /*
665  * Deadlock detection:
666  *
667  * We attempt to detect deadlocks that are due purely to posix file
668  * locks.
669  *
670  * We assume that a task can be waiting for at most one lock at a time.
671  * So for any acquired lock, the process holding that lock may be
672  *  waiting on at most one other lock.  That lock in turn may be held by
673  * someone waiting for at most one other lock.  Given a requested lock
674  * caller_fl which is about to wait for a conflicting lock block_fl, we
675  * follow this chain of waiters to ensure we are not about to create a
676  * cycle.
677  *
678  * Since we do this before we ever put a process to sleep on a lock, we
679  *  ensure that there is never a cycle; that is what guarantees that
680  * the while() loop in posix_locks_deadlock() eventually completes.
681  *
682  * Note: the above assumption may not be true when handling lock
683  * requests from a broken NFS client. It may also fail in the presence
684  * of tasks (such as posix threads) sharing the same open file table.
685  *
686  * To handle those cases, we just bail out after a few iterations.
687  */
688 
689 #define MAX_DEADLK_ITERATIONS 10
690 
691 /* Find a lock that the owner of the given block_fl is blocking on. */
692 static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
693 {
694 	struct file_lock *fl;
695 
696 	list_for_each_entry(fl, &blocked_list, fl_link) {
697 		if (posix_same_owner(fl, block_fl))
698 			return fl->fl_next;
699 	}
700 	return NULL;
701 }
702 
703 static int posix_locks_deadlock(struct file_lock *caller_fl,
704 				struct file_lock *block_fl)
705 {
706 	int i = 0;
707 
708 	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
709 		if (i++ > MAX_DEADLK_ITERATIONS)
710 			return 0;
711 		if (posix_same_owner(caller_fl, block_fl))
712 			return 1;
713 	}
714 	return 0;
715 }
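/*
 * Example of the cycle walk above: owner A holds [0,9] and now requests
 * [10,19], while owner B holds [10,19] and is already blocked on A's
 * [0,9].  With caller_fl = A's request and block_fl = B's held lock,
 * what_owner_is_waiting_for() finds B's entry on blocked_list and returns
 * the lock B waits on, which A owns; posix_same_owner() then matches and
 * we return 1, so A's request fails with -EDEADLK instead of sleeping in
 * a cycle.
 */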
716 
717 /* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
718  * after any leases, but before any posix locks.
719  *
720  * Note that if called with an FL_EXISTS argument, the caller may determine
721  * whether or not a lock was successfully freed by testing the return
722  * value for -ENOENT.
723  */
724 static int flock_lock_file(struct file *filp, struct file_lock *request)
725 {
726 	struct file_lock *new_fl = NULL;
727 	struct file_lock **before;
728 	struct inode * inode = filp->f_path.dentry->d_inode;
729 	int error = 0;
730 	int found = 0;
731 
732 	lock_kernel();
733 	if (request->fl_flags & FL_ACCESS)
734 		goto find_conflict;
735 
736 	if (request->fl_type != F_UNLCK) {
737 		error = -ENOMEM;
738 		new_fl = locks_alloc_lock();
739 		if (new_fl == NULL)
740 			goto out;
741 		error = 0;
742 	}
743 
744 	for_each_lock(inode, before) {
745 		struct file_lock *fl = *before;
746 		if (IS_POSIX(fl))
747 			break;
748 		if (IS_LEASE(fl))
749 			continue;
750 		if (filp != fl->fl_file)
751 			continue;
752 		if (request->fl_type == fl->fl_type)
753 			goto out;
754 		found = 1;
755 		locks_delete_lock(before);
756 		break;
757 	}
758 
759 	if (request->fl_type == F_UNLCK) {
760 		if ((request->fl_flags & FL_EXISTS) && !found)
761 			error = -ENOENT;
762 		goto out;
763 	}
764 
765 	/*
766 	 * If a higher-priority process was blocked on the old file lock,
767 	 * give it the opportunity to lock the file.
768 	 */
769 	if (found)
770 		cond_resched_bkl();
771 
772 find_conflict:
773 	for_each_lock(inode, before) {
774 		struct file_lock *fl = *before;
775 		if (IS_POSIX(fl))
776 			break;
777 		if (IS_LEASE(fl))
778 			continue;
779 		if (!flock_locks_conflict(request, fl))
780 			continue;
781 		error = -EAGAIN;
782 		if (!(request->fl_flags & FL_SLEEP))
783 			goto out;
784 		error = FILE_LOCK_DEFERRED;
785 		locks_insert_block(fl, request);
786 		goto out;
787 	}
788 	if (request->fl_flags & FL_ACCESS)
789 		goto out;
790 	locks_copy_lock(new_fl, request);
791 	locks_insert_lock(before, new_fl);
792 	new_fl = NULL;
793 	error = 0;
794 
795 out:
796 	unlock_kernel();
797 	if (new_fl)
798 		locks_free_lock(new_fl);
799 	return error;
800 }
801 
802 static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
803 {
804 	struct file_lock *fl;
805 	struct file_lock *new_fl = NULL;
806 	struct file_lock *new_fl2 = NULL;
807 	struct file_lock *left = NULL;
808 	struct file_lock *right = NULL;
809 	struct file_lock **before;
810 	int error, added = 0;
811 
812 	/*
813 	 * We may need two file_lock structures for this operation,
814 	 * so we get them in advance to avoid races.
815 	 *
816 	 * In some cases we can be sure, that no new locks will be needed
817 	 */
818 	if (!(request->fl_flags & FL_ACCESS) &&
819 	    (request->fl_type != F_UNLCK ||
820 	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
821 		new_fl = locks_alloc_lock();
822 		new_fl2 = locks_alloc_lock();
823 	}
824 
825 	lock_kernel();
826 	if (request->fl_type != F_UNLCK) {
827 		for_each_lock(inode, before) {
828 			fl = *before;
829 			if (!IS_POSIX(fl))
830 				continue;
831 			if (!posix_locks_conflict(request, fl))
832 				continue;
833 			if (conflock)
834 				__locks_copy_lock(conflock, fl);
835 			error = -EAGAIN;
836 			if (!(request->fl_flags & FL_SLEEP))
837 				goto out;
838 			error = -EDEADLK;
839 			if (posix_locks_deadlock(request, fl))
840 				goto out;
841 			error = FILE_LOCK_DEFERRED;
842 			locks_insert_block(fl, request);
843 			goto out;
844 		}
845 	}
846 
847 	/* If we're just looking for a conflict, we're done. */
848 	error = 0;
849 	if (request->fl_flags & FL_ACCESS)
850 		goto out;
851 
852 	/*
853 	 * Find the first old lock with the same owner as the new lock.
854 	 */
855 
856 	before = &inode->i_flock;
857 
858 	/* First skip locks owned by other processes.  */
859 	while ((fl = *before) && (!IS_POSIX(fl) ||
860 				  !posix_same_owner(request, fl))) {
861 		before = &fl->fl_next;
862 	}
863 
864 	/* Process locks with this owner.  */
865 	while ((fl = *before) && posix_same_owner(request, fl)) {
866 		/* Detect adjacent or overlapping regions (if same lock type)
867 		 */
868 		if (request->fl_type == fl->fl_type) {
869 			/* In all comparisons of start vs end, use
870 			 * "start - 1" rather than "end + 1". If end
871 			 * is OFFSET_MAX, end + 1 will become negative.
872 			 */
873 			if (fl->fl_end < request->fl_start - 1)
874 				goto next_lock;
875 			/* If the next lock in the list has entirely bigger
876 			 * addresses than the new one, insert the lock here.
877 			 */
878 			if (fl->fl_start - 1 > request->fl_end)
879 				break;
880 
881 			/* If we come here, the new and old lock are of the
882 			 * same type and adjacent or overlapping. Make one
883 			 * lock yielding from the lower start address of both
884 			 * locks to the higher end address.
885 			 */
886 			if (fl->fl_start > request->fl_start)
887 				fl->fl_start = request->fl_start;
888 			else
889 				request->fl_start = fl->fl_start;
890 			if (fl->fl_end < request->fl_end)
891 				fl->fl_end = request->fl_end;
892 			else
893 				request->fl_end = fl->fl_end;
894 			if (added) {
895 				locks_delete_lock(before);
896 				continue;
897 			}
898 			request = fl;
899 			added = 1;
900 		}
901 		else {
902 			/* Processing for different lock types is a bit
903 			 * more complex.
904 			 */
905 			if (fl->fl_end < request->fl_start)
906 				goto next_lock;
907 			if (fl->fl_start > request->fl_end)
908 				break;
909 			if (request->fl_type == F_UNLCK)
910 				added = 1;
911 			if (fl->fl_start < request->fl_start)
912 				left = fl;
913 			/* If the next lock in the list has a higher end
914 			 * address than the new one, insert the new one here.
915 			 */
916 			if (fl->fl_end > request->fl_end) {
917 				right = fl;
918 				break;
919 			}
920 			if (fl->fl_start >= request->fl_start) {
921 				/* The new lock completely replaces an old
922 				 * one (This may happen several times).
923 				 */
924 				if (added) {
925 					locks_delete_lock(before);
926 					continue;
927 				}
928 				/* Replace the old lock with the new one.
929 				 * Wake up anybody waiting for the old one,
930 				 * as the change in lock type might satisfy
931 				 * their needs.
932 				 */
933 				locks_wake_up_blocks(fl);
934 				fl->fl_start = request->fl_start;
935 				fl->fl_end = request->fl_end;
936 				fl->fl_type = request->fl_type;
937 				locks_release_private(fl);
938 				locks_copy_private(fl, request);
939 				request = fl;
940 				added = 1;
941 			}
942 		}
943 		/* Go on to next lock.
944 		 */
945 	next_lock:
946 		before = &fl->fl_next;
947 	}
948 
949 	/*
950 	 * The above code only modifies existing locks in case of
951 	 * merging or replacing.  If new lock(s) need to be inserted
952 	 * all modifications are done below this point, so it is still
953 	 * safe to bail out.
954 	 */
955 	error = -ENOLCK; /* "no luck" */
956 	if (right && left == right && !new_fl2)
957 		goto out;
958 
959 	error = 0;
960 	if (!added) {
961 		if (request->fl_type == F_UNLCK) {
962 			if (request->fl_flags & FL_EXISTS)
963 				error = -ENOENT;
964 			goto out;
965 		}
966 
967 		if (!new_fl) {
968 			error = -ENOLCK;
969 			goto out;
970 		}
971 		locks_copy_lock(new_fl, request);
972 		locks_insert_lock(before, new_fl);
973 		new_fl = NULL;
974 	}
975 	if (right) {
976 		if (left == right) {
977 			/* The new lock breaks the old one in two pieces,
978 			 * so we have to use the second new lock.
979 			 */
980 			left = new_fl2;
981 			new_fl2 = NULL;
982 			locks_copy_lock(left, right);
983 			locks_insert_lock(before, left);
984 		}
985 		right->fl_start = request->fl_end + 1;
986 		locks_wake_up_blocks(right);
987 	}
988 	if (left) {
989 		left->fl_end = request->fl_start - 1;
990 		locks_wake_up_blocks(left);
991 	}
992  out:
993 	unlock_kernel();
994 	/*
995 	 * Free any unused locks.
996 	 */
997 	if (new_fl)
998 		locks_free_lock(new_fl);
999 	if (new_fl2)
1000 		locks_free_lock(new_fl2);
1001 	return error;
1002 }
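/*
 * Worked example of the splitting above: the owner holds a write lock on
 * [0,99] and requests F_UNLCK on [40,59].  The old lock both starts
 * before and ends after the request, so left == right and new_fl2
 * supplies the second piece: the freshly inserted copy is trimmed to
 * [0,39] (left->fl_end = 40 - 1) while the original keeps [60,99]
 * (right->fl_start = 59 + 1), and blocked waiters are woken in case the
 * bytes they wanted are now free.
 */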
1003 
1004 /**
1005  * posix_lock_file - Apply a POSIX-style lock to a file
1006  * @filp: The file to apply the lock to
1007  * @fl: The lock to be applied
1008  * @conflock: Place to return a copy of the conflicting lock, if found.
1009  *
1010  * Add a POSIX style lock to a file.
1011  * We merge adjacent & overlapping locks whenever possible.
1012  * POSIX locks are sorted by owner task, then by starting address
1013  *
1014  * Note that if called with an FL_EXISTS argument, the caller may determine
1015  * whether or not a lock was successfully freed by testing the return
1016  * value for -ENOENT.
1017  */
1018 int posix_lock_file(struct file *filp, struct file_lock *fl,
1019 			struct file_lock *conflock)
1020 {
1021 	return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock);
1022 }
1023 EXPORT_SYMBOL(posix_lock_file);
1024 
1025 /**
1026  * posix_lock_file_wait - Apply a POSIX-style lock to a file
1027  * @filp: The file to apply the lock to
1028  * @fl: The lock to be applied
1029  *
1030  * Add a POSIX style lock to a file.
1031  * We merge adjacent & overlapping locks whenever possible.
1032  * POSIX locks are sorted by owner task, then by starting address
1033  */
1034 int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
1035 {
1036 	int error;
1037 	might_sleep();
1038 	for (;;) {
1039 		error = posix_lock_file(filp, fl, NULL);
1040 		if (error != FILE_LOCK_DEFERRED)
1041 			break;
1042 		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1043 		if (!error)
1044 			continue;
1045 
1046 		locks_delete_block(fl);
1047 		break;
1048 	}
1049 	return error;
1050 }
1051 EXPORT_SYMBOL(posix_lock_file_wait);
1052 
1053 /**
1054  * locks_mandatory_locked - Check for an active lock
1055  * @inode: the file to check
1056  *
1057  * Searches the inode's list of locks to find any POSIX locks which conflict.
1058  * This function is called from locks_verify_locked() only.
1059  */
1060 int locks_mandatory_locked(struct inode *inode)
1061 {
1062 	fl_owner_t owner = current->files;
1063 	struct file_lock *fl;
1064 
1065 	/*
1066 	 * Search the lock list for this inode for any POSIX locks.
1067 	 */
1068 	lock_kernel();
1069 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
1070 		if (!IS_POSIX(fl))
1071 			continue;
1072 		if (fl->fl_owner != owner)
1073 			break;
1074 	}
1075 	unlock_kernel();
1076 	return fl ? -EAGAIN : 0;
1077 }
1078 
1079 /**
1080  * locks_mandatory_area - Check for a conflicting lock
1081  * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ
1082  *		for shared
1083  * @inode:      the file to check
1084  * @filp:       how the file was opened (if it was)
1085  * @offset:     start of area to check
1086  * @count:      length of area to check
1087  *
1088  * Searches the inode's list of locks to find any POSIX locks which conflict.
1089  * This function is called from rw_verify_area() and
1090  * locks_verify_truncate().
1091  */
1092 int locks_mandatory_area(int read_write, struct inode *inode,
1093 			 struct file *filp, loff_t offset,
1094 			 size_t count)
1095 {
1096 	struct file_lock fl;
1097 	int error;
1098 
1099 	locks_init_lock(&fl);
1100 	fl.fl_owner = current->files;
1101 	fl.fl_pid = current->tgid;
1102 	fl.fl_file = filp;
1103 	fl.fl_flags = FL_POSIX | FL_ACCESS;
1104 	if (filp && !(filp->f_flags & O_NONBLOCK))
1105 		fl.fl_flags |= FL_SLEEP;
1106 	fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
1107 	fl.fl_start = offset;
1108 	fl.fl_end = offset + count - 1;
1109 
1110 	for (;;) {
1111 		error = __posix_lock_file(inode, &fl, NULL);
1112 		if (error != FILE_LOCK_DEFERRED)
1113 			break;
1114 		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
1115 		if (!error) {
1116 			/*
1117 			 * If we've been sleeping someone might have
1118 			 * changed the permissions behind our back.
1119 			 */
1120 			if (__mandatory_lock(inode))
1121 				continue;
1122 		}
1123 
1124 		locks_delete_block(&fl);
1125 		break;
1126 	}
1127 
1128 	return error;
1129 }
1130 
1131 EXPORT_SYMBOL(locks_mandatory_area);
1132 
1133 /* We already had a lease on this file; just change its type */
1134 int lease_modify(struct file_lock **before, int arg)
1135 {
1136 	struct file_lock *fl = *before;
1137 	int error = assign_type(fl, arg);
1138 
1139 	if (error)
1140 		return error;
1141 	locks_wake_up_blocks(fl);
1142 	if (arg == F_UNLCK)
1143 		locks_delete_lock(before);
1144 	return 0;
1145 }
1146 
1147 EXPORT_SYMBOL(lease_modify);
1148 
1149 static void time_out_leases(struct inode *inode)
1150 {
1151 	struct file_lock **before;
1152 	struct file_lock *fl;
1153 
1154 	before = &inode->i_flock;
1155 	while ((fl = *before) && IS_LEASE(fl) && (fl->fl_type & F_INPROGRESS)) {
1156 		if ((fl->fl_break_time == 0)
1157 				|| time_before(jiffies, fl->fl_break_time)) {
1158 			before = &fl->fl_next;
1159 			continue;
1160 		}
1161 		lease_modify(before, fl->fl_type & ~F_INPROGRESS);
1162 		if (fl == *before)	/* lease_modify may have freed fl */
1163 			before = &fl->fl_next;
1164 	}
1165 }
1166 
1167 /**
1168  *	__break_lease	-	revoke all outstanding leases on file
1169  *	@inode: the inode of the file to return
1170  *	@mode: the open mode (read or write)
1171  *
1172  *	break_lease (inlined for speed) has checked there already is at least
1173  *	some kind of lock (maybe a lease) on this file.  Leases are broken on
1174  *	a call to open() or truncate().  This function can sleep unless you
1175  *	specified %O_NONBLOCK to your open().
1176  */
1177 int __break_lease(struct inode *inode, unsigned int mode)
1178 {
1179 	int error = 0, future;
1180 	struct file_lock *new_fl, *flock;
1181 	struct file_lock *fl;
1182 	unsigned long break_time;
1183 	int i_have_this_lease = 0;
1184 
1185 	new_fl = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK);
1186 
1187 	lock_kernel();
1188 
1189 	time_out_leases(inode);
1190 
1191 	flock = inode->i_flock;
1192 	if ((flock == NULL) || !IS_LEASE(flock))
1193 		goto out;
1194 
1195 	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
1196 		if (fl->fl_owner == current->files)
1197 			i_have_this_lease = 1;
1198 
1199 	if (mode & FMODE_WRITE) {
1200 		/* If we want write access, we have to revoke any lease. */
1201 		future = F_UNLCK | F_INPROGRESS;
1202 	} else if (flock->fl_type & F_INPROGRESS) {
1203 		/* If the lease is already being broken, we just leave it */
1204 		future = flock->fl_type;
1205 	} else if (flock->fl_type & F_WRLCK) {
1206 		/* Downgrade the exclusive lease to a read-only lease. */
1207 		future = F_RDLCK | F_INPROGRESS;
1208 	} else {
1209 		/* the existing lease was read-only, so we can read too. */
1210 		goto out;
1211 	}
1212 
1213 	if (IS_ERR(new_fl) && !i_have_this_lease
1214 			&& ((mode & O_NONBLOCK) == 0)) {
1215 		error = PTR_ERR(new_fl);
1216 		goto out;
1217 	}
1218 
1219 	break_time = 0;
1220 	if (lease_break_time > 0) {
1221 		break_time = jiffies + lease_break_time * HZ;
1222 		if (break_time == 0)
1223 			break_time++;	/* so that 0 means no break time */
1224 	}
1225 
1226 	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
1227 		if (fl->fl_type != future) {
1228 			fl->fl_type = future;
1229 			fl->fl_break_time = break_time;
1230 			/* lease must have lmops break callback */
1231 			fl->fl_lmops->fl_break(fl);
1232 		}
1233 	}
1234 
1235 	if (i_have_this_lease || (mode & O_NONBLOCK)) {
1236 		error = -EWOULDBLOCK;
1237 		goto out;
1238 	}
1239 
1240 restart:
1241 	break_time = flock->fl_break_time;
1242 	if (break_time != 0) {
1243 		break_time -= jiffies;
1244 		if (break_time == 0)
1245 			break_time++;
1246 	}
1247 	locks_insert_block(flock, new_fl);
1248 	error = wait_event_interruptible_timeout(new_fl->fl_wait,
1249 						!new_fl->fl_next, break_time);
1250 	__locks_delete_block(new_fl);
1251 	if (error >= 0) {
1252 		if (error == 0)
1253 			time_out_leases(inode);
1254 		/* Wait for the next lease that has not been broken yet */
1255 		for (flock = inode->i_flock; flock && IS_LEASE(flock);
1256 				flock = flock->fl_next) {
1257 			if (flock->fl_type & F_INPROGRESS)
1258 				goto restart;
1259 		}
1260 		error = 0;
1261 	}
1262 
1263 out:
1264 	unlock_kernel();
1265 	if (!IS_ERR(new_fl))
1266 		locks_free_lock(new_fl);
1267 	return error;
1268 }
1269 
1270 EXPORT_SYMBOL(__break_lease);
1271 
1272 /**
1273  *	lease_get_mtime - get the last modified time of an inode
1274  *	@inode: the inode
1275  *      @time:  pointer to a timespec which will contain the last modified time
1276  *
1277  * This is to force NFS clients to flush their caches for files with
1278  * exclusive leases.  The justification is that if someone has an
1279  * exclusive lease, then they could be modifying it.
1280  */
1281 void lease_get_mtime(struct inode *inode, struct timespec *time)
1282 {
1283 	struct file_lock *flock = inode->i_flock;
1284 	if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK))
1285 		*time = current_fs_time(inode->i_sb);
1286 	else
1287 		*time = inode->i_mtime;
1288 }
1289 
1290 EXPORT_SYMBOL(lease_get_mtime);
1291 
1292 /**
1293  *	fcntl_getlease - Enquire what lease is currently active
1294  *	@filp: the file
1295  *
1296  *	The value returned by this function will be one of
1297  *	(if no lease break is pending):
1298  *
1299  *	%F_RDLCK to indicate a shared lease is held.
1300  *
1301  *	%F_WRLCK to indicate an exclusive lease is held.
1302  *
1303  *	%F_UNLCK to indicate no lease is held.
1304  *
1305  *	(if a lease break is pending):
1306  *
1307  *	%F_RDLCK to indicate an exclusive lease needs to be
1308  *		changed to a shared lease (or removed).
1309  *
1310  *	%F_UNLCK to indicate the lease needs to be removed.
1311  *
1312  *	XXX: sfr & willy disagree over whether F_INPROGRESS
1313  *	should be returned to userspace.
1314  */
1315 int fcntl_getlease(struct file *filp)
1316 {
1317 	struct file_lock *fl;
1318 	int type = F_UNLCK;
1319 
1320 	lock_kernel();
1321 	time_out_leases(filp->f_path.dentry->d_inode);
1322 	for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl);
1323 			fl = fl->fl_next) {
1324 		if (fl->fl_file == filp) {
1325 			type = fl->fl_type & ~F_INPROGRESS;
1326 			break;
1327 		}
1328 	}
1329 	unlock_kernel();
1330 	return type;
1331 }
1332 
1333 /**
1334  *	generic_setlease	-	sets a lease on an open file
1335  *	@filp: file pointer
1336  *	@arg: type of lease to obtain
1337  *	@flp: input - file_lock to use, output - file_lock inserted
1338  *
1339  *	The (input) flp->fl_lmops->fl_break function is required
1340  *	by break_lease().
1341  *
1342  *	Called with kernel lock held.
1343  */
1344 int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
1345 {
1346 	struct file_lock *fl, **before, **my_before = NULL, *lease;
1347 	struct file_lock *new_fl = NULL;
1348 	struct dentry *dentry = filp->f_path.dentry;
1349 	struct inode *inode = dentry->d_inode;
1350 	int error, rdlease_count = 0, wrlease_count = 0;
1351 
1352 	if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE))
1353 		return -EACCES;
1354 	if (!S_ISREG(inode->i_mode))
1355 		return -EINVAL;
1356 	error = security_file_lock(filp, arg);
1357 	if (error)
1358 		return error;
1359 
1360 	time_out_leases(inode);
1361 
1362 	BUG_ON(!(*flp)->fl_lmops->fl_break);
1363 
1364 	lease = *flp;
1365 
1366 	if (arg != F_UNLCK) {
1367 		error = -ENOMEM;
1368 		new_fl = locks_alloc_lock();
1369 		if (new_fl == NULL)
1370 			goto out;
1371 
1372 		error = -EAGAIN;
1373 		if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
1374 			goto out;
1375 		if ((arg == F_WRLCK)
1376 		    && ((atomic_read(&dentry->d_count) > 1)
1377 			|| (atomic_read(&inode->i_count) > 1)))
1378 			goto out;
1379 	}
1380 
1381 	/*
1382 	 * At this point, we know that if there is an exclusive
1383 	 * lease on this file, then we hold it on this filp
1384 	 * (otherwise our open of this file would have blocked).
1385 	 * And if we are trying to acquire an exclusive lease,
1386 	 * then the file is not open by anyone (including us)
1387 	 * except for this filp.
1388 	 */
1389 	for (before = &inode->i_flock;
1390 			((fl = *before) != NULL) && IS_LEASE(fl);
1391 			before = &fl->fl_next) {
1392 		if (lease->fl_lmops->fl_mylease(fl, lease))
1393 			my_before = before;
1394 		else if (fl->fl_type == (F_INPROGRESS | F_UNLCK))
1395 			/*
1396 			 * Someone is in the process of opening this
1397 			 * file for writing so we may not take an
1398 			 * exclusive lease on it.
1399 			 */
1400 			wrlease_count++;
1401 		else
1402 			rdlease_count++;
1403 	}
1404 
1405 	error = -EAGAIN;
1406 	if ((arg == F_RDLCK && (wrlease_count > 0)) ||
1407 	    (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0)))
1408 		goto out;
1409 
1410 	if (my_before != NULL) {
1411 		*flp = *my_before;
1412 		error = lease->fl_lmops->fl_change(my_before, arg);
1413 		goto out;
1414 	}
1415 
1416 	error = 0;
1417 	if (arg == F_UNLCK)
1418 		goto out;
1419 
1420 	error = -EINVAL;
1421 	if (!leases_enable)
1422 		goto out;
1423 
1424 	locks_copy_lock(new_fl, lease);
1425 	locks_insert_lock(before, new_fl);
1426 
1427 	*flp = new_fl;
1428 	return 0;
1429 
1430 out:
1431 	if (new_fl != NULL)
1432 		locks_free_lock(new_fl);
1433 	return error;
1434 }
1435 EXPORT_SYMBOL(generic_setlease);
1436 
1437 /**
1438  *	vfs_setlease        -       sets a lease on an open file
1439  *	@filp: file pointer
1440  *	@arg: type of lease to obtain
1441  *	@lease: file_lock to use
1442  *
1443  *	Call this to establish a lease on the file.
1444  *	The (*lease)->fl_lmops->fl_break operation must be set; if not,
1445  *	break_lease will oops!
1446  *
1447  *	This will call the filesystem's setlease file method, if
1448  *	defined.  Note that there is no getlease method; instead, the
1449  *	filesystem setlease method should call back to setlease() to
1450  *	add a lease to the inode's lease list, where fcntl_getlease() can
1451  *	find it.  Since fcntl_getlease() only reports whether the current
1452  *	task holds a lease, a cluster filesystem need only do this for
1453  *	leases held by processes on this node.
1454  *
1455  *	There is also no break_lease method; filesystems that
1456  *	handle their own leases should break leases themselves from the
1457  *	filesystem's open, create, and (on truncate) setattr methods.
1458  *
1459  *	Warning: the current setlease methods exist only to disable
1460  *	leases in certain cases.  More vfs changes may be required to
1461  *	allow a full filesystem lease implementation.
1462  */
1463 
1464 int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
1465 {
1466 	int error;
1467 
1468 	lock_kernel();
1469 	if (filp->f_op && filp->f_op->setlease)
1470 		error = filp->f_op->setlease(filp, arg, lease);
1471 	else
1472 		error = generic_setlease(filp, arg, lease);
1473 	unlock_kernel();
1474 
1475 	return error;
1476 }
1477 EXPORT_SYMBOL_GPL(vfs_setlease);
1478 
1479 /**
1480  *	fcntl_setlease	-	sets a lease on an open file
1481  *	@fd: open file descriptor
1482  *	@filp: file pointer
1483  *	@arg: type of lease to obtain
1484  *
1485  *	Call this fcntl to establish a lease on the file.
1486  *	Note that you also need to call %F_SETSIG to
1487  *	receive a signal when the lease is broken.
1488  */
1489 int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1490 {
1491 	struct file_lock fl, *flp = &fl;
1492 	struct inode *inode = filp->f_path.dentry->d_inode;
1493 	int error;
1494 
1495 	locks_init_lock(&fl);
1496 	error = lease_init(filp, arg, &fl);
1497 	if (error)
1498 		return error;
1499 
1500 	lock_kernel();
1501 
1502 	error = vfs_setlease(filp, arg, &flp);
1503 	if (error || arg == F_UNLCK)
1504 		goto out_unlock;
1505 
1506 	error = fasync_helper(fd, filp, 1, &flp->fl_fasync);
1507 	if (error < 0) {
1508 		/* remove lease just inserted by setlease */
1509 		flp->fl_type = F_UNLCK | F_INPROGRESS;
1510 		flp->fl_break_time = jiffies - 10;
1511 		time_out_leases(inode);
1512 		goto out_unlock;
1513 	}
1514 
1515 	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
1516 out_unlock:
1517 	unlock_kernel();
1518 	return error;
1519 }
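/*
 * Illustrative userspace sketch (not part of this file): taking a read
 * lease and routing the break notification to a real-time signal via
 * F_SETSIG, as the kerneldoc above suggests.  The choice of SIGRTMIN + 1
 * is arbitrary; both fcntls need _GNU_SOURCE.  Compiled out via #if 0.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>

static int take_read_lease(int fd)
{
	/* Deliver SIGRTMIN + 1 (instead of plain SIGIO) on lease break. */
	if (fcntl(fd, F_SETSIG, SIGRTMIN + 1) < 0)
		return -1;
	/* F_RDLCK lease; fails with EAGAIN if the file is open for
	 * writing anywhere, as generic_setlease() above enforces. */
	return fcntl(fd, F_SETLEASE, F_RDLCK);
}
#endif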
1520 
1521 /**
1522  * flock_lock_file_wait - Apply a FLOCK-style lock to a file
1523  * @filp: The file to apply the lock to
1524  * @fl: The lock to be applied
1525  *
1526  * Add a FLOCK style lock to a file.
1527  */
1528 int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
1529 {
1530 	int error;
1531 	might_sleep();
1532 	for (;;) {
1533 		error = flock_lock_file(filp, fl);
1534 		if (error != FILE_LOCK_DEFERRED)
1535 			break;
1536 		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1537 		if (!error)
1538 			continue;
1539 
1540 		locks_delete_block(fl);
1541 		break;
1542 	}
1543 	return error;
1544 }
1545 
1546 EXPORT_SYMBOL(flock_lock_file_wait);
1547 
1548 /**
1549  *	sys_flock: - flock() system call.
1550  *	@fd: the file descriptor to lock.
1551  *	@cmd: the type of lock to apply.
1552  *
1553  *	Apply a %FL_FLOCK style lock to an open file descriptor.
1554  *	The @cmd can be one of
1555  *
1556  *	%LOCK_SH -- a shared lock.
1557  *
1558  *	%LOCK_EX -- an exclusive lock.
1559  *
1560  *	%LOCK_UN -- remove an existing lock.
1561  *
1562  *	%LOCK_MAND -- a `mandatory' flock.  This exists to emulate Windows Share Modes.
1563  *
1564  *	%LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
1565  *	processes read and write access respectively.
1566  */
1567 asmlinkage long sys_flock(unsigned int fd, unsigned int cmd)
1568 {
1569 	struct file *filp;
1570 	struct file_lock *lock;
1571 	int can_sleep, unlock;
1572 	int error;
1573 
1574 	error = -EBADF;
1575 	filp = fget(fd);
1576 	if (!filp)
1577 		goto out;
1578 
1579 	can_sleep = !(cmd & LOCK_NB);
1580 	cmd &= ~LOCK_NB;
1581 	unlock = (cmd == LOCK_UN);
1582 
1583 	if (!unlock && !(cmd & LOCK_MAND) && !(filp->f_mode & 3))
1584 		goto out_putf;
1585 
1586 	error = flock_make_lock(filp, &lock, cmd);
1587 	if (error)
1588 		goto out_putf;
1589 	if (can_sleep)
1590 		lock->fl_flags |= FL_SLEEP;
1591 
1592 	error = security_file_lock(filp, cmd);
1593 	if (error)
1594 		goto out_free;
1595 
1596 	if (filp->f_op && filp->f_op->flock)
1597 		error = filp->f_op->flock(filp,
1598 					  (can_sleep) ? F_SETLKW : F_SETLK,
1599 					  lock);
1600 	else
1601 		error = flock_lock_file_wait(filp, lock);
1602 
1603  out_free:
1604 	locks_free_lock(lock);
1605 
1606  out_putf:
1607 	fput(filp);
1608  out:
1609 	return error;
1610 }
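/*
 * Illustrative userspace sketch (not part of this file): the
 * shared-to-exclusive upgrade mentioned in the header comment.  Note the
 * upgrade is not atomic: flock_lock_file() above removes the old lock
 * before checking for conflicts, so a failed non-blocking upgrade leaves
 * the file completely unlocked.  Compiled out via #if 0.
 */
#if 0
#include <sys/file.h>
#include <errno.h>

static int upgrade_to_exclusive(int fd)
{
	if (flock(fd, LOCK_SH) < 0)		/* shared lock, may sleep */
		return -1;
	if (flock(fd, LOCK_EX | LOCK_NB) < 0 && errno == EWOULDBLOCK)
		return 0;	/* upgrade lost the race; no lock held now */
	return 1;		/* exclusive lock held */
}
#endif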
1611 
1612 /**
1613  * vfs_test_lock - test file byte range lock
1614  * @filp: The file to test lock for
1615  * @fl: The lock to test; also used to hold result
1616  *
1617  * Returns -ERRNO on failure.  Indicates presence of conflicting lock by
1618  * setting conf->fl_type to something other than F_UNLCK.
1619  */
1620 int vfs_test_lock(struct file *filp, struct file_lock *fl)
1621 {
1622 	if (filp->f_op && filp->f_op->lock)
1623 		return filp->f_op->lock(filp, F_GETLK, fl);
1624 	posix_test_lock(filp, fl);
1625 	return 0;
1626 }
1627 EXPORT_SYMBOL_GPL(vfs_test_lock);
1628 
1629 static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
1630 {
1631 	flock->l_pid = fl->fl_pid;
1632 #if BITS_PER_LONG == 32
1633 	/*
1634 	 * Make sure we can represent the posix lock via
1635 	 * legacy 32bit flock.
1636 	 */
1637 	if (fl->fl_start > OFFT_OFFSET_MAX)
1638 		return -EOVERFLOW;
1639 	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
1640 		return -EOVERFLOW;
1641 #endif
1642 	flock->l_start = fl->fl_start;
1643 	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1644 		fl->fl_end - fl->fl_start + 1;
1645 	flock->l_whence = 0;
1646 	flock->l_type = fl->fl_type;
1647 	return 0;
1648 }
1649 
1650 #if BITS_PER_LONG == 32
1651 static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
1652 {
1653 	flock->l_pid = fl->fl_pid;
1654 	flock->l_start = fl->fl_start;
1655 	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1656 		fl->fl_end - fl->fl_start + 1;
1657 	flock->l_whence = 0;
1658 	flock->l_type = fl->fl_type;
1659 }
1660 #endif
1661 
1662 /* Report the first existing lock that would conflict with l.
1663  * This implements the F_GETLK command of fcntl().
1664  */
1665 int fcntl_getlk(struct file *filp, struct flock __user *l)
1666 {
1667 	struct file_lock file_lock;
1668 	struct flock flock;
1669 	int error;
1670 
1671 	error = -EFAULT;
1672 	if (copy_from_user(&flock, l, sizeof(flock)))
1673 		goto out;
1674 	error = -EINVAL;
1675 	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1676 		goto out;
1677 
1678 	error = flock_to_posix_lock(filp, &file_lock, &flock);
1679 	if (error)
1680 		goto out;
1681 
1682 	error = vfs_test_lock(filp, &file_lock);
1683 	if (error)
1684 		goto out;
1685 
1686 	flock.l_type = file_lock.fl_type;
1687 	if (file_lock.fl_type != F_UNLCK) {
1688 		error = posix_lock_to_flock(&flock, &file_lock);
1689 		if (error)
1690 			goto out;
1691 	}
1692 	error = -EFAULT;
1693 	if (!copy_to_user(l, &flock, sizeof(flock)))
1694 		error = 0;
1695 out:
1696 	return error;
1697 }
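/*
 * Illustrative userspace sketch (not part of this file): probing with
 * F_GETLK as implemented above.  On return l_type is F_UNLCK if the probe
 * would succeed; otherwise the struct describes the first conflicting
 * lock, with l_pid naming its owner.  Compiled out via #if 0.
 */
#if 0
#include <fcntl.h>
#include <sys/types.h>

static pid_t who_blocks_write(int fd, off_t start, off_t len)
{
	struct flock fl = {
		.l_type   = F_WRLCK,	/* "would a write lock here succeed?" */
		.l_whence = SEEK_SET,
		.l_start  = start,
		.l_len    = len,
	};
	if (fcntl(fd, F_GETLK, &fl) < 0)
		return -1;
	return fl.l_type == F_UNLCK ? 0 : fl.l_pid;
}
#endif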
1698 
1699 /**
1700  * vfs_lock_file - file byte range lock
1701  * @filp: The file to apply the lock to
1702  * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
1703  * @fl: The lock to be applied
1704  * @conf: Place to return a copy of the conflicting lock, if found.
1705  *
1706  * A caller that doesn't care about the conflicting lock may pass NULL
1707  * as the final argument.
1708  *
1709  * If the filesystem defines a private ->lock() method, then @conf will
1710  * be left unchanged; so a caller that cares should initialize it to
1711  * some acceptable default.
1712  *
1713  * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
1714  * locks, the ->lock() interface may return asynchronously, before the lock has
1715  * been granted or denied by the underlying filesystem, if (and only if)
1716  * fl_grant is set. Callers expecting ->lock() to return asynchronously
1717  * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
1718  * the request is for a blocking lock. When ->lock() does return asynchronously,
1719  * it must return FILE_LOCK_DEFERRED, and call ->fl_grant() when the lock
1720  * request completes.
1721  * If the request is for a non-blocking lock the file system should return
1722  * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
1723  * with the result. If the request times out, the callback routine will return
1724  * a nonzero return code and the file system should release the lock. The file
1725  * system is also responsible for keeping a corresponding posix lock when it
1726  * grants a lock so the VFS can find out which locks are locally held and do
1727  * the correct lock cleanup when required.
1728  * The underlying filesystem must not drop the kernel lock or call
1729  * ->fl_grant() before returning to the caller with a FILE_LOCK_DEFERRED
1730  * return code.
1731  */
1732 int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
1733 {
1734 	if (filp->f_op && filp->f_op->lock)
1735 		return filp->f_op->lock(filp, cmd, fl);
1736 	else
1737 		return posix_lock_file(filp, fl, conf);
1738 }
1739 EXPORT_SYMBOL_GPL(vfs_lock_file);
1740 
1741 static int do_lock_file_wait(struct file *filp, unsigned int cmd,
1742 			     struct file_lock *fl)
1743 {
1744 	int error;
1745 
1746 	error = security_file_lock(filp, fl->fl_type);
1747 	if (error)
1748 		return error;
1749 
1750 	for (;;) {
1751 		error = vfs_lock_file(filp, cmd, fl, NULL);
1752 		if (error != FILE_LOCK_DEFERRED)
1753 			break;
1754 		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1755 		if (!error)
1756 			continue;
1757 
1758 		locks_delete_block(fl);
1759 		break;
1760 	}
1761 
1762 	return error;
1763 }
1764 
1765 /* Apply the lock described by l to an open file descriptor.
1766  * This implements both the F_SETLK and F_SETLKW commands of fcntl().
1767  */
1768 int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
1769 		struct flock __user *l)
1770 {
1771 	struct file_lock *file_lock = locks_alloc_lock();
1772 	struct flock flock;
1773 	struct inode *inode;
1774 	struct file *f;
1775 	int error;
1776 
1777 	if (file_lock == NULL)
1778 		return -ENOLCK;
1779 
1780 	/*
1781 	 * This might block, so we do it before checking the inode.
1782 	 */
1783 	error = -EFAULT;
1784 	if (copy_from_user(&flock, l, sizeof(flock)))
1785 		goto out;
1786 
1787 	inode = filp->f_path.dentry->d_inode;
1788 
1789 	/* Don't allow mandatory locks on files that may be memory mapped
1790 	 * and shared.
1791 	 */
1792 	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1793 		error = -EAGAIN;
1794 		goto out;
1795 	}
1796 
1797 again:
1798 	error = flock_to_posix_lock(filp, file_lock, &flock);
1799 	if (error)
1800 		goto out;
1801 	if (cmd == F_SETLKW) {
1802 		file_lock->fl_flags |= FL_SLEEP;
1803 	}
1804 
1805 	error = -EBADF;
1806 	switch (flock.l_type) {
1807 	case F_RDLCK:
1808 		if (!(filp->f_mode & FMODE_READ))
1809 			goto out;
1810 		break;
1811 	case F_WRLCK:
1812 		if (!(filp->f_mode & FMODE_WRITE))
1813 			goto out;
1814 		break;
1815 	case F_UNLCK:
1816 		break;
1817 	default:
1818 		error = -EINVAL;
1819 		goto out;
1820 	}
1821 
1822 	error = do_lock_file_wait(filp, cmd, file_lock);
1823 
1824 	/*
1825 	 * Attempt to detect a close/fcntl race and recover by
1826 	 * releasing the lock that was just acquired.
1827 	 */
1828 	/*
1829 	 * we need that spin_lock here - it prevents reordering between
1830 	 * update of inode->i_flock and check for it done in close().
1831 	 * rcu_read_lock() wouldn't do.
1832 	 */
1833 	spin_lock(&current->files->file_lock);
1834 	f = fcheck(fd);
1835 	spin_unlock(&current->files->file_lock);
1836 	if (!error && f != filp && flock.l_type != F_UNLCK) {
1837 		flock.l_type = F_UNLCK;
1838 		goto again;
1839 	}
1840 
1841 out:
1842 	locks_free_lock(file_lock);
1843 	return error;
1844 }
1845 
1846 #if BITS_PER_LONG == 32
1847 /* Report the first existing lock that would conflict with l.
1848  * This implements the F_GETLK command of fcntl().
1849  */
1850 int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
1851 {
1852 	struct file_lock file_lock;
1853 	struct flock64 flock;
1854 	int error;
1855 
1856 	error = -EFAULT;
1857 	if (copy_from_user(&flock, l, sizeof(flock)))
1858 		goto out;
1859 	error = -EINVAL;
1860 	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1861 		goto out;
1862 
1863 	error = flock64_to_posix_lock(filp, &file_lock, &flock);
1864 	if (error)
1865 		goto out;
1866 
1867 	error = vfs_test_lock(filp, &file_lock);
1868 	if (error)
1869 		goto out;
1870 
1871 	flock.l_type = file_lock.fl_type;
1872 	if (file_lock.fl_type != F_UNLCK)
1873 		posix_lock_to_flock64(&flock, &file_lock);
1874 
1875 	error = -EFAULT;
1876 	if (!copy_to_user(l, &flock, sizeof(flock)))
1877 		error = 0;
1878 
1879 out:
1880 	return error;
1881 }
1882 
1883 /* Apply the lock described by l to an open file descriptor.
1884  * This implements both the F_SETLK64 and F_SETLKW64 commands of fcntl().
1885  */
1886 int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
1887 		struct flock64 __user *l)
1888 {
1889 	struct file_lock *file_lock = locks_alloc_lock();
1890 	struct flock64 flock;
1891 	struct inode *inode;
1892 	struct file *f;
1893 	int error;
1894 
1895 	if (file_lock == NULL)
1896 		return -ENOLCK;
1897 
1898 	/*
1899 	 * This might block, so we do it before checking the inode.
1900 	 */
1901 	error = -EFAULT;
1902 	if (copy_from_user(&flock, l, sizeof(flock)))
1903 		goto out;
1904 
1905 	inode = filp->f_path.dentry->d_inode;
1906 
1907 	/* Don't allow mandatory locks on files that may be memory mapped
1908 	 * and shared.
1909 	 */
1910 	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1911 		error = -EAGAIN;
1912 		goto out;
1913 	}
1914 
1915 again:
1916 	error = flock64_to_posix_lock(filp, file_lock, &flock);
1917 	if (error)
1918 		goto out;
1919 	if (cmd == F_SETLKW64)
1920 		file_lock->fl_flags |= FL_SLEEP;
1922 
1923 	error = -EBADF;
1924 	switch (flock.l_type) {
1925 	case F_RDLCK:
1926 		if (!(filp->f_mode & FMODE_READ))
1927 			goto out;
1928 		break;
1929 	case F_WRLCK:
1930 		if (!(filp->f_mode & FMODE_WRITE))
1931 			goto out;
1932 		break;
1933 	case F_UNLCK:
1934 		break;
1935 	default:
1936 		error = -EINVAL;
1937 		goto out;
1938 	}
1939 
1940 	error = do_lock_file_wait(filp, cmd, file_lock);
1941 
1942 	/*
1943 	 * Attempt to detect a close/fcntl race and recover by
1944 	 * releasing the lock that was just acquired.
1945 	 */
1946 	spin_lock(&current->files->file_lock);
1947 	f = fcheck(fd);
1948 	spin_unlock(&current->files->file_lock);
1949 	if (!error && f != filp && flock.l_type != F_UNLCK) {
1950 		flock.l_type = F_UNLCK;
1951 		goto again;
1952 	}
1953 
1954 out:
1955 	locks_free_lock(file_lock);
1956 	return error;
1957 }
1958 #endif /* BITS_PER_LONG == 32 */
1959 
1960 /*
1961  * This function is called when the file is being removed
1962  * from the task's fd array.  POSIX locks belonging to this task
1963  * are deleted at this time.
1964  */
1965 void locks_remove_posix(struct file *filp, fl_owner_t owner)
1966 {
1967 	struct file_lock lock;
1968 
1969 	/*
1970 	 * If there are no locks held on this file, we don't need to call
1971 	 * posix_lock_file().  Another process could be setting a lock on this
1972 	 * file at the same time, but we wouldn't remove that lock anyway.
1973 	 */
1974 	if (!filp->f_path.dentry->d_inode->i_flock)
1975 		return;
1976 
1977 	lock.fl_type = F_UNLCK;
1978 	lock.fl_flags = FL_POSIX | FL_CLOSE;
1979 	lock.fl_start = 0;
1980 	lock.fl_end = OFFSET_MAX;
1981 	lock.fl_owner = owner;
1982 	lock.fl_pid = current->tgid;
1983 	lock.fl_file = filp;
1984 	lock.fl_ops = NULL;
1985 	lock.fl_lmops = NULL;
1986 
1987 	vfs_lock_file(filp, F_SETLK, &lock, NULL);
1988 
1989 	if (lock.fl_ops && lock.fl_ops->fl_release_private)
1990 		lock.fl_ops->fl_release_private(&lock);
1991 }
1992 
1993 EXPORT_SYMBOL(locks_remove_posix);
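
/*
 * Illustrative sketch only: filp_close() is the expected caller here,
 * doing roughly
 *
 *	locks_remove_posix(filp, id);
 *
 * with "id" the files_struct pointer that fcntl-style locks use as
 * their owner, so every POSIX lock this task holds on the file goes
 * away on close.
 */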
1994 
1995 /*
1996  * This function is called on the last close of an open file.
1997  */
1998 void locks_remove_flock(struct file *filp)
1999 {
2000 	struct inode * inode = filp->f_path.dentry->d_inode;
2001 	struct file_lock *fl;
2002 	struct file_lock **before;
2003 
2004 	if (!inode->i_flock)
2005 		return;
2006 
2007 	if (filp->f_op && filp->f_op->flock) {
2008 		struct file_lock fl = {
2009 			.fl_pid = current->tgid,
2010 			.fl_file = filp,
2011 			.fl_flags = FL_FLOCK,
2012 			.fl_type = F_UNLCK,
2013 			.fl_end = OFFSET_MAX,
2014 		};
2015 		filp->f_op->flock(filp, F_SETLKW, &fl);
2016 		if (fl.fl_ops && fl.fl_ops->fl_release_private)
2017 			fl.fl_ops->fl_release_private(&fl);
2018 	}
2019 
2020 	lock_kernel();
2021 	before = &inode->i_flock;
2022 
2023 	while ((fl = *before) != NULL) {
2024 		if (fl->fl_file == filp) {
2025 			if (IS_FLOCK(fl)) {
2026 				locks_delete_lock(before);
2027 				continue;
2028 			}
2029 			if (IS_LEASE(fl)) {
2030 				lease_modify(before, F_UNLCK);
2031 				continue;
2032 			}
2033 			/* Neither FLOCK nor LEASE - should be impossible. */
2034 			BUG();
2035 		}
2036 		before = &fl->fl_next;
2037 	}
2038 	unlock_kernel();
2039 }
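
/*
 * Illustrative sketch only: the flock(2) semantics implemented above
 * tie the lock to the open file description, not the descriptor:
 *
 *	flock(fd, LOCK_EX);	// exclusive lock on the open file
 *	fd2 = dup(fd);		// the same lock is visible via fd2
 *	close(fd);		// lock survives: fd2 keeps the file open
 *	close(fd2);		// last close - the lock is released here
 */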
2040 
2041 /**
2042  *	posix_unblock_lock - stop waiting for a file lock
2043  *	@filp:   how the file was opened
2044  *	@waiter: the lock which was waiting
2045  *
2046  *	Used by lock managers such as lockd to stop waiting for a lock.
2047  */
2048 int
2049 posix_unblock_lock(struct file *filp, struct file_lock *waiter)
2050 {
2051 	int status = 0;
2052 
2053 	lock_kernel();
2054 	if (waiter->fl_next)
2055 		__locks_delete_block(waiter);
2056 	else
2057 		status = -ENOENT;
2058 	unlock_kernel();
2059 	return status;
2060 }
2061 
2062 EXPORT_SYMBOL(posix_unblock_lock);
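
/*
 * Illustrative sketch only: a lock manager that queued a blocking wait
 * can withdraw it like
 *
 *	if (posix_unblock_lock(filp, waiter) == -ENOENT)
 *		;	// too late - the waiter was already woken
 *
 * so -ENOENT simply means the wait had already completed.
 */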
2063 
2064 /**
2065  * vfs_cancel_lock - file byte range unblock lock
2066  * @filp: The file to apply the unblock to
2067  * @fl: The lock to be unblocked
2068  *
2069  * Used by lock managers to cancel blocked requests
2070  */
2071 int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2072 {
2073 	if (filp->f_op && filp->f_op->lock)
2074 		return filp->f_op->lock(filp, F_CANCELLK, fl);
2075 	return 0;
2076 }
2077 
2078 EXPORT_SYMBOL_GPL(vfs_cancel_lock);
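
/*
 * Illustrative sketch only: a lock manager such as lockd can abandon an
 * asynchronous request it queued through vfs_lock_file() with
 *
 *	vfs_cancel_lock(filp, fl);
 *
 * A filesystem without a ->lock method has nothing in flight, which is
 * why the fallback above just returns 0.
 */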
2079 
2080 #ifdef CONFIG_PROC_FS
2081 #include <linux/seq_file.h>
2082 
2083 static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2084 							int id, char *pfx)
2085 {
2086 	struct inode *inode = NULL;
2087 	unsigned int fl_pid;
2088 
2089 	if (fl->fl_nspid)
2090 		fl_pid = pid_vnr(fl->fl_nspid);
2091 	else
2092 		fl_pid = fl->fl_pid;
2093 
2094 	if (fl->fl_file != NULL)
2095 		inode = fl->fl_file->f_path.dentry->d_inode;
2096 
2097 	seq_printf(f, "%d:%s ", id, pfx);
2098 	if (IS_POSIX(fl)) {
2099 		seq_printf(f, "%6s %s ",
2100 			     (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
2101 			     (inode == NULL) ? "*NOINODE*" :
2102 			     mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
2103 	} else if (IS_FLOCK(fl)) {
2104 		if (fl->fl_type & LOCK_MAND)
2105 			seq_printf(f, "FLOCK  MSNFS     ");
2106 		else
2107 			seq_printf(f, "FLOCK  ADVISORY  ");
2109 	} else if (IS_LEASE(fl)) {
2110 		seq_printf(f, "LEASE  ");
2111 		if (fl->fl_type & F_INPROGRESS)
2112 			seq_printf(f, "BREAKING  ");
2113 		else if (fl->fl_file)
2114 			seq_printf(f, "ACTIVE    ");
2115 		else
2116 			seq_printf(f, "BREAKER   ");
2117 	} else {
2118 		seq_printf(f, "UNKNOWN UNKNOWN  ");
2119 	}
2120 	if (fl->fl_type & LOCK_MAND) {
2121 		seq_printf(f, "%s ",
2122 			       (fl->fl_type & LOCK_READ)
2123 			       ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
2124 			       : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
2125 	} else {
2126 		seq_printf(f, "%s ",
2127 			       (fl->fl_type & F_INPROGRESS)
2128 			       ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ "
2129 			       : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ ");
2130 	}
2131 	if (inode) {
2132 #ifdef WE_CAN_BREAK_LSLK_NOW
2133 		seq_printf(f, "%d %s:%ld ", fl_pid,
2134 				inode->i_sb->s_id, inode->i_ino);
2135 #else
2136 		/* userspace relies on this representation of dev_t ;-( */
2137 		seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
2138 				MAJOR(inode->i_sb->s_dev),
2139 				MINOR(inode->i_sb->s_dev), inode->i_ino);
2140 #endif
2141 	} else {
2142 		seq_printf(f, "%d <none>:0 ", fl_pid);
2143 	}
2144 	if (IS_POSIX(fl)) {
2145 		if (fl->fl_end == OFFSET_MAX)
2146 			seq_printf(f, "%Ld EOF\n", fl->fl_start);
2147 		else
2148 			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2149 	} else {
2150 		seq_printf(f, "0 EOF\n");
2151 	}
2152 }
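
/*
 * Illustrative sketch only: the resulting /proc/locks lines look like
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:01:5678 0 EOF
 *	2: FLOCK  ADVISORY  WRITE 1235 08:01:9012 0 EOF
 *	2: -> FLOCK  ADVISORY  WRITE 1236 08:01:9012 0 EOF
 *
 * where "->" marks a waiter blocked on the lock above it, the pid is
 * followed by major:minor:inode, and the last two fields are the byte
 * range ("EOF" for a lock that runs to the end of the file).
 */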
2153 
2154 static int locks_show(struct seq_file *f, void *v)
2155 {
2156 	struct file_lock *fl, *bfl;
2157 
2158 	fl = list_entry(v, struct file_lock, fl_link);
2159 
2160 	lock_get_status(f, fl, (long)f->private, "");
2161 
2162 	list_for_each_entry(bfl, &fl->fl_block, fl_block)
2163 		lock_get_status(f, bfl, (long)f->private, " ->");
2164 
2165 	f->private++;
2166 	return 0;
2167 }
2168 
2169 static void *locks_start(struct seq_file *f, loff_t *pos)
2170 {
2171 	lock_kernel();
2172 	f->private = (void *)1;
2173 	return seq_list_start(&file_lock_list, *pos);
2174 }
2175 
2176 static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2177 {
2178 	return seq_list_next(v, &file_lock_list, pos);
2179 }
2180 
2181 static void locks_stop(struct seq_file *f, void *v)
2182 {
2183 	unlock_kernel();
2184 }
2185 
2186 struct seq_operations locks_seq_operations = {
2187 	.start	= locks_start,
2188 	.next	= locks_next,
2189 	.stop	= locks_stop,
2190 	.show	= locks_show,
2191 };
2192 #endif
2193 
2194 /**
2195  *	lock_may_read - checks that the region is free of locks
2196  *	@inode: the inode that is being read
2197  *	@start: the first byte to read
2198  *	@len: the number of bytes to read
2199  *
2200  *	Emulates Windows locking requirements.  Whole-file
2201  *	mandatory locks (share modes) can prohibit a read and
2202  *	byte-range POSIX locks can prohibit a read if they overlap.
2203  *
2204  *	N.B. this function is only ever called
2205  *	from knfsd and ownership of locks is never checked.
2206  */
2207 int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
2208 {
2209 	struct file_lock *fl;
2210 	int result = 1;
2211 	lock_kernel();
2212 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2213 		if (IS_POSIX(fl)) {
2214 			if (fl->fl_type == F_RDLCK)
2215 				continue;
2216 			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2217 				continue;
2218 		} else if (IS_FLOCK(fl)) {
2219 			if (!(fl->fl_type & LOCK_MAND))
2220 				continue;
2221 			if (fl->fl_type & LOCK_READ)
2222 				continue;
2223 		} else
2224 			continue;
2225 		result = 0;
2226 		break;
2227 	}
2228 	unlock_kernel();
2229 	return result;
2230 }
2231 
2232 EXPORT_SYMBOL(lock_may_read);
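
/*
 * Illustrative sketch only: knfsd's read path can guard a region
 * covered by mandatory locks roughly as
 *
 *	if (!lock_may_read(inode, offset, count))
 *		return -EACCES;	// hypothetical error choice
 *
 * before going ahead with the actual read.  lock_may_write() below is
 * the mirror-image check for the write path.
 */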
2233 
2234 /**
2235  *	lock_may_write - checks that the region is free of locks
2236  *	@inode: the inode that is being written
2237  *	@start: the first byte to write
2238  *	@len: the number of bytes to write
2239  *
2240  *	Emulates Windows locking requirements.  Whole-file
2241  *	mandatory locks (share modes) can prohibit a write and
2242  *	byte-range POSIX locks can prohibit a write if they overlap.
2243  *
2244  *	N.B. this function is only ever called
2245  *	from knfsd and ownership of locks is never checked.
2246  */
2247 int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
2248 {
2249 	struct file_lock *fl;
2250 	int result = 1;
2251 	lock_kernel();
2252 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2253 		if (IS_POSIX(fl)) {
2254 			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2255 				continue;
2256 		} else if (IS_FLOCK(fl)) {
2257 			if (!(fl->fl_type & LOCK_MAND))
2258 				continue;
2259 			if (fl->fl_type & LOCK_WRITE)
2260 				continue;
2261 		} else
2262 			continue;
2263 		result = 0;
2264 		break;
2265 	}
2266 	unlock_kernel();
2267 	return result;
2268 }
2269 
2270 EXPORT_SYMBOL(lock_may_write);
2271 
2272 static int __init filelock_init(void)
2273 {
2274 	filelock_cache = kmem_cache_create("file_lock_cache",
2275 			sizeof(struct file_lock), 0, SLAB_PANIC,
2276 			init_once);
2277 	return 0;
2278 }
2279 
2280 core_initcall(filelock_init);
2281