/*
 *  linux/fs/locks.c
 *
 *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
 *  Doug Evans (dje@spiff.uucp), August 07, 1992
 *
 *  Deadlock detection added.
 *  FIXME: one thing isn't handled yet:
 *	- mandatory locks (requires lots of changes elsewhere)
 *  Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
 *
 *  Miscellaneous edits, and a total rewrite of posix_lock_file() code.
 *  Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
 *
 *  Converted file_lock_table to a linked list from an array, which eliminates
 *  the limits on how many active file locks are open.
 *  Chad Page (pageone@netcom.com), November 27, 1994
 *
 *  Removed dependency on file descriptors. dup()'ed file descriptors now
 *  get the same locks as the original file descriptors, and a close() on
 *  any file descriptor removes ALL the locks on the file for the current
 *  process. Since locks still depend on the process id, locks are inherited
 *  after an exec() but not after a fork(). This agrees with POSIX, and both
 *  BSD and SVR4 practice.
 *  Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
 *
 *  Scrapped free list which is redundant now that we allocate locks
 *  dynamically with kmalloc()/kfree().
 *  Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
 *
 *  Implemented two lock personalities - FL_FLOCK and FL_POSIX.
 *
 *  FL_POSIX locks are created with calls to fcntl() and lockf() through the
 *  fcntl() system call. They have the semantics described above.
 *
 *  FL_FLOCK locks are created with calls to flock(), through the flock()
 *  system call, which is new. Old C libraries implement flock() via fcntl()
 *  and will continue to use the old, broken implementation.
 *
 *  FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
 *  with a file pointer (filp). As a result they can be shared by a parent
 *  process and its children after a fork(). They are removed when the last
 *  file descriptor referring to the file pointer is closed (unless explicitly
 *  unlocked).
 *
 *  FL_FLOCK locks never deadlock; an existing lock is always removed before
 *  upgrading from shared to exclusive (or vice versa). When this happens
 *  any processes blocked by the current lock are woken up and allowed to
 *  run before the new lock is applied.
 *  Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
 *
 *  Removed some race conditions in flock_lock_file(), marked other possible
 *  races. Just grep for FIXME to see them.
 *  Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
 *
 *  Addressed Dmitry's concerns. Deadlock checking no longer recursive.
 *  Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
 *  once we've checked for blocking and deadlocking.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
 *
 *  Initial implementation of mandatory locks. SunOS turned out to be
 *  a rotten model, so I implemented the "obvious" semantics.
 *  See 'Documentation/mandatory.txt' for details.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
 *
 *  Don't allow mandatory locks on mmap()'ed files. Added simple functions to
 *  check if a file has mandatory locks, used by mmap(), open() and creat() to
 *  see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
 *  Manual, Section 2.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
 *
 *  Tidied up block list handling. Added '/proc/locks' interface.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
 *
 *  Fixed deadlock condition for pathological code that mixes calls to
 *  flock() and fcntl().
 *  Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
 *
 *  Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
 *  for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
 *  guarantee sensible behaviour in the case where file system modules might
 *  be compiled with different options than the kernel itself.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 *  Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
 *  (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 *  Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
 *  locks. Changed process synchronisation to avoid dereferencing locks that
 *  have already been freed.
 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
 *
 *  Made the block list a circular list to minimise searching in the list.
 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
 *
 *  Made mandatory locking a mount option. Default is not to allow mandatory
 *  locking.
 *  Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
 *
 *  Some adaptations for NFS support.
 *  Olaf Kirch (okir@monad.swb.de), Dec 1996,
 *
 *  Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
 *
 *  Use slab allocator instead of kmalloc/kfree.
 *  Use generic list implementation from <linux/list.h>.
 *  Sped up posix_locks_deadlock by only considering blocked locks.
 *  Matthew Wilcox <willy@debian.org>, March, 2000.
 *
 *  Leases and LOCK_MAND
 *  Matthew Wilcox <willy@debian.org>, June, 2000.
 *  Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
 */

#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>

#include <asm/uaccess.h>

#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & FL_LEASE)

int leases_enable = 1;
int lease_break_time = 45;

#define for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)

static LIST_HEAD(file_lock_list);
static LIST_HEAD(blocked_list);

static struct kmem_cache *filelock_cache __read_mostly;

/* Allocate an empty lock structure. */
static struct file_lock *locks_alloc_lock(void)
{
	return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
}

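/*
 * Release the filesystem- and lock-manager-private state attached to a
 * lock, giving fl_ops/fl_lmops a chance to free any per-lock data.
 */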
void locks_release_private(struct file_lock *fl)
{
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_release_private)
			fl->fl_ops->fl_release_private(fl);
		fl->fl_ops = NULL;
	}
	if (fl->fl_lmops) {
		if (fl->fl_lmops->fl_release_private)
			fl->fl_lmops->fl_release_private(fl);
		fl->fl_lmops = NULL;
	}
}
EXPORT_SYMBOL_GPL(locks_release_private);

/* Free a lock which is not in use. */
static void locks_free_lock(struct file_lock *fl)
{
	BUG_ON(waitqueue_active(&fl->fl_wait));
	BUG_ON(!list_empty(&fl->fl_block));
	BUG_ON(!list_empty(&fl->fl_link));

	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}

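/* Initialize a caller-provided lock to a fully cleared, unlinked state. */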
void locks_init_lock(struct file_lock *fl)
{
	INIT_LIST_HEAD(&fl->fl_link);
	INIT_LIST_HEAD(&fl->fl_block);
	init_waitqueue_head(&fl->fl_wait);
	fl->fl_next = NULL;
	fl->fl_fasync = NULL;
	fl->fl_owner = NULL;
	fl->fl_pid = 0;
	fl->fl_nspid = NULL;
	fl->fl_file = NULL;
	fl->fl_flags = 0;
	fl->fl_type = 0;
	fl->fl_start = fl->fl_end = 0;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;
}

EXPORT_SYMBOL(locks_init_lock);

/*
 * Initialises the fields of the file lock which are invariant for
 * free file_locks.
 */
static void init_once(void *foo)
{
	struct file_lock *lock = (struct file_lock *) foo;

	locks_init_lock(lock);
}

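/*
 * Copy the fl_ops/fl_lmops private state from fl to new, letting the
 * owning filesystem or lock manager duplicate any per-lock data.
 */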
static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
{
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_copy_lock)
			fl->fl_ops->fl_copy_lock(new, fl);
		new->fl_ops = fl->fl_ops;
	}
	if (fl->fl_lmops) {
		if (fl->fl_lmops->fl_copy_lock)
			fl->fl_lmops->fl_copy_lock(new, fl);
		new->fl_lmops = fl->fl_lmops;
	}
}

/*
 * Initialize a new lock from an existing file_lock structure.
 */
void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
	new->fl_file = NULL;
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_ops = NULL;
	new->fl_lmops = NULL;
}
EXPORT_SYMBOL(__locks_copy_lock);

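/*
 * Copy a full lock, including the file pointer and (via
 * locks_copy_private()) the private state that __locks_copy_lock()
 * deliberately leaves out.
 */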
void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	locks_release_private(new);

	__locks_copy_lock(new, fl);
	new->fl_file = fl->fl_file;
	new->fl_ops = fl->fl_ops;
	new->fl_lmops = fl->fl_lmops;

	locks_copy_private(new, fl);
}

EXPORT_SYMBOL(locks_copy_lock);

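/* Translate a flock() command into an fcntl()-style lock type. */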
static inline int flock_translate_cmd(int cmd) {
	if (cmd & LOCK_MAND)
		return cmd & (LOCK_MAND | LOCK_RW);
	switch (cmd) {
	case LOCK_SH:
		return F_RDLCK;
	case LOCK_EX:
		return F_WRLCK;
	case LOCK_UN:
		return F_UNLCK;
	}
	return -EINVAL;
}

/* Fill in a file_lock structure with an appropriate FLOCK lock. */
static int flock_make_lock(struct file *filp, struct file_lock **lock,
		unsigned int cmd)
{
	struct file_lock *fl;
	int type = flock_translate_cmd(cmd);
	if (type < 0)
		return type;

	fl = locks_alloc_lock();
	if (fl == NULL)
		return -ENOMEM;

	fl->fl_file = filp;
	fl->fl_pid = current->tgid;
	fl->fl_flags = FL_FLOCK;
	fl->fl_type = type;
	fl->fl_end = OFFSET_MAX;

	*lock = fl;
	return 0;
}

static int assign_type(struct file_lock *fl, int type)
{
	switch (type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 * style lock.
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	off_t start, end;

	switch (l->l_whence) {
	case SEEK_SET:
		start = 0;
		break;
	case SEEK_CUR:
		start = filp->f_pos;
		break;
	case SEEK_END:
		start = i_size_read(filp->f_path.dentry->d_inode);
		break;
	default:
		return -EINVAL;
	}

	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
	   POSIX-2001 defines it. */
	start += l->l_start;
	if (start < 0)
		return -EINVAL;
	fl->fl_end = OFFSET_MAX;
	if (l->l_len > 0) {
		end = start + l->l_len - 1;
		fl->fl_end = end;
	} else if (l->l_len < 0) {
		end = start - 1;
		fl->fl_end = end;
		start += l->l_len;
		if (start < 0)
			return -EINVAL;
	}
	fl->fl_start = start;	/* we record the absolute position */
	if (fl->fl_end < fl->fl_start)
		return -EOVERFLOW;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}

#if BITS_PER_LONG == 32
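/* 64-bit (struct flock64) counterpart of flock_to_posix_lock(). */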
static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	loff_t start;

	switch (l->l_whence) {
	case SEEK_SET:
		start = 0;
		break;
	case SEEK_CUR:
		start = filp->f_pos;
		break;
	case SEEK_END:
		start = i_size_read(filp->f_path.dentry->d_inode);
		break;
	default:
		return -EINVAL;
	}

	start += l->l_start;
	if (start < 0)
		return -EINVAL;
	fl->fl_end = OFFSET_MAX;
	if (l->l_len > 0) {
		fl->fl_end = start + l->l_len - 1;
	} else if (l->l_len < 0) {
		fl->fl_end = start - 1;
		start += l->l_len;
		if (start < 0)
			return -EINVAL;
	}
	fl->fl_start = start;	/* we record the absolute position */
	if (fl->fl_end < fl->fl_start)
		return -EOVERFLOW;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	switch (l->l_type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = l->l_type;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
#endif

/* default lease lock manager operations */
static void lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
}

static void lease_release_private_callback(struct file_lock *fl)
{
	if (!fl->fl_file)
		return;

	f_delown(fl->fl_file);
	fl->fl_file->f_owner.signum = 0;
}

static int lease_mylease_callback(struct file_lock *fl, struct file_lock *try)
{
	return fl->fl_file == try->fl_file;
}

static struct lock_manager_operations lease_manager_ops = {
	.fl_break = lease_break_callback,
	.fl_release_private = lease_release_private_callback,
	.fl_mylease = lease_mylease_callback,
	.fl_change = lease_modify,
};

/*
 * Initialize a lease, use the default lock manager operations
 */
static int lease_init(struct file *filp, int type, struct file_lock *fl)
{
	if (assign_type(fl, type) != 0)
		return -EINVAL;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;

	fl->fl_file = filp;
	fl->fl_flags = FL_LEASE;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;
	fl->fl_ops = NULL;
	fl->fl_lmops = &lease_manager_ops;
	return 0;
}

/* Allocate a file_lock initialised to this type of lease */
static struct file_lock *lease_alloc(struct file *filp, int type)
{
	struct file_lock *fl = locks_alloc_lock();
	int error = -ENOMEM;

	if (fl == NULL)
		return ERR_PTR(error);

	error = lease_init(filp, type, fl);
	if (error) {
		locks_free_lock(fl);
		return ERR_PTR(error);
	}
	return fl;
}

/* Check if two locks overlap each other.
 */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
	return ((fl1->fl_end >= fl2->fl_start) &&
		(fl2->fl_end >= fl1->fl_start));
}

/*
 * Check whether two locks have the same owner.
 */
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	if (fl1->fl_lmops && fl1->fl_lmops->fl_compare_owner)
		return fl2->fl_lmops == fl1->fl_lmops &&
			fl1->fl_lmops->fl_compare_owner(fl1, fl2);
	return fl1->fl_owner == fl2->fl_owner;
}

/* Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
 */
static void __locks_delete_block(struct file_lock *waiter)
{
	list_del_init(&waiter->fl_block);
	list_del_init(&waiter->fl_link);
	waiter->fl_next = NULL;
}

/* Remove waiter from its blocker's block list, taking the BKL. */
static void locks_delete_block(struct file_lock *waiter)
{
	lock_kernel();
	__locks_delete_block(waiter);
	unlock_kernel();
}

/* Insert waiter into blocker's block list.
 * We use a circular list so that processes can be easily woken up in
 * the order they blocked. The documentation doesn't require this but
 * it seems like the reasonable thing to do.
 */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter)
{
	BUG_ON(!list_empty(&waiter->fl_block));
	list_add_tail(&waiter->fl_block, &blocker->fl_block);
	waiter->fl_next = blocker;
	if (IS_POSIX(blocker))
		list_add(&waiter->fl_link, &blocked_list);
}

/* Wake up processes blocked waiting for blocker.
 * Each waiter is removed from the block list and then either notified
 * via its fl_notify method or woken from its wait queue.
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	while (!list_empty(&blocker->fl_block)) {
		struct file_lock *waiter;

		waiter = list_first_entry(&blocker->fl_block,
				struct file_lock, fl_block);
		__locks_delete_block(waiter);
		if (waiter->fl_lmops && waiter->fl_lmops->fl_notify)
			waiter->fl_lmops->fl_notify(waiter);
		else
			wake_up(&waiter->fl_wait);
	}
}

/* Insert file lock fl into an inode's lock list at the position indicated
 * by pos. At the same time add the lock to the global file lock list.
 */
static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
{
	list_add(&fl->fl_link, &file_lock_list);

	fl->fl_nspid = get_pid(task_tgid(current));

	/* insert into file's list */
	fl->fl_next = *pos;
	*pos = fl;
}

/*
 * Delete a lock and then free it.
 * Wake up processes that are blocked waiting for this lock,
 * notify the FS that the lock has been cleared and
 * finally free the lock.
 */
static void locks_delete_lock(struct file_lock **thisfl_p)
{
	struct file_lock *fl = *thisfl_p;

	*thisfl_p = fl->fl_next;
	fl->fl_next = NULL;
	list_del_init(&fl->fl_link);

	fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
	if (fl->fl_fasync != NULL) {
		printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
		fl->fl_fasync = NULL;
	}

	if (fl->fl_nspid) {
		put_pid(fl->fl_nspid);
		fl->fl_nspid = NULL;
	}

	locks_wake_up_blocks(fl);
	locks_free_lock(fl);
}

/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 * checks for shared/exclusive status of overlapping locks.
 */
static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	if (sys_fl->fl_type == F_WRLCK)
		return 1;
	if (caller_fl->fl_type == F_WRLCK)
		return 1;
	return 0;
}

/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
 * checking before calling locks_conflict().
 */
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* POSIX locks owned by the same process do not conflict with
	 * each other.
	 */
	if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl))
		return (0);

	/* Check whether they overlap */
	if (!locks_overlap(caller_fl, sys_fl))
		return 0;

	return (locks_conflict(caller_fl, sys_fl));
}

/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
 * checking before calling locks_conflict().
 */
static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* FLOCK locks referring to the same filp do not conflict with
	 * each other.
	 */
	if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
		return (0);
	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
		return 0;

	return (locks_conflict(caller_fl, sys_fl));
}

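/**
 * posix_test_lock - test for a conflicting POSIX lock
 * @filp: the file to check
 * @fl: the lock to test for; on return it describes the first
 *	conflicting lock found, or has fl_type set to F_UNLCK
 */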
void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;

	lock_kernel();
	for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
		if (!IS_POSIX(cfl))
			continue;
		if (posix_locks_conflict(fl, cfl))
			break;
	}
	if (cfl) {
		__locks_copy_lock(fl, cfl);
		if (cfl->fl_nspid)
			fl->fl_pid = pid_vnr(cfl->fl_nspid);
	} else
		fl->fl_type = F_UNLCK;
	unlock_kernel();
	return;
}
EXPORT_SYMBOL(posix_test_lock);

/*
 * Deadlock detection:
 *
 * We attempt to detect deadlocks that are due purely to posix file
 * locks.
 *
 * We assume that a task can be waiting for at most one lock at a time.
 * So for any acquired lock, the process holding that lock may be
 * waiting on at most one other lock.  That lock in turn may be held by
 * someone waiting for at most one other lock.  Given a requested lock
 * caller_fl which is about to wait for a conflicting lock block_fl, we
 * follow this chain of waiters to ensure we are not about to create a
 * cycle.
 *
 * Since we do this before we ever put a process to sleep on a lock, we
 * are ensured that there is never a cycle; that is what guarantees that
 * the while() loop in posix_locks_deadlock() eventually completes.
 *
 * Note: the above assumption may not be true when handling lock
 * requests from a broken NFS client. It may also fail in the presence
 * of tasks (such as posix threads) sharing the same open file table.
 *
 * To handle those cases, we just bail out after a few iterations.
 */
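/*
 * Worked example (illustrative): task A holds a lock on bytes 0-99 and
 * is blocked requesting 100-199, which task B holds.  If B now requests
 * 0-99, we find A's pending request on the blocked_list, follow it to
 * the lock A is waiting for (B's own 100-199 lock), see that its owner
 * is the caller, and report a deadlock.
 */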

#define MAX_DEADLK_ITERATIONS 10

/* Find a lock that the owner of the given block_fl is blocking on. */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
	struct file_lock *fl;

	list_for_each_entry(fl, &blocked_list, fl_link) {
		if (posix_same_owner(fl, block_fl))
			return fl->fl_next;
	}
	return NULL;
}

static int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	int i = 0;

	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;
		if (posix_same_owner(caller_fl, block_fl))
			return 1;
	}
	return 0;
}

/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
 * after any leases, but before any posix locks.
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
static int flock_lock_file(struct file *filp, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	struct file_lock **before;
	struct inode *inode = filp->f_path.dentry->d_inode;
	int error = 0;
	int found = 0;

	lock_kernel();
	if (request->fl_flags & FL_ACCESS)
		goto find_conflict;

	if (request->fl_type != F_UNLCK) {
		error = -ENOMEM;
		new_fl = locks_alloc_lock();
		if (new_fl == NULL)
			goto out;
		error = 0;
	}

	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (filp != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;
		found = 1;
		locks_delete_lock(before);
		break;
	}

	if (request->fl_type == F_UNLCK) {
		if ((request->fl_flags & FL_EXISTS) && !found)
			error = -ENOENT;
		goto out;
	}

	/*
	 * If a higher-priority process was blocked on the old file lock,
	 * give it the opportunity to lock the file.
	 */
	if (found)
		cond_resched_bkl();

find_conflict:
	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (!flock_locks_conflict(request, fl))
			continue;
		error = -EAGAIN;
		if (!(request->fl_flags & FL_SLEEP))
			goto out;
		error = FILE_LOCK_DEFERRED;
		locks_insert_block(fl, request);
		goto out;
	}
	if (request->fl_flags & FL_ACCESS)
		goto out;
	locks_copy_lock(new_fl, request);
	locks_insert_lock(before, new_fl);
	new_fl = NULL;
	error = 0;

out:
	unlock_kernel();
	if (new_fl)
		locks_free_lock(new_fl);
	return error;
}

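/*
 * The workhorse behind posix_lock_file(): check the request against
 * existing POSIX locks, then merge with, split, or replace the caller's
 * own locks as needed.  If @conflock is non-NULL, it receives a copy of
 * any conflicting lock that is found.
 */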
static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
{
	struct file_lock *fl;
	struct file_lock *new_fl = NULL;
	struct file_lock *new_fl2 = NULL;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock **before;
	int error, added = 0;

	/*
	 * We may need two file_lock structures for this operation,
	 * so we get them in advance to avoid races.
	 *
	 * In some cases we can be sure that no new locks will be needed.
	 */
	if (!(request->fl_flags & FL_ACCESS) &&
	    (request->fl_type != F_UNLCK ||
	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
		new_fl = locks_alloc_lock();
		new_fl2 = locks_alloc_lock();
	}

	lock_kernel();
	if (request->fl_type != F_UNLCK) {
		for_each_lock(inode, before) {
			fl = *before;
			if (!IS_POSIX(fl))
				continue;
			if (!posix_locks_conflict(request, fl))
				continue;
			if (conflock)
				__locks_copy_lock(conflock, fl);
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			error = -EDEADLK;
			if (posix_locks_deadlock(request, fl))
				goto out;
			error = FILE_LOCK_DEFERRED;
			locks_insert_block(fl, request);
			goto out;
		}
	}

	/* If we're just looking for a conflict, we're done. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	/*
	 * Find the first old lock with the same owner as the new lock.
	 */

	before = &inode->i_flock;

	/* First skip locks owned by other processes.  */
	while ((fl = *before) && (!IS_POSIX(fl) ||
				  !posix_same_owner(request, fl))) {
		before = &fl->fl_next;
	}

	/* Process locks with this owner.  */
	while ((fl = *before) && posix_same_owner(request, fl)) {
		/* Detect adjacent or overlapping regions (if same lock type)
		 */
		if (request->fl_type == fl->fl_type) {
			/* In all comparisons of start vs end, use
			 * "start - 1" rather than "end + 1". If end
			 * is OFFSET_MAX, end + 1 will become negative.
			 */
			if (fl->fl_end < request->fl_start - 1)
				goto next_lock;
			/* If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
			if (fl->fl_start - 1 > request->fl_end)
				break;

			/* If we come here, the new and old lock are of the
			 * same type and adjacent or overlapping. Make one
			 * lock yielding from the lower start address of both
			 * locks to the higher end address.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock(before);
				continue;
			}
			request = fl;
			added = 1;
		}
		else {
			/* Processing for different lock types is a bit
			 * more complex.
			 */
			if (fl->fl_end < request->fl_start)
				goto next_lock;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = 1;
			if (fl->fl_start < request->fl_start)
				left = fl;
			/* If the next lock in the list has a higher end
			 * address than the new one, insert the new one here.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/* The new lock completely replaces an old
				 * one (This may happen several times).
				 */
				if (added) {
					locks_delete_lock(before);
					continue;
				}
				/* Replace the old lock with the new one.
				 * Wake up anybody waiting for the old one,
				 * as the change in lock type might satisfy
				 * their needs.
				 */
				locks_wake_up_blocks(fl);
				fl->fl_start = request->fl_start;
				fl->fl_end = request->fl_end;
				fl->fl_type = request->fl_type;
				locks_release_private(fl);
				locks_copy_private(fl, request);
				request = fl;
				added = 1;
			}
		}
		/* Go on to next lock.
		 */
	next_lock:
		before = &fl->fl_next;
	}

	/*
	 * The above code only modifies existing locks in case of
	 * merging or replacing.  If new lock(s) need to be inserted
	 * all modifications are done below this point, so it's still safe to
	 * bail out.
	 */
	error = -ENOLCK; /* "no luck" */
	if (right && left == right && !new_fl2)
		goto out;

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK) {
			if (request->fl_flags & FL_EXISTS)
				error = -ENOENT;
			goto out;
		}

		if (!new_fl) {
			error = -ENOLCK;
			goto out;
		}
		locks_copy_lock(new_fl, request);
		locks_insert_lock(before, new_fl);
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* The new lock breaks the old one in two pieces,
			 * so we have to use the second new lock.
			 */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock(before, left);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
out:
	unlock_kernel();
	/*
	 * Free any unused locks.
	 */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	return error;
}

/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 * @conflock: Place to return a copy of the conflicting lock, if found.
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
int posix_lock_file(struct file *filp, struct file_lock *fl,
			struct file_lock *conflock)
{
	return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);

/**
 * posix_lock_file_wait - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 */
int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = posix_lock_file(filp, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}
EXPORT_SYMBOL(posix_lock_file_wait);

/**
 * locks_mandatory_locked - Check for an active lock
 * @inode: the file to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from locks_verify_locked() only.
 */
int locks_mandatory_locked(struct inode *inode)
{
	fl_owner_t owner = current->files;
	struct file_lock *fl;

	/*
	 * Search the lock list for this inode for any POSIX locks.
	 */
	lock_kernel();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!IS_POSIX(fl))
			continue;
		if (fl->fl_owner != owner)
			break;
	}
	unlock_kernel();
	return fl ? -EAGAIN : 0;
}

/**
 * locks_mandatory_area - Check for a conflicting lock
 * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ
 *		for shared
 * @inode:      the file to check
 * @filp:       how the file was opened (if it was)
 * @offset:     start of area to check
 * @count:      length of area to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from rw_verify_area() and
 * locks_verify_truncate().
 */
int locks_mandatory_area(int read_write, struct inode *inode,
			 struct file *filp, loff_t offset,
			 size_t count)
{
	struct file_lock fl;
	int error;

	locks_init_lock(&fl);
	fl.fl_owner = current->files;
	fl.fl_pid = current->tgid;
	fl.fl_file = filp;
	fl.fl_flags = FL_POSIX | FL_ACCESS;
	if (filp && !(filp->f_flags & O_NONBLOCK))
		fl.fl_flags |= FL_SLEEP;
	fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
	fl.fl_start = offset;
	fl.fl_end = offset + count - 1;

	for (;;) {
		error = __posix_lock_file(inode, &fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
		if (!error) {
			/*
			 * If we've been sleeping someone might have
			 * changed the permissions behind our back.
			 */
			if (__mandatory_lock(inode))
				continue;
		}

		locks_delete_block(&fl);
		break;
	}

	return error;
}

EXPORT_SYMBOL(locks_mandatory_area);

/* We already had a lease on this file; just change its type */
int lease_modify(struct file_lock **before, int arg)
{
	struct file_lock *fl = *before;
	int error = assign_type(fl, arg);

	if (error)
		return error;
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK)
		locks_delete_lock(before);
	return 0;
}

EXPORT_SYMBOL(lease_modify);

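/*
 * Time out leases that are being broken: downgrade or remove, via
 * lease_modify(), any lease whose fl_break_time has passed.
 */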
static void time_out_leases(struct inode *inode)
{
	struct file_lock **before;
	struct file_lock *fl;

	before = &inode->i_flock;
	while ((fl = *before) && IS_LEASE(fl) && (fl->fl_type & F_INPROGRESS)) {
		if ((fl->fl_break_time == 0)
				|| time_before(jiffies, fl->fl_break_time)) {
			before = &fl->fl_next;
			continue;
		}
		lease_modify(before, fl->fl_type & ~F_INPROGRESS);
		if (fl == *before)	/* lease_modify may have freed fl */
			before = &fl->fl_next;
	}
}

/**
 *	__break_lease	-	revoke all outstanding leases on file
 *	@inode: the inode of the file
 *	@mode: the open mode (read or write)
 *
 *	break_lease (inlined for speed) has checked there already is at least
 *	some kind of lock (maybe a lease) on this file.  Leases are broken on
 *	a call to open() or truncate().  This function can sleep unless you
 *	specified %O_NONBLOCK to your open().
 */
int __break_lease(struct inode *inode, unsigned int mode)
{
	int error = 0, future;
	struct file_lock *new_fl, *flock;
	struct file_lock *fl;
	unsigned long break_time;
	int i_have_this_lease = 0;

	new_fl = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK);

	lock_kernel();

	time_out_leases(inode);

	flock = inode->i_flock;
	if ((flock == NULL) || !IS_LEASE(flock))
		goto out;

	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
		if (fl->fl_owner == current->files)
			i_have_this_lease = 1;

	if (mode & FMODE_WRITE) {
		/* If we want write access, we have to revoke any lease. */
		future = F_UNLCK | F_INPROGRESS;
	} else if (flock->fl_type & F_INPROGRESS) {
		/* If the lease is already being broken, we just leave it */
		future = flock->fl_type;
	} else if (flock->fl_type & F_WRLCK) {
		/* Downgrade the exclusive lease to a read-only lease. */
		future = F_RDLCK | F_INPROGRESS;
	} else {
		/* the existing lease was read-only, so we can read too. */
		goto out;
	}

	if (IS_ERR(new_fl) && !i_have_this_lease
			&& ((mode & O_NONBLOCK) == 0)) {
		error = PTR_ERR(new_fl);
		goto out;
	}

	break_time = 0;
	if (lease_break_time > 0) {
		break_time = jiffies + lease_break_time * HZ;
		if (break_time == 0)
			break_time++;	/* so that 0 means no break time */
	}

	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
		if (fl->fl_type != future) {
			fl->fl_type = future;
			fl->fl_break_time = break_time;
			/* lease must have lmops break callback */
			fl->fl_lmops->fl_break(fl);
		}
	}

	if (i_have_this_lease || (mode & O_NONBLOCK)) {
		error = -EWOULDBLOCK;
		goto out;
	}

restart:
	break_time = flock->fl_break_time;
	if (break_time != 0) {
		break_time -= jiffies;
		if (break_time == 0)
			break_time++;
	}
	locks_insert_block(flock, new_fl);
	error = wait_event_interruptible_timeout(new_fl->fl_wait,
						!new_fl->fl_next, break_time);
	__locks_delete_block(new_fl);
	if (error >= 0) {
		if (error == 0)
			time_out_leases(inode);
		/* Wait for the next lease that has not been broken yet */
		for (flock = inode->i_flock; flock && IS_LEASE(flock);
				flock = flock->fl_next) {
			if (flock->fl_type & F_INPROGRESS)
				goto restart;
		}
		error = 0;
	}

out:
	unlock_kernel();
	if (!IS_ERR(new_fl))
		locks_free_lock(new_fl);
	return error;
}

EXPORT_SYMBOL(__break_lease);

/**
 *	lease_get_mtime - get the last modified time of an inode
 *	@inode: the inode
 *	@time:  pointer to a timespec which will contain the last modified time
 *
 * This is to force NFS clients to flush their caches for files with
 * exclusive leases.  The justification is that if someone has an
 * exclusive lease, then they could be modifying it.
 */
void lease_get_mtime(struct inode *inode, struct timespec *time)
{
	struct file_lock *flock = inode->i_flock;
	if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK))
		*time = current_fs_time(inode->i_sb);
	else
		*time = inode->i_mtime;
}

EXPORT_SYMBOL(lease_get_mtime);

/**
 *	fcntl_getlease - Enquire what lease is currently active
 *	@filp: the file
 *
 *	The value returned by this function will be one of
 *	(if no lease break is pending):
 *
 *	%F_RDLCK to indicate a shared lease is held.
 *
 *	%F_WRLCK to indicate an exclusive lease is held.
 *
 *	%F_UNLCK to indicate no lease is held.
 *
 *	(if a lease break is pending):
 *
 *	%F_RDLCK to indicate an exclusive lease needs to be
 *		changed to a shared lease (or removed).
 *
 *	%F_UNLCK to indicate the lease needs to be removed.
 *
 *	XXX: sfr & willy disagree over whether F_INPROGRESS
 *	should be returned to userspace.
 */
int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	int type = F_UNLCK;

	lock_kernel();
	time_out_leases(filp->f_path.dentry->d_inode);
	for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl);
			fl = fl->fl_next) {
		if (fl->fl_file == filp) {
			type = fl->fl_type & ~F_INPROGRESS;
			break;
		}
	}
	unlock_kernel();
	return type;
}

/**
 *	generic_setlease	-	sets a lease on an open file
 *	@filp: file pointer
 *	@arg: type of lease to obtain
 *	@flp: input - file_lock to use, output - file_lock inserted
 *
 *	The (input) flp->fl_lmops->fl_break function is required
 *	by break_lease().
 *
 *	Called with kernel lock held.
 */
int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
{
	struct file_lock *fl, **before, **my_before = NULL, *lease;
	struct file_lock *new_fl = NULL;
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int error, rdlease_count = 0, wrlease_count = 0;

	if ((current_fsuid() != inode->i_uid) && !capable(CAP_LEASE))
		return -EACCES;
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	error = security_file_lock(filp, arg);
	if (error)
		return error;

	time_out_leases(inode);

	BUG_ON(!(*flp)->fl_lmops->fl_break);

	lease = *flp;

	if (arg != F_UNLCK) {
		error = -ENOMEM;
		new_fl = locks_alloc_lock();
		if (new_fl == NULL)
			goto out;

		error = -EAGAIN;
		if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
			goto out;
		if ((arg == F_WRLCK)
		    && ((atomic_read(&dentry->d_count) > 1)
			|| (atomic_read(&inode->i_count) > 1)))
			goto out;
	}

	/*
	 * At this point, we know that if there is an exclusive
	 * lease on this file, then we hold it on this filp
	 * (otherwise our open of this file would have blocked).
	 * And if we are trying to acquire an exclusive lease,
	 * then the file is not open by anyone (including us)
	 * except for this filp.
	 */
	for (before = &inode->i_flock;
			((fl = *before) != NULL) && IS_LEASE(fl);
			before = &fl->fl_next) {
		if (lease->fl_lmops->fl_mylease(fl, lease))
			my_before = before;
		else if (fl->fl_type == (F_INPROGRESS | F_UNLCK))
			/*
			 * Someone is in the process of opening this
			 * file for writing so we may not take an
			 * exclusive lease on it.
			 */
			wrlease_count++;
		else
			rdlease_count++;
	}

	error = -EAGAIN;
	if ((arg == F_RDLCK && (wrlease_count > 0)) ||
	    (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0)))
		goto out;

	if (my_before != NULL) {
		*flp = *my_before;
		error = lease->fl_lmops->fl_change(my_before, arg);
		goto out;
	}

	error = 0;
	if (arg == F_UNLCK)
		goto out;

	error = -EINVAL;
	if (!leases_enable)
		goto out;

	locks_copy_lock(new_fl, lease);
	locks_insert_lock(before, new_fl);

	*flp = new_fl;
	return 0;

out:
	if (new_fl != NULL)
		locks_free_lock(new_fl);
	return error;
}
EXPORT_SYMBOL(generic_setlease);

/**
 *	vfs_setlease        -       sets a lease on an open file
 *	@filp: file pointer
 *	@arg: type of lease to obtain
 *	@lease: file_lock to use
 *
 *	Call this to establish a lease on the file.
 *	The (*lease)->fl_lmops->fl_break operation must be set; if not,
 *	break_lease will oops!
 *
 *	This will call the filesystem's setlease file method, if
 *	defined.  Note that there is no getlease method; instead, the
 *	filesystem setlease method should call back to setlease() to
 *	add a lease to the inode's lease list, where fcntl_getlease() can
 *	find it.  Since fcntl_getlease() only reports whether the current
 *	task holds a lease, a cluster filesystem need only do this for
 *	leases held by processes on this node.
 *
 *	There is also no break_lease method; filesystems that
 *	handle their own leases should break leases themselves from the
 *	filesystem's open, create, and (on truncate) setattr methods.
 *
 *	Warning: the current setlease methods exist only to disable
 *	leases in certain cases.  More vfs changes may be required to
 *	allow a full filesystem lease implementation.
 */

int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
{
	int error;

	lock_kernel();
	if (filp->f_op && filp->f_op->setlease)
		error = filp->f_op->setlease(filp, arg, lease);
	else
		error = generic_setlease(filp, arg, lease);
	unlock_kernel();

	return error;
}
EXPORT_SYMBOL_GPL(vfs_setlease);

/**
 *	fcntl_setlease	-	sets a lease on an open file
 *	@fd: open file descriptor
 *	@filp: file pointer
 *	@arg: type of lease to obtain
 *
 *	Call this fcntl to establish a lease on the file.
 *	Note that you also need to call %F_SETSIG to
 *	receive a signal when the lease is broken.
 */
int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
	struct file_lock fl, *flp = &fl;
	struct inode *inode = filp->f_path.dentry->d_inode;
	int error;

	locks_init_lock(&fl);
	error = lease_init(filp, arg, &fl);
	if (error)
		return error;

	lock_kernel();

	error = vfs_setlease(filp, arg, &flp);
	if (error || arg == F_UNLCK)
		goto out_unlock;

	error = fasync_helper(fd, filp, 1, &flp->fl_fasync);
	if (error < 0) {
		/* remove lease just inserted by setlease */
		flp->fl_type = F_UNLCK | F_INPROGRESS;
		flp->fl_break_time = jiffies - 10;
		time_out_leases(inode);
		goto out_unlock;
	}

	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
out_unlock:
	unlock_kernel();
	return error;
}

/**
 * flock_lock_file_wait - Apply a FLOCK-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * Add a FLOCK style lock to a file.
 */
int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = flock_lock_file(filp, fl);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}

EXPORT_SYMBOL(flock_lock_file_wait);

/**
 *	sys_flock - flock() system call.
 *	@fd: the file descriptor to lock.
 *	@cmd: the type of lock to apply.
 *
 *	Apply a %FL_FLOCK style lock to an open file descriptor.
 *	The @cmd can be one of
 *
 *	%LOCK_SH -- a shared lock.
 *
 *	%LOCK_EX -- an exclusive lock.
 *
 *	%LOCK_UN -- remove an existing lock.
 *
 *	%LOCK_MAND -- a `mandatory' flock.  This exists to emulate Windows Share Modes.
 *
 *	%LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
 *	processes read and write access respectively.
 */
SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
{
	struct file *filp;
	struct file_lock *lock;
	int can_sleep, unlock;
	int error;

	error = -EBADF;
	filp = fget(fd);
	if (!filp)
		goto out;

	can_sleep = !(cmd & LOCK_NB);
	cmd &= ~LOCK_NB;
	unlock = (cmd == LOCK_UN);

	if (!unlock && !(cmd & LOCK_MAND) &&
	    !(filp->f_mode & (FMODE_READ|FMODE_WRITE)))
		goto out_putf;

	error = flock_make_lock(filp, &lock, cmd);
	if (error)
		goto out_putf;
	if (can_sleep)
		lock->fl_flags |= FL_SLEEP;

	error = security_file_lock(filp, cmd);
	if (error)
		goto out_free;

	if (filp->f_op && filp->f_op->flock)
		error = filp->f_op->flock(filp,
					  (can_sleep) ? F_SETLKW : F_SETLK,
					  lock);
	else
		error = flock_lock_file_wait(filp, lock);

out_free:
	locks_free_lock(lock);

out_putf:
	fput(filp);
out:
	return error;
}
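/*
 * Illustrative userspace usage of the syscall above (hypothetical
 * snippet, not part of this file):
 *
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1 && errno == EWOULDBLOCK)
 *		back_off();	// hypothetical contention handler
 *	...
 *	flock(fd, LOCK_UN);
 */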

/**
 * vfs_test_lock - test file byte range lock
 * @filp: The file to test lock for
 * @fl: The lock to test; also used to hold result
 *
 * Returns -ERRNO on failure.  Indicates presence of conflicting lock by
 * setting fl->fl_type to something other than F_UNLCK.
 */
int vfs_test_lock(struct file *filp, struct file_lock *fl)
{
	if (filp->f_op && filp->f_op->lock)
		return filp->f_op->lock(filp, F_GETLK, fl);
	posix_test_lock(filp, fl);
	return 0;
}
EXPORT_SYMBOL_GPL(vfs_test_lock);

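/*
 * Convert a kernel file_lock back to a userspace struct flock, failing
 * with -EOVERFLOW when the range cannot be represented in the legacy
 * 32-bit flock on 32-bit kernels.
 */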
static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
{
	flock->l_pid = fl->fl_pid;
#if BITS_PER_LONG == 32
	/*
	 * Make sure we can represent the posix lock via
	 * legacy 32bit flock.
	 */
	if (fl->fl_start > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
#endif
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
	return 0;
}

#if BITS_PER_LONG == 32
static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
{
	flock->l_pid = fl->fl_pid;
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
}
#endif

/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk(struct file *filp, struct flock __user *l)
{
	struct file_lock file_lock;
	struct flock flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	error = vfs_test_lock(filp, &file_lock);
	if (error)
		goto out;

	flock.l_type = file_lock.fl_type;
	if (file_lock.fl_type != F_UNLCK) {
		error = posix_lock_to_flock(&flock, &file_lock);
		if (error)
			goto out;
	}
	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;
out:
	return error;
}

/**
 * vfs_lock_file - file byte range lock
 * @filp: The file to apply the lock to
 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
 * @fl: The lock to be applied
 * @conf: Place to return a copy of the conflicting lock, if found.
 *
 * A caller that doesn't care about the conflicting lock may pass NULL
 * as the final argument.
 *
 * If the filesystem defines a private ->lock() method, then @conf will
 * be left unchanged; so a caller that cares should initialize it to
 * some acceptable default.
 *
 * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
 * locks, the ->lock() interface may return asynchronously, before the lock has
 * been granted or denied by the underlying filesystem, if (and only if)
 * fl_grant is set. Callers expecting ->lock() to return asynchronously
 * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
 * the request is for a blocking lock. When ->lock() does return asynchronously,
 * it must return FILE_LOCK_DEFERRED, and call ->fl_grant() when the lock
 * request completes.
 * If the request is for a non-blocking lock the filesystem should return
 * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
 * with the result. If the request timed out the callback routine will return a
 * nonzero return code and the filesystem should release the lock. The
 * filesystem is also responsible for keeping a corresponding posix lock when
 * it grants a lock, so the VFS can find out which locks are locally held and
 * do the correct lock cleanup when required.
 * The underlying filesystem must not drop the kernel lock or call
 * ->fl_grant() before returning to the caller with a FILE_LOCK_DEFERRED
 * return code.
 */
int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
{
	if (filp->f_op && filp->f_op->lock)
		return filp->f_op->lock(filp, cmd, fl);
	else
		return posix_lock_file(filp, fl, conf);
}
EXPORT_SYMBOL_GPL(vfs_lock_file);

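/*
 * Run the security hook, then take a byte-range lock via
 * vfs_lock_file(), sleeping until a deferred request completes or the
 * wait is interrupted.
 */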
1743 static int do_lock_file_wait(struct file *filp, unsigned int cmd,
1744 			     struct file_lock *fl)
1745 {
1746 	int error;
1747 
1748 	error = security_file_lock(filp, fl->fl_type);
1749 	if (error)
1750 		return error;
1751 
1752 	for (;;) {
1753 		error = vfs_lock_file(filp, cmd, fl, NULL);
1754 		if (error != FILE_LOCK_DEFERRED)
1755 			break;
1756 		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1757 		if (!error)
1758 			continue;
1759 
1760 		locks_delete_block(fl);
1761 		break;
1762 	}
1763 
1764 	return error;
1765 }
1766 
1767 /* Apply the lock described by l to an open file descriptor.
1768  * This implements both the F_SETLK and F_SETLKW commands of fcntl().
1769  */
1770 int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
1771 		struct flock __user *l)
1772 {
1773 	struct file_lock *file_lock = locks_alloc_lock();
1774 	struct flock flock;
1775 	struct inode *inode;
1776 	struct file *f;
1777 	int error;
1778 
1779 	if (file_lock == NULL)
1780 		return -ENOLCK;
1781 
1782 	/*
1783 	 * This might block, so we do it before checking the inode.
1784 	 */
1785 	error = -EFAULT;
1786 	if (copy_from_user(&flock, l, sizeof(flock)))
1787 		goto out;
1788 
1789 	inode = filp->f_path.dentry->d_inode;
1790 
1791 	/* Don't allow mandatory locks on files that may be memory mapped
1792 	 * and shared.
1793 	 */
1794 	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1795 		error = -EAGAIN;
1796 		goto out;
1797 	}
1798 
1799 again:
1800 	error = flock_to_posix_lock(filp, file_lock, &flock);
1801 	if (error)
1802 		goto out;
1803 	if (cmd == F_SETLKW) {
1804 		file_lock->fl_flags |= FL_SLEEP;
1805 	}
1806 
1807 	error = -EBADF;
1808 	switch (flock.l_type) {
1809 	case F_RDLCK:
1810 		if (!(filp->f_mode & FMODE_READ))
1811 			goto out;
1812 		break;
1813 	case F_WRLCK:
1814 		if (!(filp->f_mode & FMODE_WRITE))
1815 			goto out;
1816 		break;
1817 	case F_UNLCK:
1818 		break;
1819 	default:
1820 		error = -EINVAL;
1821 		goto out;
1822 	}
1823 
1824 	error = do_lock_file_wait(filp, cmd, file_lock);
1825 
1826 	/*
1827 	 * Attempt to detect a close/fcntl race and recover by
1828 	 * releasing the lock that was just acquired.
1829 	 */
1830 	/*
1831 	 * we need that spin_lock here - it prevents reordering between
1832 	 * update of inode->i_flock and check for it done in close().
1833 	 * rcu_read_lock() wouldn't do.
1834 	 */
1835 	spin_lock(&current->files->file_lock);
1836 	f = fcheck(fd);
1837 	spin_unlock(&current->files->file_lock);
1838 	if (!error && f != filp && flock.l_type != F_UNLCK) {
1839 		flock.l_type = F_UNLCK;
1840 		goto again;
1841 	}
1842 
1843 out:
1844 	locks_free_lock(file_lock);
1845 	return error;
1846 }
1847 
1848 #if BITS_PER_LONG == 32
1849 /* Report the first existing lock that would conflict with l.
1850  * This implements the F_GETLK command of fcntl().
1851  */
1852 int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
1853 {
1854 	struct file_lock file_lock;
1855 	struct flock64 flock;
1856 	int error;
1857 
1858 	error = -EFAULT;
1859 	if (copy_from_user(&flock, l, sizeof(flock)))
1860 		goto out;
1861 	error = -EINVAL;
1862 	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1863 		goto out;
1864 
1865 	error = flock64_to_posix_lock(filp, &file_lock, &flock);
1866 	if (error)
1867 		goto out;
1868 
1869 	error = vfs_test_lock(filp, &file_lock);
1870 	if (error)
1871 		goto out;
1872 
1873 	flock.l_type = file_lock.fl_type;
1874 	if (file_lock.fl_type != F_UNLCK)
1875 		posix_lock_to_flock64(&flock, &file_lock);
1876 
1877 	error = -EFAULT;
1878 	if (!copy_to_user(l, &flock, sizeof(flock)))
1879 		error = 0;
1880 
1881 out:
1882 	return error;
1883 }
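
/*
 * Userspace sketch with illustrative values (an l_len of 0 means "to EOF";
 * on 32-bit systems glibc issues F_GETLK64 on behalf of F_GETLK when large
 * file support is enabled).  On return l_type is F_UNLCK if the range could
 * be locked; otherwise the struct describes the first conflicting lock
 * found by vfs_test_lock():
 *
 *	struct flock64 fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,
 *	};
 *	if (fcntl(fd, F_GETLK64, &fl) == 0 && fl.l_type != F_UNLCK)
 *		printf("conflicting lock held by pid %ld\n", (long)fl.l_pid);
 */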
1884 
1885 /* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK64 and F_SETLKW64 commands of fcntl().
1887  */
1888 int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
1889 		struct flock64 __user *l)
1890 {
1891 	struct file_lock *file_lock = locks_alloc_lock();
1892 	struct flock64 flock;
1893 	struct inode *inode;
1894 	struct file *f;
1895 	int error;
1896 
1897 	if (file_lock == NULL)
1898 		return -ENOLCK;
1899 
1900 	/*
1901 	 * This might block, so we do it before checking the inode.
1902 	 */
1903 	error = -EFAULT;
1904 	if (copy_from_user(&flock, l, sizeof(flock)))
1905 		goto out;
1906 
1907 	inode = filp->f_path.dentry->d_inode;
1908 
1909 	/* Don't allow mandatory locks on files that may be memory mapped
1910 	 * and shared.
1911 	 */
1912 	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1913 		error = -EAGAIN;
1914 		goto out;
1915 	}
1916 
1917 again:
1918 	error = flock64_to_posix_lock(filp, file_lock, &flock);
1919 	if (error)
1920 		goto out;
	if (cmd == F_SETLKW64)
		file_lock->fl_flags |= FL_SLEEP;
1924 
1925 	error = -EBADF;
1926 	switch (flock.l_type) {
1927 	case F_RDLCK:
1928 		if (!(filp->f_mode & FMODE_READ))
1929 			goto out;
1930 		break;
1931 	case F_WRLCK:
1932 		if (!(filp->f_mode & FMODE_WRITE))
1933 			goto out;
1934 		break;
1935 	case F_UNLCK:
1936 		break;
1937 	default:
1938 		error = -EINVAL;
1939 		goto out;
1940 	}
1941 
1942 	error = do_lock_file_wait(filp, cmd, file_lock);
1943 
1944 	/*
1945 	 * Attempt to detect a close/fcntl race and recover by
1946 	 * releasing the lock that was just acquired.
1947 	 */
1948 	spin_lock(&current->files->file_lock);
1949 	f = fcheck(fd);
1950 	spin_unlock(&current->files->file_lock);
1951 	if (!error && f != filp && flock.l_type != F_UNLCK) {
1952 		flock.l_type = F_UNLCK;
1953 		goto again;
1954 	}
1955 
1956 out:
1957 	locks_free_lock(file_lock);
1958 	return error;
1959 }
1960 #endif /* BITS_PER_LONG == 32 */
1961 
/*
 * This function is called when the file is being removed from the
 * task's fd array.  POSIX locks belonging to this task are deleted
 * at this time, by issuing a whole-file F_UNLCK request below.
 */
1967 void locks_remove_posix(struct file *filp, fl_owner_t owner)
1968 {
1969 	struct file_lock lock;
1970 
1971 	/*
1972 	 * If there are no locks held on this file, we don't need to call
1973 	 * posix_lock_file().  Another process could be setting a lock on this
1974 	 * file at the same time, but we wouldn't remove that lock anyway.
1975 	 */
1976 	if (!filp->f_path.dentry->d_inode->i_flock)
1977 		return;
1978 
1979 	lock.fl_type = F_UNLCK;
1980 	lock.fl_flags = FL_POSIX | FL_CLOSE;
1981 	lock.fl_start = 0;
1982 	lock.fl_end = OFFSET_MAX;
1983 	lock.fl_owner = owner;
1984 	lock.fl_pid = current->tgid;
1985 	lock.fl_file = filp;
1986 	lock.fl_ops = NULL;
1987 	lock.fl_lmops = NULL;
1988 
1989 	vfs_lock_file(filp, F_SETLK, &lock, NULL);
1990 
1991 	if (lock.fl_ops && lock.fl_ops->fl_release_private)
1992 		lock.fl_ops->fl_release_private(&lock);
1993 }
1994 
1995 EXPORT_SYMBOL(locks_remove_posix);
1996 
/*
 * This function is called on the last close of an open file; it releases
 * any FLOCK locks and leases still attached to the file.
 */
2000 void locks_remove_flock(struct file *filp)
2001 {
	struct inode *inode = filp->f_path.dentry->d_inode;
2003 	struct file_lock *fl;
2004 	struct file_lock **before;
2005 
2006 	if (!inode->i_flock)
2007 		return;
2008 
2009 	if (filp->f_op && filp->f_op->flock) {
2010 		struct file_lock fl = {
2011 			.fl_pid = current->tgid,
2012 			.fl_file = filp,
2013 			.fl_flags = FL_FLOCK,
2014 			.fl_type = F_UNLCK,
2015 			.fl_end = OFFSET_MAX,
2016 		};
2017 		filp->f_op->flock(filp, F_SETLKW, &fl);
2018 		if (fl.fl_ops && fl.fl_ops->fl_release_private)
2019 			fl.fl_ops->fl_release_private(&fl);
2020 	}
2021 
2022 	lock_kernel();
2023 	before = &inode->i_flock;
2024 
2025 	while ((fl = *before) != NULL) {
2026 		if (fl->fl_file == filp) {
2027 			if (IS_FLOCK(fl)) {
2028 				locks_delete_lock(before);
2029 				continue;
2030 			}
2031 			if (IS_LEASE(fl)) {
2032 				lease_modify(before, F_UNLCK);
2033 				continue;
2034 			}
			/* Neither an FLOCK lock nor a lease: must not happen. */
			BUG();
		}
2038 		before = &fl->fl_next;
2039 	}
2040 	unlock_kernel();
2041 }
2042 
/**
 *	posix_unblock_lock - stop waiting for a file lock
 *	@filp: how the file was opened
 *	@waiter: the lock which was waiting
 *
 *	Used by lockd to abort waiting for a blocked lock request.
 */
2050 int
2051 posix_unblock_lock(struct file *filp, struct file_lock *waiter)
2052 {
2053 	int status = 0;
2054 
2055 	lock_kernel();
2056 	if (waiter->fl_next)
2057 		__locks_delete_block(waiter);
2058 	else
2059 		status = -ENOENT;
2060 	unlock_kernel();
2061 	return status;
2062 }
2063 
2064 EXPORT_SYMBOL(posix_unblock_lock);
2065 
2066 /**
 * vfs_cancel_lock - cancel a blocked byte-range lock request
 * @filp: The file the lock request was made against
 * @fl: The blocked lock request to cancel
 *
 * Used by lock managers to cancel blocked requests.
2072  */
2073 int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2074 {
2075 	if (filp->f_op && filp->f_op->lock)
2076 		return filp->f_op->lock(filp, F_CANCELLK, fl);
2077 	return 0;
2078 }
2079 
2080 EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2081 
2082 #ifdef CONFIG_PROC_FS
2083 #include <linux/proc_fs.h>
2084 #include <linux/seq_file.h>
2085 
2086 static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2087 							int id, char *pfx)
2088 {
2089 	struct inode *inode = NULL;
2090 	unsigned int fl_pid;
2091 
2092 	if (fl->fl_nspid)
2093 		fl_pid = pid_vnr(fl->fl_nspid);
2094 	else
2095 		fl_pid = fl->fl_pid;
2096 
2097 	if (fl->fl_file != NULL)
2098 		inode = fl->fl_file->f_path.dentry->d_inode;
2099 
2100 	seq_printf(f, "%d:%s ", id, pfx);
2101 	if (IS_POSIX(fl)) {
2102 		seq_printf(f, "%6s %s ",
2103 			     (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
2104 			     (inode == NULL) ? "*NOINODE*" :
2105 			     mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
2106 	} else if (IS_FLOCK(fl)) {
2107 		if (fl->fl_type & LOCK_MAND) {
2108 			seq_printf(f, "FLOCK  MSNFS     ");
2109 		} else {
2110 			seq_printf(f, "FLOCK  ADVISORY  ");
2111 		}
2112 	} else if (IS_LEASE(fl)) {
2113 		seq_printf(f, "LEASE  ");
2114 		if (fl->fl_type & F_INPROGRESS)
2115 			seq_printf(f, "BREAKING  ");
2116 		else if (fl->fl_file)
2117 			seq_printf(f, "ACTIVE    ");
2118 		else
2119 			seq_printf(f, "BREAKER   ");
2120 	} else {
2121 		seq_printf(f, "UNKNOWN UNKNOWN  ");
2122 	}
2123 	if (fl->fl_type & LOCK_MAND) {
2124 		seq_printf(f, "%s ",
2125 			       (fl->fl_type & LOCK_READ)
2126 			       ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
2127 			       : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
2128 	} else {
2129 		seq_printf(f, "%s ",
2130 			       (fl->fl_type & F_INPROGRESS)
2131 			       ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ "
2132 			       : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ ");
2133 	}
2134 	if (inode) {
2135 #ifdef WE_CAN_BREAK_LSLK_NOW
2136 		seq_printf(f, "%d %s:%ld ", fl_pid,
2137 				inode->i_sb->s_id, inode->i_ino);
2138 #else
2139 		/* userspace relies on this representation of dev_t ;-( */
2140 		seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
2141 				MAJOR(inode->i_sb->s_dev),
2142 				MINOR(inode->i_sb->s_dev), inode->i_ino);
2143 #endif
2144 	} else {
2145 		seq_printf(f, "%d <none>:0 ", fl_pid);
2146 	}
2147 	if (IS_POSIX(fl)) {
2148 		if (fl->fl_end == OFFSET_MAX)
2149 			seq_printf(f, "%Ld EOF\n", fl->fl_start);
2150 		else
2151 			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2152 	} else {
2153 		seq_printf(f, "0 EOF\n");
2154 	}
2155 }
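
/*
 * The format above produces /proc/locks lines such as these (all values
 * illustrative):
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:01:5678 0 EOF
 *	2: FLOCK  ADVISORY  WRITE 1235 08:01:9012 0 EOF
 *	2: -> FLOCK  ADVISORY  WRITE 1236 08:01:9012 0 EOF
 *
 * A " ->" prefix (passed in by locks_show() below) marks a waiter blocked
 * by the lock on the preceding line.
 */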
2156 
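/*
 * Note that f->private is used as an integer ordinal rather than a
 * pointer: it holds the number printed at the front of each line and
 * starts at 1 (set in locks_start() below).
 */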
2157 static int locks_show(struct seq_file *f, void *v)
2158 {
2159 	struct file_lock *fl, *bfl;
2160 
2161 	fl = list_entry(v, struct file_lock, fl_link);
2162 
2163 	lock_get_status(f, fl, (long)f->private, "");
2164 
2165 	list_for_each_entry(bfl, &fl->fl_block, fl_block)
2166 		lock_get_status(f, bfl, (long)f->private, " ->");
2167 
2168 	f->private++;
2169 	return 0;
2170 }
2171 
2172 static void *locks_start(struct seq_file *f, loff_t *pos)
2173 {
2174 	lock_kernel();
2175 	f->private = (void *)1;
2176 	return seq_list_start(&file_lock_list, *pos);
2177 }
2178 
2179 static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2180 {
2181 	return seq_list_next(v, &file_lock_list, pos);
2182 }
2183 
2184 static void locks_stop(struct seq_file *f, void *v)
2185 {
2186 	unlock_kernel();
2187 }
2188 
2189 static const struct seq_operations locks_seq_operations = {
2190 	.start	= locks_start,
2191 	.next	= locks_next,
2192 	.stop	= locks_stop,
2193 	.show	= locks_show,
2194 };
2195 
2196 static int locks_open(struct inode *inode, struct file *filp)
2197 {
2198 	return seq_open(filp, &locks_seq_operations);
2199 }
2200 
2201 static const struct file_operations proc_locks_operations = {
2202 	.open		= locks_open,
2203 	.read		= seq_read,
2204 	.llseek		= seq_lseek,
2205 	.release	= seq_release,
2206 };
2207 
2208 static int __init proc_locks_init(void)
2209 {
2210 	proc_create("locks", 0, NULL, &proc_locks_operations);
2211 	return 0;
2212 }
2213 module_init(proc_locks_init);
#endif /* CONFIG_PROC_FS */
2215 
2216 /**
2217  *	lock_may_read - checks that the region is free of locks
2218  *	@inode: the inode that is being read
2219  *	@start: the first byte to read
2220  *	@len: the number of bytes to read
2221  *
2222  *	Emulates Windows locking requirements.  Whole-file
2223  *	mandatory locks (share modes) can prohibit a read and
2224  *	byte-range POSIX locks can prohibit a read if they overlap.
2225  *
2226  *	N.B. this function is only ever called
2227  *	from knfsd and ownership of locks is never checked.
2228  */
2229 int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
2230 {
2231 	struct file_lock *fl;
2232 	int result = 1;
2233 	lock_kernel();
2234 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2235 		if (IS_POSIX(fl)) {
2236 			if (fl->fl_type == F_RDLCK)
2237 				continue;
2238 			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2239 				continue;
2240 		} else if (IS_FLOCK(fl)) {
2241 			if (!(fl->fl_type & LOCK_MAND))
2242 				continue;
2243 			if (fl->fl_type & LOCK_READ)
2244 				continue;
2245 		} else
2246 			continue;
2247 		result = 0;
2248 		break;
2249 	}
2250 	unlock_kernel();
2251 	return result;
2252 }
2253 
2254 EXPORT_SYMBOL(lock_may_read);
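
/*
 * Hypothetical caller sketch (the error value is illustrative, not
 * mandated here): refuse to service a read over a locked range.
 *
 *	if (!lock_may_read(inode, offset, count))
 *		return -EACCES;
 */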
2255 
2256 /**
2257  *	lock_may_write - checks that the region is free of locks
2258  *	@inode: the inode that is being written
2259  *	@start: the first byte to write
2260  *	@len: the number of bytes to write
2261  *
2262  *	Emulates Windows locking requirements.  Whole-file
2263  *	mandatory locks (share modes) can prohibit a write and
2264  *	byte-range POSIX locks can prohibit a write if they overlap.
2265  *
2266  *	N.B. this function is only ever called
2267  *	from knfsd and ownership of locks is never checked.
2268  */
2269 int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
2270 {
2271 	struct file_lock *fl;
2272 	int result = 1;
2273 	lock_kernel();
2274 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2275 		if (IS_POSIX(fl)) {
2276 			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2277 				continue;
2278 		} else if (IS_FLOCK(fl)) {
2279 			if (!(fl->fl_type & LOCK_MAND))
2280 				continue;
2281 			if (fl->fl_type & LOCK_WRITE)
2282 				continue;
2283 		} else
2284 			continue;
2285 		result = 0;
2286 		break;
2287 	}
2288 	unlock_kernel();
2289 	return result;
2290 }
2291 
2292 EXPORT_SYMBOL(lock_may_write);
2293 
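/*
 * Create the slab cache that file_lock structures are allocated from;
 * SLAB_PANIC turns failure to create the cache into an early boot panic.
 */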
2294 static int __init filelock_init(void)
2295 {
2296 	filelock_cache = kmem_cache_create("file_lock_cache",
2297 			sizeof(struct file_lock), 0, SLAB_PANIC,
2298 			init_once);
2299 	return 0;
2300 }
2301 
2302 core_initcall(filelock_init);
2303