/*
 *  linux/fs/locks.c
 *
 *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
 *  Doug Evans (dje@spiff.uucp), August 07, 1992
 *
 *  Deadlock detection added.
 *  FIXME: one thing isn't handled yet:
 *	- mandatory locks (requires lots of changes elsewhere)
 *  Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
 *
 *  Miscellaneous edits, and a total rewrite of posix_lock_file() code.
 *  Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
 *
 *  Converted file_lock_table to a linked list from an array, which eliminates
 *  the limits on how many active file locks are open.
 *  Chad Page (pageone@netcom.com), November 27, 1994
 *
 *  Removed dependency on file descriptors. dup()'ed file descriptors now
 *  get the same locks as the original file descriptors, and a close() on
 *  any file descriptor removes ALL the locks on the file for the current
 *  process. Since locks still depend on the process id, locks are inherited
 *  after an exec() but not after a fork(). This agrees with POSIX, and both
 *  BSD and SVR4 practice.
 *  Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
 *
 *  Scrapped free list which is redundant now that we allocate locks
 *  dynamically with kmalloc()/kfree().
 *  Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
 *
 *  Implemented two lock personalities - FL_FLOCK and FL_POSIX.
 *
 *  FL_POSIX locks are created with calls to fcntl() and lockf() through the
 *  fcntl() system call. They have the semantics described above.
 *
 *  FL_FLOCK locks are created with calls to flock(), through the flock()
 *  system call, which is new. Old C libraries implement flock() via fcntl()
 *  and will continue to use the old, broken implementation.
 *
 *  FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
 *  with a file pointer (filp). As a result they can be shared by a parent
 *  process and its children after a fork(). They are removed when the last
 *  file descriptor referring to the file pointer is closed (unless explicitly
 *  unlocked).
 *
 *  FL_FLOCK locks never deadlock; an existing lock is always removed before
 *  upgrading from shared to exclusive (or vice versa). When this happens
 *  any processes blocked by the current lock are woken up and allowed to
 *  run before the new lock is applied.
 *  Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
 *
 *  Removed some race conditions in flock_lock_file(), marked other possible
 *  races. Just grep for FIXME to see them.
 *  Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
 *
 *  Addressed Dmitry's concerns. Deadlock checking no longer recursive.
 *  Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
 *  once we've checked for blocking and deadlocking.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
 *
 *  Initial implementation of mandatory locks. SunOS turned out to be
 *  a rotten model, so I implemented the "obvious" semantics.
 *  See 'Documentation/mandatory.txt' for details.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
 *
 *  Don't allow mandatory locks on mmap()'ed files. Added simple functions to
 *  check if a file has mandatory locks, used by mmap(), open() and creat() to
 *  see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
 *  Manual, Section 2.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
 *
 *  Tidied up block list handling. Added '/proc/locks' interface.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
 *
 *  Fixed deadlock condition for pathological code that mixes calls to
 *  flock() and fcntl().
 *  Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
 *
 *  Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
 *  for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
 *  guarantee sensible behaviour in the case where file system modules might
 *  be compiled with different options than the kernel itself.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 *  Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
 *  (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 *  Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
 *  locks. Changed process synchronisation to avoid dereferencing locks that
 *  have already been freed.
 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
 *
 *  Made the block list a circular list to minimise searching in the list.
 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
 *
 *  Made mandatory locking a mount option. Default is not to allow mandatory
 *  locking.
 *  Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
 *
 *  Some adaptations for NFS support.
 *  Olaf Kirch (okir@monad.swb.de), Dec 1996.
 *
 *  Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
 *
 *  Use slab allocator instead of kmalloc/kfree.
 *  Use generic list implementation from <linux/list.h>.
 *  Sped up posix_locks_deadlock by only considering blocked locks.
 *  Matthew Wilcox <willy@debian.org>, March, 2000.
 *
 *  Leases and LOCK_MAND
 *  Matthew Wilcox <willy@debian.org>, June, 2000.
 *  Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
 */

#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>

#include <asm/semaphore.h>
#include <asm/uaccess.h>

#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & FL_LEASE)

int leases_enable = 1;
int lease_break_time = 45;

#define for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)

static LIST_HEAD(file_lock_list);
static LIST_HEAD(blocked_list);

static kmem_cache_t *filelock_cache __read_mostly;

/* Allocate an empty lock structure. */
static struct file_lock *locks_alloc_lock(void)
{
	return kmem_cache_alloc(filelock_cache, SLAB_KERNEL);
}

static void locks_release_private(struct file_lock *fl)
{
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_release_private)
			fl->fl_ops->fl_release_private(fl);
		fl->fl_ops = NULL;
	}
	if (fl->fl_lmops) {
		if (fl->fl_lmops->fl_release_private)
			fl->fl_lmops->fl_release_private(fl);
		fl->fl_lmops = NULL;
	}
}

/* Free a lock which is not in use. */
static void locks_free_lock(struct file_lock *fl)
{
	if (fl == NULL) {
		BUG();
		return;
	}
	if (waitqueue_active(&fl->fl_wait))
		panic("Attempting to free lock with active wait queue");

	if (!list_empty(&fl->fl_block))
		panic("Attempting to free lock with active block list");

	if (!list_empty(&fl->fl_link))
		panic("Attempting to free lock on active lock list");

	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}

void locks_init_lock(struct file_lock *fl)
{
	INIT_LIST_HEAD(&fl->fl_link);
	INIT_LIST_HEAD(&fl->fl_block);
	init_waitqueue_head(&fl->fl_wait);
	fl->fl_next = NULL;
	fl->fl_fasync = NULL;
	fl->fl_owner = NULL;
	fl->fl_pid = 0;
	fl->fl_file = NULL;
	fl->fl_flags = 0;
	fl->fl_type = 0;
	fl->fl_start = fl->fl_end = 0;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;
}

EXPORT_SYMBOL(locks_init_lock);

/*
 * Initialises the fields of the file lock which are invariant for
 * free file_locks.
 */
static void init_once(void *foo, kmem_cache_t *cache, unsigned long flags)
{
	struct file_lock *lock = (struct file_lock *) foo;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) !=
					SLAB_CTOR_CONSTRUCTOR)
		return;

	locks_init_lock(lock);
}

static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
{
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_copy_lock)
			fl->fl_ops->fl_copy_lock(new, fl);
		new->fl_ops = fl->fl_ops;
	}
	if (fl->fl_lmops) {
		if (fl->fl_lmops->fl_copy_lock)
			fl->fl_lmops->fl_copy_lock(new, fl);
		new->fl_lmops = fl->fl_lmops;
	}
}

/*
 * Initialize a new lock from an existing file_lock structure.
 */
static void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
	new->fl_file = NULL;
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_ops = NULL;
	new->fl_lmops = NULL;
}

void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	locks_release_private(new);

	__locks_copy_lock(new, fl);
	new->fl_file = fl->fl_file;
	new->fl_ops = fl->fl_ops;
	new->fl_lmops = fl->fl_lmops;

	locks_copy_private(new, fl);
}

EXPORT_SYMBOL(locks_copy_lock);

static inline int flock_translate_cmd(int cmd)
{
	if (cmd & LOCK_MAND)
		return cmd & (LOCK_MAND | LOCK_RW);
	switch (cmd) {
	case LOCK_SH:
		return F_RDLCK;
	case LOCK_EX:
		return F_WRLCK;
	case LOCK_UN:
		return F_UNLCK;
	}
	return -EINVAL;
}

/* Fill in a file_lock structure with an appropriate FLOCK lock. */
static int flock_make_lock(struct file *filp, struct file_lock **lock,
		unsigned int cmd)
{
	struct file_lock *fl;
	int type = flock_translate_cmd(cmd);
	if (type < 0)
		return type;

	fl = locks_alloc_lock();
	if (fl == NULL)
		return -ENOMEM;

	fl->fl_file = filp;
	fl->fl_pid = current->tgid;
	fl->fl_flags = FL_FLOCK;
	fl->fl_type = type;
	fl->fl_end = OFFSET_MAX;

	*lock = fl;
	return 0;
}

static int assign_type(struct file_lock *fl, int type)
{
	switch (type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 * style lock.
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	off_t start, end;

	switch (l->l_whence) {
	case 0: /* SEEK_SET */
		start = 0;
		break;
	case 1: /* SEEK_CUR */
		start = filp->f_pos;
		break;
	case 2: /* SEEK_END */
		start = i_size_read(filp->f_dentry->d_inode);
		break;
	default:
		return -EINVAL;
	}

	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
	   POSIX-2001 defines it. */
	start += l->l_start;
	if (start < 0)
		return -EINVAL;
	fl->fl_end = OFFSET_MAX;
	if (l->l_len > 0) {
		end = start + l->l_len - 1;
		fl->fl_end = end;
	} else if (l->l_len < 0) {
		end = start - 1;
		fl->fl_end = end;
		start += l->l_len;
		if (start < 0)
			return -EINVAL;
	}
	fl->fl_start = start;	/* we record the absolute position */
	if (fl->fl_end < fl->fl_start)
		return -EOVERFLOW;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}
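
/*
 * Worked example (illustrative only, not part of the kernel build):
 * with l_whence = SEEK_SET, l_start = 100 and l_len = -50, the
 * POSIX-2001 rules above give fl_start = 50 and fl_end = 99, i.e. the
 * lock covers bytes [50, 99].  With l_len = 0 the lock runs to
 * end-of-file, represented here as fl_end = OFFSET_MAX.  A minimal
 * userspace sketch exercising this conversion (fd is assumed to be a
 * file open for writing):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 100,
 *		.l_len    = -50,	// locks bytes [50, 99]
 *	};
 *	fcntl(fd, F_SETLK, &fl);
 */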

#if BITS_PER_LONG == 32
static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	loff_t start;

	switch (l->l_whence) {
	case 0: /* SEEK_SET */
		start = 0;
		break;
	case 1: /* SEEK_CUR */
		start = filp->f_pos;
		break;
	case 2: /* SEEK_END */
		start = i_size_read(filp->f_dentry->d_inode);
		break;
	default:
		return -EINVAL;
	}

	start += l->l_start;
	if (start < 0)
		return -EINVAL;
	fl->fl_end = OFFSET_MAX;
	if (l->l_len > 0) {
		fl->fl_end = start + l->l_len - 1;
	} else if (l->l_len < 0) {
		fl->fl_end = start - 1;
		start += l->l_len;
		if (start < 0)
			return -EINVAL;
	}
	fl->fl_start = start;	/* we record the absolute position */
	if (fl->fl_end < fl->fl_start)
		return -EOVERFLOW;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	switch (l->l_type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = l->l_type;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
#endif

/* default lease lock manager operations */
static void lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
}

static void lease_release_private_callback(struct file_lock *fl)
{
	if (!fl->fl_file)
		return;

	f_delown(fl->fl_file);
	fl->fl_file->f_owner.signum = 0;
}

static int lease_mylease_callback(struct file_lock *fl, struct file_lock *try)
{
	return fl->fl_file == try->fl_file;
}

static struct lock_manager_operations lease_manager_ops = {
	.fl_break = lease_break_callback,
	.fl_release_private = lease_release_private_callback,
	.fl_mylease = lease_mylease_callback,
	.fl_change = lease_modify,
};

/*
 * Initialize a lease, use the default lock manager operations
 */
static int lease_init(struct file *filp, int type, struct file_lock *fl)
{
	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;

	fl->fl_file = filp;
	fl->fl_flags = FL_LEASE;
	if (assign_type(fl, type) != 0)
		return -EINVAL;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;
	fl->fl_ops = NULL;
	fl->fl_lmops = &lease_manager_ops;
	return 0;
}

/* Allocate a file_lock initialised to this type of lease */
static int lease_alloc(struct file *filp, int type, struct file_lock **flp)
{
	struct file_lock *fl = locks_alloc_lock();
	int error;

	if (fl == NULL)
		return -ENOMEM;

	error = lease_init(filp, type, fl);
	if (error) {
		/* Free the lock here rather than in lease_init(), which
		 * is also called on caller-supplied (e.g. on-stack) locks
		 * that must never be passed to locks_free_lock().
		 */
		locks_free_lock(fl);
		return error;
	}
	*flp = fl;
	return 0;
}

/* Check if two locks overlap each other.
 */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
	return ((fl1->fl_end >= fl2->fl_start) &&
		(fl2->fl_end >= fl1->fl_start));
}
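
/*
 * Example (illustrative): fl1 = [0, 9] and fl2 = [5, 14] overlap, since
 * 9 >= 5 and 14 >= 0; fl1 = [0, 9] and fl2 = [10, 19] do not, because
 * fl1->fl_end < fl2->fl_start.  Both comparisons must hold.
 */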

/*
 * Check whether two locks have the same owner.
 */
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	if (fl1->fl_lmops && fl1->fl_lmops->fl_compare_owner)
		return fl2->fl_lmops == fl1->fl_lmops &&
			fl1->fl_lmops->fl_compare_owner(fl1, fl2);
	return fl1->fl_owner == fl2->fl_owner;
}

/* Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
 */
static void __locks_delete_block(struct file_lock *waiter)
{
	list_del_init(&waiter->fl_block);
	list_del_init(&waiter->fl_link);
	waiter->fl_next = NULL;
}

/* Same as __locks_delete_block(), but takes the kernel lock itself.
 */
static void locks_delete_block(struct file_lock *waiter)
{
	lock_kernel();
	__locks_delete_block(waiter);
	unlock_kernel();
}

/* Insert waiter into blocker's block list.
 * We use a circular list so that processes can be easily woken up in
 * the order they blocked. The documentation doesn't require this but
 * it seems like the reasonable thing to do.
 */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter)
{
	BUG_ON(!list_empty(&waiter->fl_block));
	list_add_tail(&waiter->fl_block, &blocker->fl_block);
	waiter->fl_next = blocker;
	if (IS_POSIX(blocker))
		list_add(&waiter->fl_link, &blocked_list);
}

/* Wake up processes blocked waiting for blocker.
 * Empty blocker's block list, notifying each waiter's lock manager if
 * it has one and waking the waiter up directly otherwise.
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	while (!list_empty(&blocker->fl_block)) {
		struct file_lock *waiter = list_entry(blocker->fl_block.next,
				struct file_lock, fl_block);
		__locks_delete_block(waiter);
		if (waiter->fl_lmops && waiter->fl_lmops->fl_notify)
			waiter->fl_lmops->fl_notify(waiter);
		else
			wake_up(&waiter->fl_wait);
	}
}

/* Insert file lock fl into an inode's lock list at the position indicated
 * by pos. At the same time add the lock to the global file lock list.
 */
static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
{
	list_add(&fl->fl_link, &file_lock_list);

	/* insert into file's list */
	fl->fl_next = *pos;
	*pos = fl;

	if (fl->fl_ops && fl->fl_ops->fl_insert)
		fl->fl_ops->fl_insert(fl);
}

/*
 * Delete a lock and then free it.
 * Wake up processes that are blocked waiting for this lock,
 * notify the FS that the lock has been cleared and
 * finally free the lock.
 */
static void locks_delete_lock(struct file_lock **thisfl_p)
{
	struct file_lock *fl = *thisfl_p;

	*thisfl_p = fl->fl_next;
	fl->fl_next = NULL;
	list_del_init(&fl->fl_link);

	fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
	if (fl->fl_fasync != NULL) {
		printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
		fl->fl_fasync = NULL;
	}

	if (fl->fl_ops && fl->fl_ops->fl_remove)
		fl->fl_ops->fl_remove(fl);

	locks_wake_up_blocks(fl);
	locks_free_lock(fl);
}

/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 * implementing the shared/exclusive check for overlapping locks.
 */
static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	if (sys_fl->fl_type == F_WRLCK)
		return 1;
	if (caller_fl->fl_type == F_WRLCK)
		return 1;
	return 0;
}

/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
 * checking before calling the locks_conflict().
 */
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* POSIX locks owned by the same process do not conflict with
	 * each other.
	 */
	if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl))
		return 0;

	/* Check whether they overlap */
	if (!locks_overlap(caller_fl, sys_fl))
		return 0;

	return locks_conflict(caller_fl, sys_fl);
}

/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
 * checking before calling the locks_conflict().
 */
static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* FLOCK locks referring to the same filp do not conflict with
	 * each other.
	 */
	if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
		return 0;
	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
		return 0;

	return locks_conflict(caller_fl, sys_fl);
}

static int interruptible_sleep_on_locked(wait_queue_head_t *fl_wait, int timeout)
{
	int result = 0;
	DECLARE_WAITQUEUE(wait, current);

	__set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(fl_wait, &wait);
	if (timeout == 0)
		schedule();
	else
		result = schedule_timeout(timeout);
	if (signal_pending(current))
		result = -ERESTARTSYS;
	remove_wait_queue(fl_wait, &wait);
	__set_current_state(TASK_RUNNING);
	return result;
}

static int locks_block_on_timeout(struct file_lock *blocker, struct file_lock *waiter, int time)
{
	int result;
	locks_insert_block(blocker, waiter);
	result = interruptible_sleep_on_locked(&waiter->fl_wait, time);
	__locks_delete_block(waiter);
	return result;
}

int
posix_test_lock(struct file *filp, struct file_lock *fl,
		struct file_lock *conflock)
{
	struct file_lock *cfl;

	lock_kernel();
	for (cfl = filp->f_dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
		if (!IS_POSIX(cfl))
			continue;
		if (posix_locks_conflict(cfl, fl))
			break;
	}
	if (cfl) {
		__locks_copy_lock(conflock, cfl);
		unlock_kernel();
		return 1;
	}
	unlock_kernel();
	return 0;
}

EXPORT_SYMBOL(posix_test_lock);

/* This function tests for deadlock condition before putting a process to
 * sleep. The detection scheme is no longer recursive. Recursive was neat,
 * but dangerous - we risked stack corruption if the lock data was bad, or
 * if the recursion was too deep for any other reason.
 *
 * We rely on the fact that a task can only be on one lock's wait queue
 * at a time. When we find blocked_task on a wait queue we can re-search
 * with blocked_task equal to that queue's owner, until either blocked_task
 * isn't found, or blocked_task is found on a queue owned by my_task.
 *
 * Note: the above assumption may not be true when handling lock requests
 * from a broken NFS client. But broken NFS clients have a lot more to
 * worry about than proper deadlock detection anyway... --okir
 */
int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	struct list_head *tmp;

next_task:
	if (posix_same_owner(caller_fl, block_fl))
		return 1;
	list_for_each(tmp, &blocked_list) {
		struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link);
		if (posix_same_owner(fl, block_fl)) {
			fl = fl->fl_next;
			block_fl = fl;
			goto next_task;
		}
	}
	return 0;
}

EXPORT_SYMBOL(posix_locks_deadlock);
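
/*
 * Worked example (illustrative): task A holds lock L1 and does F_SETLKW
 * on L2, while task B holds L2 and is already blocked waiting for L1.
 * Checking A's request finds B's blocked request on blocked_list (its
 * owner matches the holder of L2), so the search re-targets the lock B
 * is waiting for; that lock is owned by A itself, so the function
 * returns 1 and A's fcntl() fails with -EDEADLK instead of sleeping
 * forever.
 */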

/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
 * at the head of the list, but that's secret knowledge known only to
 * flock_lock_file and posix_lock_file.
 */
static int flock_lock_file(struct file *filp, struct file_lock *new_fl)
{
	struct file_lock **before;
	struct inode * inode = filp->f_dentry->d_inode;
	int error = 0;
	int found = 0;

	lock_kernel();
	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (filp != fl->fl_file)
			continue;
		if (new_fl->fl_type == fl->fl_type)
			goto out;
		found = 1;
		locks_delete_lock(before);
		break;
	}
	unlock_kernel();

	if (new_fl->fl_type == F_UNLCK)
		return 0;

	/*
	 * If a higher-priority process was blocked on the old file lock,
	 * give it the opportunity to lock the file.
	 */
	if (found)
		cond_resched();

	lock_kernel();
	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (!flock_locks_conflict(new_fl, fl))
			continue;
		error = -EAGAIN;
		if (new_fl->fl_flags & FL_SLEEP)
			locks_insert_block(fl, new_fl);
		goto out;
	}
	locks_insert_lock(&inode->i_flock, new_fl);
	error = 0;

out:
	unlock_kernel();
	return error;
}

static int __posix_lock_file_conf(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
{
	struct file_lock *fl;
	struct file_lock *new_fl, *new_fl2;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock **before;
	int error, added = 0;

	/*
	 * We may need two file_lock structures for this operation,
	 * so we get them in advance to avoid races.
	 */
	new_fl = locks_alloc_lock();
	new_fl2 = locks_alloc_lock();

	lock_kernel();
	if (request->fl_type != F_UNLCK) {
		for_each_lock(inode, before) {
			struct file_lock *fl = *before;
			if (!IS_POSIX(fl))
				continue;
			if (!posix_locks_conflict(request, fl))
				continue;
			if (conflock)
				locks_copy_lock(conflock, fl);
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			error = -EDEADLK;
			if (posix_locks_deadlock(request, fl))
				goto out;
			error = -EAGAIN;
			locks_insert_block(fl, request);
			goto out;
		}
	}

	/* If we're just looking for a conflict, we're done. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	error = -ENOLCK; /* "no luck" */
	if (!(new_fl && new_fl2))
		goto out;

	/*
	 * We've allocated the new locks in advance, so there are no
	 * errors possible (and no blocking operations) from here on.
	 *
	 * Find the first old lock with the same owner as the new lock.
	 */

	before = &inode->i_flock;

	/* First skip locks owned by other processes.  */
	while ((fl = *before) && (!IS_POSIX(fl) ||
				  !posix_same_owner(request, fl))) {
		before = &fl->fl_next;
	}

	/* Process locks with this owner.  */
	while ((fl = *before) && posix_same_owner(request, fl)) {
		/* Detect adjacent or overlapping regions (if same lock type)
		 */
		if (request->fl_type == fl->fl_type) {
			/* In all comparisons of start vs end, use
			 * "start - 1" rather than "end + 1". If end
			 * is OFFSET_MAX, end + 1 will become negative.
			 */
			if (fl->fl_end < request->fl_start - 1)
				goto next_lock;
			/* If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
			if (fl->fl_start - 1 > request->fl_end)
				break;

			/* If we come here, the new and old lock are of the
			 * same type and adjacent or overlapping. Make one
			 * lock yielding from the lower start address of both
			 * locks to the higher end address.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock(before);
				continue;
			}
			request = fl;
			added = 1;
		}
		else {
			/* Processing for different lock types is a bit
			 * more complex.
			 */
			if (fl->fl_end < request->fl_start)
				goto next_lock;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = 1;
			if (fl->fl_start < request->fl_start)
				left = fl;
			/* If the next lock in the list has a higher end
			 * address than the new one, insert the new one here.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/* The new lock completely replaces an old
				 * one (This may happen several times).
				 */
				if (added) {
					locks_delete_lock(before);
					continue;
				}
				/* Replace the old lock with the new one.
				 * Wake up anybody waiting for the old one,
				 * as the change in lock type might satisfy
				 * their needs.
				 */
				locks_wake_up_blocks(fl);
				fl->fl_start = request->fl_start;
				fl->fl_end = request->fl_end;
				fl->fl_type = request->fl_type;
				locks_release_private(fl);
				locks_copy_private(fl, request);
				request = fl;
				added = 1;
			}
		}
		/* Go on to next lock.
		 */
	next_lock:
		before = &fl->fl_next;
	}

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK)
			goto out;
		locks_copy_lock(new_fl, request);
		locks_insert_lock(before, new_fl);
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* The new lock breaks the old one in two pieces,
			 * so we have to use the second new lock.
			 */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock(before, left);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
 out:
	unlock_kernel();
	/*
	 * Free any unused locks.
	 */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	return error;
}
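
/*
 * Worked example of the splitting above (illustrative only): a process
 * holds a write lock on [0, 99] and unlocks [40, 59].  The old lock is
 * found as both "left" and "right", so the second pre-allocated lock
 * (new_fl2) supplies the extra piece: the original lock is trimmed to
 * [0, 39] via left->fl_end = 40 - 1, and its copy becomes [60, 99] via
 * right->fl_start = 59 + 1.  This is why two file_lock structures are
 * allocated up front.
 */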

/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 */
int posix_lock_file(struct file *filp, struct file_lock *fl)
{
	return __posix_lock_file_conf(filp->f_dentry->d_inode, fl, NULL);
}
EXPORT_SYMBOL(posix_lock_file);

/**
 * posix_lock_file_conf - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 * @conflock: Place to return a copy of the conflicting lock, if found.
 *
 * Except for the conflock parameter, acts just like posix_lock_file.
 */
int posix_lock_file_conf(struct file *filp, struct file_lock *fl,
			struct file_lock *conflock)
{
	return __posix_lock_file_conf(filp->f_dentry->d_inode, fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file_conf);

/**
 * posix_lock_file_wait - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 */
int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = posix_lock_file(filp, fl);
		if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP))
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}
EXPORT_SYMBOL(posix_lock_file_wait);

/**
 * locks_mandatory_locked - Check for an active lock
 * @inode: the file to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from locks_verify_locked() only.
 */
int locks_mandatory_locked(struct inode *inode)
{
	fl_owner_t owner = current->files;
	struct file_lock *fl;

	/*
	 * Search the lock list for this inode for any POSIX locks.
	 */
	lock_kernel();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!IS_POSIX(fl))
			continue;
		if (fl->fl_owner != owner)
			break;
	}
	unlock_kernel();
	return fl ? -EAGAIN : 0;
}

/**
 * locks_mandatory_area - Check for a conflicting lock
 * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ
 *		for shared
 * @inode:      the file to check
 * @filp:       how the file was opened (if it was)
 * @offset:     start of area to check
 * @count:      length of area to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from rw_verify_area() and
 * locks_verify_truncate().
 */
int locks_mandatory_area(int read_write, struct inode *inode,
			 struct file *filp, loff_t offset,
			 size_t count)
{
	struct file_lock fl;
	int error;

	locks_init_lock(&fl);
	fl.fl_owner = current->files;
	fl.fl_pid = current->tgid;
	fl.fl_file = filp;
	fl.fl_flags = FL_POSIX | FL_ACCESS;
	if (filp && !(filp->f_flags & O_NONBLOCK))
		fl.fl_flags |= FL_SLEEP;
	fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
	fl.fl_start = offset;
	fl.fl_end = offset + count - 1;

	for (;;) {
		error = __posix_lock_file_conf(inode, &fl, NULL);
		if (error != -EAGAIN)
			break;
		if (!(fl.fl_flags & FL_SLEEP))
			break;
		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
		if (!error) {
			/*
			 * If we've been sleeping someone might have
			 * changed the permissions behind our back.
			 */
			if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
				continue;
		}

		locks_delete_block(&fl);
		break;
	}

	return error;
}

EXPORT_SYMBOL(locks_mandatory_area);

/* We already had a lease on this file; just change its type */
int lease_modify(struct file_lock **before, int arg)
{
	struct file_lock *fl = *before;
	int error = assign_type(fl, arg);

	if (error)
		return error;
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK)
		locks_delete_lock(before);
	return 0;
}

EXPORT_SYMBOL(lease_modify);

static void time_out_leases(struct inode *inode)
{
	struct file_lock **before;
	struct file_lock *fl;

	before = &inode->i_flock;
	while ((fl = *before) && IS_LEASE(fl) && (fl->fl_type & F_INPROGRESS)) {
		if ((fl->fl_break_time == 0)
				|| time_before(jiffies, fl->fl_break_time)) {
			before = &fl->fl_next;
			continue;
		}
		lease_modify(before, fl->fl_type & ~F_INPROGRESS);
		if (fl == *before)	/* lease_modify may have freed fl */
			before = &fl->fl_next;
	}
}

/**
 *	__break_lease	-	revoke all outstanding leases on file
 *	@inode: the inode of the file to return
 *	@mode: the open mode (read or write)
 *
 *	break_lease (inlined for speed) has checked there already
 *	is a lease on this file.  Leases are broken on a call to open()
 *	or truncate().  This function can sleep unless you
 *	specified %O_NONBLOCK to your open().
 */
int __break_lease(struct inode *inode, unsigned int mode)
{
	int error = 0, future;
	struct file_lock *new_fl, *flock;
	struct file_lock *fl;
	int alloc_err;
	unsigned long break_time;
	int i_have_this_lease = 0;

	alloc_err = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK,
			&new_fl);

	lock_kernel();

	time_out_leases(inode);

	flock = inode->i_flock;
	if ((flock == NULL) || !IS_LEASE(flock))
		goto out;

	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
		if (fl->fl_owner == current->files)
			i_have_this_lease = 1;

	if (mode & FMODE_WRITE) {
		/* If we want write access, we have to revoke any lease. */
		future = F_UNLCK | F_INPROGRESS;
	} else if (flock->fl_type & F_INPROGRESS) {
		/* If the lease is already being broken, we just leave it */
		future = flock->fl_type;
	} else if (flock->fl_type & F_WRLCK) {
		/* Downgrade the exclusive lease to a read-only lease. */
		future = F_RDLCK | F_INPROGRESS;
	} else {
		/* the existing lease was read-only, so we can read too. */
		goto out;
	}

	if (alloc_err && !i_have_this_lease && ((mode & O_NONBLOCK) == 0)) {
		error = alloc_err;
		goto out;
	}

	break_time = 0;
	if (lease_break_time > 0) {
		break_time = jiffies + lease_break_time * HZ;
		if (break_time == 0)
			break_time++;	/* so that 0 means no break time */
	}

	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
		if (fl->fl_type != future) {
			fl->fl_type = future;
			fl->fl_break_time = break_time;
			/* lease must have lmops break callback */
			fl->fl_lmops->fl_break(fl);
		}
	}

	if (i_have_this_lease || (mode & O_NONBLOCK)) {
		error = -EWOULDBLOCK;
		goto out;
	}

restart:
	break_time = flock->fl_break_time;
	if (break_time != 0) {
		break_time -= jiffies;
		if (break_time == 0)
			break_time++;
	}
	error = locks_block_on_timeout(flock, new_fl, break_time);
	if (error >= 0) {
		if (error == 0)
			time_out_leases(inode);
		/* Wait for the next lease that has not been broken yet */
		for (flock = inode->i_flock; flock && IS_LEASE(flock);
				flock = flock->fl_next) {
			if (flock->fl_type & F_INPROGRESS)
				goto restart;
		}
		error = 0;
	}

out:
	unlock_kernel();
	if (!alloc_err)
		locks_free_lock(new_fl);
	return error;
}

EXPORT_SYMBOL(__break_lease);

/**
 *	lease_get_mtime
 *	@inode: the inode
 *	@time:  pointer to a timespec which will contain the last modified time
 *
 * This is to force NFS clients to flush their caches for files with
 * exclusive leases.  The justification is that if someone has an
 * exclusive lease, then they could be modifying it.
 */
void lease_get_mtime(struct inode *inode, struct timespec *time)
{
	struct file_lock *flock = inode->i_flock;
	if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK))
		*time = current_fs_time(inode->i_sb);
	else
		*time = inode->i_mtime;
}

EXPORT_SYMBOL(lease_get_mtime);

/**
 *	fcntl_getlease - Enquire what lease is currently active
 *	@filp: the file
 *
 *	The value returned by this function will be one of
 *	(if no lease break is pending):
 *
 *	%F_RDLCK to indicate a shared lease is held.
 *
 *	%F_WRLCK to indicate an exclusive lease is held.
 *
 *	%F_UNLCK to indicate no lease is held.
 *
 *	(if a lease break is pending):
 *
 *	%F_RDLCK to indicate an exclusive lease needs to be
 *		changed to a shared lease (or removed).
 *
 *	%F_UNLCK to indicate the lease needs to be removed.
 *
 *	XXX: sfr & willy disagree over whether F_INPROGRESS
 *	should be returned to userspace.
 */
int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	int type = F_UNLCK;

	lock_kernel();
	time_out_leases(filp->f_dentry->d_inode);
	for (fl = filp->f_dentry->d_inode->i_flock; fl && IS_LEASE(fl);
			fl = fl->fl_next) {
		if (fl->fl_file == filp) {
			type = fl->fl_type & ~F_INPROGRESS;
			break;
		}
	}
	unlock_kernel();
	return type;
}

/**
 *	__setlease	-	sets a lease on an open file
 *	@filp: file pointer
 *	@arg: type of lease to obtain
 *	@flp: input - file_lock to use, output - file_lock inserted
 *
 *	The (input) flp->fl_lmops->fl_break function is required
 *	by break_lease().
 *
 *	Called with kernel lock held.
 */
static int __setlease(struct file *filp, long arg, struct file_lock **flp)
{
	struct file_lock *fl, **before, **my_before = NULL, *lease;
	struct dentry *dentry = filp->f_dentry;
	struct inode *inode = dentry->d_inode;
	int error, rdlease_count = 0, wrlease_count = 0;

	time_out_leases(inode);

	error = -EINVAL;
	if (!flp || !(*flp) || !(*flp)->fl_lmops || !(*flp)->fl_lmops->fl_break)
		goto out;

	lease = *flp;

	error = -EAGAIN;
	if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
		goto out;
	if ((arg == F_WRLCK)
	    && ((atomic_read(&dentry->d_count) > 1)
		|| (atomic_read(&inode->i_count) > 1)))
		goto out;

	/*
	 * At this point, we know that if there is an exclusive
	 * lease on this file, then we hold it on this filp
	 * (otherwise our open of this file would have blocked).
	 * And if we are trying to acquire an exclusive lease,
	 * then the file is not open by anyone (including us)
	 * except for this filp.
	 */
	for (before = &inode->i_flock;
			((fl = *before) != NULL) && IS_LEASE(fl);
			before = &fl->fl_next) {
		if (lease->fl_lmops->fl_mylease(fl, lease))
			my_before = before;
		else if (fl->fl_type == (F_INPROGRESS | F_UNLCK))
			/*
			 * Someone is in the process of opening this
			 * file for writing so we may not take an
			 * exclusive lease on it.
			 */
			wrlease_count++;
		else
			rdlease_count++;
	}

	if ((arg == F_RDLCK && (wrlease_count > 0)) ||
	    (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0)))
		goto out;

	if (my_before != NULL) {
		error = lease->fl_lmops->fl_change(my_before, arg);
		goto out;
	}

	error = 0;
	if (arg == F_UNLCK)
		goto out;

	error = -EINVAL;
	if (!leases_enable)
		goto out;

	error = lease_alloc(filp, arg, &fl);
	if (error)
		goto out;

	locks_copy_lock(fl, lease);

	locks_insert_lock(before, fl);

	*flp = fl;
out:
	return error;
}

/**
 *	setlease	-	sets a lease on an open file
 *	@filp: file pointer
 *	@arg: type of lease to obtain
 *	@lease: file_lock to use
 *
 *	Call this to establish a lease on the file.
 *	The fl_lmops fl_break function is required by break_lease
 */
int setlease(struct file *filp, long arg, struct file_lock **lease)
{
	struct dentry *dentry = filp->f_dentry;
	struct inode *inode = dentry->d_inode;
	int error;

	if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE))
		return -EACCES;
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	error = security_file_lock(filp, arg);
	if (error)
		return error;

	lock_kernel();
	error = __setlease(filp, arg, lease);
	unlock_kernel();

	return error;
}

EXPORT_SYMBOL(setlease);

/**
 *	fcntl_setlease	-	sets a lease on an open file
 *	@fd: open file descriptor
 *	@filp: file pointer
 *	@arg: type of lease to obtain
 *
 *	Call this fcntl to establish a lease on the file.
 *	Note that you also need to call %F_SETSIG to
 *	receive a signal when the lease is broken.
 */
int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
	struct file_lock fl, *flp = &fl;
	struct dentry *dentry = filp->f_dentry;
	struct inode *inode = dentry->d_inode;
	int error;

	if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE))
		return -EACCES;
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	error = security_file_lock(filp, arg);
	if (error)
		return error;

	locks_init_lock(&fl);
	error = lease_init(filp, arg, &fl);
	if (error)
		return error;

	lock_kernel();

	error = __setlease(filp, arg, &flp);
	if (error || arg == F_UNLCK)
		goto out_unlock;

	error = fasync_helper(fd, filp, 1, &flp->fl_fasync);
	if (error < 0) {
		/* remove lease just inserted by __setlease */
		flp->fl_type = F_UNLCK | F_INPROGRESS;
		flp->fl_break_time = jiffies - 10;
		time_out_leases(inode);
		goto out_unlock;
	}

	error = f_setown(filp, current->pid, 0);
out_unlock:
	unlock_kernel();
	return error;
}
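
/*
 * Userspace usage sketch (illustrative only, not part of the kernel
 * build).  A process caching file contents can take a read lease and
 * ask for a real-time signal when another opener forces a break:
 *
 *	#include <fcntl.h>
 *	#include <signal.h>
 *
 *	// fd is assumed to be an open regular file owned by the caller
 *	fcntl(fd, F_SETSIG, SIGRTMIN);		// signal delivered on break
 *	fcntl(fd, F_SETLEASE, F_RDLCK);		// take a read lease
 *	// ... on SIGRTMIN: drop caches, then release the lease
 *	// within lease_break_time seconds:
 *	fcntl(fd, F_SETLEASE, F_UNLCK);
 */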

/**
 * flock_lock_file_wait - Apply a FLOCK-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * Add a FLOCK style lock to a file.
 */
int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = flock_lock_file(filp, fl);
		if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP))
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}

EXPORT_SYMBOL(flock_lock_file_wait);

/**
 *	sys_flock - flock() system call.
 *	@fd: the file descriptor to lock.
 *	@cmd: the type of lock to apply.
 *
 *	Apply a %FL_FLOCK style lock to an open file descriptor.
 *	The @cmd can be one of
 *
 *	%LOCK_SH -- a shared lock.
 *
 *	%LOCK_EX -- an exclusive lock.
 *
 *	%LOCK_UN -- remove an existing lock.
 *
 *	%LOCK_MAND -- a `mandatory' flock.  This exists to emulate Windows Share Modes.
 *
 *	%LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
 *	processes read and write access respectively.
 */
asmlinkage long sys_flock(unsigned int fd, unsigned int cmd)
{
	struct file *filp;
	struct file_lock *lock;
	int can_sleep, unlock;
	int error;

	error = -EBADF;
	filp = fget(fd);
	if (!filp)
		goto out;

	can_sleep = !(cmd & LOCK_NB);
	cmd &= ~LOCK_NB;
	unlock = (cmd == LOCK_UN);

	if (!unlock && !(cmd & LOCK_MAND) &&
	    !(filp->f_mode & (FMODE_READ|FMODE_WRITE)))
		goto out_putf;

	error = flock_make_lock(filp, &lock, cmd);
	if (error)
		goto out_putf;
	if (can_sleep)
		lock->fl_flags |= FL_SLEEP;

	error = security_file_lock(filp, cmd);
	if (error)
		goto out_free;

	if (filp->f_op && filp->f_op->flock)
		error = filp->f_op->flock(filp,
					  (can_sleep) ? F_SETLKW : F_SETLK,
					  lock);
	else
		error = flock_lock_file_wait(filp, lock);

 out_free:
	if (list_empty(&lock->fl_link))
		locks_free_lock(lock);

 out_putf:
	fput(filp);
 out:
	return error;
}
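
/*
 * Userspace usage sketch (illustrative only, not part of the kernel
 * build):
 *
 *	#include <sys/file.h>
 *
 *	// fd is assumed to be an open file descriptor
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1) {
 *		// another flock() holder conflicts; with LOCK_NB the
 *		// -EAGAIN above surfaces as EWOULDBLOCK
 *	}
 *	// ... critical section ...
 *	flock(fd, LOCK_UN);
 */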

/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk(struct file *filp, struct flock __user *l)
{
	struct file_lock *fl, cfl, file_lock;
	struct flock flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	if (filp->f_op && filp->f_op->lock) {
		error = filp->f_op->lock(filp, F_GETLK, &file_lock);
		if (file_lock.fl_ops && file_lock.fl_ops->fl_release_private)
			file_lock.fl_ops->fl_release_private(&file_lock);
		if (error < 0)
			goto out;
		else
			fl = (file_lock.fl_type == F_UNLCK ? NULL : &file_lock);
	} else {
		fl = (posix_test_lock(filp, &file_lock, &cfl) ? &cfl : NULL);
	}

	flock.l_type = F_UNLCK;
	if (fl != NULL) {
		flock.l_pid = fl->fl_pid;
#if BITS_PER_LONG == 32
		/*
		 * Make sure we can represent the posix lock via
		 * legacy 32bit flock.
		 */
		error = -EOVERFLOW;
		if (fl->fl_start > OFFT_OFFSET_MAX)
			goto out;
		if ((fl->fl_end != OFFSET_MAX)
		    && (fl->fl_end > OFFT_OFFSET_MAX))
			goto out;
#endif
		flock.l_start = fl->fl_start;
		flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
			fl->fl_end - fl->fl_start + 1;
		flock.l_whence = 0;
		flock.l_type = fl->fl_type;
	}
	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;
out:
	return error;
}
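
/*
 * Userspace usage sketch (illustrative only): F_GETLK never takes a
 * lock; it either rewrites l_type to F_UNLCK (the lock could be placed)
 * or fills in the first conflicting lock:
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,	// the lock we would like
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,		// to end of file
 *	};
 *	// fd is assumed to be an open file descriptor
 *	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
 *		printf("pid %d holds a conflicting lock\n", fl.l_pid);
 */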

/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
		struct flock __user *l)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct flock flock;
	struct inode *inode;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	/*
	 * This might block, so we do it before checking the inode.
	 */
	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;

	inode = filp->f_dentry->d_inode;

	/* Don't allow mandatory locks on files that may be memory mapped
	 * and shared.
	 */
	if (IS_MANDLOCK(inode) &&
	    (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID &&
	    mapping_writably_mapped(filp->f_mapping)) {
		error = -EAGAIN;
		goto out;
	}

again:
	error = flock_to_posix_lock(filp, file_lock, &flock);
	if (error)
		goto out;
	if (cmd == F_SETLKW) {
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = -EBADF;
	switch (flock.l_type) {
	case F_RDLCK:
		if (!(filp->f_mode & FMODE_READ))
			goto out;
		break;
	case F_WRLCK:
		if (!(filp->f_mode & FMODE_WRITE))
			goto out;
		break;
	case F_UNLCK:
		break;
	default:
		error = -EINVAL;
		goto out;
	}

	error = security_file_lock(filp, file_lock->fl_type);
	if (error)
		goto out;

	if (filp->f_op && filp->f_op->lock != NULL)
		error = filp->f_op->lock(filp, cmd, file_lock);
	else {
		for (;;) {
			error = posix_lock_file(filp, file_lock);
			if ((error != -EAGAIN) || (cmd == F_SETLK))
				break;
			error = wait_event_interruptible(file_lock->fl_wait,
					!file_lock->fl_next);
			if (!error)
				continue;

			locks_delete_block(file_lock);
			break;
		}
	}

	/*
	 * Attempt to detect a close/fcntl race and recover by
	 * releasing the lock that was just acquired.
	 */
	if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) {
		flock.l_type = F_UNLCK;
		goto again;
	}

out:
	locks_free_lock(file_lock);
	return error;
}

#if BITS_PER_LONG == 32
/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
{
	struct file_lock *fl, cfl, file_lock;
	struct flock64 flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock64_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	if (filp->f_op && filp->f_op->lock) {
		error = filp->f_op->lock(filp, F_GETLK, &file_lock);
		if (file_lock.fl_ops && file_lock.fl_ops->fl_release_private)
			file_lock.fl_ops->fl_release_private(&file_lock);
		if (error < 0)
			goto out;
		else
			fl = (file_lock.fl_type == F_UNLCK ? NULL : &file_lock);
	} else {
		fl = (posix_test_lock(filp, &file_lock, &cfl) ? &cfl : NULL);
	}

	flock.l_type = F_UNLCK;
	if (fl != NULL) {
		flock.l_pid = fl->fl_pid;
		flock.l_start = fl->fl_start;
		flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
			fl->fl_end - fl->fl_start + 1;
		flock.l_whence = 0;
		flock.l_type = fl->fl_type;
	}
	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;

out:
	return error;
}

/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
		struct flock64 __user *l)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct flock64 flock;
	struct inode *inode;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	/*
	 * This might block, so we do it before checking the inode.
	 */
	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;

	inode = filp->f_dentry->d_inode;

	/* Don't allow mandatory locks on files that may be memory mapped
	 * and shared.
	 */
	if (IS_MANDLOCK(inode) &&
	    (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID &&
	    mapping_writably_mapped(filp->f_mapping)) {
		error = -EAGAIN;
		goto out;
	}

again:
	error = flock64_to_posix_lock(filp, file_lock, &flock);
	if (error)
		goto out;
	if (cmd == F_SETLKW64) {
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = -EBADF;
	switch (flock.l_type) {
	case F_RDLCK:
		if (!(filp->f_mode & FMODE_READ))
			goto out;
		break;
	case F_WRLCK:
		if (!(filp->f_mode & FMODE_WRITE))
			goto out;
		break;
	case F_UNLCK:
		break;
	default:
		error = -EINVAL;
		goto out;
	}

	error = security_file_lock(filp, file_lock->fl_type);
	if (error)
		goto out;

	if (filp->f_op && filp->f_op->lock != NULL)
		error = filp->f_op->lock(filp, cmd, file_lock);
	else {
		for (;;) {
			error = posix_lock_file(filp, file_lock);
			if ((error != -EAGAIN) || (cmd == F_SETLK64))
				break;
			error = wait_event_interruptible(file_lock->fl_wait,
					!file_lock->fl_next);
			if (!error)
				continue;

			locks_delete_block(file_lock);
			break;
		}
	}

	/*
	 * Attempt to detect a close/fcntl race and recover by
	 * releasing the lock that was just acquired.
	 */
	if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) {
		flock.l_type = F_UNLCK;
		goto again;
	}

out:
	locks_free_lock(file_lock);
	return error;
}
#endif /* BITS_PER_LONG == 32 */
1878 
1879 /*
1880  * This function is called when the file is being removed
1881  * from the task's fd array.  POSIX locks belonging to this task
1882  * are deleted at this time.
1883  */
1884 void locks_remove_posix(struct file *filp, fl_owner_t owner)
1885 {
1886 	struct file_lock lock, **before;
1887 
1888 	/*
1889 	 * If there are no locks held on this file, we don't need to call
1890 	 * posix_lock_file().  Another process could be setting a lock on this
1891 	 * file at the same time, but we wouldn't remove that lock anyway.
1892 	 */
1893 	before = &filp->f_dentry->d_inode->i_flock;
1894 	if (*before == NULL)
1895 		return;
1896 
1897 	lock.fl_type = F_UNLCK;
1898 	lock.fl_flags = FL_POSIX;
1899 	lock.fl_start = 0;
1900 	lock.fl_end = OFFSET_MAX;
1901 	lock.fl_owner = owner;
1902 	lock.fl_pid = current->tgid;
1903 	lock.fl_file = filp;
1904 	lock.fl_ops = NULL;
1905 	lock.fl_lmops = NULL;
1906 
1907 	if (filp->f_op && filp->f_op->lock != NULL) {
1908 		filp->f_op->lock(filp, F_SETLK, &lock);
1909 		goto out;
1910 	}
1911 
1912 	/* Can't use posix_lock_file() here; we must remove this owner's
1913 	 * locks no matter which pid created them.
1914 	 */
1915 	lock_kernel();
1916 	while (*before != NULL) {
1917 		struct file_lock *fl = *before;
1918 		if (IS_POSIX(fl) && posix_same_owner(fl, &lock)) {
1919 			locks_delete_lock(before);
1920 			continue;
1921 		}
1922 		before = &fl->fl_next;
1923 	}
1924 	unlock_kernel();
1925 out:
1926 	if (lock.fl_ops && lock.fl_ops->fl_release_private)
1927 		lock.fl_ops->fl_release_private(&lock);
1928 }
1929 
1930 EXPORT_SYMBOL(locks_remove_posix);
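
/*
 * Caller sketch (modelled on filp_close() in fs/open.c of the same
 * era; illustrative, not copied verbatim): every close() drops the
 * closing owner's POSIX locks on the file, which is why a close() on
 * any dup()'ed descriptor releases the locks for the whole process.
 *
 *	int filp_close(struct file *filp, fl_owner_t id)
 *	{
 *		...
 *		locks_remove_posix(filp, id);
 *		fput(filp);
 *		return retval;
 *	}
 */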
1931 
1932 /*
1933  * This function is called on the last close of an open file.
1934  */
1935 void locks_remove_flock(struct file *filp)
1936 {
1937 	struct inode *inode = filp->f_dentry->d_inode;
1938 	struct file_lock *fl;
1939 	struct file_lock **before;
1940 
1941 	if (!inode->i_flock)
1942 		return;
1943 
1944 	if (filp->f_op && filp->f_op->flock) {
1945 		struct file_lock fl = {
1946 			.fl_pid = current->tgid,
1947 			.fl_file = filp,
1948 			.fl_flags = FL_FLOCK,
1949 			.fl_type = F_UNLCK,
1950 			.fl_end = OFFSET_MAX,
1951 		};
1952 		filp->f_op->flock(filp, F_SETLKW, &fl);
1953 		if (fl.fl_ops && fl.fl_ops->fl_release_private)
1954 			fl.fl_ops->fl_release_private(&fl);
1955 	}
1956 
1957 	lock_kernel();
1958 	before = &inode->i_flock;
1959 
1960 	while ((fl = *before) != NULL) {
1961 		if (fl->fl_file == filp) {
1962 			if (IS_FLOCK(fl)) {
1963 				locks_delete_lock(before);
1964 				continue;
1965 			}
1966 			if (IS_LEASE(fl)) {
1967 				lease_modify(before, F_UNLCK);
1968 				continue;
1969 			}
1970 			/* Neither FLOCK nor LEASE: should never happen. */
1971 			BUG();
1972 		}
1973 		before = &fl->fl_next;
1974 	}
1975 	unlock_kernel();
1976 }
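
/*
 * Userspace sketch (illustrative): flock() locks belong to the open
 * file description, survive fork(), and are torn down by the
 * function above only on the last close of that description.
 *
 *	if (flock(fd, LOCK_EX) == -1)
 *		perror("flock");
 *	... do work ...
 *	close(fd);
 */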
1977 
1978 /**
1979  *	posix_unblock_lock - stop waiting for a file lock
1980  *	@filp: how the file was opened
1981  *	@waiter: the lock which was waiting
1982  *
1983  *	lockd needs to block waiting for locks, and to stop waiting.
1984  */
1985 int
1986 posix_unblock_lock(struct file *filp, struct file_lock *waiter)
1987 {
1988 	int status = 0;
1989 
1990 	lock_kernel();
1991 	if (waiter->fl_next)
1992 		__locks_delete_block(waiter);
1993 	else
1994 		status = -ENOENT;
1995 	unlock_kernel();
1996 	return status;
1997 }
1998 
1999 EXPORT_SYMBOL(posix_unblock_lock);
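
/*
 * Hypothetical caller sketch (in the spirit of lockd's cancel path;
 * the helper name is invented for illustration).  -ENOENT means the
 * waiter is no longer blocked, usually because the lock was granted
 * in the meantime, so the caller must undo the grant instead:
 *
 *	static void cancel_blocked_wait(struct file *filp,
 *					struct file_lock *waiter)
 *	{
 *		if (posix_unblock_lock(filp, waiter) == -ENOENT) {
 *			waiter->fl_type = F_UNLCK;
 *			posix_lock_file(filp, waiter);
 *		}
 *	}
 */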
2000 
2001 static void lock_get_status(char *out, struct file_lock *fl, int id, char *pfx)
2002 {
2003 	struct inode *inode = NULL;
2004 
2005 	if (fl->fl_file != NULL)
2006 		inode = fl->fl_file->f_dentry->d_inode;
2007 
2008 	out += sprintf(out, "%d:%s ", id, pfx);
2009 	if (IS_POSIX(fl)) {
2010 		out += sprintf(out, "%6s %s ",
2011 			     (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
2012 			     (inode == NULL) ? "*NOINODE*" :
2013 			     (IS_MANDLOCK(inode) &&
2014 			      (inode->i_mode & (S_IXGRP | S_ISGID)) == S_ISGID) ?
2015 			     "MANDATORY" : "ADVISORY ");
2016 	} else if (IS_FLOCK(fl)) {
2017 		if (fl->fl_type & LOCK_MAND) {
2018 			out += sprintf(out, "FLOCK  MSNFS     ");
2019 		} else {
2020 			out += sprintf(out, "FLOCK  ADVISORY  ");
2021 		}
2022 	} else if (IS_LEASE(fl)) {
2023 		out += sprintf(out, "LEASE  ");
2024 		if (fl->fl_type & F_INPROGRESS)
2025 			out += sprintf(out, "BREAKING  ");
2026 		else if (fl->fl_file)
2027 			out += sprintf(out, "ACTIVE    ");
2028 		else
2029 			out += sprintf(out, "BREAKER   ");
2030 	} else {
2031 		out += sprintf(out, "UNKNOWN UNKNOWN  ");
2032 	}
2033 	if (fl->fl_type & LOCK_MAND) {
2034 		out += sprintf(out, "%s ",
2035 			       (fl->fl_type & LOCK_READ)
2036 			       ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
2037 			       : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
2038 	} else {
2039 		out += sprintf(out, "%s ",
2040 			       (fl->fl_type & F_INPROGRESS)
2041 			       ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ "
2042 			       : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ ");
2043 	}
2044 	if (inode) {
2045 #ifdef WE_CAN_BREAK_LSLK_NOW
2046 		out += sprintf(out, "%d %s:%ld ", fl->fl_pid,
2047 				inode->i_sb->s_id, inode->i_ino);
2048 #else
2049 		/* userspace relies on this representation of dev_t ;-( */
2050 		out += sprintf(out, "%d %02x:%02x:%ld ", fl->fl_pid,
2051 				MAJOR(inode->i_sb->s_dev),
2052 				MINOR(inode->i_sb->s_dev), inode->i_ino);
2053 #endif
2054 	} else {
2055 		out += sprintf(out, "%d <none>:0 ", fl->fl_pid);
2056 	}
2057 	if (IS_POSIX(fl)) {
2058 		if (fl->fl_end == OFFSET_MAX)
2059 			out += sprintf(out, "%Ld EOF\n", fl->fl_start);
2060 		else
2061 			out += sprintf(out, "%Ld %Ld\n", fl->fl_start,
2062 					fl->fl_end);
2063 	} else {
2064 		out += sprintf(out, "0 EOF\n");
2065 	}
2066 }
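
/*
 * Example /proc/locks lines as produced above (values illustrative;
 * the "->" prefix is passed in by the caller for waiters blocked on
 * the preceding lock):
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:01:5678 0 EOF
 *	1: -> POSIX  ADVISORY  WRITE 1235 08:01:5678 0 EOF
 *	2: FLOCK  ADVISORY  WRITE 5678 08:01:9012 0 EOF
 */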
2067 
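/*
 * Advance the output pointer and running position for one freshly
 * formatted line: keep the whole line, keep only its tail, or drop
 * it entirely, depending on how far the requested read offset
 * reaches into the text generated so far.
 */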
2068 static void move_lock_status(char **p, off_t *pos, off_t offset)
2069 {
2070 	int len = strlen(*p);
2071 
2072 	if (*pos >= offset) {
2073 		/* the complete line is valid */
2074 		*p += len;
2075 		*pos += len;
2076 		return;
2077 	}
2078 	if (*pos + len > offset) {
2079 		/* use the second part of the line */
2080 		int i = offset - *pos;
2081 		memmove(*p, *p + i, len - i);
2082 		*p += len - i;
2083 		*pos += len;
2084 		return;
2085 	}
2086 	/* discard the complete line */
2087 	*pos += len;
2088 }
2089 
2090 /**
2091  *	get_locks_status	-	reports lock usage in /proc/locks
2092  *	@buffer: kernel buffer that the lock descriptions are written into
2093  *	@start: set to @buffer, where the returned data starts
2094  *	@offset: how far we are through the buffer
2095  *	@length: how much to read
2096  */
2097 
2098 int get_locks_status(char *buffer, char **start, off_t offset, int length)
2099 {
2100 	struct list_head *tmp;
2101 	char *q = buffer;
2102 	off_t pos = 0;
2103 	int i = 0;
2104 
2105 	lock_kernel();
2106 	list_for_each(tmp, &file_lock_list) {
2107 		struct list_head *btmp;
2108 		struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link);
2109 		lock_get_status(q, fl, ++i, "");
2110 		move_lock_status(&q, &pos, offset);
2111 
2112 		if (pos >= offset + length)
2113 			goto done;
2114 
2115 		list_for_each(btmp, &fl->fl_block) {
2116 			struct file_lock *bfl = list_entry(btmp,
2117 					struct file_lock, fl_block);
2118 			lock_get_status(q, bfl, i, " ->");
2119 			move_lock_status(&q, &pos, offset);
2120 
2121 			if (pos >= offset + length)
2122 				goto done;
2123 		}
2124 	}
2125 done:
2126 	unlock_kernel();
2127 	*start = buffer;
2128 	if (q - buffer < length)
2129 		return q - buffer;
2130 	return length;
2131 }
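
/*
 * Registration sketch (cf. fs/proc of the same era; illustrative):
 * the function above follows the get_info convention, so the
 * /proc/locks entry is created with something like
 *
 *	create_proc_info_entry("locks", 0, NULL, get_locks_status);
 *
 * after which "cat /proc/locks" calls it repeatedly with a growing
 * @offset until fewer than @length bytes come back.
 */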
2132 
2133 /**
2134  *	lock_may_read - checks that the region is free of locks
2135  *	@inode: the inode that is being read
2136  *	@start: the first byte to read
2137  *	@len: the number of bytes to read
2138  *
2139  *	Emulates Windows locking requirements.  Whole-file
2140  *	mandatory locks (share modes) can prohibit a read and
2141  *	byte-range POSIX locks can prohibit a read if they overlap.
2142  *
2143  *	N.B. this function is only ever called
2144  *	from knfsd and ownership of locks is never checked.
2145  */
2146 int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
2147 {
2148 	struct file_lock *fl;
2149 	int result = 1;
2150 	lock_kernel();
2151 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2152 		if (IS_POSIX(fl)) {
2153 			if (fl->fl_type == F_RDLCK)
2154 				continue;
2155 			if (fl->fl_end < start || fl->fl_start >= start + len)
2156 				continue;
2157 		} else if (IS_FLOCK(fl)) {
2158 			if (!(fl->fl_type & LOCK_MAND))
2159 				continue;
2160 			if (fl->fl_type & LOCK_READ)
2161 				continue;
2162 		} else
2163 			continue;
2164 		result = 0;
2165 		break;
2166 	}
2167 	unlock_kernel();
2168 	return result;
2169 }
2170 
2171 EXPORT_SYMBOL(lock_may_read);
2172 
2173 /**
2174  *	lock_may_write - checks that the region is free of locks
2175  *	@inode: the inode that is being written
2176  *	@start: the first byte to write
2177  *	@len: the number of bytes to write
2178  *
2179  *	Emulates Windows locking requirements.  Whole-file
2180  *	mandatory locks (share modes) can prohibit a write and
2181  *	byte-range POSIX locks can prohibit a write if they overlap.
2182  *
2183  *	N.B. this function is only ever called
2184  *	from knfsd and ownership of locks is never checked.
2185  */
2186 int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
2187 {
2188 	struct file_lock *fl;
2189 	int result = 1;
2190 	lock_kernel();
2191 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2192 		if (IS_POSIX(fl)) {
2193 			if (fl->fl_end < start || fl->fl_start >= start + len)
2194 				continue;
2195 		} else if (IS_FLOCK(fl)) {
2196 			if (!(fl->fl_type & LOCK_MAND))
2197 				continue;
2198 			if (fl->fl_type & LOCK_WRITE)
2199 				continue;
2200 		} else
2201 			continue;
2202 		result = 0;
2203 		break;
2204 	}
2205 	unlock_kernel();
2206 	return result;
2207 }
2208 
2209 EXPORT_SYMBOL(lock_may_write);
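
/*
 * Hypothetical caller sketch in the spirit of knfsd (the helper name
 * is invented for illustration): both checks return 1 when the I/O
 * may proceed and 0 when a conflicting mandatory or share-mode lock
 * covers the range.
 *
 *	static int nfsd_io_permitted(struct inode *inode, loff_t offset,
 *				     unsigned long count, int writing)
 *	{
 *		return writing ? lock_may_write(inode, offset, count)
 *			       : lock_may_read(inode, offset, count);
 *	}
 */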
2210 
2211 static inline void __steal_locks(struct file *file, fl_owner_t from)
2212 {
2213 	struct inode *inode = file->f_dentry->d_inode;
2214 	struct file_lock *fl = inode->i_flock;
2215 
2216 	while (fl) {
2217 		if (fl->fl_file == file && fl->fl_owner == from)
2218 			fl->fl_owner = current->files;
2219 		fl = fl->fl_next;
2220 	}
2221 }
2222 
2223 /* When getting ready for executing a binary, we make sure that current
2224  * has a files_struct on its own. Before dropping the old files_struct,
2225  * we take over ownership of all locks for all file descriptors we own.
2226  * Note that we may accidentally steal a lock for a file that a sibling
2227  * has created since the unshare_files() call.
2228  */
2229 void steal_locks(fl_owner_t from)
2230 {
2231 	struct files_struct *files = current->files;
2232 	int i, j;
2233 	struct fdtable *fdt;
2234 
2235 	if (from == files)
2236 		return;
2237 
2238 	lock_kernel();
2239 	j = 0;
2240 	rcu_read_lock();
2241 	fdt = files_fdtable(files);
2242 	for (;;) {
2243 		unsigned long set;
2244 		i = j * __NFDBITS;
2245 		if (i >= fdt->max_fdset || i >= fdt->max_fds)
2246 			break;
2247 		set = fdt->open_fds->fds_bits[j++];
2248 		while (set) {
2249 			if (set & 1) {
2250 				struct file *file = fdt->fd[i];
2251 				if (file)
2252 					__steal_locks(file, from);
2253 			}
2254 			i++;
2255 			set >>= 1;
2256 		}
2257 	}
2258 	rcu_read_unlock();
2259 	unlock_kernel();
2260 }
2261 EXPORT_SYMBOL(steal_locks);
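
/*
 * Caller sketch (modelled on the exec path in fs/exec.c of the same
 * era; illustrative): the old files_struct is saved across
 * unshare_files() so that lock ownership can be taken over by the
 * new one.
 *
 *	struct files_struct *files = current->files;
 *
 *	retval = unshare_files();
 *	if (retval)
 *		goto out;
 *	...
 *	steal_locks(files);
 */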
2262 
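/*
 * Boot-time setup: the cache below feeds locks_alloc_lock() and
 * locks_free_lock().  SLAB_PANIC turns an allocation failure into a
 * panic at boot rather than an error return, so the result needs no
 * checking here.
 */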
2263 static int __init filelock_init(void)
2264 {
2265 	filelock_cache = kmem_cache_create("file_lock_cache",
2266 			sizeof(struct file_lock), 0, SLAB_PANIC,
2267 			init_once, NULL);
2268 	return 0;
2269 }
2270 
2271 core_initcall(filelock_init);
2272