xref: /linux/fs/locks.c (revision 704bf317fd21683e5c71a542f5fb5f65271a1582)
1 /*
2  *  linux/fs/locks.c
3  *
4  *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
5  *  Doug Evans (dje@spiff.uucp), August 07, 1992
6  *
7  *  Deadlock detection added.
8  *  FIXME: one thing isn't handled yet:
9  *	- mandatory locks (requires lots of changes elsewhere)
10  *  Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
11  *
12  *  Miscellaneous edits, and a total rewrite of posix_lock_file() code.
13  *  Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
14  *
15  *  Converted file_lock_table to a linked list from an array, which eliminates
16  *  the limits on how many active file locks are open.
17  *  Chad Page (pageone@netcom.com), November 27, 1994
18  *
19  *  Removed dependency on file descriptors. dup()'ed file descriptors now
20  *  get the same locks as the original file descriptors, and a close() on
21  *  any file descriptor removes ALL the locks on the file for the current
22  *  process. Since locks still depend on the process id, locks are inherited
23  *  after an exec() but not after a fork(). This agrees with POSIX, and both
24  *  BSD and SVR4 practice.
25  *  Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
26  *
27  *  Scrapped free list which is redundant now that we allocate locks
28  *  dynamically with kmalloc()/kfree().
29  *  Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
30  *
31  *  Implemented two lock personalities - FL_FLOCK and FL_POSIX.
32  *
33  *  FL_POSIX locks are created with calls to fcntl() and lockf() through the
34  *  fcntl() system call. They have the semantics described above.
35  *
36  *  FL_FLOCK locks are created with calls to flock(), through the flock()
37  *  system call, which is new. Old C libraries implement flock() via fcntl()
38  *  and will continue to use the old, broken implementation.
39  *
40  *  FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
41  *  with a file pointer (filp). As a result they can be shared by a parent
42  *  process and its children after a fork(). They are removed when the last
43  *  file descriptor referring to the file pointer is closed (unless explicitly
44  *  unlocked).
45  *
46  *  FL_FLOCK locks never deadlock; an existing lock is always removed before
47  *  upgrading from shared to exclusive (or vice versa). When this happens
48  *  any processes blocked by the current lock are woken up and allowed to
49  *  run before the new lock is applied.
50  *  Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
51  *
52  *  Removed some race conditions in flock_lock_file(), marked other possible
53  *  races. Just grep for FIXME to see them.
54  *  Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
55  *
56  *  Addressed Dmitry's concerns. Deadlock checking no longer recursive.
57  *  Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
58  *  once we've checked for blocking and deadlocking.
59  *  Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
60  *
61  *  Initial implementation of mandatory locks. SunOS turned out to be
62  *  a rotten model, so I implemented the "obvious" semantics.
63  *  See 'Documentation/mandatory.txt' for details.
64  *  Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
65  *
66  *  Don't allow mandatory locks on mmap()'ed files. Added simple functions to
67  *  check if a file has mandatory locks, used by mmap(), open() and creat() to
68  *  see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
69  *  Manual, Section 2.
70  *  Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
71  *
72  *  Tidied up block list handling. Added '/proc/locks' interface.
73  *  Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
74  *
75  *  Fixed deadlock condition for pathological code that mixes calls to
76  *  flock() and fcntl().
77  *  Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
78  *
79  *  Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
80  *  for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
81  *  guarantee sensible behaviour in the case where file system modules might
82  *  be compiled with different options than the kernel itself.
83  *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
84  *
85  *  Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
86  *  (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
87  *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
88  *
89  *  Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
90  *  locks. Changed process synchronisation to avoid dereferencing locks that
91  *  have already been freed.
92  *  Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
93  *
94  *  Made the block list a circular list to minimise searching in the list.
95  *  Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
96  *
97  *  Made mandatory locking a mount option. Default is not to allow mandatory
98  *  locking.
99  *  Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
100  *
101  *  Some adaptations for NFS support.
102  *  Olaf Kirch (okir@monad.swb.de), Dec 1996,
103  *
104  *  Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
105  *  Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
106  *
107  *  Use slab allocator instead of kmalloc/kfree.
108  *  Use generic list implementation from <linux/list.h>.
109  *  Sped up posix_locks_deadlock by only considering blocked locks.
110  *  Matthew Wilcox <willy@debian.org>, March, 2000.
111  *
112  *  Leases and LOCK_MAND
113  *  Matthew Wilcox <willy@debian.org>, June, 2000.
114  *  Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
115  */
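/*
 *  Illustrative sketch (userspace, not kernel code): what the two lock
 *  personalities described above look like from the application side.  The
 *  file name is hypothetical and error handling is omitted; the snippet is
 *  kept inside "#if 0" so it is never built as part of this file.
 */
#if 0
#include <fcntl.h>
#include <sys/file.h>
#include <unistd.h>

static void lock_styles_example(void)
{
	struct flock fl = {
		.l_type   = F_WRLCK,		/* exclusive byte-range lock */
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 0,			/* 0 means "to end of file" */
	};
	int fd = open("/tmp/example", O_RDWR);	/* hypothetical file */

	/* FL_POSIX: created via fcntl()/lockf(), owned by the process */
	fcntl(fd, F_SETLK, &fl);

	/* FL_FLOCK: created via flock(), attached to the open file (filp) */
	flock(fd, LOCK_EX);

	/*
	 * Closing any descriptor for the file drops this process's POSIX
	 * locks; the flock() lock lasts until the last reference to the
	 * open file goes away (here, this same close) or it is unlocked.
	 */
	close(fd);
}
#endif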
116 
117 #include <linux/capability.h>
118 #include <linux/file.h>
119 #include <linux/fdtable.h>
120 #include <linux/fs.h>
121 #include <linux/init.h>
122 #include <linux/module.h>
123 #include <linux/security.h>
124 #include <linux/slab.h>
125 #include <linux/syscalls.h>
126 #include <linux/time.h>
127 #include <linux/rcupdate.h>
128 #include <linux/pid_namespace.h>
129 
130 #include <asm/uaccess.h>
131 
132 #define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
133 #define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
134 #define IS_LEASE(fl)	(fl->fl_flags & FL_LEASE)
135 
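/*
 * Both tunables below are exposed as sysctls, /proc/sys/fs/leases-enable
 * and /proc/sys/fs/lease-break-time; the break time is in seconds (it is
 * multiplied by HZ before use further down).
 */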
136 int leases_enable = 1;
137 int lease_break_time = 45;
138 
139 #define for_each_lock(inode, lockp) \
140 	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)
141 
142 static LIST_HEAD(file_lock_list);
143 static LIST_HEAD(blocked_list);
144 static DEFINE_SPINLOCK(file_lock_lock);
145 
146 /*
147  * Protects the two list heads above, plus the inode->i_flock list,
148  * using the file_lock_lock spinlock defined above.
149  */
150 void lock_flocks(void)
151 {
152 	spin_lock(&file_lock_lock);
153 }
154 EXPORT_SYMBOL_GPL(lock_flocks);
155 
156 void unlock_flocks(void)
157 {
158 	spin_unlock(&file_lock_lock);
159 }
160 EXPORT_SYMBOL_GPL(unlock_flocks);
161 
162 static struct kmem_cache *filelock_cache __read_mostly;
163 
164 /* Allocate an empty lock structure. */
165 struct file_lock *locks_alloc_lock(void)
166 {
167 	return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
168 }
169 EXPORT_SYMBOL_GPL(locks_alloc_lock);
170 
171 void locks_release_private(struct file_lock *fl)
172 {
173 	if (fl->fl_ops) {
174 		if (fl->fl_ops->fl_release_private)
175 			fl->fl_ops->fl_release_private(fl);
176 		fl->fl_ops = NULL;
177 	}
178 	if (fl->fl_lmops) {
179 		if (fl->fl_lmops->fl_release_private)
180 			fl->fl_lmops->fl_release_private(fl);
181 		fl->fl_lmops = NULL;
182 	}
183 
184 }
185 EXPORT_SYMBOL_GPL(locks_release_private);
186 
187 /* Free a lock which is not in use. */
188 void locks_free_lock(struct file_lock *fl)
189 {
190 	BUG_ON(waitqueue_active(&fl->fl_wait));
191 	BUG_ON(!list_empty(&fl->fl_block));
192 	BUG_ON(!list_empty(&fl->fl_link));
193 
194 	locks_release_private(fl);
195 	kmem_cache_free(filelock_cache, fl);
196 }
197 EXPORT_SYMBOL(locks_free_lock);
198 
199 void locks_init_lock(struct file_lock *fl)
200 {
201 	INIT_LIST_HEAD(&fl->fl_link);
202 	INIT_LIST_HEAD(&fl->fl_block);
203 	init_waitqueue_head(&fl->fl_wait);
204 	fl->fl_next = NULL;
205 	fl->fl_fasync = NULL;
206 	fl->fl_owner = NULL;
207 	fl->fl_pid = 0;
208 	fl->fl_nspid = NULL;
209 	fl->fl_file = NULL;
210 	fl->fl_flags = 0;
211 	fl->fl_type = 0;
212 	fl->fl_start = fl->fl_end = 0;
213 	fl->fl_ops = NULL;
214 	fl->fl_lmops = NULL;
215 }
216 
217 EXPORT_SYMBOL(locks_init_lock);
218 
219 /*
220  * Initialises the fields of the file lock which are invariant for
221  * free file_locks.
222  */
223 static void init_once(void *foo)
224 {
225 	struct file_lock *lock = (struct file_lock *) foo;
226 
227 	locks_init_lock(lock);
228 }
229 
230 static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
231 {
232 	if (fl->fl_ops) {
233 		if (fl->fl_ops->fl_copy_lock)
234 			fl->fl_ops->fl_copy_lock(new, fl);
235 		new->fl_ops = fl->fl_ops;
236 	}
237 	if (fl->fl_lmops)
238 		new->fl_lmops = fl->fl_lmops;
239 }
240 
241 /*
242  * Initialize a new lock from an existing file_lock structure.
243  */
244 void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
245 {
246 	new->fl_owner = fl->fl_owner;
247 	new->fl_pid = fl->fl_pid;
248 	new->fl_file = NULL;
249 	new->fl_flags = fl->fl_flags;
250 	new->fl_type = fl->fl_type;
251 	new->fl_start = fl->fl_start;
252 	new->fl_end = fl->fl_end;
253 	new->fl_ops = NULL;
254 	new->fl_lmops = NULL;
255 }
256 EXPORT_SYMBOL(__locks_copy_lock);
257 
258 void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
259 {
260 	locks_release_private(new);
261 
262 	__locks_copy_lock(new, fl);
263 	new->fl_file = fl->fl_file;
264 	new->fl_ops = fl->fl_ops;
265 	new->fl_lmops = fl->fl_lmops;
266 
267 	locks_copy_private(new, fl);
268 }
269 
270 EXPORT_SYMBOL(locks_copy_lock);
271 
272 static inline int flock_translate_cmd(int cmd) {
273 	if (cmd & LOCK_MAND)
274 		return cmd & (LOCK_MAND | LOCK_RW);
275 	switch (cmd) {
276 	case LOCK_SH:
277 		return F_RDLCK;
278 	case LOCK_EX:
279 		return F_WRLCK;
280 	case LOCK_UN:
281 		return F_UNLCK;
282 	}
283 	return -EINVAL;
284 }
285 
286 /* Fill in a file_lock structure with an appropriate FLOCK lock. */
287 static int flock_make_lock(struct file *filp, struct file_lock **lock,
288 		unsigned int cmd)
289 {
290 	struct file_lock *fl;
291 	int type = flock_translate_cmd(cmd);
292 	if (type < 0)
293 		return type;
294 
295 	fl = locks_alloc_lock();
296 	if (fl == NULL)
297 		return -ENOMEM;
298 
299 	fl->fl_file = filp;
300 	fl->fl_pid = current->tgid;
301 	fl->fl_flags = FL_FLOCK;
302 	fl->fl_type = type;
303 	fl->fl_end = OFFSET_MAX;
304 
305 	*lock = fl;
306 	return 0;
307 }
308 
309 static int assign_type(struct file_lock *fl, int type)
310 {
311 	switch (type) {
312 	case F_RDLCK:
313 	case F_WRLCK:
314 	case F_UNLCK:
315 		fl->fl_type = type;
316 		break;
317 	default:
318 		return -EINVAL;
319 	}
320 	return 0;
321 }
322 
323 /* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
324  * style lock.
325  */
326 static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
327 			       struct flock *l)
328 {
329 	off_t start, end;
330 
331 	switch (l->l_whence) {
332 	case SEEK_SET:
333 		start = 0;
334 		break;
335 	case SEEK_CUR:
336 		start = filp->f_pos;
337 		break;
338 	case SEEK_END:
339 		start = i_size_read(filp->f_path.dentry->d_inode);
340 		break;
341 	default:
342 		return -EINVAL;
343 	}
344 
345 	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
346 	   POSIX-2001 defines it. */
347 	start += l->l_start;
348 	if (start < 0)
349 		return -EINVAL;
350 	fl->fl_end = OFFSET_MAX;
351 	if (l->l_len > 0) {
352 		end = start + l->l_len - 1;
353 		fl->fl_end = end;
354 	} else if (l->l_len < 0) {
355 		end = start - 1;
356 		fl->fl_end = end;
357 		start += l->l_len;
358 		if (start < 0)
359 			return -EINVAL;
360 	}
361 	fl->fl_start = start;	/* we record the absolute position */
362 	if (fl->fl_end < fl->fl_start)
363 		return -EOVERFLOW;
364 
365 	fl->fl_owner = current->files;
366 	fl->fl_pid = current->tgid;
367 	fl->fl_file = filp;
368 	fl->fl_flags = FL_POSIX;
369 	fl->fl_ops = NULL;
370 	fl->fl_lmops = NULL;
371 
372 	return assign_type(fl, l->l_type);
373 }
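/*
 * Worked example for the conversion above: with l_whence = SEEK_SET,
 * l_start = 100 and l_len = -10, the negative length selects the ten bytes
 * *before* l_start, giving fl_start = 90 and fl_end = 99.  With l_len = 10
 * instead, fl_start = 100 and fl_end = 109; with l_len = 0 the lock runs
 * from l_start to OFFSET_MAX (end of file).
 */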
374 
375 #if BITS_PER_LONG == 32
376 static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
377 				 struct flock64 *l)
378 {
379 	loff_t start;
380 
381 	switch (l->l_whence) {
382 	case SEEK_SET:
383 		start = 0;
384 		break;
385 	case SEEK_CUR:
386 		start = filp->f_pos;
387 		break;
388 	case SEEK_END:
389 		start = i_size_read(filp->f_path.dentry->d_inode);
390 		break;
391 	default:
392 		return -EINVAL;
393 	}
394 
395 	start += l->l_start;
396 	if (start < 0)
397 		return -EINVAL;
398 	fl->fl_end = OFFSET_MAX;
399 	if (l->l_len > 0) {
400 		fl->fl_end = start + l->l_len - 1;
401 	} else if (l->l_len < 0) {
402 		fl->fl_end = start - 1;
403 		start += l->l_len;
404 		if (start < 0)
405 			return -EINVAL;
406 	}
407 	fl->fl_start = start;	/* we record the absolute position */
408 	if (fl->fl_end < fl->fl_start)
409 		return -EOVERFLOW;
410 
411 	fl->fl_owner = current->files;
412 	fl->fl_pid = current->tgid;
413 	fl->fl_file = filp;
414 	fl->fl_flags = FL_POSIX;
415 	fl->fl_ops = NULL;
416 	fl->fl_lmops = NULL;
417 
418 	switch (l->l_type) {
419 	case F_RDLCK:
420 	case F_WRLCK:
421 	case F_UNLCK:
422 		fl->fl_type = l->l_type;
423 		break;
424 	default:
425 		return -EINVAL;
426 	}
427 
428 	return (0);
429 }
430 #endif
431 
432 /* default lease lock manager operations */
433 static void lease_break_callback(struct file_lock *fl)
434 {
435 	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
436 }
437 
438 static void lease_release_private_callback(struct file_lock *fl)
439 {
440 	if (!fl->fl_file)
441 		return;
442 
443 	f_delown(fl->fl_file);
444 	fl->fl_file->f_owner.signum = 0;
445 }
446 
447 static int lease_mylease_callback(struct file_lock *fl, struct file_lock *try)
448 {
449 	return fl->fl_file == try->fl_file;
450 }
451 
452 static const struct lock_manager_operations lease_manager_ops = {
453 	.fl_break = lease_break_callback,
454 	.fl_release_private = lease_release_private_callback,
455 	.fl_mylease = lease_mylease_callback,
456 	.fl_change = lease_modify,
457 };
458 
459 /*
460  * Initialize a lease, use the default lock manager operations
461  */
462 static int lease_init(struct file *filp, int type, struct file_lock *fl)
463  {
464 	if (assign_type(fl, type) != 0)
465 		return -EINVAL;
466 
467 	fl->fl_owner = current->files;
468 	fl->fl_pid = current->tgid;
469 
470 	fl->fl_file = filp;
471 	fl->fl_flags = FL_LEASE;
472 	fl->fl_start = 0;
473 	fl->fl_end = OFFSET_MAX;
474 	fl->fl_ops = NULL;
475 	fl->fl_lmops = &lease_manager_ops;
476 	return 0;
477 }
478 
479 /* Allocate a file_lock initialised to this type of lease */
480 static struct file_lock *lease_alloc(struct file *filp, int type)
481 {
482 	struct file_lock *fl = locks_alloc_lock();
483 	int error = -ENOMEM;
484 
485 	if (fl == NULL)
486 		return ERR_PTR(error);
487 
488 	error = lease_init(filp, type, fl);
489 	if (error) {
490 		locks_free_lock(fl);
491 		return ERR_PTR(error);
492 	}
493 	return fl;
494 }
495 
496 /* Check if two locks overlap each other.
497  */
498 static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
499 {
500 	return ((fl1->fl_end >= fl2->fl_start) &&
501 		(fl2->fl_end >= fl1->fl_start));
502 }
503 
504 /*
505  * Check whether two locks have the same owner.
506  */
507 static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
508 {
509 	if (fl1->fl_lmops && fl1->fl_lmops->fl_compare_owner)
510 		return fl2->fl_lmops == fl1->fl_lmops &&
511 			fl1->fl_lmops->fl_compare_owner(fl1, fl2);
512 	return fl1->fl_owner == fl2->fl_owner;
513 }
514 
515 /* Remove waiter from blocker's block list.
516  * When blocker ends up pointing to itself then the list is empty.
517  */
518 static void __locks_delete_block(struct file_lock *waiter)
519 {
520 	list_del_init(&waiter->fl_block);
521 	list_del_init(&waiter->fl_link);
522 	waiter->fl_next = NULL;
523 }
524 
525 /* Same as __locks_delete_block() but takes the file_lock_lock itself.
526  */
527 static void locks_delete_block(struct file_lock *waiter)
528 {
529 	lock_flocks();
530 	__locks_delete_block(waiter);
531 	unlock_flocks();
532 }
533 
534 /* Insert waiter into blocker's block list.
535  * We use a circular list so that processes can be easily woken up in
536  * the order they blocked. The documentation doesn't require this but
537  * it seems like the reasonable thing to do.
538  */
539 static void locks_insert_block(struct file_lock *blocker,
540 			       struct file_lock *waiter)
541 {
542 	BUG_ON(!list_empty(&waiter->fl_block));
543 	list_add_tail(&waiter->fl_block, &blocker->fl_block);
544 	waiter->fl_next = blocker;
545 	if (IS_POSIX(blocker))
546 		list_add(&waiter->fl_link, &blocked_list);
547 }
548 
549 /* Wake up processes blocked waiting for blocker.
550  * If told to wait then schedule the processes until the block list
551  * is empty, otherwise empty the block list ourselves.
552  */
553 static void locks_wake_up_blocks(struct file_lock *blocker)
554 {
555 	while (!list_empty(&blocker->fl_block)) {
556 		struct file_lock *waiter;
557 
558 		waiter = list_first_entry(&blocker->fl_block,
559 				struct file_lock, fl_block);
560 		__locks_delete_block(waiter);
561 		if (waiter->fl_lmops && waiter->fl_lmops->fl_notify)
562 			waiter->fl_lmops->fl_notify(waiter);
563 		else
564 			wake_up(&waiter->fl_wait);
565 	}
566 }
567 
568 /* Insert file lock fl into an inode's lock list at the position indicated
569  * by pos. At the same time add the lock to the global file lock list.
570  */
571 static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
572 {
573 	list_add(&fl->fl_link, &file_lock_list);
574 
575 	fl->fl_nspid = get_pid(task_tgid(current));
576 
577 	/* insert into file's list */
578 	fl->fl_next = *pos;
579 	*pos = fl;
580 }
581 
582 /*
583  * Delete a lock and then free it.
584  * Wake up processes that are blocked waiting for this lock,
585  * notify the FS that the lock has been cleared and
586  * finally free the lock.
587  */
588 static void locks_delete_lock(struct file_lock **thisfl_p)
589 {
590 	struct file_lock *fl = *thisfl_p;
591 
592 	*thisfl_p = fl->fl_next;
593 	fl->fl_next = NULL;
594 	list_del_init(&fl->fl_link);
595 
596 	fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
597 	if (fl->fl_fasync != NULL) {
598 		printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
599 		fl->fl_fasync = NULL;
600 	}
601 
602 	if (fl->fl_nspid) {
603 		put_pid(fl->fl_nspid);
604 		fl->fl_nspid = NULL;
605 	}
606 
607 	locks_wake_up_blocks(fl);
608 	locks_free_lock(fl);
609 }
610 
611 /* Determine if lock sys_fl blocks lock caller_fl. Common functionality
612  * checks for shared/exclusive status of overlapping locks.
613  */
614 static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
615 {
616 	if (sys_fl->fl_type == F_WRLCK)
617 		return 1;
618 	if (caller_fl->fl_type == F_WRLCK)
619 		return 1;
620 	return 0;
621 }
622 
623 /* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
624  * checking before calling the locks_conflict().
625  */
626 static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
627 {
628 	/* POSIX locks owned by the same process do not conflict with
629 	 * each other.
630 	 */
631 	if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl))
632 		return (0);
633 
634 	/* Check whether they overlap */
635 	if (!locks_overlap(caller_fl, sys_fl))
636 		return 0;
637 
638 	return (locks_conflict(caller_fl, sys_fl));
639 }
640 
641 /* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
642  * checking before calling the locks_conflict().
643  */
644 static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
645 {
646 	/* FLOCK locks referring to the same filp do not conflict with
647 	 * each other.
648 	 */
649 	if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
650 		return (0);
651 	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
652 		return 0;
653 
654 	return (locks_conflict(caller_fl, sys_fl));
655 }
656 
657 void
658 posix_test_lock(struct file *filp, struct file_lock *fl)
659 {
660 	struct file_lock *cfl;
661 
662 	lock_flocks();
663 	for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
664 		if (!IS_POSIX(cfl))
665 			continue;
666 		if (posix_locks_conflict(fl, cfl))
667 			break;
668 	}
669 	if (cfl) {
670 		__locks_copy_lock(fl, cfl);
671 		if (cfl->fl_nspid)
672 			fl->fl_pid = pid_vnr(cfl->fl_nspid);
673 	} else
674 		fl->fl_type = F_UNLCK;
675 	unlock_flocks();
676 	return;
677 }
678 EXPORT_SYMBOL(posix_test_lock);
679 
680 /*
681  * Deadlock detection:
682  *
683  * We attempt to detect deadlocks that are due purely to posix file
684  * locks.
685  *
686  * We assume that a task can be waiting for at most one lock at a time.
687  * So for any acquired lock, the process holding that lock may be
688  *  waiting on at most one other lock.  That lock in turn may be held by
689  * someone waiting for at most one other lock.  Given a requested lock
690  * caller_fl which is about to wait for a conflicting lock block_fl, we
691  * follow this chain of waiters to ensure we are not about to create a
692  * cycle.
693  *
694  * Since we do this before we ever put a process to sleep on a lock, we
695  *  are assured that there is never a cycle; that is what guarantees that
696  * the while() loop in posix_locks_deadlock() eventually completes.
697  *
698  * Note: the above assumption may not be true when handling lock
699  * requests from a broken NFS client. It may also fail in the presence
700  * of tasks (such as posix threads) sharing the same open file table.
701  *
702  * To handle those cases, we just bail out after a few iterations.
703  */
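/*
 * Concrete example: task A holds a write lock on bytes [0,9] and is blocked
 * waiting for task B's lock on [10,19].  If B now requests a lock over
 * [0,9], following the chain of waiters leads from A's held lock back to B
 * itself, so the request fails with -EDEADLK instead of blocking forever.
 */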
704 
705 #define MAX_DEADLK_ITERATIONS 10
706 
707 /* Find a lock that the owner of the given block_fl is blocking on. */
708 static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
709 {
710 	struct file_lock *fl;
711 
712 	list_for_each_entry(fl, &blocked_list, fl_link) {
713 		if (posix_same_owner(fl, block_fl))
714 			return fl->fl_next;
715 	}
716 	return NULL;
717 }
718 
719 static int posix_locks_deadlock(struct file_lock *caller_fl,
720 				struct file_lock *block_fl)
721 {
722 	int i = 0;
723 
724 	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
725 		if (i++ > MAX_DEADLK_ITERATIONS)
726 			return 0;
727 		if (posix_same_owner(caller_fl, block_fl))
728 			return 1;
729 	}
730 	return 0;
731 }
732 
733 /* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
734  * after any leases, but before any posix locks.
735  *
736  * Note that if called with an FL_EXISTS argument, the caller may determine
737  * whether or not a lock was successfully freed by testing the return
738  * value for -ENOENT.
739  */
740 static int flock_lock_file(struct file *filp, struct file_lock *request)
741 {
742 	struct file_lock *new_fl = NULL;
743 	struct file_lock **before;
744 	struct inode * inode = filp->f_path.dentry->d_inode;
745 	int error = 0;
746 	int found = 0;
747 
748 	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
749 		new_fl = locks_alloc_lock();
750 		if (!new_fl)
751 			return -ENOMEM;
752 	}
753 
754 	lock_flocks();
755 	if (request->fl_flags & FL_ACCESS)
756 		goto find_conflict;
757 
758 	for_each_lock(inode, before) {
759 		struct file_lock *fl = *before;
760 		if (IS_POSIX(fl))
761 			break;
762 		if (IS_LEASE(fl))
763 			continue;
764 		if (filp != fl->fl_file)
765 			continue;
766 		if (request->fl_type == fl->fl_type)
767 			goto out;
768 		found = 1;
769 		locks_delete_lock(before);
770 		break;
771 	}
772 
773 	if (request->fl_type == F_UNLCK) {
774 		if ((request->fl_flags & FL_EXISTS) && !found)
775 			error = -ENOENT;
776 		goto out;
777 	}
778 
779 	/*
780 	 * If a higher-priority process was blocked on the old file lock,
781 	 * give it the opportunity to lock the file.
782 	 */
783 	if (found) {
784 		unlock_flocks();
785 		cond_resched();
786 		lock_flocks();
787 	}
788 
789 find_conflict:
790 	for_each_lock(inode, before) {
791 		struct file_lock *fl = *before;
792 		if (IS_POSIX(fl))
793 			break;
794 		if (IS_LEASE(fl))
795 			continue;
796 		if (!flock_locks_conflict(request, fl))
797 			continue;
798 		error = -EAGAIN;
799 		if (!(request->fl_flags & FL_SLEEP))
800 			goto out;
801 		error = FILE_LOCK_DEFERRED;
802 		locks_insert_block(fl, request);
803 		goto out;
804 	}
805 	if (request->fl_flags & FL_ACCESS)
806 		goto out;
807 	locks_copy_lock(new_fl, request);
808 	locks_insert_lock(before, new_fl);
809 	new_fl = NULL;
810 	error = 0;
811 
812 out:
813 	unlock_flocks();
814 	if (new_fl)
815 		locks_free_lock(new_fl);
816 	return error;
817 }
818 
819 static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
820 {
821 	struct file_lock *fl;
822 	struct file_lock *new_fl = NULL;
823 	struct file_lock *new_fl2 = NULL;
824 	struct file_lock *left = NULL;
825 	struct file_lock *right = NULL;
826 	struct file_lock **before;
827 	int error, added = 0;
828 
829 	/*
830 	 * We may need two file_lock structures for this operation,
831 	 * so we get them in advance to avoid races.
832 	 *
833 	 * In some cases we can be sure that no new locks will be needed
834 	 */
835 	if (!(request->fl_flags & FL_ACCESS) &&
836 	    (request->fl_type != F_UNLCK ||
837 	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
838 		new_fl = locks_alloc_lock();
839 		new_fl2 = locks_alloc_lock();
840 	}
841 
842 	lock_flocks();
843 	if (request->fl_type != F_UNLCK) {
844 		for_each_lock(inode, before) {
845 			fl = *before;
846 			if (!IS_POSIX(fl))
847 				continue;
848 			if (!posix_locks_conflict(request, fl))
849 				continue;
850 			if (conflock)
851 				__locks_copy_lock(conflock, fl);
852 			error = -EAGAIN;
853 			if (!(request->fl_flags & FL_SLEEP))
854 				goto out;
855 			error = -EDEADLK;
856 			if (posix_locks_deadlock(request, fl))
857 				goto out;
858 			error = FILE_LOCK_DEFERRED;
859 			locks_insert_block(fl, request);
860 			goto out;
861   		}
862   	}
863 
864 	/* If we're just looking for a conflict, we're done. */
865 	error = 0;
866 	if (request->fl_flags & FL_ACCESS)
867 		goto out;
868 
869 	/*
870 	 * Find the first old lock with the same owner as the new lock.
871 	 */
872 
873 	before = &inode->i_flock;
874 
875 	/* First skip locks owned by other processes.  */
876 	while ((fl = *before) && (!IS_POSIX(fl) ||
877 				  !posix_same_owner(request, fl))) {
878 		before = &fl->fl_next;
879 	}
880 
881 	/* Process locks with this owner.  */
882 	while ((fl = *before) && posix_same_owner(request, fl)) {
883 		/* Detect adjacent or overlapping regions (if same lock type)
884 		 */
885 		if (request->fl_type == fl->fl_type) {
886 			/* In all comparisons of start vs end, use
887 			 * "start - 1" rather than "end + 1". If end
888 			 * is OFFSET_MAX, end + 1 will become negative.
889 			 */
890 			if (fl->fl_end < request->fl_start - 1)
891 				goto next_lock;
892 			/* If the next lock in the list has entirely bigger
893 			 * addresses than the new one, insert the lock here.
894 			 */
895 			if (fl->fl_start - 1 > request->fl_end)
896 				break;
897 
898 			/* If we come here, the new and old lock are of the
899 			 * same type and adjacent or overlapping. Make one
900 			 * lock yielding from the lower start address of both
901 			 * locks to the higher end address.
902 			 */
903 			if (fl->fl_start > request->fl_start)
904 				fl->fl_start = request->fl_start;
905 			else
906 				request->fl_start = fl->fl_start;
907 			if (fl->fl_end < request->fl_end)
908 				fl->fl_end = request->fl_end;
909 			else
910 				request->fl_end = fl->fl_end;
911 			if (added) {
912 				locks_delete_lock(before);
913 				continue;
914 			}
915 			request = fl;
916 			added = 1;
917 		}
918 		else {
919 			/* Processing for different lock types is a bit
920 			 * more complex.
921 			 */
922 			if (fl->fl_end < request->fl_start)
923 				goto next_lock;
924 			if (fl->fl_start > request->fl_end)
925 				break;
926 			if (request->fl_type == F_UNLCK)
927 				added = 1;
928 			if (fl->fl_start < request->fl_start)
929 				left = fl;
930 			/* If the next lock in the list has a higher end
931 			 * address than the new one, insert the new one here.
932 			 */
933 			if (fl->fl_end > request->fl_end) {
934 				right = fl;
935 				break;
936 			}
937 			if (fl->fl_start >= request->fl_start) {
938 				/* The new lock completely replaces an old
939 				 * one (This may happen several times).
940 				 */
941 				if (added) {
942 					locks_delete_lock(before);
943 					continue;
944 				}
945 				/* Replace the old lock with the new one.
946 				 * Wake up anybody waiting for the old one,
947 				 * as the change in lock type might satisfy
948 				 * their needs.
949 				 */
950 				locks_wake_up_blocks(fl);
951 				fl->fl_start = request->fl_start;
952 				fl->fl_end = request->fl_end;
953 				fl->fl_type = request->fl_type;
954 				locks_release_private(fl);
955 				locks_copy_private(fl, request);
956 				request = fl;
957 				added = 1;
958 			}
959 		}
960 		/* Go on to next lock.
961 		 */
962 	next_lock:
963 		before = &fl->fl_next;
964 	}
965 
966 	/*
967 	 * The above code only modifies existing locks in case of
968 	 * merging or replacing.  If new lock(s) need to be inserted,
969 	 * all modifications are done below this point, so it is still safe to
970 	 * bail out.
971 	 */
972 	error = -ENOLCK; /* "no luck" */
973 	if (right && left == right && !new_fl2)
974 		goto out;
975 
976 	error = 0;
977 	if (!added) {
978 		if (request->fl_type == F_UNLCK) {
979 			if (request->fl_flags & FL_EXISTS)
980 				error = -ENOENT;
981 			goto out;
982 		}
983 
984 		if (!new_fl) {
985 			error = -ENOLCK;
986 			goto out;
987 		}
988 		locks_copy_lock(new_fl, request);
989 		locks_insert_lock(before, new_fl);
990 		new_fl = NULL;
991 	}
992 	if (right) {
993 		if (left == right) {
994 			/* The new lock breaks the old one in two pieces,
995 			 * so we have to use the second new lock.
996 			 */
997 			left = new_fl2;
998 			new_fl2 = NULL;
999 			locks_copy_lock(left, right);
1000 			locks_insert_lock(before, left);
1001 		}
1002 		right->fl_start = request->fl_end + 1;
1003 		locks_wake_up_blocks(right);
1004 	}
1005 	if (left) {
1006 		left->fl_end = request->fl_start - 1;
1007 		locks_wake_up_blocks(left);
1008 	}
1009  out:
1010 	unlock_flocks();
1011 	/*
1012 	 * Free any unused locks.
1013 	 */
1014 	if (new_fl)
1015 		locks_free_lock(new_fl);
1016 	if (new_fl2)
1017 		locks_free_lock(new_fl2);
1018 	return error;
1019 }
1020 
1021 /**
1022  * posix_lock_file - Apply a POSIX-style lock to a file
1023  * @filp: The file to apply the lock to
1024  * @fl: The lock to be applied
1025  * @conflock: Place to return a copy of the conflicting lock, if found.
1026  *
1027  * Add a POSIX style lock to a file.
1028  * We merge adjacent & overlapping locks whenever possible.
1029  * POSIX locks are sorted by owner task, then by starting address
1030  *
1031  * Note that if called with an FL_EXISTS argument, the caller may determine
1032  * whether or not a lock was successfully freed by testing the return
1033  * value for -ENOENT.
1034  */
1035 int posix_lock_file(struct file *filp, struct file_lock *fl,
1036 			struct file_lock *conflock)
1037 {
1038 	return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock);
1039 }
1040 EXPORT_SYMBOL(posix_lock_file);
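/*
 * Worked example of the merging described above: if one owner takes write
 * locks on bytes [0,99] and then [100,199], the two adjacent same-type
 * locks are merged into a single lock over [0,199].  A later unlock of
 * [50,149] by that owner splits it again, leaving locks on [0,49] and
 * [150,199]; the split is the case that consumes the second file_lock
 * preallocated in __posix_lock_file().
 */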
1041 
1042 /**
1043  * posix_lock_file_wait - Apply a POSIX-style lock to a file
1044  * @filp: The file to apply the lock to
1045  * @fl: The lock to be applied
1046  *
1047  * Add a POSIX style lock to a file.
1048  * We merge adjacent & overlapping locks whenever possible.
1049  * POSIX locks are sorted by owner task, then by starting address
1050  */
1051 int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
1052 {
1053 	int error;
1054 	might_sleep();
1055 	for (;;) {
1056 		error = posix_lock_file(filp, fl, NULL);
1057 		if (error != FILE_LOCK_DEFERRED)
1058 			break;
1059 		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1060 		if (!error)
1061 			continue;
1062 
1063 		locks_delete_block(fl);
1064 		break;
1065 	}
1066 	return error;
1067 }
1068 EXPORT_SYMBOL(posix_lock_file_wait);
1069 
1070 /**
1071  * locks_mandatory_locked - Check for an active lock
1072  * @inode: the file to check
1073  *
1074  * Searches the inode's list of locks to find any POSIX locks which conflict.
1075  * This function is called from locks_verify_locked() only.
1076  */
1077 int locks_mandatory_locked(struct inode *inode)
1078 {
1079 	fl_owner_t owner = current->files;
1080 	struct file_lock *fl;
1081 
1082 	/*
1083 	 * Search the lock list for this inode for any POSIX locks.
1084 	 */
1085 	lock_flocks();
1086 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
1087 		if (!IS_POSIX(fl))
1088 			continue;
1089 		if (fl->fl_owner != owner)
1090 			break;
1091 	}
1092 	unlock_flocks();
1093 	return fl ? -EAGAIN : 0;
1094 }
1095 
1096 /**
1097  * locks_mandatory_area - Check for a conflicting lock
1098  * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ
1099  *		for shared
1100  * @inode:      the file to check
1101  * @filp:       how the file was opened (if it was)
1102  * @offset:     start of area to check
1103  * @count:      length of area to check
1104  *
1105  * Searches the inode's list of locks to find any POSIX locks which conflict.
1106  * This function is called from rw_verify_area() and
1107  * locks_verify_truncate().
1108  */
1109 int locks_mandatory_area(int read_write, struct inode *inode,
1110 			 struct file *filp, loff_t offset,
1111 			 size_t count)
1112 {
1113 	struct file_lock fl;
1114 	int error;
1115 
1116 	locks_init_lock(&fl);
1117 	fl.fl_owner = current->files;
1118 	fl.fl_pid = current->tgid;
1119 	fl.fl_file = filp;
1120 	fl.fl_flags = FL_POSIX | FL_ACCESS;
1121 	if (filp && !(filp->f_flags & O_NONBLOCK))
1122 		fl.fl_flags |= FL_SLEEP;
1123 	fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
1124 	fl.fl_start = offset;
1125 	fl.fl_end = offset + count - 1;
1126 
1127 	for (;;) {
1128 		error = __posix_lock_file(inode, &fl, NULL);
1129 		if (error != FILE_LOCK_DEFERRED)
1130 			break;
1131 		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
1132 		if (!error) {
1133 			/*
1134 			 * If we've been sleeping someone might have
1135 			 * changed the permissions behind our back.
1136 			 */
1137 			if (__mandatory_lock(inode))
1138 				continue;
1139 		}
1140 
1141 		locks_delete_block(&fl);
1142 		break;
1143 	}
1144 
1145 	return error;
1146 }
1147 
1148 EXPORT_SYMBOL(locks_mandatory_area);
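/*
 * Note that the mandatory checks above only take effect when the filesystem
 * is mounted with the "mand" option and the file has the setgid bit set
 * with group execute clear (chmod g+s,g-x), as described in the
 * mandatory-locking documentation referenced in the header comment.
 */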
1149 
1150 /* We already had a lease on this file; just change its type */
1151 int lease_modify(struct file_lock **before, int arg)
1152 {
1153 	struct file_lock *fl = *before;
1154 	int error = assign_type(fl, arg);
1155 
1156 	if (error)
1157 		return error;
1158 	locks_wake_up_blocks(fl);
1159 	if (arg == F_UNLCK)
1160 		locks_delete_lock(before);
1161 	return 0;
1162 }
1163 
1164 EXPORT_SYMBOL(lease_modify);
1165 
1166 static void time_out_leases(struct inode *inode)
1167 {
1168 	struct file_lock **before;
1169 	struct file_lock *fl;
1170 
1171 	before = &inode->i_flock;
1172 	while ((fl = *before) && IS_LEASE(fl) && (fl->fl_type & F_INPROGRESS)) {
1173 		if ((fl->fl_break_time == 0)
1174 				|| time_before(jiffies, fl->fl_break_time)) {
1175 			before = &fl->fl_next;
1176 			continue;
1177 		}
1178 		lease_modify(before, fl->fl_type & ~F_INPROGRESS);
1179 		if (fl == *before)	/* lease_modify may have freed fl */
1180 			before = &fl->fl_next;
1181 	}
1182 }
1183 
1184 /**
1185  *	__break_lease	-	revoke all outstanding leases on file
1186  *	@inode: the inode of the file to return
1187  *	@mode: the open mode (read or write)
1188  *
1189  *	break_lease (inlined for speed) has checked there already is at least
1190  *	some kind of lock (maybe a lease) on this file.  Leases are broken on
1191  *	a call to open() or truncate().  This function can sleep unless you
1192  *	specified %O_NONBLOCK to your open().
1193  */
1194 int __break_lease(struct inode *inode, unsigned int mode)
1195 {
1196 	int error = 0, future;
1197 	struct file_lock *new_fl, *flock;
1198 	struct file_lock *fl;
1199 	unsigned long break_time;
1200 	int i_have_this_lease = 0;
1201 	int want_write = (mode & O_ACCMODE) != O_RDONLY;
1202 
1203 	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1204 
1205 	lock_flocks();
1206 
1207 	time_out_leases(inode);
1208 
1209 	flock = inode->i_flock;
1210 	if ((flock == NULL) || !IS_LEASE(flock))
1211 		goto out;
1212 
1213 	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
1214 		if (fl->fl_owner == current->files)
1215 			i_have_this_lease = 1;
1216 
1217 	if (want_write) {
1218 		/* If we want write access, we have to revoke any lease. */
1219 		future = F_UNLCK | F_INPROGRESS;
1220 	} else if (flock->fl_type & F_INPROGRESS) {
1221 		/* If the lease is already being broken, we just leave it */
1222 		future = flock->fl_type;
1223 	} else if (flock->fl_type & F_WRLCK) {
1224 		/* Downgrade the exclusive lease to a read-only lease. */
1225 		future = F_RDLCK | F_INPROGRESS;
1226 	} else {
1227 		/* the existing lease was read-only, so we can read too. */
1228 		goto out;
1229 	}
1230 
1231 	if (IS_ERR(new_fl) && !i_have_this_lease
1232 			&& ((mode & O_NONBLOCK) == 0)) {
1233 		error = PTR_ERR(new_fl);
1234 		goto out;
1235 	}
1236 
1237 	break_time = 0;
1238 	if (lease_break_time > 0) {
1239 		break_time = jiffies + lease_break_time * HZ;
1240 		if (break_time == 0)
1241 			break_time++;	/* so that 0 means no break time */
1242 	}
1243 
1244 	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
1245 		if (fl->fl_type != future) {
1246 			fl->fl_type = future;
1247 			fl->fl_break_time = break_time;
1248 			/* lease must have lmops break callback */
1249 			fl->fl_lmops->fl_break(fl);
1250 		}
1251 	}
1252 
1253 	if (i_have_this_lease || (mode & O_NONBLOCK)) {
1254 		error = -EWOULDBLOCK;
1255 		goto out;
1256 	}
1257 
1258 restart:
1259 	break_time = flock->fl_break_time;
1260 	if (break_time != 0) {
1261 		break_time -= jiffies;
1262 		if (break_time == 0)
1263 			break_time++;
1264 	}
1265 	locks_insert_block(flock, new_fl);
1266 	unlock_flocks();
1267 	error = wait_event_interruptible_timeout(new_fl->fl_wait,
1268 						!new_fl->fl_next, break_time);
1269 	lock_flocks();
1270 	__locks_delete_block(new_fl);
1271 	if (error >= 0) {
1272 		if (error == 0)
1273 			time_out_leases(inode);
1274 		/* Wait for the next lease that has not been broken yet */
1275 		for (flock = inode->i_flock; flock && IS_LEASE(flock);
1276 				flock = flock->fl_next) {
1277 			if (flock->fl_type & F_INPROGRESS)
1278 				goto restart;
1279 		}
1280 		error = 0;
1281 	}
1282 
1283 out:
1284 	unlock_flocks();
1285 	if (!IS_ERR(new_fl))
1286 		locks_free_lock(new_fl);
1287 	return error;
1288 }
1289 
1290 EXPORT_SYMBOL(__break_lease);
1291 
1292 /**
1293  *	lease_get_mtime - get the last modified time of an inode
1294  *	@inode: the inode
1295  *      @time:  pointer to a timespec which will contain the last modified time
1296  *
1297  * This is to force NFS clients to flush their caches for files with
1298  * exclusive leases.  The justification is that if someone has an
1299  * exclusive lease, then they could be modifying it.
1300  */
1301 void lease_get_mtime(struct inode *inode, struct timespec *time)
1302 {
1303 	struct file_lock *flock = inode->i_flock;
1304 	if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK))
1305 		*time = current_fs_time(inode->i_sb);
1306 	else
1307 		*time = inode->i_mtime;
1308 }
1309 
1310 EXPORT_SYMBOL(lease_get_mtime);
1311 
1312 /**
1313  *	fcntl_getlease - Enquire what lease is currently active
1314  *	@filp: the file
1315  *
1316  *	The value returned by this function will be one of
1317  *	(if no lease break is pending):
1318  *
1319  *	%F_RDLCK to indicate a shared lease is held.
1320  *
1321  *	%F_WRLCK to indicate an exclusive lease is held.
1322  *
1323  *	%F_UNLCK to indicate no lease is held.
1324  *
1325  *	(if a lease break is pending):
1326  *
1327  *	%F_RDLCK to indicate an exclusive lease needs to be
1328  *		changed to a shared lease (or removed).
1329  *
1330  *	%F_UNLCK to indicate the lease needs to be removed.
1331  *
1332  *	XXX: sfr & willy disagree over whether F_INPROGRESS
1333  *	should be returned to userspace.
1334  */
1335 int fcntl_getlease(struct file *filp)
1336 {
1337 	struct file_lock *fl;
1338 	int type = F_UNLCK;
1339 
1340 	lock_flocks();
1341 	time_out_leases(filp->f_path.dentry->d_inode);
1342 	for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl);
1343 			fl = fl->fl_next) {
1344 		if (fl->fl_file == filp) {
1345 			type = fl->fl_type & ~F_INPROGRESS;
1346 			break;
1347 		}
1348 	}
1349 	unlock_flocks();
1350 	return type;
1351 }
1352 
1353 /**
1354  *	generic_setlease	-	sets a lease on an open file
1355  *	@filp: file pointer
1356  *	@arg: type of lease to obtain
1357  *	@flp: input - file_lock to use, output - file_lock inserted
1358  *
1359  *	The (input) flp->fl_lmops->fl_break function is required
1360  *	by break_lease().
1361  *
1362  *	Called with file_lock_lock held.
1363  */
1364 int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
1365 {
1366 	struct file_lock *fl, **before, **my_before = NULL, *lease;
1367 	struct dentry *dentry = filp->f_path.dentry;
1368 	struct inode *inode = dentry->d_inode;
1369 	int error, rdlease_count = 0, wrlease_count = 0;
1370 
1371 	lease = *flp;
1372 
1373 	error = -EACCES;
1374 	if ((current_fsuid() != inode->i_uid) && !capable(CAP_LEASE))
1375 		goto out;
1376 	error = -EINVAL;
1377 	if (!S_ISREG(inode->i_mode))
1378 		goto out;
1379 	error = security_file_lock(filp, arg);
1380 	if (error)
1381 		goto out;
1382 
1383 	time_out_leases(inode);
1384 
1385 	BUG_ON(!(*flp)->fl_lmops->fl_break);
1386 
1387 	if (arg != F_UNLCK) {
1388 		error = -EAGAIN;
1389 		if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
1390 			goto out;
1391 		if ((arg == F_WRLCK)
1392 		    && ((dentry->d_count > 1)
1393 			|| (atomic_read(&inode->i_count) > 1)))
1394 			goto out;
1395 	}
1396 
1397 	/*
1398 	 * At this point, we know that if there is an exclusive
1399 	 * lease on this file, then we hold it on this filp
1400 	 * (otherwise our open of this file would have blocked).
1401 	 * And if we are trying to acquire an exclusive lease,
1402 	 * then the file is not open by anyone (including us)
1403 	 * except for this filp.
1404 	 */
1405 	for (before = &inode->i_flock;
1406 			((fl = *before) != NULL) && IS_LEASE(fl);
1407 			before = &fl->fl_next) {
1408 		if (lease->fl_lmops->fl_mylease(fl, lease))
1409 			my_before = before;
1410 		else if (fl->fl_type == (F_INPROGRESS | F_UNLCK))
1411 			/*
1412 			 * Someone is in the process of opening this
1413 			 * file for writing so we may not take an
1414 			 * exclusive lease on it.
1415 			 */
1416 			wrlease_count++;
1417 		else
1418 			rdlease_count++;
1419 	}
1420 
1421 	error = -EAGAIN;
1422 	if ((arg == F_RDLCK && (wrlease_count > 0)) ||
1423 	    (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0)))
1424 		goto out;
1425 
1426 	if (my_before != NULL) {
1427 		error = lease->fl_lmops->fl_change(my_before, arg);
1428 		if (!error)
1429 			*flp = *my_before;
1430 		goto out;
1431 	}
1432 
1433 	if (arg == F_UNLCK)
1434 		goto out;
1435 
1436 	error = -EINVAL;
1437 	if (!leases_enable)
1438 		goto out;
1439 
1440 	locks_insert_lock(before, lease);
1441 	return 0;
1442 
1443 out:
1444 	return error;
1445 }
1446 EXPORT_SYMBOL(generic_setlease);
1447 
1448 static int __vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
1449 {
1450 	if (filp->f_op && filp->f_op->setlease)
1451 		return filp->f_op->setlease(filp, arg, lease);
1452 	else
1453 		return generic_setlease(filp, arg, lease);
1454 }
1455 
1456 /**
1457  *	vfs_setlease        -       sets a lease on an open file
1458  *	@filp: file pointer
1459  *	@arg: type of lease to obtain
1460  *	@lease: file_lock to use
1461  *
1462  *	Call this to establish a lease on the file.
1463  *	The (*lease)->fl_lmops->fl_break operation must be set; if not,
1464  *	break_lease will oops!
1465  *
1466  *	This will call the filesystem's setlease file method, if
1467  *	defined.  Note that there is no getlease method; instead, the
1468  *	filesystem setlease method should call back to setlease() to
1469  *	add a lease to the inode's lease list, where fcntl_getlease() can
1470  *	find it.  Since fcntl_getlease() only reports whether the current
1471  *	task holds a lease, a cluster filesystem need only do this for
1472  *	leases held by processes on this node.
1473  *
1474  *	There is also no break_lease method; filesystems that
1475  *	handle their own leases should break leases themselves from the
1476  *	filesystem's open, create, and (on truncate) setattr methods.
1477  *
1478  *	Warning: the only current setlease methods exist only to disable
1479  *	leases in certain cases.  More vfs changes may be required to
1480  *	allow a full filesystem lease implementation.
1481  */
1482 
1483 int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
1484 {
1485 	int error;
1486 
1487 	lock_flocks();
1488 	error = __vfs_setlease(filp, arg, lease);
1489 	unlock_flocks();
1490 
1491 	return error;
1492 }
1493 EXPORT_SYMBOL_GPL(vfs_setlease);
1494 
1495 static int do_fcntl_delete_lease(struct file *filp)
1496 {
1497 	struct file_lock fl, *flp = &fl;
1498 
1499 	lease_init(filp, F_UNLCK, flp);
1500 
1501 	return vfs_setlease(filp, F_UNLCK, &flp);
1502 }
1503 
1504 static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
1505 {
1506 	struct file_lock *fl, *ret;
1507 	struct fasync_struct *new;
1508 	int error;
1509 
1510 	fl = lease_alloc(filp, arg);
1511 	if (IS_ERR(fl))
1512 		return PTR_ERR(fl);
1513 
1514 	new = fasync_alloc();
1515 	if (!new) {
1516 		locks_free_lock(fl);
1517 		return -ENOMEM;
1518 	}
1519 	ret = fl;
1520 	lock_flocks();
1521 	error = __vfs_setlease(filp, arg, &ret);
1522 	if (error) {
1523 		unlock_flocks();
1524 		locks_free_lock(fl);
1525 		goto out_free_fasync;
1526 	}
1527 	if (ret != fl)
1528 		locks_free_lock(fl);
1529 
1530 	/*
1531 	 * fasync_insert_entry() returns the old entry if any.
1532 	 * If there was no old entry, then it used 'new' and
1533 	 * inserted it into the fasync list. Clear new so that
1534 	 * we don't release it here.
1535 	 */
1536 	if (!fasync_insert_entry(fd, filp, &ret->fl_fasync, new))
1537 		new = NULL;
1538 
1539 	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
1540 	unlock_flocks();
1541 
1542 out_free_fasync:
1543 	if (new)
1544 		fasync_free(new);
1545 	return error;
1546 }
1547 
1548 /**
1549  *	fcntl_setlease	-	sets a lease on an open file
1550  *	@fd: open file descriptor
1551  *	@filp: file pointer
1552  *	@arg: type of lease to obtain
1553  *
1554  *	Call this fcntl to establish a lease on the file.
1555  *	Note that you also need to call %F_SETSIG to
1556  *	receive a signal when the lease is broken.
1557  */
1558 int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1559 {
1560 	if (arg == F_UNLCK)
1561 		return do_fcntl_delete_lease(filp);
1562 	return do_fcntl_add_lease(fd, filp, arg);
1563 }
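/*
 * Illustrative sketch (userspace, not kernel code): taking a lease and
 * arranging to hear about lease breaks.  The choice of SIGRTMIN is only an
 * example; F_SETSIG/F_SETLEASE need _GNU_SOURCE in the C library headers.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>

static void take_read_lease(int fd)
{
	fcntl(fd, F_SETSIG, SIGRTMIN);	/* deliver lease breaks as an RT signal */
	fcntl(fd, F_SETLEASE, F_RDLCK);	/* request a shared (read) lease */

	/*
	 * When another task opens the file for writing, the lease holder
	 * receives the signal and must unlock (or downgrade) the lease
	 * within lease_break_time seconds, or the kernel breaks it.
	 */
}
#endif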
1564 
1565 /**
1566  * flock_lock_file_wait - Apply a FLOCK-style lock to a file
1567  * @filp: The file to apply the lock to
1568  * @fl: The lock to be applied
1569  *
1570  * Add a FLOCK style lock to a file.
1571  */
1572 int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
1573 {
1574 	int error;
1575 	might_sleep();
1576 	for (;;) {
1577 		error = flock_lock_file(filp, fl);
1578 		if (error != FILE_LOCK_DEFERRED)
1579 			break;
1580 		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1581 		if (!error)
1582 			continue;
1583 
1584 		locks_delete_block(fl);
1585 		break;
1586 	}
1587 	return error;
1588 }
1589 
1590 EXPORT_SYMBOL(flock_lock_file_wait);
1591 
1592 /**
1593  *	sys_flock: - flock() system call.
1594  *	@fd: the file descriptor to lock.
1595  *	@cmd: the type of lock to apply.
1596  *
1597  *	Apply a %FL_FLOCK style lock to an open file descriptor.
1598  *	The @cmd can be one of
1599  *
1600  *	%LOCK_SH -- a shared lock.
1601  *
1602  *	%LOCK_EX -- an exclusive lock.
1603  *
1604  *	%LOCK_UN -- remove an existing lock.
1605  *
1606  *	%LOCK_MAND -- a `mandatory' flock.  This exists to emulate Windows Share Modes.
1607  *
1608  *	%LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
1609  *	processes read and write access respectively.
1610  */
1611 SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
1612 {
1613 	struct file *filp;
1614 	struct file_lock *lock;
1615 	int can_sleep, unlock;
1616 	int error;
1617 
1618 	error = -EBADF;
1619 	filp = fget(fd);
1620 	if (!filp)
1621 		goto out;
1622 
1623 	can_sleep = !(cmd & LOCK_NB);
1624 	cmd &= ~LOCK_NB;
1625 	unlock = (cmd == LOCK_UN);
1626 
1627 	if (!unlock && !(cmd & LOCK_MAND) &&
1628 	    !(filp->f_mode & (FMODE_READ|FMODE_WRITE)))
1629 		goto out_putf;
1630 
1631 	error = flock_make_lock(filp, &lock, cmd);
1632 	if (error)
1633 		goto out_putf;
1634 	if (can_sleep)
1635 		lock->fl_flags |= FL_SLEEP;
1636 
1637 	error = security_file_lock(filp, lock->fl_type);
1638 	if (error)
1639 		goto out_free;
1640 
1641 	if (filp->f_op && filp->f_op->flock)
1642 		error = filp->f_op->flock(filp,
1643 					  (can_sleep) ? F_SETLKW : F_SETLK,
1644 					  lock);
1645 	else
1646 		error = flock_lock_file_wait(filp, lock);
1647 
1648  out_free:
1649 	locks_free_lock(lock);
1650 
1651  out_putf:
1652 	fput(filp);
1653  out:
1654 	return error;
1655 }
1656 
1657 /**
1658  * vfs_test_lock - test file byte range lock
1659  * @filp: The file to test lock for
1660  * @fl: The lock to test; also used to hold result
1661  *
1662  * Returns -ERRNO on failure.  Indicates presence of conflicting lock by
1663  * setting conf->fl_type to something other than F_UNLCK.
1664  */
1665 int vfs_test_lock(struct file *filp, struct file_lock *fl)
1666 {
1667 	if (filp->f_op && filp->f_op->lock)
1668 		return filp->f_op->lock(filp, F_GETLK, fl);
1669 	posix_test_lock(filp, fl);
1670 	return 0;
1671 }
1672 EXPORT_SYMBOL_GPL(vfs_test_lock);
1673 
1674 static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
1675 {
1676 	flock->l_pid = fl->fl_pid;
1677 #if BITS_PER_LONG == 32
1678 	/*
1679 	 * Make sure we can represent the posix lock via
1680 	 * legacy 32bit flock.
1681 	 */
1682 	if (fl->fl_start > OFFT_OFFSET_MAX)
1683 		return -EOVERFLOW;
1684 	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
1685 		return -EOVERFLOW;
1686 #endif
1687 	flock->l_start = fl->fl_start;
1688 	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1689 		fl->fl_end - fl->fl_start + 1;
1690 	flock->l_whence = 0;
1691 	flock->l_type = fl->fl_type;
1692 	return 0;
1693 }
1694 
1695 #if BITS_PER_LONG == 32
1696 static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
1697 {
1698 	flock->l_pid = fl->fl_pid;
1699 	flock->l_start = fl->fl_start;
1700 	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1701 		fl->fl_end - fl->fl_start + 1;
1702 	flock->l_whence = 0;
1703 	flock->l_type = fl->fl_type;
1704 }
1705 #endif
1706 
1707 /* Report the first existing lock that would conflict with l.
1708  * This implements the F_GETLK command of fcntl().
1709  */
1710 int fcntl_getlk(struct file *filp, struct flock __user *l)
1711 {
1712 	struct file_lock file_lock;
1713 	struct flock flock;
1714 	int error;
1715 
1716 	error = -EFAULT;
1717 	if (copy_from_user(&flock, l, sizeof(flock)))
1718 		goto out;
1719 	error = -EINVAL;
1720 	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1721 		goto out;
1722 
1723 	error = flock_to_posix_lock(filp, &file_lock, &flock);
1724 	if (error)
1725 		goto out;
1726 
1727 	error = vfs_test_lock(filp, &file_lock);
1728 	if (error)
1729 		goto out;
1730 
1731 	flock.l_type = file_lock.fl_type;
1732 	if (file_lock.fl_type != F_UNLCK) {
1733 		error = posix_lock_to_flock(&flock, &file_lock);
1734 		if (error)
1735 			goto out;
1736 	}
1737 	error = -EFAULT;
1738 	if (!copy_to_user(l, &flock, sizeof(flock)))
1739 		error = 0;
1740 out:
1741 	return error;
1742 }
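/*
 * Illustrative sketch (userspace, not kernel code): the application-side
 * view of the F_GETLK command implemented above -- asking "who would block
 * me?" without actually taking the lock.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>

static void probe_for_conflict(int fd)
{
	struct flock probe = {
		.l_type   = F_WRLCK,	/* pretend we want a write lock ... */
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 0,		/* ... over the whole file */
	};

	if (fcntl(fd, F_GETLK, &probe) == 0 && probe.l_type != F_UNLCK)
		printf("conflicting lock held by pid %d\n", (int)probe.l_pid);
}
#endif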
1743 
1744 /**
1745  * vfs_lock_file - file byte range lock
1746  * @filp: The file to apply the lock to
1747  * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
1748  * @fl: The lock to be applied
1749  * @conf: Place to return a copy of the conflicting lock, if found.
1750  *
1751  * A caller that doesn't care about the conflicting lock may pass NULL
1752  * as the final argument.
1753  *
1754  * If the filesystem defines a private ->lock() method, then @conf will
1755  * be left unchanged; so a caller that cares should initialize it to
1756  * some acceptable default.
1757  *
1758  * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
1759  * locks, the ->lock() interface may return asynchronously, before the lock has
1760  * been granted or denied by the underlying filesystem, if (and only if)
1761  * fl_grant is set. Callers expecting ->lock() to return asynchronously
1762  * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
1763  * the request is for a blocking lock. When ->lock() does return asynchronously,
1764  * it must return FILE_LOCK_DEFERRED, and call ->fl_grant() when the lock
1765  * request completes.
1766  * If the request is for a non-blocking lock, the filesystem should return
1767  * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
1768  * with the result. If the request times out, the callback routine will return
1769  * a nonzero return code and the filesystem should release the lock. The
1770  * filesystem is also responsible for keeping a corresponding posix lock when
1771  * it grants a lock, so the VFS can find out which locks are locally held and
1772  * do the correct lock cleanup when required.
1773  * The underlying filesystem must not drop the kernel lock or call
1774  * ->fl_grant() before returning to the caller with a FILE_LOCK_DEFERRED
1775  * return code.
1776  */
1777 int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
1778 {
1779 	if (filp->f_op && filp->f_op->lock)
1780 		return filp->f_op->lock(filp, cmd, fl);
1781 	else
1782 		return posix_lock_file(filp, fl, conf);
1783 }
1784 EXPORT_SYMBOL_GPL(vfs_lock_file);
1785 
1786 static int do_lock_file_wait(struct file *filp, unsigned int cmd,
1787 			     struct file_lock *fl)
1788 {
1789 	int error;
1790 
1791 	error = security_file_lock(filp, fl->fl_type);
1792 	if (error)
1793 		return error;
1794 
1795 	for (;;) {
1796 		error = vfs_lock_file(filp, cmd, fl, NULL);
1797 		if (error != FILE_LOCK_DEFERRED)
1798 			break;
1799 		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1800 		if (!error)
1801 			continue;
1802 
1803 		locks_delete_block(fl);
1804 		break;
1805 	}
1806 
1807 	return error;
1808 }
1809 
1810 /* Apply the lock described by l to an open file descriptor.
1811  * This implements both the F_SETLK and F_SETLKW commands of fcntl().
1812  */
1813 int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
1814 		struct flock __user *l)
1815 {
1816 	struct file_lock *file_lock = locks_alloc_lock();
1817 	struct flock flock;
1818 	struct inode *inode;
1819 	struct file *f;
1820 	int error;
1821 
1822 	if (file_lock == NULL)
1823 		return -ENOLCK;
1824 
1825 	/*
1826 	 * This might block, so we do it before checking the inode.
1827 	 */
1828 	error = -EFAULT;
1829 	if (copy_from_user(&flock, l, sizeof(flock)))
1830 		goto out;
1831 
1832 	inode = filp->f_path.dentry->d_inode;
1833 
1834 	/* Don't allow mandatory locks on files that may be memory mapped
1835 	 * and shared.
1836 	 */
1837 	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1838 		error = -EAGAIN;
1839 		goto out;
1840 	}
1841 
1842 again:
1843 	error = flock_to_posix_lock(filp, file_lock, &flock);
1844 	if (error)
1845 		goto out;
1846 	if (cmd == F_SETLKW) {
1847 		file_lock->fl_flags |= FL_SLEEP;
1848 	}
1849 
1850 	error = -EBADF;
1851 	switch (flock.l_type) {
1852 	case F_RDLCK:
1853 		if (!(filp->f_mode & FMODE_READ))
1854 			goto out;
1855 		break;
1856 	case F_WRLCK:
1857 		if (!(filp->f_mode & FMODE_WRITE))
1858 			goto out;
1859 		break;
1860 	case F_UNLCK:
1861 		break;
1862 	default:
1863 		error = -EINVAL;
1864 		goto out;
1865 	}
1866 
1867 	error = do_lock_file_wait(filp, cmd, file_lock);
1868 
1869 	/*
1870 	 * Attempt to detect a close/fcntl race and recover by
1871 	 * releasing the lock that was just acquired.
1872 	 */
1873 	/*
1874 	 * We need the spin_lock here: it prevents reordering between the
1875 	 * update of inode->i_flock and the check for it done in close().
1876 	 * rcu_read_lock() wouldn't suffice.
1877 	 */
1878 	spin_lock(&current->files->file_lock);
1879 	f = fcheck(fd);
1880 	spin_unlock(&current->files->file_lock);
1881 	if (!error && f != filp && flock.l_type != F_UNLCK) {
1882 		flock.l_type = F_UNLCK;
1883 		goto again;
1884 	}
1885 
1886 out:
1887 	locks_free_lock(file_lock);
1888 	return error;
1889 }
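
/*
 * Userspace view (illustrative only; the file name is made up and error
 * handling is minimal): the requests serviced by fcntl_setlk() above
 * typically originate from a program like the following, which takes a
 * blocking write lock over the whole file and then releases it.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct flock fl = {
 *			.l_type   = F_WRLCK,	(exclusive record lock)
 *			.l_whence = SEEK_SET,
 *			.l_start  = 0,
 *			.l_len    = 0,		(0 means "to end of file")
 *		};
 *		int fd = open("/tmp/example.lock", O_RDWR | O_CREAT, 0600);
 *
 *		if (fd < 0)
 *			return 1;
 *		if (fcntl(fd, F_SETLKW, &fl) == -1) {	(F_SETLKW blocks)
 *			perror("fcntl");
 *			return 1;
 *		}
 *		fl.l_type = F_UNLCK;
 *		fcntl(fd, F_SETLK, &fl);		(drop the lock)
 *		close(fd);
 *		return 0;
 *	}
 */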
1890 
1891 #if BITS_PER_LONG == 32
1892 /* Report the first existing lock that would conflict with l.
1893  * This implements the F_GETLK command of fcntl().
1894  */
1895 int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
1896 {
1897 	struct file_lock file_lock;
1898 	struct flock64 flock;
1899 	int error;
1900 
1901 	error = -EFAULT;
1902 	if (copy_from_user(&flock, l, sizeof(flock)))
1903 		goto out;
1904 	error = -EINVAL;
1905 	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1906 		goto out;
1907 
1908 	error = flock64_to_posix_lock(filp, &file_lock, &flock);
1909 	if (error)
1910 		goto out;
1911 
1912 	error = vfs_test_lock(filp, &file_lock);
1913 	if (error)
1914 		goto out;
1915 
1916 	flock.l_type = file_lock.fl_type;
1917 	if (file_lock.fl_type != F_UNLCK)
1918 		posix_lock_to_flock64(&flock, &file_lock);
1919 
1920 	error = -EFAULT;
1921 	if (!copy_to_user(l, &flock, sizeof(flock)))
1922 		error = 0;
1923 
1924 out:
1925 	return error;
1926 }
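
/*
 * Userspace view of the probe (illustrative fragment; assumes an open
 * descriptor fd and the usual <fcntl.h>/<stdio.h> headers).  Whether the
 * request enters through fcntl_getlk() or, for 32-bit large-file callers,
 * through fcntl_getlk64() above, the struct comes back with l_type set to
 * F_UNLCK when nothing conflicts, or overwritten with the first conflicting
 * lock:
 *
 *	struct flock probe = {
 *		.l_type   = F_WRLCK,	(the lock we would like to take)
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,
 *	};
 *
 *	if (fcntl(fd, F_GETLK, &probe) == 0) {
 *		if (probe.l_type == F_UNLCK)
 *			printf("no conflicting lock\n");
 *		else
 *			printf("pid %ld holds a conflicting %s lock\n",
 *			       (long)probe.l_pid,
 *			       probe.l_type == F_WRLCK ? "write" : "read");
 *	}
 */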
1927 
1928 /* Apply the lock described by l to an open file descriptor.
1929  * This implements both the F_SETLK and F_SETLKW commands of fcntl().
1930  */
1931 int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
1932 		struct flock64 __user *l)
1933 {
1934 	struct file_lock *file_lock = locks_alloc_lock();
1935 	struct flock64 flock;
1936 	struct inode *inode;
1937 	struct file *f;
1938 	int error;
1939 
1940 	if (file_lock == NULL)
1941 		return -ENOLCK;
1942 
1943 	/*
1944 	 * This might block, so we do it before checking the inode.
1945 	 */
1946 	error = -EFAULT;
1947 	if (copy_from_user(&flock, l, sizeof(flock)))
1948 		goto out;
1949 
1950 	inode = filp->f_path.dentry->d_inode;
1951 
1952 	/* Don't allow mandatory locks on files that may be memory mapped
1953 	 * and shared.
1954 	 */
1955 	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1956 		error = -EAGAIN;
1957 		goto out;
1958 	}
1959 
1960 again:
1961 	error = flock64_to_posix_lock(filp, file_lock, &flock);
1962 	if (error)
1963 		goto out;
1964 	if (cmd == F_SETLKW64) {
1965 		file_lock->fl_flags |= FL_SLEEP;
1966 	}
1967 
1968 	error = -EBADF;
1969 	switch (flock.l_type) {
1970 	case F_RDLCK:
1971 		if (!(filp->f_mode & FMODE_READ))
1972 			goto out;
1973 		break;
1974 	case F_WRLCK:
1975 		if (!(filp->f_mode & FMODE_WRITE))
1976 			goto out;
1977 		break;
1978 	case F_UNLCK:
1979 		break;
1980 	default:
1981 		error = -EINVAL;
1982 		goto out;
1983 	}
1984 
1985 	error = do_lock_file_wait(filp, cmd, file_lock);
1986 
1987 	/*
1988 	 * Attempt to detect a close/fcntl race and recover by
1989 	 * releasing the lock that was just acquired.
1990 	 */
1991 	spin_lock(&current->files->file_lock);
1992 	f = fcheck(fd);
1993 	spin_unlock(&current->files->file_lock);
1994 	if (!error && f != filp && flock.l_type != F_UNLCK) {
1995 		flock.l_type = F_UNLCK;
1996 		goto again;
1997 	}
1998 
1999 out:
2000 	locks_free_lock(file_lock);
2001 	return error;
2002 }
2003 #endif /* BITS_PER_LONG == 32 */
2004 
2005 /*
2006  * This function is called when the file is being removed
2007  * from the task's fd array.  POSIX locks belonging to this task
2008  * are deleted at this time.
2009  */
2010 void locks_remove_posix(struct file *filp, fl_owner_t owner)
2011 {
2012 	struct file_lock lock;
2013 
2014 	/*
2015 	 * If there are no locks held on this file, we don't need to call
2016 	 * posix_lock_file().  Another process could be setting a lock on this
2017 	 * file at the same time, but we wouldn't remove that lock anyway.
2018 	 */
2019 	if (!filp->f_path.dentry->d_inode->i_flock)
2020 		return;
2021 
2022 	lock.fl_type = F_UNLCK;
2023 	lock.fl_flags = FL_POSIX | FL_CLOSE;
2024 	lock.fl_start = 0;
2025 	lock.fl_end = OFFSET_MAX;
2026 	lock.fl_owner = owner;
2027 	lock.fl_pid = current->tgid;
2028 	lock.fl_file = filp;
2029 	lock.fl_ops = NULL;
2030 	lock.fl_lmops = NULL;
2031 
2032 	vfs_lock_file(filp, F_SETLK, &lock, NULL);
2033 
2034 	if (lock.fl_ops && lock.fl_ops->fl_release_private)
2035 		lock.fl_ops->fl_release_private(&lock);
2036 }
2037 
2038 EXPORT_SYMBOL(locks_remove_posix);
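
/*
 * Userspace consequence (illustrative; the path is made up and error
 * handling is omitted): because the locks removed here belong to the
 * process rather than to one descriptor, closing any descriptor for the
 * file drops them, including a lock that was taken through a different
 * descriptor:
 *
 *	int fd1 = open("/tmp/data", O_RDWR);
 *	int fd2 = open("/tmp/data", O_RDONLY);	(same file, second fd)
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
 *
 *	fcntl(fd1, F_SETLK, &fl);	(whole-file write lock via fd1)
 *	close(fd2);			(also releases the lock set via fd1)
 */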
2039 
2040 /*
2041  * This function is called on the last close of an open file.
2042  */
2043 void locks_remove_flock(struct file *filp)
2044 {
2045 	struct inode * inode = filp->f_path.dentry->d_inode;
2046 	struct file_lock *fl;
2047 	struct file_lock **before;
2048 
2049 	if (!inode->i_flock)
2050 		return;
2051 
2052 	if (filp->f_op && filp->f_op->flock) {
2053 		struct file_lock fl = {
2054 			.fl_pid = current->tgid,
2055 			.fl_file = filp,
2056 			.fl_flags = FL_FLOCK,
2057 			.fl_type = F_UNLCK,
2058 			.fl_end = OFFSET_MAX,
2059 		};
2060 		filp->f_op->flock(filp, F_SETLKW, &fl);
2061 		if (fl.fl_ops && fl.fl_ops->fl_release_private)
2062 			fl.fl_ops->fl_release_private(&fl);
2063 	}
2064 
2065 	lock_flocks();
2066 	before = &inode->i_flock;
2067 
2068 	while ((fl = *before) != NULL) {
2069 		if (fl->fl_file == filp) {
2070 			if (IS_FLOCK(fl)) {
2071 				locks_delete_lock(before);
2072 				continue;
2073 			}
2074 			if (IS_LEASE(fl)) {
2075 				lease_modify(before, F_UNLCK);
2076 				continue;
2077 			}
2078 			/* Neither an flock lock nor a lease: should never happen. */
2079 			BUG();
2080 		}
2081 		before = &fl->fl_next;
2082 	}
2083 	unlock_flocks();
2084 }
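
/*
 * Userspace counterpart (illustrative; assumes <sys/file.h> and a made-up
 * path): an flock() lock taken like this needs no explicit LOCK_UN; it is
 * dropped by the cleanup above when the file is closed for the last time.
 *
 *	int fd = open("/var/lock/example.lock", O_RDONLY | O_CREAT, 0600);
 *
 *	if (fd >= 0 && flock(fd, LOCK_EX) == 0) {
 *		(... exclusive section ...)
 *		close(fd);	(last close: the lock goes away here)
 *	}
 */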
2085 
2086 /**
2087  *	posix_unblock_lock - stop waiting for a file lock
2088  *	@filp: how the file was opened
2089  *	@waiter: the lock which was waiting
2090  *
2091  *	lockd needs to block waiting for locks.
2092  */
2093 int
2094 posix_unblock_lock(struct file *filp, struct file_lock *waiter)
2095 {
2096 	int status = 0;
2097 
2098 	lock_flocks();
2099 	if (waiter->fl_next)
2100 		__locks_delete_block(waiter);
2101 	else
2102 		status = -ENOENT;
2103 	unlock_flocks();
2104 	return status;
2105 }
2106 
2107 EXPORT_SYMBOL(posix_unblock_lock);
2108 
2109 /**
2110  * vfs_cancel_lock - file byte range unblock lock
2111  * @filp: The file to apply the unblock to
2112  * @fl: The lock to be unblocked
2113  *
2114  * Used by lock managers to cancel blocked requests
2115  */
2116 int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2117 {
2118 	if (filp->f_op && filp->f_op->lock)
2119 		return filp->f_op->lock(filp, F_CANCELLK, fl);
2120 	return 0;
2121 }
2122 
2123 EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2124 
2125 #ifdef CONFIG_PROC_FS
2126 #include <linux/proc_fs.h>
2127 #include <linux/seq_file.h>
2128 
2129 static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2130 			    loff_t id, char *pfx)
2131 {
2132 	struct inode *inode = NULL;
2133 	unsigned int fl_pid;
2134 
2135 	if (fl->fl_nspid)
2136 		fl_pid = pid_vnr(fl->fl_nspid);
2137 	else
2138 		fl_pid = fl->fl_pid;
2139 
2140 	if (fl->fl_file != NULL)
2141 		inode = fl->fl_file->f_path.dentry->d_inode;
2142 
2143 	seq_printf(f, "%lld:%s ", id, pfx);
2144 	if (IS_POSIX(fl)) {
2145 		seq_printf(f, "%6s %s ",
2146 			     (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
2147 			     (inode == NULL) ? "*NOINODE*" :
2148 			     mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
2149 	} else if (IS_FLOCK(fl)) {
2150 		if (fl->fl_type & LOCK_MAND) {
2151 			seq_printf(f, "FLOCK  MSNFS     ");
2152 		} else {
2153 			seq_printf(f, "FLOCK  ADVISORY  ");
2154 		}
2155 	} else if (IS_LEASE(fl)) {
2156 		seq_printf(f, "LEASE  ");
2157 		if (fl->fl_type & F_INPROGRESS)
2158 			seq_printf(f, "BREAKING  ");
2159 		else if (fl->fl_file)
2160 			seq_printf(f, "ACTIVE    ");
2161 		else
2162 			seq_printf(f, "BREAKER   ");
2163 	} else {
2164 		seq_printf(f, "UNKNOWN UNKNOWN  ");
2165 	}
2166 	if (fl->fl_type & LOCK_MAND) {
2167 		seq_printf(f, "%s ",
2168 			       (fl->fl_type & LOCK_READ)
2169 			       ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
2170 			       : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
2171 	} else {
2172 		seq_printf(f, "%s ",
2173 			       (fl->fl_type & F_INPROGRESS)
2174 			       ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ "
2175 			       : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ ");
2176 	}
2177 	if (inode) {
2178 #ifdef WE_CAN_BREAK_LSLK_NOW
2179 		seq_printf(f, "%d %s:%ld ", fl_pid,
2180 				inode->i_sb->s_id, inode->i_ino);
2181 #else
2182 		/* userspace relies on this representation of dev_t ;-( */
2183 		seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
2184 				MAJOR(inode->i_sb->s_dev),
2185 				MINOR(inode->i_sb->s_dev), inode->i_ino);
2186 #endif
2187 	} else {
2188 		seq_printf(f, "%d <none>:0 ", fl_pid);
2189 	}
2190 	if (IS_POSIX(fl)) {
2191 		if (fl->fl_end == OFFSET_MAX)
2192 			seq_printf(f, "%Ld EOF\n", fl->fl_start);
2193 		else
2194 			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2195 	} else {
2196 		seq_printf(f, "0 EOF\n");
2197 	}
2198 }
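
/*
 * Putting the seq_printf() calls above together, typical /proc/locks lines
 * look like this (all values are illustrative):
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:01:131090 0 EOF
 *	2: FLOCK  ADVISORY  WRITE 5678 08:01:131091 0 EOF
 *	2: -> FLOCK  ADVISORY  WRITE 5679 08:01:131091 0 EOF
 *
 * i.e. "id: [-> ]class access type pid major:minor:inode start end", where
 * "-> " marks a request blocked on the entry above it and "EOF" stands for
 * a range running to OFFSET_MAX.
 */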
2199 
2200 static int locks_show(struct seq_file *f, void *v)
2201 {
2202 	struct file_lock *fl, *bfl;
2203 
2204 	fl = list_entry(v, struct file_lock, fl_link);
2205 
2206 	lock_get_status(f, fl, *((loff_t *)f->private), "");
2207 
2208 	list_for_each_entry(bfl, &fl->fl_block, fl_block)
2209 		lock_get_status(f, bfl, *((loff_t *)f->private), " ->");
2210 
2211 	return 0;
2212 }
2213 
2214 static void *locks_start(struct seq_file *f, loff_t *pos)
2215 {
2216 	loff_t *p = f->private;
2217 
2218 	lock_flocks();
2219 	*p = (*pos + 1);
2220 	return seq_list_start(&file_lock_list, *pos);
2221 }
2222 
2223 static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2224 {
2225 	loff_t *p = f->private;
2226 	++*p;
2227 	return seq_list_next(v, &file_lock_list, pos);
2228 }
2229 
2230 static void locks_stop(struct seq_file *f, void *v)
2231 {
2232 	unlock_flocks();
2233 }
2234 
2235 static const struct seq_operations locks_seq_operations = {
2236 	.start	= locks_start,
2237 	.next	= locks_next,
2238 	.stop	= locks_stop,
2239 	.show	= locks_show,
2240 };
2241 
2242 static int locks_open(struct inode *inode, struct file *filp)
2243 {
2244 	return seq_open_private(filp, &locks_seq_operations, sizeof(loff_t));
2245 }
2246 
2247 static const struct file_operations proc_locks_operations = {
2248 	.open		= locks_open,
2249 	.read		= seq_read,
2250 	.llseek		= seq_lseek,
2251 	.release	= seq_release_private,
2252 };
2253 
2254 static int __init proc_locks_init(void)
2255 {
2256 	proc_create("locks", 0, NULL, &proc_locks_operations);
2257 	return 0;
2258 }
2259 module_init(proc_locks_init);
2260 #endif
2261 
2262 /**
2263  *	lock_may_read - checks that the region is free of locks
2264  *	@inode: the inode that is being read
2265  *	@start: the first byte to read
2266  *	@len: the number of bytes to read
2267  *
2268  *	Emulates Windows locking requirements.  Whole-file
2269  *	mandatory locks (share modes) can prohibit a read and
2270  *	byte-range POSIX locks can prohibit a read if they overlap.
2271  *
2272  *	N.B. this function is only ever called
2273  *	from knfsd and ownership of locks is never checked.
2274  */
2275 int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
2276 {
2277 	struct file_lock *fl;
2278 	int result = 1;
2279 	lock_flocks();
2280 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2281 		if (IS_POSIX(fl)) {
2282 			if (fl->fl_type == F_RDLCK)
2283 				continue;
2284 			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2285 				continue;
2286 		} else if (IS_FLOCK(fl)) {
2287 			if (!(fl->fl_type & LOCK_MAND))
2288 				continue;
2289 			if (fl->fl_type & LOCK_READ)
2290 				continue;
2291 		} else
2292 			continue;
2293 		result = 0;
2294 		break;
2295 	}
2296 	unlock_flocks();
2297 	return result;
2298 }
2299 
2300 EXPORT_SYMBOL(lock_may_read);
2301 
2302 /**
2303  *	lock_may_write - checks that the region is free of locks
2304  *	@inode: the inode that is being written
2305  *	@start: the first byte to write
2306  *	@len: the number of bytes to write
2307  *
2308  *	Emulates Windows locking requirements.  Whole-file
2309  *	mandatory locks (share modes) can prohibit a write and
2310  *	byte-range POSIX locks can prohibit a write if they overlap.
2311  *
2312  *	N.B. this function is only ever called
2313  *	from knfsd and ownership of locks is never checked.
2314  */
2315 int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
2316 {
2317 	struct file_lock *fl;
2318 	int result = 1;
2319 	lock_flocks();
2320 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2321 		if (IS_POSIX(fl)) {
2322 			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2323 				continue;
2324 		} else if (IS_FLOCK(fl)) {
2325 			if (!(fl->fl_type & LOCK_MAND))
2326 				continue;
2327 			if (fl->fl_type & LOCK_WRITE)
2328 				continue;
2329 		} else
2330 			continue;
2331 		result = 0;
2332 		break;
2333 	}
2334 	unlock_flocks();
2335 	return result;
2336 }
2337 
2338 EXPORT_SYMBOL(lock_may_write);
2339 
2340 static int __init filelock_init(void)
2341 {
2342 	filelock_cache = kmem_cache_create("file_lock_cache",
2343 			sizeof(struct file_lock), 0, SLAB_PANIC,
2344 			init_once);
2345 	return 0;
2346 }
2347 
2348 core_initcall(filelock_init);
2349