/*
 *  linux/fs/locks.c
 *
 *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
 *  Doug Evans (dje@spiff.uucp), August 07, 1992
 *
 *  Deadlock detection added.
 *  FIXME: one thing isn't handled yet:
 *	- mandatory locks (requires lots of changes elsewhere)
 *  Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
 *
 *  Miscellaneous edits, and a total rewrite of posix_lock_file() code.
 *  Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
 *
 *  Converted file_lock_table to a linked list from an array, which eliminates
 *  the limits on how many active file locks are open.
 *  Chad Page (pageone@netcom.com), November 27, 1994
 *
 *  Removed dependency on file descriptors. dup()'ed file descriptors now
 *  get the same locks as the original file descriptors, and a close() on
 *  any file descriptor removes ALL the locks on the file for the current
 *  process. Since locks still depend on the process id, locks are inherited
 *  after an exec() but not after a fork(). This agrees with POSIX, and both
 *  BSD and SVR4 practice.
 *  Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
 *
 *  Scrapped free list which is redundant now that we allocate locks
 *  dynamically with kmalloc()/kfree().
 *  Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
 *
 *  Implemented two lock personalities - FL_FLOCK and FL_POSIX.
 *
 *  FL_POSIX locks are created with calls to fcntl() and lockf() through the
 *  fcntl() system call. They have the semantics described above.
 *
 *  FL_FLOCK locks are created with calls to flock(), through the flock()
 *  system call, which is new. Old C libraries implement flock() via fcntl()
 *  and will continue to use the old, broken implementation.
 *
 *  FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
 *  with a file pointer (filp). As a result they can be shared by a parent
 *  process and its children after a fork(). They are removed when the last
 *  file descriptor referring to the file pointer is closed (unless explicitly
 *  unlocked).
 *
 *  FL_FLOCK locks never deadlock, an existing lock is always removed before
 *  upgrading from shared to exclusive (or vice versa). When this happens
 *  any processes blocked by the current lock are woken up and allowed to
 *  run before the new lock is applied.
 *  Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
 *
 *  Removed some race conditions in flock_lock_file(), marked other possible
 *  races. Just grep for FIXME to see them.
 *  Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
 *
 *  Addressed Dmitry's concerns. Deadlock checking no longer recursive.
 *  Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
 *  once we've checked for blocking and deadlocking.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
 *
 *  Initial implementation of mandatory locks. SunOS turned out to be
 *  a rotten model, so I implemented the "obvious" semantics.
 *  See 'Documentation/filesystems/mandatory-locking.txt' for details.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
 *
 *  Don't allow mandatory locks on mmap()'ed files. Added simple functions to
 *  check if a file has mandatory locks, used by mmap(), open() and creat() to
 *  see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
 *  Manual, Section 2.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
 *
 *  Tidied up block list handling. Added '/proc/locks' interface.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
 *
 *  Fixed deadlock condition for pathological code that mixes calls to
 *  flock() and fcntl().
 *  Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
 *
 *  Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
 *  for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
 *  guarantee sensible behaviour in the case where file system modules might
 *  be compiled with different options than the kernel itself.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 *  Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
 *  (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 *  Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
 *  locks. Changed process synchronisation to avoid dereferencing locks that
 *  have already been freed.
 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
 *
 *  Made the block list a circular list to minimise searching in the list.
 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
 *
 *  Made mandatory locking a mount option. Default is not to allow mandatory
 *  locking.
 *  Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
 *
 *  Some adaptations for NFS support.
 *  Olaf Kirch (okir@monad.swb.de), Dec 1996,
 *
 *  Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
 *
 *  Use slab allocator instead of kmalloc/kfree.
 *  Use generic list implementation from <linux/list.h>.
 *  Sped up posix_locks_deadlock by only considering blocked locks.
 *  Matthew Wilcox <willy@debian.org>, March, 2000.
 *
 *  Leases and LOCK_MAND
 *  Matthew Wilcox <willy@debian.org>, June, 2000.
 *  Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
 */

#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>

#include <asm/uaccess.h>

#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & FL_LEASE)

int leases_enable = 1;
int lease_break_time = 45;

#define for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)

static LIST_HEAD(file_lock_list);
static LIST_HEAD(blocked_list);
static DEFINE_SPINLOCK(file_lock_lock);

/*
 * Protects the two list heads above, plus the inode->i_flock list
 */
void lock_flocks(void)
{
	spin_lock(&file_lock_lock);
}
EXPORT_SYMBOL_GPL(lock_flocks);

void unlock_flocks(void)
{
	spin_unlock(&file_lock_lock);
}
EXPORT_SYMBOL_GPL(unlock_flocks);

static struct kmem_cache *filelock_cache __read_mostly;

static void locks_init_lock_heads(struct file_lock *fl)
{
	INIT_LIST_HEAD(&fl->fl_link);
	INIT_LIST_HEAD(&fl->fl_block);
	init_waitqueue_head(&fl->fl_wait);
}
/* Allocate an empty lock structure. */
struct file_lock *locks_alloc_lock(void)
{
	struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);

	if (fl)
		locks_init_lock_heads(fl);

	return fl;
}
EXPORT_SYMBOL_GPL(locks_alloc_lock);

void locks_release_private(struct file_lock *fl)
{
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_release_private)
			fl->fl_ops->fl_release_private(fl);
		fl->fl_ops = NULL;
	}
	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_release_private)
			fl->fl_lmops->lm_release_private(fl);
		fl->fl_lmops = NULL;
	}
}
EXPORT_SYMBOL_GPL(locks_release_private);

/* Free a lock which is not in use. */
void locks_free_lock(struct file_lock *fl)
{
	BUG_ON(waitqueue_active(&fl->fl_wait));
	BUG_ON(!list_empty(&fl->fl_block));
	BUG_ON(!list_empty(&fl->fl_link));

	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}
EXPORT_SYMBOL(locks_free_lock);

void locks_init_lock(struct file_lock *fl)
{
	memset(fl, 0, sizeof(struct file_lock));
	locks_init_lock_heads(fl);
}
EXPORT_SYMBOL(locks_init_lock);

static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
{
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_copy_lock)
			fl->fl_ops->fl_copy_lock(new, fl);
		new->fl_ops = fl->fl_ops;
	}
	if (fl->fl_lmops)
		new->fl_lmops = fl->fl_lmops;
}

/*
 * Initialize a new lock from an existing file_lock structure.
 */
void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
	new->fl_file = NULL;
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_ops = NULL;
	new->fl_lmops = NULL;
}
EXPORT_SYMBOL(__locks_copy_lock);

void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	locks_release_private(new);

	__locks_copy_lock(new, fl);
	new->fl_file = fl->fl_file;
	new->fl_ops = fl->fl_ops;
	new->fl_lmops = fl->fl_lmops;

	locks_copy_private(new, fl);
}
EXPORT_SYMBOL(locks_copy_lock);

static inline int flock_translate_cmd(int cmd)
{
	if (cmd & LOCK_MAND)
		return cmd & (LOCK_MAND | LOCK_RW);
	switch (cmd) {
	case LOCK_SH:
		return F_RDLCK;
	case LOCK_EX:
		return F_WRLCK;
	case LOCK_UN:
		return F_UNLCK;
	}
	return -EINVAL;
}

/* Fill in a file_lock structure with an appropriate FLOCK lock. */
static int flock_make_lock(struct file *filp, struct file_lock **lock,
		unsigned int cmd)
{
	struct file_lock *fl;
	int type = flock_translate_cmd(cmd);
	if (type < 0)
		return type;

	fl = locks_alloc_lock();
	if (fl == NULL)
		return -ENOMEM;

	fl->fl_file = filp;
	fl->fl_pid = current->tgid;
	fl->fl_flags = FL_FLOCK;
	fl->fl_type = type;
	fl->fl_end = OFFSET_MAX;

	*lock = fl;
	return 0;
}
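
/*
 * Illustrative sketch (not part of this file): how userspace typically
 * reaches flock_make_lock() via the flock() system call. The filename is
 * hypothetical and error handling is abbreviated.
 *
 *	#include <sys/file.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/tmp/example.dat", O_RDWR);
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1)
 *		;			// lock busy: errno == EWOULDBLOCK
 *	// ... use the file ...
 *	flock(fd, LOCK_UN);		// or simply close the last fd
 */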
static int assign_type(struct file_lock *fl, int type)
{
	switch (type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 * style lock.
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	off_t start, end;

	switch (l->l_whence) {
	case SEEK_SET:
		start = 0;
		break;
	case SEEK_CUR:
		start = filp->f_pos;
		break;
	case SEEK_END:
		start = i_size_read(filp->f_path.dentry->d_inode);
		break;
	default:
		return -EINVAL;
	}

	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
	   POSIX-2001 defines it. */
	start += l->l_start;
	if (start < 0)
		return -EINVAL;
	fl->fl_end = OFFSET_MAX;
	if (l->l_len > 0) {
		end = start + l->l_len - 1;
		fl->fl_end = end;
	} else if (l->l_len < 0) {
		end = start - 1;
		fl->fl_end = end;
		start += l->l_len;
		if (start < 0)
			return -EINVAL;
	}
	fl->fl_start = start;	/* we record the absolute position */
	if (fl->fl_end < fl->fl_start)
		return -EOVERFLOW;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}

#if BITS_PER_LONG == 32
static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	loff_t start;

	switch (l->l_whence) {
	case SEEK_SET:
		start = 0;
		break;
	case SEEK_CUR:
		start = filp->f_pos;
		break;
	case SEEK_END:
		start = i_size_read(filp->f_path.dentry->d_inode);
		break;
	default:
		return -EINVAL;
	}

	start += l->l_start;
	if (start < 0)
		return -EINVAL;
	fl->fl_end = OFFSET_MAX;
	if (l->l_len > 0) {
		fl->fl_end = start + l->l_len - 1;
	} else if (l->l_len < 0) {
		fl->fl_end = start - 1;
		start += l->l_len;
		if (start < 0)
			return -EINVAL;
	}
	fl->fl_start = start;	/* we record the absolute position */
	if (fl->fl_end < fl->fl_start)
		return -EOVERFLOW;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}
#endif
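
/*
 * Worked example for the conversion above (a sketch, the numbers are only
 * illustrative): with l_whence = SEEK_CUR, f_pos = 100, l_start = 20 and
 * l_len = -10, POSIX-2001 semantics give start = 120, then end = start - 1
 * = 119 and start += l_len = 110, i.e. the lock covers bytes [110, 119].
 * An l_len of 0 locks from start to end-of-file (fl_end = OFFSET_MAX).
 */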
/* default lease lock manager operations */
static void lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
}

static void lease_release_private_callback(struct file_lock *fl)
{
	if (!fl->fl_file)
		return;

	f_delown(fl->fl_file);
	fl->fl_file->f_owner.signum = 0;
}

static const struct lock_manager_operations lease_manager_ops = {
	.lm_break = lease_break_callback,
	.lm_release_private = lease_release_private_callback,
	.lm_change = lease_modify,
};

/*
 * Initialize a lease, use the default lock manager operations
 */
static int lease_init(struct file *filp, int type, struct file_lock *fl)
{
	if (assign_type(fl, type) != 0)
		return -EINVAL;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;

	fl->fl_file = filp;
	fl->fl_flags = FL_LEASE;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;
	fl->fl_ops = NULL;
	fl->fl_lmops = &lease_manager_ops;
	return 0;
}

/* Allocate a file_lock initialised to this type of lease */
static struct file_lock *lease_alloc(struct file *filp, int type)
{
	struct file_lock *fl = locks_alloc_lock();
	int error = -ENOMEM;

	if (fl == NULL)
		return ERR_PTR(error);

	error = lease_init(filp, type, fl);
	if (error) {
		locks_free_lock(fl);
		return ERR_PTR(error);
	}
	return fl;
}

/* Check if two locks overlap each other.
 */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
	return ((fl1->fl_end >= fl2->fl_start) &&
		(fl2->fl_end >= fl1->fl_start));
}

/*
 * Check whether two locks have the same owner.
 */
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
		return fl2->fl_lmops == fl1->fl_lmops &&
			fl1->fl_lmops->lm_compare_owner(fl1, fl2);
	return fl1->fl_owner == fl2->fl_owner;
}

/* Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
 */
static void __locks_delete_block(struct file_lock *waiter)
{
	list_del_init(&waiter->fl_block);
	list_del_init(&waiter->fl_link);
	waiter->fl_next = NULL;
}

/* Remove waiter from blocker's block list, taking the file_lock spinlock.
 */
static void locks_delete_block(struct file_lock *waiter)
{
	lock_flocks();
	__locks_delete_block(waiter);
	unlock_flocks();
}

/* Insert waiter into blocker's block list.
 * We use a circular list so that processes can be easily woken up in
 * the order they blocked. The documentation doesn't require this but
 * it seems like the reasonable thing to do.
 */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter)
{
	BUG_ON(!list_empty(&waiter->fl_block));
	list_add_tail(&waiter->fl_block, &blocker->fl_block);
	waiter->fl_next = blocker;
	if (IS_POSIX(blocker))
		list_add(&waiter->fl_link, &blocked_list);
}

/* Wake up processes blocked waiting for blocker.
 * If told to wait then schedule the processes until the block list
 * is empty, otherwise empty the block list ourselves.
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	while (!list_empty(&blocker->fl_block)) {
		struct file_lock *waiter;

		waiter = list_first_entry(&blocker->fl_block,
				struct file_lock, fl_block);
		__locks_delete_block(waiter);
		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
			waiter->fl_lmops->lm_notify(waiter);
		else
			wake_up(&waiter->fl_wait);
	}
}

/* Insert file lock fl into an inode's lock list at the position indicated
 * by pos. At the same time add the lock to the global file lock list.
 */
static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
{
	list_add(&fl->fl_link, &file_lock_list);

	fl->fl_nspid = get_pid(task_tgid(current));

	/* insert into file's list */
	fl->fl_next = *pos;
	*pos = fl;
}
/*
 * Delete a lock and then free it.
 * Wake up processes that are blocked waiting for this lock,
 * notify the FS that the lock has been cleared and
 * finally free the lock.
 */
static void locks_delete_lock(struct file_lock **thisfl_p)
{
	struct file_lock *fl = *thisfl_p;

	*thisfl_p = fl->fl_next;
	fl->fl_next = NULL;
	list_del_init(&fl->fl_link);

	fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
	if (fl->fl_fasync != NULL) {
		printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
		fl->fl_fasync = NULL;
	}

	if (fl->fl_nspid) {
		put_pid(fl->fl_nspid);
		fl->fl_nspid = NULL;
	}

	locks_wake_up_blocks(fl);
	locks_free_lock(fl);
}

/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 * checks for shared/exclusive status of overlapping locks.
 */
static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	if (sys_fl->fl_type == F_WRLCK)
		return 1;
	if (caller_fl->fl_type == F_WRLCK)
		return 1;
	return 0;
}

/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
 * checking before calling the locks_conflict().
 */
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* POSIX locks owned by the same process do not conflict with
	 * each other.
	 */
	if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl))
		return (0);

	/* Check whether they overlap */
	if (!locks_overlap(caller_fl, sys_fl))
		return 0;

	return (locks_conflict(caller_fl, sys_fl));
}

/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
 * checking before calling the locks_conflict().
 */
static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* FLOCK locks referring to the same filp do not conflict with
	 * each other.
	 */
	if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
		return (0);
	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
		return 0;

	return (locks_conflict(caller_fl, sys_fl));
}

void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;

	lock_flocks();
	for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
		if (!IS_POSIX(cfl))
			continue;
		if (posix_locks_conflict(fl, cfl))
			break;
	}
	if (cfl) {
		__locks_copy_lock(fl, cfl);
		if (cfl->fl_nspid)
			fl->fl_pid = pid_vnr(cfl->fl_nspid);
	} else
		fl->fl_type = F_UNLCK;
	unlock_flocks();
	return;
}
EXPORT_SYMBOL(posix_test_lock);
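
/*
 * Illustrative sketch (not part of this file): posix_test_lock() is what
 * ultimately answers a userspace F_GETLK query such as the following.
 * Error handling is abbreviated.
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,	// "could I write-lock ...
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,		// ... the whole file?"
 *	};
 *	fcntl(fd, F_GETLK, &fl);
 *	if (fl.l_type == F_UNLCK)
 *		;	// no conflicting lock found
 *	else
 *		;	// fl now describes a conflicting lock (fl.l_pid etc.)
 */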
/*
 * Deadlock detection:
 *
 * We attempt to detect deadlocks that are due purely to posix file
 * locks.
 *
 * We assume that a task can be waiting for at most one lock at a time.
 * So for any acquired lock, the process holding that lock may be
 * waiting on at most one other lock. That lock in turns may be held by
 * someone waiting for at most one other lock. Given a requested lock
 * caller_fl which is about to wait for a conflicting lock block_fl, we
 * follow this chain of waiters to ensure we are not about to create a
 * cycle.
 *
 * Since we do this before we ever put a process to sleep on a lock, we
 * are ensured that there is never a cycle; that is what guarantees that
 * the while() loop in posix_locks_deadlock() eventually completes.
 *
 * Note: the above assumption may not be true when handling lock
 * requests from a broken NFS client. It may also fail in the presence
 * of tasks (such as posix threads) sharing the same open file table.
 *
 * To handle those cases, we just bail out after a few iterations.
 */

#define MAX_DEADLK_ITERATIONS 10

/* Find a lock that the owner of the given block_fl is blocking on. */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
	struct file_lock *fl;

	list_for_each_entry(fl, &blocked_list, fl_link) {
		if (posix_same_owner(fl, block_fl))
			return fl->fl_next;
	}
	return NULL;
}

static int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	int i = 0;

	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;
		if (posix_same_owner(caller_fl, block_fl))
			return 1;
	}
	return 0;
}
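
/*
 * Worked example of the check above (a sketch): task A holds lock L1 and
 * is already blocked waiting for L2; task B, which holds L2, now requests
 * L1. posix_locks_deadlock() runs with B's request as caller_fl and L1 as
 * block_fl; what_owner_is_waiting_for() maps L1's owner (A) to the lock A
 * is waiting for (L2), posix_same_owner() then finds that L2 belongs to B
 * itself, and the request fails with -EDEADLK instead of sleeping forever.
 */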
/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
 * after any leases, but before any posix locks.
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
static int flock_lock_file(struct file *filp, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	struct file_lock **before;
	struct inode * inode = filp->f_path.dentry->d_inode;
	int error = 0;
	int found = 0;

	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
		new_fl = locks_alloc_lock();
		if (!new_fl)
			return -ENOMEM;
	}

	lock_flocks();
	if (request->fl_flags & FL_ACCESS)
		goto find_conflict;

	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (filp != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;
		found = 1;
		locks_delete_lock(before);
		break;
	}

	if (request->fl_type == F_UNLCK) {
		if ((request->fl_flags & FL_EXISTS) && !found)
			error = -ENOENT;
		goto out;
	}

	/*
	 * If a higher-priority process was blocked on the old file lock,
	 * give it the opportunity to lock the file.
	 */
	if (found) {
		unlock_flocks();
		cond_resched();
		lock_flocks();
	}

find_conflict:
	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (!flock_locks_conflict(request, fl))
			continue;
		error = -EAGAIN;
		if (!(request->fl_flags & FL_SLEEP))
			goto out;
		error = FILE_LOCK_DEFERRED;
		locks_insert_block(fl, request);
		goto out;
	}
	if (request->fl_flags & FL_ACCESS)
		goto out;
	locks_copy_lock(new_fl, request);
	locks_insert_lock(before, new_fl);
	new_fl = NULL;
	error = 0;

out:
	unlock_flocks();
	if (new_fl)
		locks_free_lock(new_fl);
	return error;
}

static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
{
	struct file_lock *fl;
	struct file_lock *new_fl = NULL;
	struct file_lock *new_fl2 = NULL;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock **before;
	int error, added = 0;

	/*
	 * We may need two file_lock structures for this operation,
	 * so we get them in advance to avoid races.
	 *
	 * In some cases we can be sure, that no new locks will be needed
	 */
	if (!(request->fl_flags & FL_ACCESS) &&
	    (request->fl_type != F_UNLCK ||
	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
		new_fl = locks_alloc_lock();
		new_fl2 = locks_alloc_lock();
	}

	lock_flocks();
	if (request->fl_type != F_UNLCK) {
		for_each_lock(inode, before) {
			fl = *before;
			if (!IS_POSIX(fl))
				continue;
			if (!posix_locks_conflict(request, fl))
				continue;
			if (conflock)
				__locks_copy_lock(conflock, fl);
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			error = -EDEADLK;
			if (posix_locks_deadlock(request, fl))
				goto out;
			error = FILE_LOCK_DEFERRED;
			locks_insert_block(fl, request);
			goto out;
		}
	}

	/* If we're just looking for a conflict, we're done. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	/*
	 * Find the first old lock with the same owner as the new lock.
	 */

	before = &inode->i_flock;

	/* First skip locks owned by other processes. */
	while ((fl = *before) && (!IS_POSIX(fl) ||
				  !posix_same_owner(request, fl))) {
		before = &fl->fl_next;
	}

	/* Process locks with this owner. */
	while ((fl = *before) && posix_same_owner(request, fl)) {
		/* Detect adjacent or overlapping regions (if same lock type)
		 */
		if (request->fl_type == fl->fl_type) {
			/* In all comparisons of start vs end, use
			 * "start - 1" rather than "end + 1". If end
			 * is OFFSET_MAX, end + 1 will become negative.
			 */
			if (fl->fl_end < request->fl_start - 1)
				goto next_lock;
			/* If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
			if (fl->fl_start - 1 > request->fl_end)
				break;

			/* If we come here, the new and old lock are of the
			 * same type and adjacent or overlapping. Make one
			 * lock yielding from the lower start address of both
			 * locks to the higher end address.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock(before);
				continue;
			}
			request = fl;
			added = 1;
		}
		else {
			/* Processing for different lock types is a bit
			 * more complex.
			 */
			if (fl->fl_end < request->fl_start)
				goto next_lock;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = 1;
			if (fl->fl_start < request->fl_start)
				left = fl;
			/* If the next lock in the list has a higher end
			 * address than the new one, insert the new one here.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/* The new lock completely replaces an old
				 * one (This may happen several times).
				 */
				if (added) {
					locks_delete_lock(before);
					continue;
				}
				/* Replace the old lock with the new one.
				 * Wake up anybody waiting for the old one,
				 * as the change in lock type might satisfy
				 * their needs.
				 */
				locks_wake_up_blocks(fl);
				fl->fl_start = request->fl_start;
				fl->fl_end = request->fl_end;
				fl->fl_type = request->fl_type;
				locks_release_private(fl);
				locks_copy_private(fl, request);
				request = fl;
				added = 1;
			}
		}
		/* Go on to next lock.
		 */
next_lock:
		before = &fl->fl_next;
	}

	/*
	 * The above code only modifies existing locks in case of
	 * merging or replacing. If new lock(s) need to be inserted
	 * all modifications are done below this point, so it's still
	 * safe to bail out.
	 */
	error = -ENOLCK; /* "no luck" */
	if (right && left == right && !new_fl2)
		goto out;

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK) {
			if (request->fl_flags & FL_EXISTS)
				error = -ENOENT;
			goto out;
		}

		if (!new_fl) {
			error = -ENOLCK;
			goto out;
		}
		locks_copy_lock(new_fl, request);
		locks_insert_lock(before, new_fl);
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* The new lock breaks the old one in two pieces,
			 * so we have to use the second new lock.
			 */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock(before, left);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
out:
	unlock_flocks();
	/*
	 * Free any unused locks.
	 */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	return error;
}

/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 * @conflock: Place to return a copy of the conflicting lock, if found.
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
int posix_lock_file(struct file *filp, struct file_lock *fl,
			struct file_lock *conflock)
{
	return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);

/**
 * posix_lock_file_wait - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 */
int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = posix_lock_file(filp, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}
EXPORT_SYMBOL(posix_lock_file_wait);
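
/*
 * Worked example of the splitting logic in __posix_lock_file() above
 * (a sketch): if a process holds a write lock on bytes [0, 99] and then
 * unlocks [40, 59], the range splits into a "left" piece [0, 39] and a
 * "right" piece [60, 99]. That second piece needs an extra file_lock,
 * supplied by new_fl2, which is why both spare structures are allocated
 * up front, before any existing state is modified.
 */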
/**
 * locks_mandatory_locked - Check for an active lock
 * @inode: the file to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from locks_verify_locked() only.
 */
int locks_mandatory_locked(struct inode *inode)
{
	fl_owner_t owner = current->files;
	struct file_lock *fl;

	/*
	 * Search the lock list for this inode for any POSIX locks.
	 */
	lock_flocks();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!IS_POSIX(fl))
			continue;
		if (fl->fl_owner != owner)
			break;
	}
	unlock_flocks();
	return fl ? -EAGAIN : 0;
}

/**
 * locks_mandatory_area - Check for a conflicting lock
 * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ
 *		for shared
 * @inode: the file to check
 * @filp: how the file was opened (if it was)
 * @offset: start of area to check
 * @count: length of area to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from rw_verify_area() and
 * locks_verify_truncate().
 */
int locks_mandatory_area(int read_write, struct inode *inode,
			 struct file *filp, loff_t offset,
			 size_t count)
{
	struct file_lock fl;
	int error;

	locks_init_lock(&fl);
	fl.fl_owner = current->files;
	fl.fl_pid = current->tgid;
	fl.fl_file = filp;
	fl.fl_flags = FL_POSIX | FL_ACCESS;
	if (filp && !(filp->f_flags & O_NONBLOCK))
		fl.fl_flags |= FL_SLEEP;
	fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
	fl.fl_start = offset;
	fl.fl_end = offset + count - 1;

	for (;;) {
		error = __posix_lock_file(inode, &fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
		if (!error) {
			/*
			 * If we've been sleeping someone might have
			 * changed the permissions behind our back.
			 */
			if (__mandatory_lock(inode))
				continue;
		}

		locks_delete_block(&fl);
		break;
	}

	return error;
}
EXPORT_SYMBOL(locks_mandatory_area);
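
/*
 * For reference, a sketch of the userspace setup the checks above depend
 * on: mandatory locking only takes effect when the filesystem is mounted
 * with "-o mand" and the file has the setgid bit set with group execute
 * cleared, e.g.:
 *
 *	# mount -o mand /dev/sda1 /mnt
 *	$ chmod g+s,g-x /mnt/data
 *
 * After that, a conflicting read()/write() blocks (or fails with -EAGAIN
 * for O_NONBLOCK opens) instead of the lock being merely advisory.
 */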
/* We already had a lease on this file; just change its type */
int lease_modify(struct file_lock **before, int arg)
{
	struct file_lock *fl = *before;
	int error = assign_type(fl, arg);

	if (error)
		return error;
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK)
		locks_delete_lock(before);
	return 0;
}
EXPORT_SYMBOL(lease_modify);

static void time_out_leases(struct inode *inode)
{
	struct file_lock **before;
	struct file_lock *fl;

	before = &inode->i_flock;
	while ((fl = *before) && IS_LEASE(fl) && (fl->fl_type & F_INPROGRESS)) {
		if ((fl->fl_break_time == 0)
				|| time_before(jiffies, fl->fl_break_time)) {
			before = &fl->fl_next;
			continue;
		}
		lease_modify(before, fl->fl_type & ~F_INPROGRESS);
		if (fl == *before)	/* lease_modify may have freed fl */
			before = &fl->fl_next;
	}
}

/**
 * __break_lease - revoke all outstanding leases on file
 * @inode: the inode of the file to return
 * @mode: the open mode (read or write)
 *
 * break_lease (inlined for speed) has checked there already is at least
 * some kind of lock (maybe a lease) on this file. Leases are broken on
 * a call to open() or truncate(). This function can sleep unless you
 * specified %O_NONBLOCK to your open().
 */
int __break_lease(struct inode *inode, unsigned int mode)
{
	int error = 0, future;
	struct file_lock *new_fl, *flock;
	struct file_lock *fl;
	unsigned long break_time;
	int i_have_this_lease = 0;
	int want_write = (mode & O_ACCMODE) != O_RDONLY;

	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);

	lock_flocks();

	time_out_leases(inode);

	flock = inode->i_flock;
	if ((flock == NULL) || !IS_LEASE(flock))
		goto out;

	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
		if (fl->fl_owner == current->files)
			i_have_this_lease = 1;

	if (want_write) {
		/* If we want write access, we have to revoke any lease. */
		future = F_UNLCK | F_INPROGRESS;
	} else if (flock->fl_type & F_INPROGRESS) {
		/* If the lease is already being broken, we just leave it */
		future = flock->fl_type;
	} else if (flock->fl_type & F_WRLCK) {
		/* Downgrade the exclusive lease to a read-only lease. */
		future = F_RDLCK | F_INPROGRESS;
	} else {
		/* the existing lease was read-only, so we can read too. */
		goto out;
	}

	if (IS_ERR(new_fl) && !i_have_this_lease
			&& ((mode & O_NONBLOCK) == 0)) {
		error = PTR_ERR(new_fl);
		goto out;
	}

	break_time = 0;
	if (lease_break_time > 0) {
		break_time = jiffies + lease_break_time * HZ;
		if (break_time == 0)
			break_time++;	/* so that 0 means no break time */
	}

	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
		if (fl->fl_type != future) {
			fl->fl_type = future;
			fl->fl_break_time = break_time;
			/* lease must have lmops break callback */
			fl->fl_lmops->lm_break(fl);
		}
	}

	if (i_have_this_lease || (mode & O_NONBLOCK)) {
		error = -EWOULDBLOCK;
		goto out;
	}

restart:
	break_time = flock->fl_break_time;
	if (break_time != 0) {
		break_time -= jiffies;
		if (break_time == 0)
			break_time++;
	}
	locks_insert_block(flock, new_fl);
	unlock_flocks();
	error = wait_event_interruptible_timeout(new_fl->fl_wait,
						!new_fl->fl_next, break_time);
	lock_flocks();
	__locks_delete_block(new_fl);
	if (error >= 0) {
		if (error == 0)
			time_out_leases(inode);
		/* Wait for the next lease that has not been broken yet */
		for (flock = inode->i_flock; flock && IS_LEASE(flock);
				flock = flock->fl_next) {
			if (flock->fl_type & F_INPROGRESS)
				goto restart;
		}
		error = 0;
	}

out:
	unlock_flocks();
	if (!IS_ERR(new_fl))
		locks_free_lock(new_fl);
	return error;
}
EXPORT_SYMBOL(__break_lease);
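
/*
 * Note on the tunables used above (for illustration): leases_enable and
 * lease_break_time are exposed as sysctls, so an administrator can e.g.
 * shorten the grace period a lease holder gets before its lease is
 * forcibly broken, or forbid new leases entirely:
 *
 *	# echo 10 > /proc/sys/fs/lease-break-time
 *	# echo 0  > /proc/sys/fs/leases-enable
 */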
/**
 * lease_get_mtime - get the last modified time of an inode
 * @inode: the inode
 * @time: pointer to a timespec which will contain the last modified time
 *
 * This is to force NFS clients to flush their caches for files with
 * exclusive leases. The justification is that if someone has an
 * exclusive lease, then they could be modifying it.
 */
void lease_get_mtime(struct inode *inode, struct timespec *time)
{
	struct file_lock *flock = inode->i_flock;
	if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK))
		*time = current_fs_time(inode->i_sb);
	else
		*time = inode->i_mtime;
}
EXPORT_SYMBOL(lease_get_mtime);

/**
 * fcntl_getlease - Enquire what lease is currently active
 * @filp: the file
 *
 * The value returned by this function will be one of
 * (if no lease break is pending):
 *
 * %F_RDLCK to indicate a shared lease is held.
 *
 * %F_WRLCK to indicate an exclusive lease is held.
 *
 * %F_UNLCK to indicate no lease is held.
 *
 * (if a lease break is pending):
 *
 * %F_RDLCK to indicate an exclusive lease needs to be
 *	changed to a shared lease (or removed).
 *
 * %F_UNLCK to indicate the lease needs to be removed.
 *
 * XXX: sfr & willy disagree over whether F_INPROGRESS
 * should be returned to userspace.
 */
int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	int type = F_UNLCK;

	lock_flocks();
	time_out_leases(filp->f_path.dentry->d_inode);
	for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl);
			fl = fl->fl_next) {
		if (fl->fl_file == filp) {
			type = fl->fl_type & ~F_INPROGRESS;
			break;
		}
	}
	unlock_flocks();
	return type;
}
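
/*
 * Illustrative sketch (not part of this file): the usual userspace pairing
 * of F_SETLEASE with a SIGIO handler; "on_lease_break" is hypothetical and
 * error handling is abbreviated.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <signal.h>
 *
 *	signal(SIGIO, on_lease_break);		// SIGIO is the default;
 *						// F_SETSIG can change it
 *	fcntl(fd, F_SETLEASE, F_RDLCK);		// take a read lease
 *	// ... later:
 *	int type = fcntl(fd, F_GETLEASE);	// F_RDLCK, F_WRLCK or F_UNLCK
 */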
/**
 * generic_setlease - sets a lease on an open file
 * @filp: file pointer
 * @arg: type of lease to obtain
 * @flp: input - file_lock to use, output - file_lock inserted
 *
 * The (input) flp->fl_lmops->lm_break function is required
 * by break_lease().
 *
 * Called with file_lock_lock held.
 */
int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
{
	struct file_lock *fl, **before, **my_before = NULL, *lease;
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int error, rdlease_count = 0, wrlease_count = 0;

	lease = *flp;

	error = -EACCES;
	if ((current_fsuid() != inode->i_uid) && !capable(CAP_LEASE))
		goto out;
	error = -EINVAL;
	if (!S_ISREG(inode->i_mode))
		goto out;
	error = security_file_lock(filp, arg);
	if (error)
		goto out;

	time_out_leases(inode);

	BUG_ON(!(*flp)->fl_lmops->lm_break);

	if (arg != F_UNLCK) {
		error = -EAGAIN;
		if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
			goto out;
		if ((arg == F_WRLCK)
		    && ((dentry->d_count > 1)
			|| (atomic_read(&inode->i_count) > 1)))
			goto out;
	}

	/*
	 * At this point, we know that if there is an exclusive
	 * lease on this file, then we hold it on this filp
	 * (otherwise our open of this file would have blocked).
	 * And if we are trying to acquire an exclusive lease,
	 * then the file is not open by anyone (including us)
	 * except for this filp.
	 */
	for (before = &inode->i_flock;
			((fl = *before) != NULL) && IS_LEASE(fl);
			before = &fl->fl_next) {
		if (fl->fl_file == filp)
			my_before = before;
		else if (fl->fl_type == (F_INPROGRESS | F_UNLCK))
			/*
			 * Someone is in the process of opening this
			 * file for writing so we may not take an
			 * exclusive lease on it.
			 */
			wrlease_count++;
		else
			rdlease_count++;
	}

	error = -EAGAIN;
	if ((arg == F_RDLCK && (wrlease_count > 0)) ||
	    (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0)))
		goto out;

	if (my_before != NULL) {
		error = lease->fl_lmops->lm_change(my_before, arg);
		if (!error)
			*flp = *my_before;
		goto out;
	}

	if (arg == F_UNLCK)
		goto out;

	error = -EINVAL;
	if (!leases_enable)
		goto out;

	locks_insert_lock(before, lease);
	return 0;

out:
	return error;
}
EXPORT_SYMBOL(generic_setlease);

static int __vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
{
	if (filp->f_op && filp->f_op->setlease)
		return filp->f_op->setlease(filp, arg, lease);
	else
		return generic_setlease(filp, arg, lease);
}

/**
 * vfs_setlease - sets a lease on an open file
 * @filp: file pointer
 * @arg: type of lease to obtain
 * @lease: file_lock to use
 *
 * Call this to establish a lease on the file.
 * The (*lease)->fl_lmops->lm_break operation must be set; if not,
 * break_lease will oops!
 *
 * This will call the filesystem's setlease file method, if
 * defined. Note that there is no getlease method; instead, the
 * filesystem setlease method should call back to setlease() to
 * add a lease to the inode's lease list, where fcntl_getlease() can
 * find it. Since fcntl_getlease() only reports whether the current
 * task holds a lease, a cluster filesystem need only do this for
 * leases held by processes on this node.
 *
 * There is also no break_lease method; filesystems that
 * handle their own leases should break leases themselves from the
 * filesystem's open, create, and (on truncate) setattr methods.
 *
 * Warning: the only current setlease methods exist only to disable
 * leases in certain cases. More vfs changes may be required to
 * allow a full filesystem lease implementation.
 */
int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
{
	int error;

	lock_flocks();
	error = __vfs_setlease(filp, arg, lease);
	unlock_flocks();

	return error;
}
EXPORT_SYMBOL_GPL(vfs_setlease);
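
/*
 * As the warning above notes, current ->setlease methods exist mainly to
 * opt out of leases. A minimal sketch of such a method for a hypothetical
 * filesystem (not taken from this tree), matching the signature used by
 * __vfs_setlease():
 *
 *	static int examplefs_setlease(struct file *filp, long arg,
 *				      struct file_lock **flp)
 *	{
 *		// refuse leases, e.g. because inode state is not
 *		// locally authoritative on this filesystem
 *		return -EINVAL;
 *	}
 */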
static int do_fcntl_delete_lease(struct file *filp)
{
	struct file_lock fl, *flp = &fl;

	lease_init(filp, F_UNLCK, flp);

	return vfs_setlease(filp, F_UNLCK, &flp);
}

static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
{
	struct file_lock *fl, *ret;
	struct fasync_struct *new;
	int error;

	fl = lease_alloc(filp, arg);
	if (IS_ERR(fl))
		return PTR_ERR(fl);

	new = fasync_alloc();
	if (!new) {
		locks_free_lock(fl);
		return -ENOMEM;
	}
	ret = fl;
	lock_flocks();
	error = __vfs_setlease(filp, arg, &ret);
	if (error) {
		unlock_flocks();
		locks_free_lock(fl);
		goto out_free_fasync;
	}
	if (ret != fl)
		locks_free_lock(fl);

	/*
	 * fasync_insert_entry() returns the old entry if any.
	 * If there was no old entry, then it used 'new' and
	 * inserted it into the fasync list. Clear new so that
	 * we don't release it here.
	 */
	if (!fasync_insert_entry(fd, filp, &ret->fl_fasync, new))
		new = NULL;

	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
	unlock_flocks();

out_free_fasync:
	if (new)
		fasync_free(new);
	return error;
}

/**
 * fcntl_setlease - sets a lease on an open file
 * @fd: open file descriptor
 * @filp: file pointer
 * @arg: type of lease to obtain
 *
 * Call this fcntl to establish a lease on the file.
 * Note that you also need to call %F_SETSIG to
 * receive a signal when the lease is broken.
 */
int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
	if (arg == F_UNLCK)
		return do_fcntl_delete_lease(filp);
	return do_fcntl_add_lease(fd, filp, arg);
}

/**
 * flock_lock_file_wait - Apply a FLOCK-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * Add a FLOCK style lock to a file.
 */
int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = flock_lock_file(filp, fl);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}
EXPORT_SYMBOL(flock_lock_file_wait);

/**
 * sys_flock: - flock() system call.
 * @fd: the file descriptor to lock.
 * @cmd: the type of lock to apply.
 *
 * Apply a %FL_FLOCK style lock to an open file descriptor.
 * The @cmd can be one of
 *
 * %LOCK_SH -- a shared lock.
 *
 * %LOCK_EX -- an exclusive lock.
 *
 * %LOCK_UN -- remove an existing lock.
 *
 * %LOCK_MAND -- a `mandatory' flock. This exists to emulate Windows Share Modes.
 *
 * %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
 * processes read and write access respectively.
 */
SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
{
	struct file *filp;
	struct file_lock *lock;
	int can_sleep, unlock;
	int error;

	error = -EBADF;
	filp = fget(fd);
	if (!filp)
		goto out;

	can_sleep = !(cmd & LOCK_NB);
	cmd &= ~LOCK_NB;
	unlock = (cmd == LOCK_UN);

	if (!unlock && !(cmd & LOCK_MAND) &&
	    !(filp->f_mode & (FMODE_READ|FMODE_WRITE)))
		goto out_putf;

	error = flock_make_lock(filp, &lock, cmd);
	if (error)
		goto out_putf;
	if (can_sleep)
		lock->fl_flags |= FL_SLEEP;

	error = security_file_lock(filp, lock->fl_type);
	if (error)
		goto out_free;

	if (filp->f_op && filp->f_op->flock)
		error = filp->f_op->flock(filp,
					  (can_sleep) ? F_SETLKW : F_SETLK,
					  lock);
	else
		error = flock_lock_file_wait(filp, lock);

out_free:
	locks_free_lock(lock);

out_putf:
	fput(filp);
out:
	return error;
}
/**
 * vfs_test_lock - test file byte range lock
 * @filp: The file to test lock for
 * @fl: The lock to test; also used to hold result
 *
 * Returns -ERRNO on failure. Indicates presence of conflicting lock by
 * setting fl->fl_type to something other than F_UNLCK.
 */
int vfs_test_lock(struct file *filp, struct file_lock *fl)
{
	if (filp->f_op && filp->f_op->lock)
		return filp->f_op->lock(filp, F_GETLK, fl);
	posix_test_lock(filp, fl);
	return 0;
}
EXPORT_SYMBOL_GPL(vfs_test_lock);

static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
{
	flock->l_pid = fl->fl_pid;
#if BITS_PER_LONG == 32
	/*
	 * Make sure we can represent the posix lock via
	 * legacy 32bit flock.
	 */
	if (fl->fl_start > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
#endif
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
	return 0;
}

#if BITS_PER_LONG == 32
static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
{
	flock->l_pid = fl->fl_pid;
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
}
#endif

/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk(struct file *filp, struct flock __user *l)
{
	struct file_lock file_lock;
	struct flock flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	error = vfs_test_lock(filp, &file_lock);
	if (error)
		goto out;

	flock.l_type = file_lock.fl_type;
	if (file_lock.fl_type != F_UNLCK) {
		error = posix_lock_to_flock(&flock, &file_lock);
		if (error)
			goto out;
	}
	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;
out:
	return error;
}
/**
 * vfs_lock_file - file byte range lock
 * @filp: The file to apply the lock to
 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
 * @fl: The lock to be applied
 * @conf: Place to return a copy of the conflicting lock, if found.
 *
 * A caller that doesn't care about the conflicting lock may pass NULL
 * as the final argument.
 *
 * If the filesystem defines a private ->lock() method, then @conf will
 * be left unchanged; so a caller that cares should initialize it to
 * some acceptable default.
 *
 * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
 * locks, the ->lock() interface may return asynchronously, before the lock has
 * been granted or denied by the underlying filesystem, if (and only if)
 * lm_grant is set. Callers expecting ->lock() to return asynchronously
 * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
 * the request is for a blocking lock. When ->lock() does return asynchronously,
 * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
 * request completes.
 * If the request is for a non-blocking lock the file system should return
 * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
 * with the result. If the request timed out the callback routine will return
 * a nonzero return code and the file system should release the lock. The file
 * system is also responsible for keeping a corresponding posix lock when it
 * grants a lock, so the VFS can find out which locks are locally held and do
 * the correct lock cleanup when required.
 * The underlying filesystem must not drop the kernel lock or call
 * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
 * return code.
 */
int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
{
	if (filp->f_op && filp->f_op->lock)
		return filp->f_op->lock(filp, cmd, fl);
	else
		return posix_lock_file(filp, fl, conf);
}
EXPORT_SYMBOL_GPL(vfs_lock_file);

static int do_lock_file_wait(struct file *filp, unsigned int cmd,
			     struct file_lock *fl)
{
	int error;

	error = security_file_lock(filp, fl->fl_type);
	if (error)
		return error;

	for (;;) {
		error = vfs_lock_file(filp, cmd, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}

	return error;
}

/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
		struct flock __user *l)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct flock flock;
	struct inode *inode;
	struct file *f;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	/*
	 * This might block, so we do it before checking the inode.
	 */
	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;

	inode = filp->f_path.dentry->d_inode;

	/* Don't allow mandatory locks on files that may be memory mapped
	 * and shared.
	 */
	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
		error = -EAGAIN;
		goto out;
	}

again:
	error = flock_to_posix_lock(filp, file_lock, &flock);
	if (error)
		goto out;
	if (cmd == F_SETLKW) {
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = -EBADF;
	switch (flock.l_type) {
	case F_RDLCK:
		if (!(filp->f_mode & FMODE_READ))
			goto out;
		break;
	case F_WRLCK:
		if (!(filp->f_mode & FMODE_WRITE))
			goto out;
		break;
	case F_UNLCK:
		break;
	default:
		error = -EINVAL;
		goto out;
	}

	error = do_lock_file_wait(filp, cmd, file_lock);

	/*
	 * Attempt to detect a close/fcntl race and recover by
	 * releasing the lock that was just acquired.
	 */
	/*
	 * we need that spin_lock here - it prevents reordering between
	 * update of inode->i_flock and check for it done in close().
	 * rcu_read_lock() wouldn't do.
	 */
	spin_lock(&current->files->file_lock);
	f = fcheck(fd);
	spin_unlock(&current->files->file_lock);
	if (!error && f != filp && flock.l_type != F_UNLCK) {
		flock.l_type = F_UNLCK;
		goto again;
	}

out:
	locks_free_lock(file_lock);
	return error;
}
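
/*
 * Illustrative sketch of the close()/POSIX-lock semantics the recovery
 * code above defends against (filenames and the wrlock variable are
 * hypothetical): record locks belong to the process, not the descriptor,
 * so closing *any* descriptor referring to a file drops all of the
 * process's locks on it:
 *
 *	int fd1 = open("/tmp/data", O_RDWR);
 *	int fd2 = open("/tmp/data", O_RDWR);
 *	fcntl(fd1, F_SETLK, &wrlock);	// lock taken via fd1
 *	close(fd2);			// silently drops the fd1 lock too
 */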
#if BITS_PER_LONG == 32
/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK64 command of fcntl().
 */
int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
{
	struct file_lock file_lock;
	struct flock64 flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock64_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	error = vfs_test_lock(filp, &file_lock);
	if (error)
		goto out;

	flock.l_type = file_lock.fl_type;
	if (file_lock.fl_type != F_UNLCK)
		posix_lock_to_flock64(&flock, &file_lock);

	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;

out:
	return error;
}

/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK64 and F_SETLKW64 commands of fcntl().
 */
int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
		  struct flock64 __user *l)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct flock64 flock;
	struct inode *inode;
	struct file *f;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	/*
	 * This might block, so we do it before checking the inode.
	 */
	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;

	inode = filp->f_path.dentry->d_inode;

	/* Don't allow mandatory locks on files that may be memory mapped
	 * and shared.
	 */
	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
		error = -EAGAIN;
		goto out;
	}

again:
	error = flock64_to_posix_lock(filp, file_lock, &flock);
	if (error)
		goto out;
	if (cmd == F_SETLKW64) {
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = -EBADF;
	switch (flock.l_type) {
	case F_RDLCK:
		if (!(filp->f_mode & FMODE_READ))
			goto out;
		break;
	case F_WRLCK:
		if (!(filp->f_mode & FMODE_WRITE))
			goto out;
		break;
	case F_UNLCK:
		break;
	default:
		error = -EINVAL;
		goto out;
	}

	error = do_lock_file_wait(filp, cmd, file_lock);

	/*
	 * Attempt to detect a close/fcntl race and recover by
	 * releasing the lock that was just acquired.
	 */
	spin_lock(&current->files->file_lock);
	f = fcheck(fd);
	spin_unlock(&current->files->file_lock);
	if (!error && f != filp && flock.l_type != F_UNLCK) {
		flock.l_type = F_UNLCK;
		goto again;
	}

out:
	locks_free_lock(file_lock);
	return error;
}
#endif /* BITS_PER_LONG == 32 */
/*
 * This function is called when the file is being removed
 * from the task's fd array. POSIX locks belonging to this task
 * are deleted at this time.
 */
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
        struct file_lock lock;

        /*
         * If there are no locks held on this file, we don't need to call
         * posix_lock_file(). Another process could be setting a lock on this
         * file at the same time, but we wouldn't remove that lock anyway.
         */
        if (!filp->f_path.dentry->d_inode->i_flock)
                return;

        lock.fl_type = F_UNLCK;
        lock.fl_flags = FL_POSIX | FL_CLOSE;
        lock.fl_start = 0;
        lock.fl_end = OFFSET_MAX;
        lock.fl_owner = owner;
        lock.fl_pid = current->tgid;
        lock.fl_file = filp;
        lock.fl_ops = NULL;
        lock.fl_lmops = NULL;

        vfs_lock_file(filp, F_SETLK, &lock, NULL);

        if (lock.fl_ops && lock.fl_ops->fl_release_private)
                lock.fl_ops->fl_release_private(&lock);
}

EXPORT_SYMBOL(locks_remove_posix);

/*
 * This function is called on the last close of an open file.
 */
void locks_remove_flock(struct file *filp)
{
        struct inode *inode = filp->f_path.dentry->d_inode;
        struct file_lock *fl;
        struct file_lock **before;

        if (!inode->i_flock)
                return;

        if (filp->f_op && filp->f_op->flock) {
                struct file_lock fl = {
                        .fl_pid = current->tgid,
                        .fl_file = filp,
                        .fl_flags = FL_FLOCK,
                        .fl_type = F_UNLCK,
                        .fl_end = OFFSET_MAX,
                };
                filp->f_op->flock(filp, F_SETLKW, &fl);
                if (fl.fl_ops && fl.fl_ops->fl_release_private)
                        fl.fl_ops->fl_release_private(&fl);
        }

        lock_flocks();
        before = &inode->i_flock;

        while ((fl = *before) != NULL) {
                if (fl->fl_file == filp) {
                        if (IS_FLOCK(fl)) {
                                locks_delete_lock(before);
                                continue;
                        }
                        if (IS_LEASE(fl)) {
                                lease_modify(before, F_UNLCK);
                                continue;
                        }
                        /* Neither flock nor lease: a POSIX lock should never
                         * survive to this point. */
                        BUG();
                }
                before = &fl->fl_next;
        }
        unlock_flocks();
}

/**
 * posix_unblock_lock - stop waiting for a file lock
 * @filp: how the file was opened
 * @waiter: the lock which was waiting
 *
 * lockd needs to block waiting for locks.
 */
int
posix_unblock_lock(struct file *filp, struct file_lock *waiter)
{
        int status = 0;

        lock_flocks();
        if (waiter->fl_next)
                __locks_delete_block(waiter);
        else
                status = -ENOENT;
        unlock_flocks();
        return status;
}

EXPORT_SYMBOL(posix_unblock_lock);

/**
 * vfs_cancel_lock - file byte range unblock lock
 * @filp: The file to apply the unblock to
 * @fl: The lock to be unblocked
 *
 * Used by lock managers to cancel blocked requests
 */
int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
{
        if (filp->f_op && filp->f_op->lock)
                return filp->f_op->lock(filp, F_CANCELLK, fl);
        return 0;
}

EXPORT_SYMBOL_GPL(vfs_cancel_lock);

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
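/*
 * lock_get_status() below emits one line per lock. An illustrative
 * /proc/locks line (all values made up) and its fields:
 *
 *      1: POSIX  ADVISORY  WRITE 1234 08:01:5678 0 EOF
 *      |  |      |         |     |    |          | |
 *      |  |      |         |     |    |          | '- fl_end (EOF == OFFSET_MAX)
 *      |  |      |         |     |    |          '--- fl_start
 *      |  |      |         |     |    '- device (major:minor) : inode number
 *      |  |      |         |     '- pid of the owner
 *      |  |      |         '- lock type (READ/WRITE/...)
 *      |  |      '- ADVISORY or MANDATORY
 *      |  '- lock class (POSIX, FLOCK, LEASE)
 *      '- ordinal position in /proc/locks
 *
 * Blocked waiters are shown on following lines with a " ->" prefix.
 */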
"MANDATORY" : "ADVISORY "); 2121 } else if (IS_FLOCK(fl)) { 2122 if (fl->fl_type & LOCK_MAND) { 2123 seq_printf(f, "FLOCK MSNFS "); 2124 } else { 2125 seq_printf(f, "FLOCK ADVISORY "); 2126 } 2127 } else if (IS_LEASE(fl)) { 2128 seq_printf(f, "LEASE "); 2129 if (fl->fl_type & F_INPROGRESS) 2130 seq_printf(f, "BREAKING "); 2131 else if (fl->fl_file) 2132 seq_printf(f, "ACTIVE "); 2133 else 2134 seq_printf(f, "BREAKER "); 2135 } else { 2136 seq_printf(f, "UNKNOWN UNKNOWN "); 2137 } 2138 if (fl->fl_type & LOCK_MAND) { 2139 seq_printf(f, "%s ", 2140 (fl->fl_type & LOCK_READ) 2141 ? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ " 2142 : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE "); 2143 } else { 2144 seq_printf(f, "%s ", 2145 (fl->fl_type & F_INPROGRESS) 2146 ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ " 2147 : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ "); 2148 } 2149 if (inode) { 2150 #ifdef WE_CAN_BREAK_LSLK_NOW 2151 seq_printf(f, "%d %s:%ld ", fl_pid, 2152 inode->i_sb->s_id, inode->i_ino); 2153 #else 2154 /* userspace relies on this representation of dev_t ;-( */ 2155 seq_printf(f, "%d %02x:%02x:%ld ", fl_pid, 2156 MAJOR(inode->i_sb->s_dev), 2157 MINOR(inode->i_sb->s_dev), inode->i_ino); 2158 #endif 2159 } else { 2160 seq_printf(f, "%d <none>:0 ", fl_pid); 2161 } 2162 if (IS_POSIX(fl)) { 2163 if (fl->fl_end == OFFSET_MAX) 2164 seq_printf(f, "%Ld EOF\n", fl->fl_start); 2165 else 2166 seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end); 2167 } else { 2168 seq_printf(f, "0 EOF\n"); 2169 } 2170 } 2171 2172 static int locks_show(struct seq_file *f, void *v) 2173 { 2174 struct file_lock *fl, *bfl; 2175 2176 fl = list_entry(v, struct file_lock, fl_link); 2177 2178 lock_get_status(f, fl, *((loff_t *)f->private), ""); 2179 2180 list_for_each_entry(bfl, &fl->fl_block, fl_block) 2181 lock_get_status(f, bfl, *((loff_t *)f->private), " ->"); 2182 2183 return 0; 2184 } 2185 2186 static void *locks_start(struct seq_file *f, loff_t *pos) 2187 { 2188 loff_t *p = f->private; 2189 2190 lock_flocks(); 2191 *p = (*pos + 1); 2192 return seq_list_start(&file_lock_list, *pos); 2193 } 2194 2195 static void *locks_next(struct seq_file *f, void *v, loff_t *pos) 2196 { 2197 loff_t *p = f->private; 2198 ++*p; 2199 return seq_list_next(v, &file_lock_list, pos); 2200 } 2201 2202 static void locks_stop(struct seq_file *f, void *v) 2203 { 2204 unlock_flocks(); 2205 } 2206 2207 static const struct seq_operations locks_seq_operations = { 2208 .start = locks_start, 2209 .next = locks_next, 2210 .stop = locks_stop, 2211 .show = locks_show, 2212 }; 2213 2214 static int locks_open(struct inode *inode, struct file *filp) 2215 { 2216 return seq_open_private(filp, &locks_seq_operations, sizeof(loff_t)); 2217 } 2218 2219 static const struct file_operations proc_locks_operations = { 2220 .open = locks_open, 2221 .read = seq_read, 2222 .llseek = seq_lseek, 2223 .release = seq_release_private, 2224 }; 2225 2226 static int __init proc_locks_init(void) 2227 { 2228 proc_create("locks", 0, NULL, &proc_locks_operations); 2229 return 0; 2230 } 2231 module_init(proc_locks_init); 2232 #endif 2233 2234 /** 2235 * lock_may_read - checks that the region is free of locks 2236 * @inode: the inode that is being read 2237 * @start: the first byte to read 2238 * @len: the number of bytes to read 2239 * 2240 * Emulates Windows locking requirements. Whole-file 2241 * mandatory locks (share modes) can prohibit a read and 2242 * byte-range POSIX locks can prohibit a read if they overlap. 2243 * 2244 * N.B. 
/**
 * lock_may_read - checks that the region is free of locks
 * @inode: the inode that is being read
 * @start: the first byte to read
 * @len: the number of bytes to read
 *
 * Emulates Windows locking requirements. Whole-file
 * mandatory locks (share modes) can prohibit a read and
 * byte-range POSIX locks can prohibit a read if they overlap.
 *
 * N.B. this function is only ever called
 * from knfsd and ownership of locks is never checked.
 */
int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
{
        struct file_lock *fl;
        int result = 1;

        lock_flocks();
        for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
                if (IS_POSIX(fl)) {
                        if (fl->fl_type == F_RDLCK)
                                continue;
                        if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
                                continue;
                } else if (IS_FLOCK(fl)) {
                        if (!(fl->fl_type & LOCK_MAND))
                                continue;
                        if (fl->fl_type & LOCK_READ)
                                continue;
                } else
                        continue;
                result = 0;
                break;
        }
        unlock_flocks();
        return result;
}

EXPORT_SYMBOL(lock_may_read);

/**
 * lock_may_write - checks that the region is free of locks
 * @inode: the inode that is being written
 * @start: the first byte to write
 * @len: the number of bytes to write
 *
 * Emulates Windows locking requirements. Whole-file
 * mandatory locks (share modes) can prohibit a write and
 * byte-range POSIX locks can prohibit a write if they overlap.
 *
 * N.B. this function is only ever called
 * from knfsd and ownership of locks is never checked.
 */
int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
{
        struct file_lock *fl;
        int result = 1;

        lock_flocks();
        for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
                if (IS_POSIX(fl)) {
                        if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
                                continue;
                } else if (IS_FLOCK(fl)) {
                        if (!(fl->fl_type & LOCK_MAND))
                                continue;
                        if (fl->fl_type & LOCK_WRITE)
                                continue;
                } else
                        continue;
                result = 0;
                break;
        }
        unlock_flocks();
        return result;
}

EXPORT_SYMBOL(lock_may_write);

static int __init filelock_init(void)
{
        filelock_cache = kmem_cache_create("file_lock_cache",
                        sizeof(struct file_lock), 0, SLAB_PANIC, NULL);

        return 0;
}

core_initcall(filelock_init);
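/*
 * Worked example (values made up) of the overlap test used in
 * lock_may_read() and lock_may_write() above. A lock is skipped only when
 * it lies entirely before or entirely after the requested region:
 *
 *      POSIX write lock on bytes [100, 199]
 *      lock_may_read(inode, 150, 10):  fl_end (199) >= start (150) and
 *                                      fl_start (100) <= start + len (160),
 *                                      so they overlap -> returns 0 (denied)
 *      lock_may_read(inode, 300, 10):  fl_end (199) < start (300),
 *                                      so the lock is skipped -> returns 1
 */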