/*
 * linux/fs/locks.c
 *
 * Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
 * Doug Evans (dje@spiff.uucp), August 07, 1992
 *
 * Deadlock detection added.
 * FIXME: one thing isn't handled yet:
 *	- mandatory locks (requires lots of changes elsewhere)
 * Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
 *
 * Miscellaneous edits, and a total rewrite of posix_lock_file() code.
 * Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
 *
 * Converted file_lock_table to a linked list from an array, which eliminates
 * the limits on how many active file locks are open.
 * Chad Page (pageone@netcom.com), November 27, 1994
 *
 * Removed dependency on file descriptors. dup()'ed file descriptors now
 * get the same locks as the original file descriptors, and a close() on
 * any file descriptor removes ALL the locks on the file for the current
 * process. Since locks still depend on the process id, locks are inherited
 * after an exec() but not after a fork(). This agrees with POSIX, and both
 * BSD and SVR4 practice.
 * Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
 *
 * Scrapped free list which is redundant now that we allocate locks
 * dynamically with kmalloc()/kfree().
 * Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
 *
 * Implemented two lock personalities - FL_FLOCK and FL_POSIX.
 *
 * FL_POSIX locks are created with calls to fcntl() and lockf() through the
 * fcntl() system call. They have the semantics described above.
 *
 * FL_FLOCK locks are created with calls to flock(), through the flock()
 * system call, which is new. Old C libraries implement flock() via fcntl()
 * and will continue to use the old, broken implementation.
 *
 * FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
 * with a file pointer (filp). As a result they can be shared by a parent
 * process and its children after a fork(). They are removed when the last
 * file descriptor referring to the file pointer is closed (unless explicitly
 * unlocked).
 *
 * FL_FLOCK locks never deadlock, an existing lock is always removed before
 * upgrading from shared to exclusive (or vice versa). When this happens
 * any processes blocked by the current lock are woken up and allowed to
 * run before the new lock is applied.
 * Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
 *
 * Removed some race conditions in flock_lock_file(), marked other possible
 * races. Just grep for FIXME to see them.
 * Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
 *
 * Addressed Dmitry's concerns. Deadlock checking no longer recursive.
 * Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
 * once we've checked for blocking and deadlocking.
 * Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
 *
 * Initial implementation of mandatory locks. SunOS turned out to be
 * a rotten model, so I implemented the "obvious" semantics.
 * See 'Documentation/mandatory.txt' for details.
 * Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
 *
 * Don't allow mandatory locks on mmap()'ed files. Added simple functions to
 * check if a file has mandatory locks, used by mmap(), open() and creat() to
 * see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
 * Manual, Section 2.
 * Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
 *
 * Tidied up block list handling. Added '/proc/locks' interface.
 * Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
 *
 * Fixed deadlock condition for pathological code that mixes calls to
 * flock() and fcntl().
 * Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
 *
 * Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
 * for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
 * guarantee sensible behaviour in the case where file system modules might
 * be compiled with different options than the kernel itself.
 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 * Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
 * (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 * Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
 * locks. Changed process synchronisation to avoid dereferencing locks that
 * have already been freed.
 * Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
 *
 * Made the block list a circular list to minimise searching in the list.
 * Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
 *
 * Made mandatory locking a mount option. Default is not to allow mandatory
 * locking.
 * Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
 *
 * Some adaptations for NFS support.
 * Olaf Kirch (okir@monad.swb.de), Dec 1996.
 *
 * Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
 * Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
 *
 * Use slab allocator instead of kmalloc/kfree.
 * Use generic list implementation from <linux/list.h>.
 * Sped up posix_locks_deadlock by only considering blocked locks.
 * Matthew Wilcox <willy@debian.org>, March, 2000.
 *
 * Leases and LOCK_MAND
 * Matthew Wilcox <willy@debian.org>, June, 2000.
 * Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
 */
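/*
 * Illustrative userspace view (added; not part of the original file): the
 * two lock personalities described above are driven by different syscalls.
 * A minimal sketch, assuming an already-open descriptor fd:
 *
 *	struct flock fl = {
 *		.l_type = F_WRLCK,	// exclusive POSIX record lock
 *		.l_whence = SEEK_SET,
 *		.l_start = 0,
 *		.l_len = 100,		// bytes 0..99; 0 would mean "to EOF"
 *	};
 *	fcntl(fd, F_SETLK, &fl);	// FL_POSIX: owned by the process
 *
 *	flock(fd, LOCK_EX);		// FL_FLOCK: attached to the open file
 *
 * The fcntl() lock is dropped on any close() by this process; the flock()
 * lock lives until the last descriptor sharing the open file is closed.
 */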
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>

#include <asm/semaphore.h>
#include <asm/uaccess.h>

#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & FL_LEASE)

int leases_enable = 1;
int lease_break_time = 45;

#define for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)
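/*
 * Explanatory note (added): for_each_lock() walks an inode's lock list with
 * a struct file_lock ** cursor rather than a plain pointer.  Holding the
 * address of the link lets a caller splice a lock out of, or into, the
 * singly linked i_flock list at the current position without re-walking
 * it -- see locks_delete_lock() and locks_insert_lock() below.
 */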
static LIST_HEAD(file_lock_list);
static LIST_HEAD(blocked_list);

static kmem_cache_t *filelock_cache __read_mostly;

/* Allocate an empty lock structure. */
static struct file_lock *locks_alloc_lock(void)
{
	return kmem_cache_alloc(filelock_cache, SLAB_KERNEL);
}

static void locks_release_private(struct file_lock *fl)
{
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_release_private)
			fl->fl_ops->fl_release_private(fl);
		fl->fl_ops = NULL;
	}
	if (fl->fl_lmops) {
		if (fl->fl_lmops->fl_release_private)
			fl->fl_lmops->fl_release_private(fl);
		fl->fl_lmops = NULL;
	}
}

/* Free a lock which is not in use. */
static void locks_free_lock(struct file_lock *fl)
{
	BUG_ON(waitqueue_active(&fl->fl_wait));
	BUG_ON(!list_empty(&fl->fl_block));
	BUG_ON(!list_empty(&fl->fl_link));

	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}

void locks_init_lock(struct file_lock *fl)
{
	INIT_LIST_HEAD(&fl->fl_link);
	INIT_LIST_HEAD(&fl->fl_block);
	init_waitqueue_head(&fl->fl_wait);
	fl->fl_next = NULL;
	fl->fl_fasync = NULL;
	fl->fl_owner = NULL;
	fl->fl_pid = 0;
	fl->fl_file = NULL;
	fl->fl_flags = 0;
	fl->fl_type = 0;
	fl->fl_start = fl->fl_end = 0;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;
}

EXPORT_SYMBOL(locks_init_lock);

/*
 * Initialises the fields of the file lock which are invariant for
 * free file_locks.
 */
static void init_once(void *foo, kmem_cache_t *cache, unsigned long flags)
{
	struct file_lock *lock = (struct file_lock *) foo;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) !=
					SLAB_CTOR_CONSTRUCTOR)
		return;

	locks_init_lock(lock);
}

static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
{
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_copy_lock)
			fl->fl_ops->fl_copy_lock(new, fl);
		new->fl_ops = fl->fl_ops;
	}
	if (fl->fl_lmops) {
		if (fl->fl_lmops->fl_copy_lock)
			fl->fl_lmops->fl_copy_lock(new, fl);
		new->fl_lmops = fl->fl_lmops;
	}
}

/*
 * Initialize a new lock from an existing file_lock structure.
 */
static void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
	new->fl_file = NULL;
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_ops = NULL;
	new->fl_lmops = NULL;
}

void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	locks_release_private(new);

	__locks_copy_lock(new, fl);
	new->fl_file = fl->fl_file;
	new->fl_ops = fl->fl_ops;
	new->fl_lmops = fl->fl_lmops;

	locks_copy_private(new, fl);
}

EXPORT_SYMBOL(locks_copy_lock);
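/*
 * Explanatory note (added): __locks_copy_lock() deliberately leaves fl_file,
 * fl_ops and fl_lmops NULL, so the copy carries no references to the open
 * file or its callbacks; this is the form used when handing a conflicting
 * lock back from posix_test_lock().  locks_copy_lock() is the full copy,
 * which also duplicates private state via the fl_copy_lock() callbacks.
 */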
static inline int flock_translate_cmd(int cmd) {
	if (cmd & LOCK_MAND)
		return cmd & (LOCK_MAND | LOCK_RW);
	switch (cmd) {
	case LOCK_SH:
		return F_RDLCK;
	case LOCK_EX:
		return F_WRLCK;
	case LOCK_UN:
		return F_UNLCK;
	}
	return -EINVAL;
}

/* Fill in a file_lock structure with an appropriate FLOCK lock. */
static int flock_make_lock(struct file *filp, struct file_lock **lock,
		unsigned int cmd)
{
	struct file_lock *fl;
	int type = flock_translate_cmd(cmd);
	if (type < 0)
		return type;

	fl = locks_alloc_lock();
	if (fl == NULL)
		return -ENOMEM;

	fl->fl_file = filp;
	fl->fl_pid = current->tgid;
	fl->fl_flags = FL_FLOCK;
	fl->fl_type = type;
	fl->fl_end = OFFSET_MAX;

	*lock = fl;
	return 0;
}

static int assign_type(struct file_lock *fl, int type)
{
	switch (type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 * style lock.
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	off_t start, end;

	switch (l->l_whence) {
	case 0: /*SEEK_SET*/
		start = 0;
		break;
	case 1: /*SEEK_CUR*/
		start = filp->f_pos;
		break;
	case 2: /*SEEK_END*/
		start = i_size_read(filp->f_dentry->d_inode);
		break;
	default:
		return -EINVAL;
	}

	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
	   POSIX-2001 defines it. */
	start += l->l_start;
	if (start < 0)
		return -EINVAL;
	fl->fl_end = OFFSET_MAX;
	if (l->l_len > 0) {
		end = start + l->l_len - 1;
		fl->fl_end = end;
	} else if (l->l_len < 0) {
		end = start - 1;
		fl->fl_end = end;
		start += l->l_len;
		if (start < 0)
			return -EINVAL;
	}
	fl->fl_start = start;	/* we record the absolute position */
	if (fl->fl_end < fl->fl_start)
		return -EOVERFLOW;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}
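/*
 * Worked example (added for clarity): with l_whence = SEEK_SET,
 * l_start = 100 and l_len = 10, the code above yields fl_start = 100,
 * fl_end = 109.  A negative length locks the bytes *before* l_start:
 * l_start = 100, l_len = -10 gives fl_start = 90, fl_end = 99, per the
 * POSIX-2001 semantics noted above.  l_len = 0 leaves fl_end = OFFSET_MAX,
 * i.e. the lock runs to end-of-file.
 */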
#if BITS_PER_LONG == 32
static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	loff_t start;

	switch (l->l_whence) {
	case 0: /*SEEK_SET*/
		start = 0;
		break;
	case 1: /*SEEK_CUR*/
		start = filp->f_pos;
		break;
	case 2: /*SEEK_END*/
		start = i_size_read(filp->f_dentry->d_inode);
		break;
	default:
		return -EINVAL;
	}

	start += l->l_start;
	if (start < 0)
		return -EINVAL;
	fl->fl_end = OFFSET_MAX;
	if (l->l_len > 0) {
		fl->fl_end = start + l->l_len - 1;
	} else if (l->l_len < 0) {
		fl->fl_end = start - 1;
		start += l->l_len;
		if (start < 0)
			return -EINVAL;
	}
	fl->fl_start = start;	/* we record the absolute position */
	if (fl->fl_end < fl->fl_start)
		return -EOVERFLOW;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	switch (l->l_type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = l->l_type;
		break;
	default:
		return -EINVAL;
	}

	return (0);
}
#endif

/* default lease lock manager operations */
static void lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
}

static void lease_release_private_callback(struct file_lock *fl)
{
	if (!fl->fl_file)
		return;

	f_delown(fl->fl_file);
	fl->fl_file->f_owner.signum = 0;
}

static int lease_mylease_callback(struct file_lock *fl, struct file_lock *try)
{
	return fl->fl_file == try->fl_file;
}

static struct lock_manager_operations lease_manager_ops = {
	.fl_break = lease_break_callback,
	.fl_release_private = lease_release_private_callback,
	.fl_mylease = lease_mylease_callback,
	.fl_change = lease_modify,
};

/*
 * Initialize a lease, use the default lock manager operations
 */
static int lease_init(struct file *filp, int type, struct file_lock *fl)
{
	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;

	fl->fl_file = filp;
	fl->fl_flags = FL_LEASE;
	if (assign_type(fl, type) != 0) {
		locks_free_lock(fl);
		return -EINVAL;
	}
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;
	fl->fl_ops = NULL;
	fl->fl_lmops = &lease_manager_ops;
	return 0;
}

/* Allocate a file_lock initialised to this type of lease */
static int lease_alloc(struct file *filp, int type, struct file_lock **flp)
{
	struct file_lock *fl = locks_alloc_lock();
	int error;

	if (fl == NULL)
		return -ENOMEM;

	error = lease_init(filp, type, fl);
	if (error)
		return error;
	*flp = fl;
	return 0;
}

/* Check if two locks overlap each other.
 */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
	return ((fl1->fl_end >= fl2->fl_start) &&
		(fl2->fl_end >= fl1->fl_start));
}

/*
 * Check whether two locks have the same owner.
 */
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	if (fl1->fl_lmops && fl1->fl_lmops->fl_compare_owner)
		return fl2->fl_lmops == fl1->fl_lmops &&
			fl1->fl_lmops->fl_compare_owner(fl1, fl2);
	return fl1->fl_owner == fl2->fl_owner;
}

/* Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
 */
static void __locks_delete_block(struct file_lock *waiter)
{
	list_del_init(&waiter->fl_block);
	list_del_init(&waiter->fl_link);
	waiter->fl_next = NULL;
}

/*
 * Remove waiter from its blocker's block list, under the big kernel lock.
 */
static void locks_delete_block(struct file_lock *waiter)
{
	lock_kernel();
	__locks_delete_block(waiter);
	unlock_kernel();
}

/* Insert waiter into blocker's block list.
 * We use a circular list so that processes can be easily woken up in
 * the order they blocked. The documentation doesn't require this but
 * it seems like the reasonable thing to do.
 */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter)
{
	BUG_ON(!list_empty(&waiter->fl_block));
	list_add_tail(&waiter->fl_block, &blocker->fl_block);
	waiter->fl_next = blocker;
	if (IS_POSIX(blocker))
		list_add(&waiter->fl_link, &blocked_list);
}
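/*
 * Explanatory note (added): only waiters blocked on a POSIX lock go onto
 * the global blocked_list (see the IS_POSIX() test above).  That list
 * exists purely so posix_locks_deadlock() can chase owner-to-owner
 * wait-for chains.  FLOCK waiters never deadlock this way -- a conflicting
 * flock() lock is dropped before an upgrade -- so they only sit on their
 * blocker's fl_block list.
 */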
/* Wake up processes blocked waiting for blocker.
 * If told to wait then schedule the processes until the block list
 * is empty, otherwise empty the block list ourselves.
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	while (!list_empty(&blocker->fl_block)) {
		struct file_lock *waiter = list_entry(blocker->fl_block.next,
				struct file_lock, fl_block);
		__locks_delete_block(waiter);
		if (waiter->fl_lmops && waiter->fl_lmops->fl_notify)
			waiter->fl_lmops->fl_notify(waiter);
		else
			wake_up(&waiter->fl_wait);
	}
}

/* Insert file lock fl into an inode's lock list at the position indicated
 * by pos. At the same time add the lock to the global file lock list.
 */
static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
{
	list_add(&fl->fl_link, &file_lock_list);

	/* insert into file's list */
	fl->fl_next = *pos;
	*pos = fl;

	if (fl->fl_ops && fl->fl_ops->fl_insert)
		fl->fl_ops->fl_insert(fl);
}

/*
 * Delete a lock and then free it.
 * Wake up processes that are blocked waiting for this lock,
 * notify the FS that the lock has been cleared and
 * finally free the lock.
 */
static void locks_delete_lock(struct file_lock **thisfl_p)
{
	struct file_lock *fl = *thisfl_p;

	*thisfl_p = fl->fl_next;
	fl->fl_next = NULL;
	list_del_init(&fl->fl_link);

	fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
	if (fl->fl_fasync != NULL) {
		printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
		fl->fl_fasync = NULL;
	}

	if (fl->fl_ops && fl->fl_ops->fl_remove)
		fl->fl_ops->fl_remove(fl);

	locks_wake_up_blocks(fl);
	locks_free_lock(fl);
}

/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 * checks for shared/exclusive status of overlapping locks.
 */
static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	if (sys_fl->fl_type == F_WRLCK)
		return 1;
	if (caller_fl->fl_type == F_WRLCK)
		return 1;
	return 0;
}

/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
 * checking before calling the locks_conflict().
 */
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* POSIX locks owned by the same process do not conflict with
	 * each other.
	 */
	if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl))
		return (0);

	/* Check whether they overlap */
	if (!locks_overlap(caller_fl, sys_fl))
		return 0;

	return (locks_conflict(caller_fl, sys_fl));
}

/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
 * checking before calling the locks_conflict().
 */
static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* FLOCK locks referring to the same filp do not conflict with
	 * each other.
	 */
	if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
		return (0);
	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
		return 0;

	return (locks_conflict(caller_fl, sys_fl));
}

static int interruptible_sleep_on_locked(wait_queue_head_t *fl_wait, int timeout)
{
	int result = 0;
	DECLARE_WAITQUEUE(wait, current);

	__set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(fl_wait, &wait);
	if (timeout == 0)
		schedule();
	else
		result = schedule_timeout(timeout);
	if (signal_pending(current))
		result = -ERESTARTSYS;
	remove_wait_queue(fl_wait, &wait);
	__set_current_state(TASK_RUNNING);
	return result;
}

static int locks_block_on_timeout(struct file_lock *blocker, struct file_lock *waiter, int time)
{
	int result;
	locks_insert_block(blocker, waiter);
	result = interruptible_sleep_on_locked(&waiter->fl_wait, time);
	__locks_delete_block(waiter);
	return result;
}

int
posix_test_lock(struct file *filp, struct file_lock *fl,
		struct file_lock *conflock)
{
	struct file_lock *cfl;

	lock_kernel();
	for (cfl = filp->f_dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
		if (!IS_POSIX(cfl))
			continue;
		if (posix_locks_conflict(cfl, fl))
			break;
	}
	if (cfl) {
		__locks_copy_lock(conflock, cfl);
		unlock_kernel();
		return 1;
	}
	unlock_kernel();
	return 0;
}

EXPORT_SYMBOL(posix_test_lock);
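/*
 * Explanatory note (added): once ownership and overlap have been screened
 * by the callers above, locks_conflict() reduces to the classic
 * shared/exclusive matrix:
 *
 *	existing \ request	F_RDLCK		F_WRLCK
 *	F_RDLCK			no conflict	conflict
 *	F_WRLCK			conflict	conflict
 */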
/* This function tests for deadlock condition before putting a process to
 * sleep. The detection scheme is no longer recursive. Recursive was neat,
 * but dangerous - we risked stack corruption if the lock data was bad, or
 * if the recursion was too deep for any other reason.
 *
 * We rely on the fact that a task can only be on one lock's wait queue
 * at a time. When we find blocked_task on a wait queue we can re-search
 * with blocked_task equal to that queue's owner, until either blocked_task
 * isn't found, or blocked_task is found on a queue owned by my_task.
 *
 * Note: the above assumption may not be true when handling lock requests
 * from a broken NFS client. But broken NFS clients have a lot more to
 * worry about than proper deadlock detection anyway... --okir
 */
int posix_locks_deadlock(struct file_lock *caller_fl,
			 struct file_lock *block_fl)
{
	struct list_head *tmp;

next_task:
	if (posix_same_owner(caller_fl, block_fl))
		return 1;
	list_for_each(tmp, &blocked_list) {
		struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link);
		if (posix_same_owner(fl, block_fl)) {
			fl = fl->fl_next;
			block_fl = fl;
			goto next_task;
		}
	}
	return 0;
}

EXPORT_SYMBOL(posix_locks_deadlock);
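/*
 * Worked example (added for clarity): task A holds lock L1 and is blocked
 * waiting on task B's lock L2; now B requests a range covered by L1.
 * posix_locks_deadlock() is entered with caller_fl = B's request and
 * block_fl = L1.  A's waiter is found on blocked_list (it shares L1's
 * owner), so the search follows its fl_next pointer to L2.  L2 has the
 * same owner as the caller, B, so the cycle B -> A -> B is reported and
 * the request fails with -EDEADLK instead of sleeping forever.
 */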
/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
 * at the head of the list, but that's secret knowledge known only to
 * flock_lock_file and posix_lock_file.
 */
static int flock_lock_file(struct file *filp, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	struct file_lock **before;
	struct inode * inode = filp->f_dentry->d_inode;
	int error = 0;
	int found = 0;

	lock_kernel();
	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (filp != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;
		found = 1;
		locks_delete_lock(before);
		break;
	}

	if (request->fl_type == F_UNLCK)
		goto out;

	new_fl = locks_alloc_lock();
	if (new_fl == NULL)
		goto out;
	/*
	 * If a higher-priority process was blocked on the old file lock,
	 * give it the opportunity to lock the file.
	 */
	if (found)
		cond_resched();

	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (!flock_locks_conflict(request, fl))
			continue;
		error = -EAGAIN;
		if (request->fl_flags & FL_SLEEP)
			locks_insert_block(fl, request);
		goto out;
	}
	locks_copy_lock(new_fl, request);
	locks_insert_lock(&inode->i_flock, new_fl);
	new_fl = NULL;

out:
	unlock_kernel();
	if (new_fl)
		locks_free_lock(new_fl);
	return error;
}
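/*
 * Explanatory note (added): the "found" path above is why flock() upgrades
 * are not atomic, as the header comment warns.  An existing lock of a
 * different type on the same filp is deleted first, other waiters get a
 * chance to run (cond_resched()), and only then is the new lock attempted;
 * so a LOCK_SH -> LOCK_EX upgrade can block, or fail with -EAGAIN under
 * LOCK_NB, after the old shared lock is already gone.
 */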
static int __posix_lock_file_conf(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
{
	struct file_lock *fl;
	struct file_lock *new_fl, *new_fl2;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock **before;
	int error, added = 0;

	/*
	 * We may need two file_lock structures for this operation,
	 * so we get them in advance to avoid races.
	 */
	new_fl = locks_alloc_lock();
	new_fl2 = locks_alloc_lock();

	lock_kernel();
	if (request->fl_type != F_UNLCK) {
		for_each_lock(inode, before) {
			struct file_lock *fl = *before;
			if (!IS_POSIX(fl))
				continue;
			if (!posix_locks_conflict(request, fl))
				continue;
			if (conflock)
				locks_copy_lock(conflock, fl);
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			error = -EDEADLK;
			if (posix_locks_deadlock(request, fl))
				goto out;
			error = -EAGAIN;
			locks_insert_block(fl, request);
			goto out;
		}
	}

	/* If we're just looking for a conflict, we're done. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	error = -ENOLCK; /* "no luck" */
	if (!(new_fl && new_fl2))
		goto out;

	/*
	 * We've allocated the new locks in advance, so there are no
	 * errors possible (and no blocking operations) from here on.
	 *
	 * Find the first old lock with the same owner as the new lock.
	 */

	before = &inode->i_flock;

	/* First skip locks owned by other processes. */
	while ((fl = *before) && (!IS_POSIX(fl) ||
				  !posix_same_owner(request, fl))) {
		before = &fl->fl_next;
	}

	/* Process locks with this owner. */
	while ((fl = *before) && posix_same_owner(request, fl)) {
		/* Detect adjacent or overlapping regions (if same lock type)
		 */
		if (request->fl_type == fl->fl_type) {
			/* In all comparisons of start vs end, use
			 * "start - 1" rather than "end + 1". If end
			 * is OFFSET_MAX, end + 1 will become negative.
			 */
			if (fl->fl_end < request->fl_start - 1)
				goto next_lock;
			/* If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
			if (fl->fl_start - 1 > request->fl_end)
				break;

			/* If we come here, the new and old lock are of the
			 * same type and adjacent or overlapping. Make one
			 * lock yielding from the lower start address of both
			 * locks to the higher end address.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock(before);
				continue;
			}
			request = fl;
			added = 1;
		}
		else {
			/* Processing for different lock types is a bit
			 * more complex.
			 */
			if (fl->fl_end < request->fl_start)
				goto next_lock;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = 1;
			if (fl->fl_start < request->fl_start)
				left = fl;
			/* If the next lock in the list has a higher end
			 * address than the new one, insert the new one here.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/* The new lock completely replaces an old
				 * one (This may happen several times).
				 */
				if (added) {
					locks_delete_lock(before);
					continue;
				}
				/* Replace the old lock with the new one.
				 * Wake up anybody waiting for the old one,
				 * as the change in lock type might satisfy
				 * their needs.
				 */
				locks_wake_up_blocks(fl);
				fl->fl_start = request->fl_start;
				fl->fl_end = request->fl_end;
				fl->fl_type = request->fl_type;
				locks_release_private(fl);
				locks_copy_private(fl, request);
				request = fl;
				added = 1;
			}
		}
		/* Go on to next lock.
		 */
	next_lock:
		before = &fl->fl_next;
	}

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK)
			goto out;
		locks_copy_lock(new_fl, request);
		locks_insert_lock(before, new_fl);
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* The new lock breaks the old one in two pieces,
			 * so we have to use the second new lock.
			 */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock(before, left);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
out:
	unlock_kernel();
	/*
	 * Free any unused locks.
	 */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	return error;
}
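/*
 * Worked example (added for clarity): suppose the owner already holds a
 * read lock on bytes 0..99 and now requests a write lock on 40..59.  The
 * types differ and the old lock straddles the request, so left and right
 * both end up pointing at the old lock: the spare new_fl2 is copied from
 * it to carry the 0..39 head, the old lock is trimmed to the 60..99 tail,
 * and new_fl is inserted as the 40..59 write lock -- which is why two
 * file_lock structures are allocated up front.  Conversely, a same-type,
 * same-owner request for 100..199 would simply extend the existing 0..99
 * lock to cover 0..199.
 */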
/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 */
int posix_lock_file(struct file *filp, struct file_lock *fl)
{
	return __posix_lock_file_conf(filp->f_dentry->d_inode, fl, NULL);
}
EXPORT_SYMBOL(posix_lock_file);

/**
 * posix_lock_file_conf - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 * @conflock: Place to return a copy of the conflicting lock, if found.
 *
 * Except for the conflock parameter, acts just like posix_lock_file.
 */
int posix_lock_file_conf(struct file *filp, struct file_lock *fl,
			struct file_lock *conflock)
{
	return __posix_lock_file_conf(filp->f_dentry->d_inode, fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file_conf);

/**
 * posix_lock_file_wait - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 */
int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = posix_lock_file(filp, fl);
		if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP))
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}
EXPORT_SYMBOL(posix_lock_file_wait);

/**
 * locks_mandatory_locked - Check for an active lock
 * @inode: the file to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from locks_verify_locked() only.
 */
int locks_mandatory_locked(struct inode *inode)
{
	fl_owner_t owner = current->files;
	struct file_lock *fl;

	/*
	 * Search the lock list for this inode for any POSIX locks.
	 */
	lock_kernel();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!IS_POSIX(fl))
			continue;
		if (fl->fl_owner != owner)
			break;
	}
	unlock_kernel();
	return fl ? -EAGAIN : 0;
}

/**
 * locks_mandatory_area - Check for a conflicting lock
 * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ
 *		for shared
 * @inode: the file to check
 * @filp: how the file was opened (if it was)
 * @offset: start of area to check
 * @count: length of area to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from rw_verify_area() and
 * locks_verify_truncate().
 */
int locks_mandatory_area(int read_write, struct inode *inode,
			 struct file *filp, loff_t offset,
			 size_t count)
{
	struct file_lock fl;
	int error;

	locks_init_lock(&fl);
	fl.fl_owner = current->files;
	fl.fl_pid = current->tgid;
	fl.fl_file = filp;
	fl.fl_flags = FL_POSIX | FL_ACCESS;
	if (filp && !(filp->f_flags & O_NONBLOCK))
		fl.fl_flags |= FL_SLEEP;
	fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
	fl.fl_start = offset;
	fl.fl_end = offset + count - 1;

	for (;;) {
		error = __posix_lock_file_conf(inode, &fl, NULL);
		if (error != -EAGAIN)
			break;
		if (!(fl.fl_flags & FL_SLEEP))
			break;
		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
		if (!error) {
			/*
			 * If we've been sleeping someone might have
			 * changed the permissions behind our back.
			 */
			if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
				continue;
		}

		locks_delete_block(&fl);
		break;
	}

	return error;
}

EXPORT_SYMBOL(locks_mandatory_area);

/* We already had a lease on this file; just change its type */
int lease_modify(struct file_lock **before, int arg)
{
	struct file_lock *fl = *before;
	int error = assign_type(fl, arg);

	if (error)
		return error;
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK)
		locks_delete_lock(before);
	return 0;
}

EXPORT_SYMBOL(lease_modify);

static void time_out_leases(struct inode *inode)
{
	struct file_lock **before;
	struct file_lock *fl;

	before = &inode->i_flock;
	while ((fl = *before) && IS_LEASE(fl) && (fl->fl_type & F_INPROGRESS)) {
		if ((fl->fl_break_time == 0)
				|| time_before(jiffies, fl->fl_break_time)) {
			before = &fl->fl_next;
			continue;
		}
		lease_modify(before, fl->fl_type & ~F_INPROGRESS);
		if (fl == *before)	/* lease_modify may have freed fl */
			before = &fl->fl_next;
	}
}
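/*
 * Explanatory note (added): a lease break is encoded in the lease itself
 * rather than in separate state.  F_INPROGRESS is OR'ed into fl_type
 * alongside the type the lease will drop to (F_RDLCK or F_UNLCK),
 * fl_break_time holds the jiffies deadline (0 means no deadline), and
 * time_out_leases() above downgrades any lease whose deadline has passed
 * by stripping F_INPROGRESS via lease_modify().
 */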
/**
 * __break_lease - revoke all outstanding leases on file
 * @inode: the inode of the file to return
 * @mode: the open mode (read or write)
 *
 * break_lease (inlined for speed) has checked there already
 * is a lease on this file. Leases are broken on a call to open()
 * or truncate(). This function can sleep unless you
 * specified %O_NONBLOCK to your open().
 */
int __break_lease(struct inode *inode, unsigned int mode)
{
	int error = 0, future;
	struct file_lock *new_fl, *flock;
	struct file_lock *fl;
	int alloc_err;
	unsigned long break_time;
	int i_have_this_lease = 0;

	alloc_err = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK,
			&new_fl);

	lock_kernel();

	time_out_leases(inode);

	flock = inode->i_flock;
	if ((flock == NULL) || !IS_LEASE(flock))
		goto out;

	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
		if (fl->fl_owner == current->files)
			i_have_this_lease = 1;

	if (mode & FMODE_WRITE) {
		/* If we want write access, we have to revoke any lease. */
		future = F_UNLCK | F_INPROGRESS;
	} else if (flock->fl_type & F_INPROGRESS) {
		/* If the lease is already being broken, we just leave it */
		future = flock->fl_type;
	} else if (flock->fl_type & F_WRLCK) {
		/* Downgrade the exclusive lease to a read-only lease. */
		future = F_RDLCK | F_INPROGRESS;
	} else {
		/* the existing lease was read-only, so we can read too. */
		goto out;
	}

	if (alloc_err && !i_have_this_lease && ((mode & O_NONBLOCK) == 0)) {
		error = alloc_err;
		goto out;
	}

	break_time = 0;
	if (lease_break_time > 0) {
		break_time = jiffies + lease_break_time * HZ;
		if (break_time == 0)
			break_time++;	/* so that 0 means no break time */
	}

	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
		if (fl->fl_type != future) {
			fl->fl_type = future;
			fl->fl_break_time = break_time;
			/* lease must have lmops break callback */
			fl->fl_lmops->fl_break(fl);
		}
	}

	if (i_have_this_lease || (mode & O_NONBLOCK)) {
		error = -EWOULDBLOCK;
		goto out;
	}

restart:
	break_time = flock->fl_break_time;
	if (break_time != 0) {
		break_time -= jiffies;
		if (break_time == 0)
			break_time++;
	}
	error = locks_block_on_timeout(flock, new_fl, break_time);
	if (error >= 0) {
		if (error == 0)
			time_out_leases(inode);
		/* Wait for the next lease that has not been broken yet */
		for (flock = inode->i_flock; flock && IS_LEASE(flock);
				flock = flock->fl_next) {
			if (flock->fl_type & F_INPROGRESS)
				goto restart;
		}
		error = 0;
	}

out:
	unlock_kernel();
	if (!alloc_err)
		locks_free_lock(new_fl);
	return error;
}

EXPORT_SYMBOL(__break_lease);

/**
 * lease_get_mtime
 * @inode: the inode
 * @time: pointer to a timespec which will contain the last modified time
 *
 * This is to force NFS clients to flush their caches for files with
 * exclusive leases. The justification is that if someone has an
 * exclusive lease, then they could be modifying it.
 */
void lease_get_mtime(struct inode *inode, struct timespec *time)
{
	struct file_lock *flock = inode->i_flock;
	if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK))
		*time = current_fs_time(inode->i_sb);
	else
		*time = inode->i_mtime;
}

EXPORT_SYMBOL(lease_get_mtime);
1290 * 1291 * XXX: sfr & willy disagree over whether F_INPROGRESS 1292 * should be returned to userspace. 1293 */ 1294 int fcntl_getlease(struct file *filp) 1295 { 1296 struct file_lock *fl; 1297 int type = F_UNLCK; 1298 1299 lock_kernel(); 1300 time_out_leases(filp->f_dentry->d_inode); 1301 for (fl = filp->f_dentry->d_inode->i_flock; fl && IS_LEASE(fl); 1302 fl = fl->fl_next) { 1303 if (fl->fl_file == filp) { 1304 type = fl->fl_type & ~F_INPROGRESS; 1305 break; 1306 } 1307 } 1308 unlock_kernel(); 1309 return type; 1310 } 1311 1312 /** 1313 * __setlease - sets a lease on an open file 1314 * @filp: file pointer 1315 * @arg: type of lease to obtain 1316 * @flp: input - file_lock to use, output - file_lock inserted 1317 * 1318 * The (input) flp->fl_lmops->fl_break function is required 1319 * by break_lease(). 1320 * 1321 * Called with kernel lock held. 1322 */ 1323 static int __setlease(struct file *filp, long arg, struct file_lock **flp) 1324 { 1325 struct file_lock *fl, **before, **my_before = NULL, *lease; 1326 struct dentry *dentry = filp->f_dentry; 1327 struct inode *inode = dentry->d_inode; 1328 int error, rdlease_count = 0, wrlease_count = 0; 1329 1330 time_out_leases(inode); 1331 1332 error = -EINVAL; 1333 if (!flp || !(*flp) || !(*flp)->fl_lmops || !(*flp)->fl_lmops->fl_break) 1334 goto out; 1335 1336 lease = *flp; 1337 1338 error = -EAGAIN; 1339 if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0)) 1340 goto out; 1341 if ((arg == F_WRLCK) 1342 && ((atomic_read(&dentry->d_count) > 1) 1343 || (atomic_read(&inode->i_count) > 1))) 1344 goto out; 1345 1346 /* 1347 * At this point, we know that if there is an exclusive 1348 * lease on this file, then we hold it on this filp 1349 * (otherwise our open of this file would have blocked). 1350 * And if we are trying to acquire an exclusive lease, 1351 * then the file is not open by anyone (including us) 1352 * except for this filp. 1353 */ 1354 for (before = &inode->i_flock; 1355 ((fl = *before) != NULL) && IS_LEASE(fl); 1356 before = &fl->fl_next) { 1357 if (lease->fl_lmops->fl_mylease(fl, lease)) 1358 my_before = before; 1359 else if (fl->fl_type == (F_INPROGRESS | F_UNLCK)) 1360 /* 1361 * Someone is in the process of opening this 1362 * file for writing so we may not take an 1363 * exclusive lease on it. 1364 */ 1365 wrlease_count++; 1366 else 1367 rdlease_count++; 1368 } 1369 1370 if ((arg == F_RDLCK && (wrlease_count > 0)) || 1371 (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0))) 1372 goto out; 1373 1374 if (my_before != NULL) { 1375 error = lease->fl_lmops->fl_change(my_before, arg); 1376 goto out; 1377 } 1378 1379 error = 0; 1380 if (arg == F_UNLCK) 1381 goto out; 1382 1383 error = -EINVAL; 1384 if (!leases_enable) 1385 goto out; 1386 1387 error = lease_alloc(filp, arg, &fl); 1388 if (error) 1389 goto out; 1390 1391 locks_copy_lock(fl, lease); 1392 1393 locks_insert_lock(before, fl); 1394 1395 *flp = fl; 1396 out: 1397 return error; 1398 } 1399 1400 /** 1401 * setlease - sets a lease on an open file 1402 * @filp: file pointer 1403 * @arg: type of lease to obtain 1404 * @lease: file_lock to use 1405 * 1406 * Call this to establish a lease on the file. 
/**
 * setlease - sets a lease on an open file
 * @filp: file pointer
 * @arg: type of lease to obtain
 * @lease: file_lock to use
 *
 * Call this to establish a lease on the file.
 * The fl_lmops fl_break function is required by break_lease
 */

int setlease(struct file *filp, long arg, struct file_lock **lease)
{
	struct dentry *dentry = filp->f_dentry;
	struct inode *inode = dentry->d_inode;
	int error;

	if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE))
		return -EACCES;
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	error = security_file_lock(filp, arg);
	if (error)
		return error;

	lock_kernel();
	error = __setlease(filp, arg, lease);
	unlock_kernel();

	return error;
}

EXPORT_SYMBOL(setlease);

/**
 * fcntl_setlease - sets a lease on an open file
 * @fd: open file descriptor
 * @filp: file pointer
 * @arg: type of lease to obtain
 *
 * Call this fcntl to establish a lease on the file.
 * Note that you also need to call %F_SETSIG to
 * receive a signal when the lease is broken.
 */
int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
	struct file_lock fl, *flp = &fl;
	struct dentry *dentry = filp->f_dentry;
	struct inode *inode = dentry->d_inode;
	int error;

	if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE))
		return -EACCES;
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	error = security_file_lock(filp, arg);
	if (error)
		return error;

	locks_init_lock(&fl);
	error = lease_init(filp, arg, &fl);
	if (error)
		return error;

	lock_kernel();

	error = __setlease(filp, arg, &flp);
	if (error || arg == F_UNLCK)
		goto out_unlock;

	error = fasync_helper(fd, filp, 1, &flp->fl_fasync);
	if (error < 0) {
		/* remove lease just inserted by __setlease */
		flp->fl_type = F_UNLCK | F_INPROGRESS;
		flp->fl_break_time = jiffies - 10;
		time_out_leases(inode);
		goto out_unlock;
	}

	error = f_setown(filp, current->pid, 0);
out_unlock:
	unlock_kernel();
	return error;
}
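/*
 * Illustrative userspace sequence (added; not part of the original file),
 * following the note above that F_SETSIG is also needed to choose the
 * signal delivered on a lease break:
 *
 *	fcntl(fd, F_SETSIG, SIGRTMIN);		// pick the break signal
 *	fcntl(fd, F_SETLEASE, F_RDLCK);		// take a read lease
 *	// ... on the signal, flush state, then surrender the lease:
 *	fcntl(fd, F_SETLEASE, F_UNLCK);
 *
 * If the holder never unlocks, __break_lease() times the lease out after
 * lease_break_time seconds.
 */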
/**
 * flock_lock_file_wait - Apply a FLOCK-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * Add a FLOCK style lock to a file.
 */
int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = flock_lock_file(filp, fl);
		if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP))
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}

EXPORT_SYMBOL(flock_lock_file_wait);

/**
 * sys_flock: - flock() system call.
 * @fd: the file descriptor to lock.
 * @cmd: the type of lock to apply.
 *
 * Apply a %FL_FLOCK style lock to an open file descriptor.
 * The @cmd can be one of
 *
 * %LOCK_SH -- a shared lock.
 *
 * %LOCK_EX -- an exclusive lock.
 *
 * %LOCK_UN -- remove an existing lock.
 *
 * %LOCK_MAND -- a `mandatory' flock. This exists to emulate Windows Share Modes.
 *
 * %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
 * processes read and write access respectively.
 */
asmlinkage long sys_flock(unsigned int fd, unsigned int cmd)
{
	struct file *filp;
	struct file_lock *lock;
	int can_sleep, unlock;
	int error;

	error = -EBADF;
	filp = fget(fd);
	if (!filp)
		goto out;

	can_sleep = !(cmd & LOCK_NB);
	cmd &= ~LOCK_NB;
	unlock = (cmd == LOCK_UN);

	if (!unlock && !(cmd & LOCK_MAND) && !(filp->f_mode & 3)) /* 3 == FMODE_READ|FMODE_WRITE */
		goto out_putf;

	error = flock_make_lock(filp, &lock, cmd);
	if (error)
		goto out_putf;
	if (can_sleep)
		lock->fl_flags |= FL_SLEEP;

	error = security_file_lock(filp, cmd);
	if (error)
		goto out_free;

	if (filp->f_op && filp->f_op->flock)
		error = filp->f_op->flock(filp,
					  (can_sleep) ? F_SETLKW : F_SETLK,
					  lock);
	else
		error = flock_lock_file_wait(filp, lock);

out_free:
	locks_free_lock(lock);

out_putf:
	fput(filp);
out:
	return error;
}

/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk(struct file *filp, struct flock __user *l)
{
	struct file_lock *fl, cfl, file_lock;
	struct flock flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	if (filp->f_op && filp->f_op->lock) {
		error = filp->f_op->lock(filp, F_GETLK, &file_lock);
		if (file_lock.fl_ops && file_lock.fl_ops->fl_release_private)
			file_lock.fl_ops->fl_release_private(&file_lock);
		if (error < 0)
			goto out;
		else
			fl = (file_lock.fl_type == F_UNLCK ? NULL : &file_lock);
	} else {
		fl = (posix_test_lock(filp, &file_lock, &cfl) ? &cfl : NULL);
	}

	flock.l_type = F_UNLCK;
	if (fl != NULL) {
		flock.l_pid = fl->fl_pid;
#if BITS_PER_LONG == 32
		/*
		 * Make sure we can represent the posix lock via
		 * legacy 32bit flock.
		 */
		error = -EOVERFLOW;
		if (fl->fl_start > OFFT_OFFSET_MAX)
			goto out;
		if ((fl->fl_end != OFFSET_MAX)
		    && (fl->fl_end > OFFT_OFFSET_MAX))
			goto out;
#endif
		flock.l_start = fl->fl_start;
		flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
			fl->fl_end - fl->fl_start + 1;
		flock.l_whence = 0;
		flock.l_type = fl->fl_type;
	}
	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;
out:
	return error;
}
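/*
 * Explanatory note (added): in the F_GETLK reply built above, l_len == 0
 * is the flock encoding for "through end-of-file"; it is produced whenever
 * the internal fl_end is OFFSET_MAX.  l_whence always comes back as
 * SEEK_SET, with l_start already made absolute.
 */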
/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
		struct flock __user *l)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct flock flock;
	struct inode *inode;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	/*
	 * This might block, so we do it before checking the inode.
	 */
	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;

	inode = filp->f_dentry->d_inode;

	/* Don't allow mandatory locks on files that may be memory mapped
	 * and shared. (Setgid set with group-execute clear is the mode
	 * combination that marks a file for mandatory locking.)
	 */
	if (IS_MANDLOCK(inode) &&
	    (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID &&
	    mapping_writably_mapped(filp->f_mapping)) {
		error = -EAGAIN;
		goto out;
	}

again:
	error = flock_to_posix_lock(filp, file_lock, &flock);
	if (error)
		goto out;
	if (cmd == F_SETLKW) {
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = -EBADF;
	switch (flock.l_type) {
	case F_RDLCK:
		if (!(filp->f_mode & FMODE_READ))
			goto out;
		break;
	case F_WRLCK:
		if (!(filp->f_mode & FMODE_WRITE))
			goto out;
		break;
	case F_UNLCK:
		break;
	default:
		error = -EINVAL;
		goto out;
	}

	error = security_file_lock(filp, file_lock->fl_type);
	if (error)
		goto out;

	if (filp->f_op && filp->f_op->lock != NULL)
		error = filp->f_op->lock(filp, cmd, file_lock);
	else {
		for (;;) {
			error = posix_lock_file(filp, file_lock);
			if ((error != -EAGAIN) || (cmd == F_SETLK))
				break;
			error = wait_event_interruptible(file_lock->fl_wait,
					!file_lock->fl_next);
			if (!error)
				continue;

			locks_delete_block(file_lock);
			break;
		}
	}

	/*
	 * Attempt to detect a close/fcntl race and recover by
	 * releasing the lock that was just acquired.
	 */
	if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) {
		flock.l_type = F_UNLCK;
		goto again;
	}

out:
	locks_free_lock(file_lock);
	return error;
}

#if BITS_PER_LONG == 32
/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
{
	struct file_lock *fl, cfl, file_lock;
	struct flock64 flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock64_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	if (filp->f_op && filp->f_op->lock) {
		error = filp->f_op->lock(filp, F_GETLK, &file_lock);
		if (file_lock.fl_ops && file_lock.fl_ops->fl_release_private)
			file_lock.fl_ops->fl_release_private(&file_lock);
		if (error < 0)
			goto out;
		else
			fl = (file_lock.fl_type == F_UNLCK ? NULL : &file_lock);
	} else {
		fl = (posix_test_lock(filp, &file_lock, &cfl) ? &cfl : NULL);
	}

	flock.l_type = F_UNLCK;
	if (fl != NULL) {
		flock.l_pid = fl->fl_pid;
		flock.l_start = fl->fl_start;
		flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
			fl->fl_end - fl->fl_start + 1;
		flock.l_whence = 0;
		flock.l_type = fl->fl_type;
	}
	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;

out:
	return error;
}
/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
		struct flock64 __user *l)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct flock64 flock;
	struct inode *inode;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	/*
	 * This might block, so we do it before checking the inode.
	 */
	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;

	inode = filp->f_dentry->d_inode;

	/* Don't allow mandatory locks on files that may be memory mapped
	 * and shared.
	 */
	if (IS_MANDLOCK(inode) &&
	    (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID &&
	    mapping_writably_mapped(filp->f_mapping)) {
		error = -EAGAIN;
		goto out;
	}

again:
	error = flock64_to_posix_lock(filp, file_lock, &flock);
	if (error)
		goto out;
	if (cmd == F_SETLKW64) {
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = -EBADF;
	switch (flock.l_type) {
	case F_RDLCK:
		if (!(filp->f_mode & FMODE_READ))
			goto out;
		break;
	case F_WRLCK:
		if (!(filp->f_mode & FMODE_WRITE))
			goto out;
		break;
	case F_UNLCK:
		break;
	default:
		error = -EINVAL;
		goto out;
	}

	error = security_file_lock(filp, file_lock->fl_type);
	if (error)
		goto out;

	if (filp->f_op && filp->f_op->lock != NULL)
		error = filp->f_op->lock(filp, cmd, file_lock);
	else {
		for (;;) {
			error = posix_lock_file(filp, file_lock);
			if ((error != -EAGAIN) || (cmd == F_SETLK64))
				break;
			error = wait_event_interruptible(file_lock->fl_wait,
					!file_lock->fl_next);
			if (!error)
				continue;

			locks_delete_block(file_lock);
			break;
		}
	}

	/*
	 * Attempt to detect a close/fcntl race and recover by
	 * releasing the lock that was just acquired.
	 */
	if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) {
		flock.l_type = F_UNLCK;
		goto again;
	}

out:
	locks_free_lock(file_lock);
	return error;
}
#endif /* BITS_PER_LONG == 32 */

/*
 * This function is called when the file is being removed
 * from the task's fd array. POSIX locks belonging to this task
 * are deleted at this time.
 */
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
	struct file_lock lock, **before;

	/*
	 * If there are no locks held on this file, we don't need to call
	 * posix_lock_file(). Another process could be setting a lock on this
	 * file at the same time, but we wouldn't remove that lock anyway.
	 */
	before = &filp->f_dentry->d_inode->i_flock;
	if (*before == NULL)
		return;

	lock.fl_type = F_UNLCK;
	lock.fl_flags = FL_POSIX;
	lock.fl_start = 0;
	lock.fl_end = OFFSET_MAX;
	lock.fl_owner = owner;
	lock.fl_pid = current->tgid;
	lock.fl_file = filp;
	lock.fl_ops = NULL;
	lock.fl_lmops = NULL;

	if (filp->f_op && filp->f_op->lock != NULL) {
		filp->f_op->lock(filp, F_SETLK, &lock);
		goto out;
	}

	/* Can't use posix_lock_file here; we need to remove it no matter
	 * which pid we have.
	 */
	lock_kernel();
	while (*before != NULL) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl) && posix_same_owner(fl, &lock)) {
			locks_delete_lock(before);
			continue;
		}
		before = &fl->fl_next;
	}
	unlock_kernel();
out:
	if (lock.fl_ops && lock.fl_ops->fl_release_private)
		lock.fl_ops->fl_release_private(&lock);
}

EXPORT_SYMBOL(locks_remove_posix);
/*
 * This function is called on the last close of an open file.
 */
void locks_remove_flock(struct file *filp)
{
	struct inode * inode = filp->f_dentry->d_inode;
	struct file_lock *fl;
	struct file_lock **before;

	if (!inode->i_flock)
		return;

	if (filp->f_op && filp->f_op->flock) {
		struct file_lock fl = {
			.fl_pid = current->tgid,
			.fl_file = filp,
			.fl_flags = FL_FLOCK,
			.fl_type = F_UNLCK,
			.fl_end = OFFSET_MAX,
		};
		filp->f_op->flock(filp, F_SETLKW, &fl);
		if (fl.fl_ops && fl.fl_ops->fl_release_private)
			fl.fl_ops->fl_release_private(&fl);
	}

	lock_kernel();
	before = &inode->i_flock;

	while ((fl = *before) != NULL) {
		if (fl->fl_file == filp) {
			if (IS_FLOCK(fl)) {
				locks_delete_lock(before);
				continue;
			}
			if (IS_LEASE(fl)) {
				lease_modify(before, F_UNLCK);
				continue;
			}
			/* What? */
			BUG();
		}
		before = &fl->fl_next;
	}
	unlock_kernel();
}

/**
 * posix_unblock_lock - stop waiting for a file lock
 * @filp: how the file was opened
 * @waiter: the lock which was waiting
 *
 * lockd needs to block waiting for locks.
 */
int
posix_unblock_lock(struct file *filp, struct file_lock *waiter)
{
	int status = 0;

	lock_kernel();
	if (waiter->fl_next)
		__locks_delete_block(waiter);
	else
		status = -ENOENT;
	unlock_kernel();
	return status;
}

EXPORT_SYMBOL(posix_unblock_lock);
"WRITE" : "READ "); 2036 } 2037 if (inode) { 2038 #ifdef WE_CAN_BREAK_LSLK_NOW 2039 out += sprintf(out, "%d %s:%ld ", fl->fl_pid, 2040 inode->i_sb->s_id, inode->i_ino); 2041 #else 2042 /* userspace relies on this representation of dev_t ;-( */ 2043 out += sprintf(out, "%d %02x:%02x:%ld ", fl->fl_pid, 2044 MAJOR(inode->i_sb->s_dev), 2045 MINOR(inode->i_sb->s_dev), inode->i_ino); 2046 #endif 2047 } else { 2048 out += sprintf(out, "%d <none>:0 ", fl->fl_pid); 2049 } 2050 if (IS_POSIX(fl)) { 2051 if (fl->fl_end == OFFSET_MAX) 2052 out += sprintf(out, "%Ld EOF\n", fl->fl_start); 2053 else 2054 out += sprintf(out, "%Ld %Ld\n", fl->fl_start, 2055 fl->fl_end); 2056 } else { 2057 out += sprintf(out, "0 EOF\n"); 2058 } 2059 } 2060 2061 static void move_lock_status(char **p, off_t* pos, off_t offset) 2062 { 2063 int len; 2064 len = strlen(*p); 2065 if(*pos >= offset) { 2066 /* the complete line is valid */ 2067 *p += len; 2068 *pos += len; 2069 return; 2070 } 2071 if(*pos+len > offset) { 2072 /* use the second part of the line */ 2073 int i = offset-*pos; 2074 memmove(*p,*p+i,len-i); 2075 *p += len-i; 2076 *pos += len; 2077 return; 2078 } 2079 /* discard the complete line */ 2080 *pos += len; 2081 } 2082 2083 /** 2084 * get_locks_status - reports lock usage in /proc/locks 2085 * @buffer: address in userspace to write into 2086 * @start: ? 2087 * @offset: how far we are through the buffer 2088 * @length: how much to read 2089 */ 2090 2091 int get_locks_status(char *buffer, char **start, off_t offset, int length) 2092 { 2093 struct list_head *tmp; 2094 char *q = buffer; 2095 off_t pos = 0; 2096 int i = 0; 2097 2098 lock_kernel(); 2099 list_for_each(tmp, &file_lock_list) { 2100 struct list_head *btmp; 2101 struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link); 2102 lock_get_status(q, fl, ++i, ""); 2103 move_lock_status(&q, &pos, offset); 2104 2105 if(pos >= offset+length) 2106 goto done; 2107 2108 list_for_each(btmp, &fl->fl_block) { 2109 struct file_lock *bfl = list_entry(btmp, 2110 struct file_lock, fl_block); 2111 lock_get_status(q, bfl, i, " ->"); 2112 move_lock_status(&q, &pos, offset); 2113 2114 if(pos >= offset+length) 2115 goto done; 2116 } 2117 } 2118 done: 2119 unlock_kernel(); 2120 *start = buffer; 2121 if(q-buffer < length) 2122 return (q-buffer); 2123 return length; 2124 } 2125 2126 /** 2127 * lock_may_read - checks that the region is free of locks 2128 * @inode: the inode that is being read 2129 * @start: the first byte to read 2130 * @len: the number of bytes to read 2131 * 2132 * Emulates Windows locking requirements. Whole-file 2133 * mandatory locks (share modes) can prohibit a read and 2134 * byte-range POSIX locks can prohibit a read if they overlap. 2135 * 2136 * N.B. this function is only ever called 2137 * from knfsd and ownership of locks is never checked. 
/**
 * lock_may_read - checks that the region is free of locks
 * @inode: the inode that is being read
 * @start: the first byte to read
 * @len: the number of bytes to read
 *
 * Emulates Windows locking requirements. Whole-file
 * mandatory locks (share modes) can prohibit a read and
 * byte-range POSIX locks can prohibit a read if they overlap.
 *
 * N.B. this function is only ever called
 * from knfsd and ownership of locks is never checked.
 */
int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
{
	struct file_lock *fl;
	int result = 1;
	lock_kernel();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (IS_POSIX(fl)) {
			if (fl->fl_type == F_RDLCK)
				continue;
			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
				continue;
		} else if (IS_FLOCK(fl)) {
			if (!(fl->fl_type & LOCK_MAND))
				continue;
			if (fl->fl_type & LOCK_READ)
				continue;
		} else
			continue;
		result = 0;
		break;
	}
	unlock_kernel();
	return result;
}

EXPORT_SYMBOL(lock_may_read);

/**
 * lock_may_write - checks that the region is free of locks
 * @inode: the inode that is being written
 * @start: the first byte to write
 * @len: the number of bytes to write
 *
 * Emulates Windows locking requirements. Whole-file
 * mandatory locks (share modes) can prohibit a write and
 * byte-range POSIX locks can prohibit a write if they overlap.
 *
 * N.B. this function is only ever called
 * from knfsd and ownership of locks is never checked.
 */
int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
{
	struct file_lock *fl;
	int result = 1;
	lock_kernel();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (IS_POSIX(fl)) {
			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
				continue;
		} else if (IS_FLOCK(fl)) {
			if (!(fl->fl_type & LOCK_MAND))
				continue;
			if (fl->fl_type & LOCK_WRITE)
				continue;
		} else
			continue;
		result = 0;
		break;
	}
	unlock_kernel();
	return result;
}

EXPORT_SYMBOL(lock_may_write);
static inline void __steal_locks(struct file *file, fl_owner_t from)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct file_lock *fl = inode->i_flock;

	while (fl) {
		if (fl->fl_file == file && fl->fl_owner == from)
			fl->fl_owner = current->files;
		fl = fl->fl_next;
	}
}

/* When getting ready for executing a binary, we make sure that current
 * has a files_struct on its own. Before dropping the old files_struct,
 * we take over ownership of all locks for all file descriptors we own.
 * Note that we may accidentally steal a lock for a file that a sibling
 * has created since the unshare_files() call.
 */
void steal_locks(fl_owner_t from)
{
	struct files_struct *files = current->files;
	int i, j;
	struct fdtable *fdt;

	if (from == files)
		return;

	lock_kernel();
	j = 0;

	/*
	 * We are not taking a ref to the file structures, so
	 * we need to acquire ->file_lock.
	 */
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	for (;;) {
		unsigned long set;
		i = j * __NFDBITS;
		if (i >= fdt->max_fdset || i >= fdt->max_fds)
			break;
		set = fdt->open_fds->fds_bits[j++];
		while (set) {
			if (set & 1) {
				struct file *file = fdt->fd[i];
				if (file)
					__steal_locks(file, from);
			}
			i++;
			set >>= 1;
		}
	}
	spin_unlock(&files->file_lock);
	unlock_kernel();
}
EXPORT_SYMBOL(steal_locks);

static int __init filelock_init(void)
{
	filelock_cache = kmem_cache_create("file_lock_cache",
			sizeof(struct file_lock), 0, SLAB_PANIC,
			init_once, NULL);
	return 0;
}

core_initcall(filelock_init);