1 /* 2 * linux/fs/locks.c 3 * 4 * Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls. 5 * Doug Evans (dje@spiff.uucp), August 07, 1992 6 * 7 * Deadlock detection added. 8 * FIXME: one thing isn't handled yet: 9 * - mandatory locks (requires lots of changes elsewhere) 10 * Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994. 11 * 12 * Miscellaneous edits, and a total rewrite of posix_lock_file() code. 13 * Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994 14 * 15 * Converted file_lock_table to a linked list from an array, which eliminates 16 * the limits on how many active file locks are open. 17 * Chad Page (pageone@netcom.com), November 27, 1994 18 * 19 * Removed dependency on file descriptors. dup()'ed file descriptors now 20 * get the same locks as the original file descriptors, and a close() on 21 * any file descriptor removes ALL the locks on the file for the current 22 * process. Since locks still depend on the process id, locks are inherited 23 * after an exec() but not after a fork(). This agrees with POSIX, and both 24 * BSD and SVR4 practice. 25 * Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995 26 * 27 * Scrapped free list which is redundant now that we allocate locks 28 * dynamically with kmalloc()/kfree(). 29 * Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995 30 * 31 * Implemented two lock personalities - FL_FLOCK and FL_POSIX. 32 * 33 * FL_POSIX locks are created with calls to fcntl() and lockf() through the 34 * fcntl() system call. They have the semantics described above. 35 * 36 * FL_FLOCK locks are created with calls to flock(), through the flock() 37 * system call, which is new. Old C libraries implement flock() via fcntl() 38 * and will continue to use the old, broken implementation. 39 * 40 * FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated 41 * with a file pointer (filp). As a result they can be shared by a parent 42 * process and its children after a fork(). They are removed when the last 43 * file descriptor referring to the file pointer is closed (unless explicitly 44 * unlocked). 45 * 46 * FL_FLOCK locks never deadlock, an existing lock is always removed before 47 * upgrading from shared to exclusive (or vice versa). When this happens 48 * any processes blocked by the current lock are woken up and allowed to 49 * run before the new lock is applied. 50 * Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995 51 * 52 * Removed some race conditions in flock_lock_file(), marked other possible 53 * races. Just grep for FIXME to see them. 54 * Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996. 55 * 56 * Addressed Dmitry's concerns. Deadlock checking no longer recursive. 57 * Lock allocation changed to GFP_ATOMIC as we can't afford to sleep 58 * once we've checked for blocking and deadlocking. 59 * Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996. 60 * 61 * Initial implementation of mandatory locks. SunOS turned out to be 62 * a rotten model, so I implemented the "obvious" semantics. 63 * See 'Documentation/mandatory.txt' for details. 64 * Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996. 65 * 66 * Don't allow mandatory locks on mmap()'ed files. Added simple functions to 67 * check if a file has mandatory locks, used by mmap(), open() and creat() to 68 * see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference 69 * Manual, Section 2. 70 * Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996. 71 * 72 * Tidied up block list handling. Added '/proc/locks' interface. 73 * Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996. 74 * 75 * Fixed deadlock condition for pathological code that mixes calls to 76 * flock() and fcntl(). 77 * Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996. 78 * 79 * Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use 80 * for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to 81 * guarantee sensible behaviour in the case where file system modules might 82 * be compiled with different options than the kernel itself. 83 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996. 84 * 85 * Added a couple of missing wake_up() calls. Thanks to Thomas Meckel 86 * (Thomas.Meckel@mni.fh-giessen.de) for spotting this. 87 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996. 88 * 89 * Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK 90 * locks. Changed process synchronisation to avoid dereferencing locks that 91 * have already been freed. 92 * Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996. 93 * 94 * Made the block list a circular list to minimise searching in the list. 95 * Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996. 96 * 97 * Made mandatory locking a mount option. Default is not to allow mandatory 98 * locking. 99 * Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996. 100 * 101 * Some adaptations for NFS support. 102 * Olaf Kirch (okir@monad.swb.de), Dec 1996, 103 * 104 * Fixed /proc/locks interface so that we can't overrun the buffer we are handed. 105 * Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997. 106 * 107 * Use slab allocator instead of kmalloc/kfree. 108 * Use generic list implementation from <linux/list.h>. 109 * Sped up posix_locks_deadlock by only considering blocked locks. 110 * Matthew Wilcox <willy@debian.org>, March, 2000. 111 * 112 * Leases and LOCK_MAND 113 * Matthew Wilcox <willy@debian.org>, June, 2000. 114 * Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000. 115 */ 116 117 #include <linux/capability.h> 118 #include <linux/file.h> 119 #include <linux/fs.h> 120 #include <linux/init.h> 121 #include <linux/module.h> 122 #include <linux/security.h> 123 #include <linux/slab.h> 124 #include <linux/smp_lock.h> 125 #include <linux/syscalls.h> 126 #include <linux/time.h> 127 #include <linux/rcupdate.h> 128 129 #include <asm/semaphore.h> 130 #include <asm/uaccess.h> 131 132 #define IS_POSIX(fl) (fl->fl_flags & FL_POSIX) 133 #define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK) 134 #define IS_LEASE(fl) (fl->fl_flags & FL_LEASE) 135 136 int leases_enable = 1; 137 int lease_break_time = 45; 138 139 #define for_each_lock(inode, lockp) \ 140 for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next) 141 142 LIST_HEAD(file_lock_list); 143 144 EXPORT_SYMBOL(file_lock_list); 145 146 static LIST_HEAD(blocked_list); 147 148 static kmem_cache_t *filelock_cache; 149 150 /* Allocate an empty lock structure. */ 151 static struct file_lock *locks_alloc_lock(void) 152 { 153 return kmem_cache_alloc(filelock_cache, SLAB_KERNEL); 154 } 155 156 /* Free a lock which is not in use. */ 157 static inline void locks_free_lock(struct file_lock *fl) 158 { 159 if (fl == NULL) { 160 BUG(); 161 return; 162 } 163 if (waitqueue_active(&fl->fl_wait)) 164 panic("Attempting to free lock with active wait queue"); 165 166 if (!list_empty(&fl->fl_block)) 167 panic("Attempting to free lock with active block list"); 168 169 if (!list_empty(&fl->fl_link)) 170 panic("Attempting to free lock on active lock list"); 171 172 if (fl->fl_ops) { 173 if (fl->fl_ops->fl_release_private) 174 fl->fl_ops->fl_release_private(fl); 175 fl->fl_ops = NULL; 176 } 177 178 if (fl->fl_lmops) { 179 if (fl->fl_lmops->fl_release_private) 180 fl->fl_lmops->fl_release_private(fl); 181 fl->fl_lmops = NULL; 182 } 183 184 kmem_cache_free(filelock_cache, fl); 185 } 186 187 void locks_init_lock(struct file_lock *fl) 188 { 189 INIT_LIST_HEAD(&fl->fl_link); 190 INIT_LIST_HEAD(&fl->fl_block); 191 init_waitqueue_head(&fl->fl_wait); 192 fl->fl_next = NULL; 193 fl->fl_fasync = NULL; 194 fl->fl_owner = NULL; 195 fl->fl_pid = 0; 196 fl->fl_file = NULL; 197 fl->fl_flags = 0; 198 fl->fl_type = 0; 199 fl->fl_start = fl->fl_end = 0; 200 fl->fl_ops = NULL; 201 fl->fl_lmops = NULL; 202 } 203 204 EXPORT_SYMBOL(locks_init_lock); 205 206 /* 207 * Initialises the fields of the file lock which are invariant for 208 * free file_locks. 209 */ 210 static void init_once(void *foo, kmem_cache_t *cache, unsigned long flags) 211 { 212 struct file_lock *lock = (struct file_lock *) foo; 213 214 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) != 215 SLAB_CTOR_CONSTRUCTOR) 216 return; 217 218 locks_init_lock(lock); 219 } 220 221 /* 222 * Initialize a new lock from an existing file_lock structure. 223 */ 224 void locks_copy_lock(struct file_lock *new, struct file_lock *fl) 225 { 226 new->fl_owner = fl->fl_owner; 227 new->fl_pid = fl->fl_pid; 228 new->fl_file = fl->fl_file; 229 new->fl_flags = fl->fl_flags; 230 new->fl_type = fl->fl_type; 231 new->fl_start = fl->fl_start; 232 new->fl_end = fl->fl_end; 233 new->fl_ops = fl->fl_ops; 234 new->fl_lmops = fl->fl_lmops; 235 if (fl->fl_ops && fl->fl_ops->fl_copy_lock) 236 fl->fl_ops->fl_copy_lock(new, fl); 237 if (fl->fl_lmops && fl->fl_lmops->fl_copy_lock) 238 fl->fl_lmops->fl_copy_lock(new, fl); 239 } 240 241 EXPORT_SYMBOL(locks_copy_lock); 242 243 static inline int flock_translate_cmd(int cmd) { 244 if (cmd & LOCK_MAND) 245 return cmd & (LOCK_MAND | LOCK_RW); 246 switch (cmd) { 247 case LOCK_SH: 248 return F_RDLCK; 249 case LOCK_EX: 250 return F_WRLCK; 251 case LOCK_UN: 252 return F_UNLCK; 253 } 254 return -EINVAL; 255 } 256 257 /* Fill in a file_lock structure with an appropriate FLOCK lock. */ 258 static int flock_make_lock(struct file *filp, struct file_lock **lock, 259 unsigned int cmd) 260 { 261 struct file_lock *fl; 262 int type = flock_translate_cmd(cmd); 263 if (type < 0) 264 return type; 265 266 fl = locks_alloc_lock(); 267 if (fl == NULL) 268 return -ENOMEM; 269 270 fl->fl_file = filp; 271 fl->fl_pid = current->tgid; 272 fl->fl_flags = FL_FLOCK; 273 fl->fl_type = type; 274 fl->fl_end = OFFSET_MAX; 275 276 *lock = fl; 277 return 0; 278 } 279 280 static int assign_type(struct file_lock *fl, int type) 281 { 282 switch (type) { 283 case F_RDLCK: 284 case F_WRLCK: 285 case F_UNLCK: 286 fl->fl_type = type; 287 break; 288 default: 289 return -EINVAL; 290 } 291 return 0; 292 } 293 294 /* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX 295 * style lock. 296 */ 297 static int flock_to_posix_lock(struct file *filp, struct file_lock *fl, 298 struct flock *l) 299 { 300 off_t start, end; 301 302 switch (l->l_whence) { 303 case 0: /*SEEK_SET*/ 304 start = 0; 305 break; 306 case 1: /*SEEK_CUR*/ 307 start = filp->f_pos; 308 break; 309 case 2: /*SEEK_END*/ 310 start = i_size_read(filp->f_dentry->d_inode); 311 break; 312 default: 313 return -EINVAL; 314 } 315 316 /* POSIX-1996 leaves the case l->l_len < 0 undefined; 317 POSIX-2001 defines it. */ 318 start += l->l_start; 319 if (start < 0) 320 return -EINVAL; 321 fl->fl_end = OFFSET_MAX; 322 if (l->l_len > 0) { 323 end = start + l->l_len - 1; 324 fl->fl_end = end; 325 } else if (l->l_len < 0) { 326 end = start - 1; 327 fl->fl_end = end; 328 start += l->l_len; 329 if (start < 0) 330 return -EINVAL; 331 } 332 fl->fl_start = start; /* we record the absolute position */ 333 if (fl->fl_end < fl->fl_start) 334 return -EOVERFLOW; 335 336 fl->fl_owner = current->files; 337 fl->fl_pid = current->tgid; 338 fl->fl_file = filp; 339 fl->fl_flags = FL_POSIX; 340 fl->fl_ops = NULL; 341 fl->fl_lmops = NULL; 342 343 return assign_type(fl, l->l_type); 344 } 345 346 #if BITS_PER_LONG == 32 347 static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl, 348 struct flock64 *l) 349 { 350 loff_t start; 351 352 switch (l->l_whence) { 353 case 0: /*SEEK_SET*/ 354 start = 0; 355 break; 356 case 1: /*SEEK_CUR*/ 357 start = filp->f_pos; 358 break; 359 case 2: /*SEEK_END*/ 360 start = i_size_read(filp->f_dentry->d_inode); 361 break; 362 default: 363 return -EINVAL; 364 } 365 366 start += l->l_start; 367 if (start < 0) 368 return -EINVAL; 369 fl->fl_end = OFFSET_MAX; 370 if (l->l_len > 0) { 371 fl->fl_end = start + l->l_len - 1; 372 } else if (l->l_len < 0) { 373 fl->fl_end = start - 1; 374 start += l->l_len; 375 if (start < 0) 376 return -EINVAL; 377 } 378 fl->fl_start = start; /* we record the absolute position */ 379 if (fl->fl_end < fl->fl_start) 380 return -EOVERFLOW; 381 382 fl->fl_owner = current->files; 383 fl->fl_pid = current->tgid; 384 fl->fl_file = filp; 385 fl->fl_flags = FL_POSIX; 386 fl->fl_ops = NULL; 387 fl->fl_lmops = NULL; 388 389 switch (l->l_type) { 390 case F_RDLCK: 391 case F_WRLCK: 392 case F_UNLCK: 393 fl->fl_type = l->l_type; 394 break; 395 default: 396 return -EINVAL; 397 } 398 399 return (0); 400 } 401 #endif 402 403 /* default lease lock manager operations */ 404 static void lease_break_callback(struct file_lock *fl) 405 { 406 kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG); 407 } 408 409 static void lease_release_private_callback(struct file_lock *fl) 410 { 411 if (!fl->fl_file) 412 return; 413 414 f_delown(fl->fl_file); 415 fl->fl_file->f_owner.signum = 0; 416 } 417 418 static int lease_mylease_callback(struct file_lock *fl, struct file_lock *try) 419 { 420 return fl->fl_file == try->fl_file; 421 } 422 423 static struct lock_manager_operations lease_manager_ops = { 424 .fl_break = lease_break_callback, 425 .fl_release_private = lease_release_private_callback, 426 .fl_mylease = lease_mylease_callback, 427 .fl_change = lease_modify, 428 }; 429 430 /* 431 * Initialize a lease, use the default lock manager operations 432 */ 433 static int lease_init(struct file *filp, int type, struct file_lock *fl) 434 { 435 fl->fl_owner = current->files; 436 fl->fl_pid = current->tgid; 437 438 fl->fl_file = filp; 439 fl->fl_flags = FL_LEASE; 440 if (assign_type(fl, type) != 0) { 441 locks_free_lock(fl); 442 return -EINVAL; 443 } 444 fl->fl_start = 0; 445 fl->fl_end = OFFSET_MAX; 446 fl->fl_ops = NULL; 447 fl->fl_lmops = &lease_manager_ops; 448 return 0; 449 } 450 451 /* Allocate a file_lock initialised to this type of lease */ 452 static int lease_alloc(struct file *filp, int type, struct file_lock **flp) 453 { 454 struct file_lock *fl = locks_alloc_lock(); 455 int error; 456 457 if (fl == NULL) 458 return -ENOMEM; 459 460 error = lease_init(filp, type, fl); 461 if (error) 462 return error; 463 *flp = fl; 464 return 0; 465 } 466 467 /* Check if two locks overlap each other. 468 */ 469 static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2) 470 { 471 return ((fl1->fl_end >= fl2->fl_start) && 472 (fl2->fl_end >= fl1->fl_start)); 473 } 474 475 /* 476 * Check whether two locks have the same owner. 477 */ 478 static inline int 479 posix_same_owner(struct file_lock *fl1, struct file_lock *fl2) 480 { 481 if (fl1->fl_lmops && fl1->fl_lmops->fl_compare_owner) 482 return fl2->fl_lmops == fl1->fl_lmops && 483 fl1->fl_lmops->fl_compare_owner(fl1, fl2); 484 return fl1->fl_owner == fl2->fl_owner; 485 } 486 487 /* Remove waiter from blocker's block list. 488 * When blocker ends up pointing to itself then the list is empty. 489 */ 490 static inline void __locks_delete_block(struct file_lock *waiter) 491 { 492 list_del_init(&waiter->fl_block); 493 list_del_init(&waiter->fl_link); 494 waiter->fl_next = NULL; 495 } 496 497 /* 498 */ 499 static void locks_delete_block(struct file_lock *waiter) 500 { 501 lock_kernel(); 502 __locks_delete_block(waiter); 503 unlock_kernel(); 504 } 505 506 /* Insert waiter into blocker's block list. 507 * We use a circular list so that processes can be easily woken up in 508 * the order they blocked. The documentation doesn't require this but 509 * it seems like the reasonable thing to do. 510 */ 511 static void locks_insert_block(struct file_lock *blocker, 512 struct file_lock *waiter) 513 { 514 if (!list_empty(&waiter->fl_block)) { 515 printk(KERN_ERR "locks_insert_block: removing duplicated lock " 516 "(pid=%d %Ld-%Ld type=%d)\n", waiter->fl_pid, 517 waiter->fl_start, waiter->fl_end, waiter->fl_type); 518 __locks_delete_block(waiter); 519 } 520 list_add_tail(&waiter->fl_block, &blocker->fl_block); 521 waiter->fl_next = blocker; 522 if (IS_POSIX(blocker)) 523 list_add(&waiter->fl_link, &blocked_list); 524 } 525 526 /* Wake up processes blocked waiting for blocker. 527 * If told to wait then schedule the processes until the block list 528 * is empty, otherwise empty the block list ourselves. 529 */ 530 static void locks_wake_up_blocks(struct file_lock *blocker) 531 { 532 while (!list_empty(&blocker->fl_block)) { 533 struct file_lock *waiter = list_entry(blocker->fl_block.next, 534 struct file_lock, fl_block); 535 __locks_delete_block(waiter); 536 if (waiter->fl_lmops && waiter->fl_lmops->fl_notify) 537 waiter->fl_lmops->fl_notify(waiter); 538 else 539 wake_up(&waiter->fl_wait); 540 } 541 } 542 543 /* Insert file lock fl into an inode's lock list at the position indicated 544 * by pos. At the same time add the lock to the global file lock list. 545 */ 546 static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl) 547 { 548 list_add(&fl->fl_link, &file_lock_list); 549 550 /* insert into file's list */ 551 fl->fl_next = *pos; 552 *pos = fl; 553 554 if (fl->fl_ops && fl->fl_ops->fl_insert) 555 fl->fl_ops->fl_insert(fl); 556 } 557 558 /* 559 * Delete a lock and then free it. 560 * Wake up processes that are blocked waiting for this lock, 561 * notify the FS that the lock has been cleared and 562 * finally free the lock. 563 */ 564 static void locks_delete_lock(struct file_lock **thisfl_p) 565 { 566 struct file_lock *fl = *thisfl_p; 567 568 *thisfl_p = fl->fl_next; 569 fl->fl_next = NULL; 570 list_del_init(&fl->fl_link); 571 572 fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync); 573 if (fl->fl_fasync != NULL) { 574 printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync); 575 fl->fl_fasync = NULL; 576 } 577 578 if (fl->fl_ops && fl->fl_ops->fl_remove) 579 fl->fl_ops->fl_remove(fl); 580 581 locks_wake_up_blocks(fl); 582 locks_free_lock(fl); 583 } 584 585 /* Determine if lock sys_fl blocks lock caller_fl. Common functionality 586 * checks for shared/exclusive status of overlapping locks. 587 */ 588 static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl) 589 { 590 if (sys_fl->fl_type == F_WRLCK) 591 return 1; 592 if (caller_fl->fl_type == F_WRLCK) 593 return 1; 594 return 0; 595 } 596 597 /* Determine if lock sys_fl blocks lock caller_fl. POSIX specific 598 * checking before calling the locks_conflict(). 599 */ 600 static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl) 601 { 602 /* POSIX locks owned by the same process do not conflict with 603 * each other. 604 */ 605 if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl)) 606 return (0); 607 608 /* Check whether they overlap */ 609 if (!locks_overlap(caller_fl, sys_fl)) 610 return 0; 611 612 return (locks_conflict(caller_fl, sys_fl)); 613 } 614 615 /* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific 616 * checking before calling the locks_conflict(). 617 */ 618 static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl) 619 { 620 /* FLOCK locks referring to the same filp do not conflict with 621 * each other. 622 */ 623 if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file)) 624 return (0); 625 if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND)) 626 return 0; 627 628 return (locks_conflict(caller_fl, sys_fl)); 629 } 630 631 static int interruptible_sleep_on_locked(wait_queue_head_t *fl_wait, int timeout) 632 { 633 int result = 0; 634 DECLARE_WAITQUEUE(wait, current); 635 636 __set_current_state(TASK_INTERRUPTIBLE); 637 add_wait_queue(fl_wait, &wait); 638 if (timeout == 0) 639 schedule(); 640 else 641 result = schedule_timeout(timeout); 642 if (signal_pending(current)) 643 result = -ERESTARTSYS; 644 remove_wait_queue(fl_wait, &wait); 645 __set_current_state(TASK_RUNNING); 646 return result; 647 } 648 649 static int locks_block_on_timeout(struct file_lock *blocker, struct file_lock *waiter, int time) 650 { 651 int result; 652 locks_insert_block(blocker, waiter); 653 result = interruptible_sleep_on_locked(&waiter->fl_wait, time); 654 __locks_delete_block(waiter); 655 return result; 656 } 657 658 struct file_lock * 659 posix_test_lock(struct file *filp, struct file_lock *fl) 660 { 661 struct file_lock *cfl; 662 663 lock_kernel(); 664 for (cfl = filp->f_dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) { 665 if (!IS_POSIX(cfl)) 666 continue; 667 if (posix_locks_conflict(cfl, fl)) 668 break; 669 } 670 unlock_kernel(); 671 672 return (cfl); 673 } 674 675 EXPORT_SYMBOL(posix_test_lock); 676 677 /* This function tests for deadlock condition before putting a process to 678 * sleep. The detection scheme is no longer recursive. Recursive was neat, 679 * but dangerous - we risked stack corruption if the lock data was bad, or 680 * if the recursion was too deep for any other reason. 681 * 682 * We rely on the fact that a task can only be on one lock's wait queue 683 * at a time. When we find blocked_task on a wait queue we can re-search 684 * with blocked_task equal to that queue's owner, until either blocked_task 685 * isn't found, or blocked_task is found on a queue owned by my_task. 686 * 687 * Note: the above assumption may not be true when handling lock requests 688 * from a broken NFS client. But broken NFS clients have a lot more to 689 * worry about than proper deadlock detection anyway... --okir 690 */ 691 int posix_locks_deadlock(struct file_lock *caller_fl, 692 struct file_lock *block_fl) 693 { 694 struct list_head *tmp; 695 696 next_task: 697 if (posix_same_owner(caller_fl, block_fl)) 698 return 1; 699 list_for_each(tmp, &blocked_list) { 700 struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link); 701 if (posix_same_owner(fl, block_fl)) { 702 fl = fl->fl_next; 703 block_fl = fl; 704 goto next_task; 705 } 706 } 707 return 0; 708 } 709 710 EXPORT_SYMBOL(posix_locks_deadlock); 711 712 /* Try to create a FLOCK lock on filp. We always insert new FLOCK locks 713 * at the head of the list, but that's secret knowledge known only to 714 * flock_lock_file and posix_lock_file. 715 */ 716 static int flock_lock_file(struct file *filp, struct file_lock *new_fl) 717 { 718 struct file_lock **before; 719 struct inode * inode = filp->f_dentry->d_inode; 720 int error = 0; 721 int found = 0; 722 723 lock_kernel(); 724 for_each_lock(inode, before) { 725 struct file_lock *fl = *before; 726 if (IS_POSIX(fl)) 727 break; 728 if (IS_LEASE(fl)) 729 continue; 730 if (filp != fl->fl_file) 731 continue; 732 if (new_fl->fl_type == fl->fl_type) 733 goto out; 734 found = 1; 735 locks_delete_lock(before); 736 break; 737 } 738 unlock_kernel(); 739 740 if (new_fl->fl_type == F_UNLCK) 741 return 0; 742 743 /* 744 * If a higher-priority process was blocked on the old file lock, 745 * give it the opportunity to lock the file. 746 */ 747 if (found) 748 cond_resched(); 749 750 lock_kernel(); 751 for_each_lock(inode, before) { 752 struct file_lock *fl = *before; 753 if (IS_POSIX(fl)) 754 break; 755 if (IS_LEASE(fl)) 756 continue; 757 if (!flock_locks_conflict(new_fl, fl)) 758 continue; 759 error = -EAGAIN; 760 if (new_fl->fl_flags & FL_SLEEP) { 761 locks_insert_block(fl, new_fl); 762 } 763 goto out; 764 } 765 locks_insert_lock(&inode->i_flock, new_fl); 766 error = 0; 767 768 out: 769 unlock_kernel(); 770 return error; 771 } 772 773 EXPORT_SYMBOL(posix_lock_file); 774 775 static int __posix_lock_file(struct inode *inode, struct file_lock *request) 776 { 777 struct file_lock *fl; 778 struct file_lock *new_fl, *new_fl2; 779 struct file_lock *left = NULL; 780 struct file_lock *right = NULL; 781 struct file_lock **before; 782 int error, added = 0; 783 784 /* 785 * We may need two file_lock structures for this operation, 786 * so we get them in advance to avoid races. 787 */ 788 new_fl = locks_alloc_lock(); 789 new_fl2 = locks_alloc_lock(); 790 791 lock_kernel(); 792 if (request->fl_type != F_UNLCK) { 793 for_each_lock(inode, before) { 794 struct file_lock *fl = *before; 795 if (!IS_POSIX(fl)) 796 continue; 797 if (!posix_locks_conflict(request, fl)) 798 continue; 799 error = -EAGAIN; 800 if (!(request->fl_flags & FL_SLEEP)) 801 goto out; 802 error = -EDEADLK; 803 if (posix_locks_deadlock(request, fl)) 804 goto out; 805 error = -EAGAIN; 806 locks_insert_block(fl, request); 807 goto out; 808 } 809 } 810 811 /* If we're just looking for a conflict, we're done. */ 812 error = 0; 813 if (request->fl_flags & FL_ACCESS) 814 goto out; 815 816 error = -ENOLCK; /* "no luck" */ 817 if (!(new_fl && new_fl2)) 818 goto out; 819 820 /* 821 * We've allocated the new locks in advance, so there are no 822 * errors possible (and no blocking operations) from here on. 823 * 824 * Find the first old lock with the same owner as the new lock. 825 */ 826 827 before = &inode->i_flock; 828 829 /* First skip locks owned by other processes. */ 830 while ((fl = *before) && (!IS_POSIX(fl) || 831 !posix_same_owner(request, fl))) { 832 before = &fl->fl_next; 833 } 834 835 /* Process locks with this owner. */ 836 while ((fl = *before) && posix_same_owner(request, fl)) { 837 /* Detect adjacent or overlapping regions (if same lock type) 838 */ 839 if (request->fl_type == fl->fl_type) { 840 /* In all comparisons of start vs end, use 841 * "start - 1" rather than "end + 1". If end 842 * is OFFSET_MAX, end + 1 will become negative. 843 */ 844 if (fl->fl_end < request->fl_start - 1) 845 goto next_lock; 846 /* If the next lock in the list has entirely bigger 847 * addresses than the new one, insert the lock here. 848 */ 849 if (fl->fl_start - 1 > request->fl_end) 850 break; 851 852 /* If we come here, the new and old lock are of the 853 * same type and adjacent or overlapping. Make one 854 * lock yielding from the lower start address of both 855 * locks to the higher end address. 856 */ 857 if (fl->fl_start > request->fl_start) 858 fl->fl_start = request->fl_start; 859 else 860 request->fl_start = fl->fl_start; 861 if (fl->fl_end < request->fl_end) 862 fl->fl_end = request->fl_end; 863 else 864 request->fl_end = fl->fl_end; 865 if (added) { 866 locks_delete_lock(before); 867 continue; 868 } 869 request = fl; 870 added = 1; 871 } 872 else { 873 /* Processing for different lock types is a bit 874 * more complex. 875 */ 876 if (fl->fl_end < request->fl_start) 877 goto next_lock; 878 if (fl->fl_start > request->fl_end) 879 break; 880 if (request->fl_type == F_UNLCK) 881 added = 1; 882 if (fl->fl_start < request->fl_start) 883 left = fl; 884 /* If the next lock in the list has a higher end 885 * address than the new one, insert the new one here. 886 */ 887 if (fl->fl_end > request->fl_end) { 888 right = fl; 889 break; 890 } 891 if (fl->fl_start >= request->fl_start) { 892 /* The new lock completely replaces an old 893 * one (This may happen several times). 894 */ 895 if (added) { 896 locks_delete_lock(before); 897 continue; 898 } 899 /* Replace the old lock with the new one. 900 * Wake up anybody waiting for the old one, 901 * as the change in lock type might satisfy 902 * their needs. 903 */ 904 locks_wake_up_blocks(fl); 905 fl->fl_start = request->fl_start; 906 fl->fl_end = request->fl_end; 907 fl->fl_type = request->fl_type; 908 fl->fl_u = request->fl_u; 909 request = fl; 910 added = 1; 911 } 912 } 913 /* Go on to next lock. 914 */ 915 next_lock: 916 before = &fl->fl_next; 917 } 918 919 error = 0; 920 if (!added) { 921 if (request->fl_type == F_UNLCK) 922 goto out; 923 locks_copy_lock(new_fl, request); 924 locks_insert_lock(before, new_fl); 925 new_fl = NULL; 926 } 927 if (right) { 928 if (left == right) { 929 /* The new lock breaks the old one in two pieces, 930 * so we have to use the second new lock. 931 */ 932 left = new_fl2; 933 new_fl2 = NULL; 934 locks_copy_lock(left, right); 935 locks_insert_lock(before, left); 936 } 937 right->fl_start = request->fl_end + 1; 938 locks_wake_up_blocks(right); 939 } 940 if (left) { 941 left->fl_end = request->fl_start - 1; 942 locks_wake_up_blocks(left); 943 } 944 out: 945 unlock_kernel(); 946 /* 947 * Free any unused locks. 948 */ 949 if (new_fl) 950 locks_free_lock(new_fl); 951 if (new_fl2) 952 locks_free_lock(new_fl2); 953 return error; 954 } 955 956 /** 957 * posix_lock_file - Apply a POSIX-style lock to a file 958 * @filp: The file to apply the lock to 959 * @fl: The lock to be applied 960 * 961 * Add a POSIX style lock to a file. 962 * We merge adjacent & overlapping locks whenever possible. 963 * POSIX locks are sorted by owner task, then by starting address 964 */ 965 int posix_lock_file(struct file *filp, struct file_lock *fl) 966 { 967 return __posix_lock_file(filp->f_dentry->d_inode, fl); 968 } 969 970 /** 971 * posix_lock_file_wait - Apply a POSIX-style lock to a file 972 * @filp: The file to apply the lock to 973 * @fl: The lock to be applied 974 * 975 * Add a POSIX style lock to a file. 976 * We merge adjacent & overlapping locks whenever possible. 977 * POSIX locks are sorted by owner task, then by starting address 978 */ 979 int posix_lock_file_wait(struct file *filp, struct file_lock *fl) 980 { 981 int error; 982 might_sleep (); 983 for (;;) { 984 error = __posix_lock_file(filp->f_dentry->d_inode, fl); 985 if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP)) 986 break; 987 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next); 988 if (!error) 989 continue; 990 991 locks_delete_block(fl); 992 break; 993 } 994 return error; 995 } 996 EXPORT_SYMBOL(posix_lock_file_wait); 997 998 /** 999 * locks_mandatory_locked - Check for an active lock 1000 * @inode: the file to check 1001 * 1002 * Searches the inode's list of locks to find any POSIX locks which conflict. 1003 * This function is called from locks_verify_locked() only. 1004 */ 1005 int locks_mandatory_locked(struct inode *inode) 1006 { 1007 fl_owner_t owner = current->files; 1008 struct file_lock *fl; 1009 1010 /* 1011 * Search the lock list for this inode for any POSIX locks. 1012 */ 1013 lock_kernel(); 1014 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { 1015 if (!IS_POSIX(fl)) 1016 continue; 1017 if (fl->fl_owner != owner) 1018 break; 1019 } 1020 unlock_kernel(); 1021 return fl ? -EAGAIN : 0; 1022 } 1023 1024 /** 1025 * locks_mandatory_area - Check for a conflicting lock 1026 * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ 1027 * for shared 1028 * @inode: the file to check 1029 * @filp: how the file was opened (if it was) 1030 * @offset: start of area to check 1031 * @count: length of area to check 1032 * 1033 * Searches the inode's list of locks to find any POSIX locks which conflict. 1034 * This function is called from rw_verify_area() and 1035 * locks_verify_truncate(). 1036 */ 1037 int locks_mandatory_area(int read_write, struct inode *inode, 1038 struct file *filp, loff_t offset, 1039 size_t count) 1040 { 1041 struct file_lock fl; 1042 int error; 1043 1044 locks_init_lock(&fl); 1045 fl.fl_owner = current->files; 1046 fl.fl_pid = current->tgid; 1047 fl.fl_file = filp; 1048 fl.fl_flags = FL_POSIX | FL_ACCESS; 1049 if (filp && !(filp->f_flags & O_NONBLOCK)) 1050 fl.fl_flags |= FL_SLEEP; 1051 fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK; 1052 fl.fl_start = offset; 1053 fl.fl_end = offset + count - 1; 1054 1055 for (;;) { 1056 error = __posix_lock_file(inode, &fl); 1057 if (error != -EAGAIN) 1058 break; 1059 if (!(fl.fl_flags & FL_SLEEP)) 1060 break; 1061 error = wait_event_interruptible(fl.fl_wait, !fl.fl_next); 1062 if (!error) { 1063 /* 1064 * If we've been sleeping someone might have 1065 * changed the permissions behind our back. 1066 */ 1067 if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID) 1068 continue; 1069 } 1070 1071 locks_delete_block(&fl); 1072 break; 1073 } 1074 1075 return error; 1076 } 1077 1078 EXPORT_SYMBOL(locks_mandatory_area); 1079 1080 /* We already had a lease on this file; just change its type */ 1081 int lease_modify(struct file_lock **before, int arg) 1082 { 1083 struct file_lock *fl = *before; 1084 int error = assign_type(fl, arg); 1085 1086 if (error) 1087 return error; 1088 locks_wake_up_blocks(fl); 1089 if (arg == F_UNLCK) 1090 locks_delete_lock(before); 1091 return 0; 1092 } 1093 1094 EXPORT_SYMBOL(lease_modify); 1095 1096 static void time_out_leases(struct inode *inode) 1097 { 1098 struct file_lock **before; 1099 struct file_lock *fl; 1100 1101 before = &inode->i_flock; 1102 while ((fl = *before) && IS_LEASE(fl) && (fl->fl_type & F_INPROGRESS)) { 1103 if ((fl->fl_break_time == 0) 1104 || time_before(jiffies, fl->fl_break_time)) { 1105 before = &fl->fl_next; 1106 continue; 1107 } 1108 lease_modify(before, fl->fl_type & ~F_INPROGRESS); 1109 if (fl == *before) /* lease_modify may have freed fl */ 1110 before = &fl->fl_next; 1111 } 1112 } 1113 1114 /** 1115 * __break_lease - revoke all outstanding leases on file 1116 * @inode: the inode of the file to return 1117 * @mode: the open mode (read or write) 1118 * 1119 * break_lease (inlined for speed) has checked there already 1120 * is a lease on this file. Leases are broken on a call to open() 1121 * or truncate(). This function can sleep unless you 1122 * specified %O_NONBLOCK to your open(). 1123 */ 1124 int __break_lease(struct inode *inode, unsigned int mode) 1125 { 1126 int error = 0, future; 1127 struct file_lock *new_fl, *flock; 1128 struct file_lock *fl; 1129 int alloc_err; 1130 unsigned long break_time; 1131 int i_have_this_lease = 0; 1132 1133 alloc_err = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK, 1134 &new_fl); 1135 1136 lock_kernel(); 1137 1138 time_out_leases(inode); 1139 1140 flock = inode->i_flock; 1141 if ((flock == NULL) || !IS_LEASE(flock)) 1142 goto out; 1143 1144 for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) 1145 if (fl->fl_owner == current->files) 1146 i_have_this_lease = 1; 1147 1148 if (mode & FMODE_WRITE) { 1149 /* If we want write access, we have to revoke any lease. */ 1150 future = F_UNLCK | F_INPROGRESS; 1151 } else if (flock->fl_type & F_INPROGRESS) { 1152 /* If the lease is already being broken, we just leave it */ 1153 future = flock->fl_type; 1154 } else if (flock->fl_type & F_WRLCK) { 1155 /* Downgrade the exclusive lease to a read-only lease. */ 1156 future = F_RDLCK | F_INPROGRESS; 1157 } else { 1158 /* the existing lease was read-only, so we can read too. */ 1159 goto out; 1160 } 1161 1162 if (alloc_err && !i_have_this_lease && ((mode & O_NONBLOCK) == 0)) { 1163 error = alloc_err; 1164 goto out; 1165 } 1166 1167 break_time = 0; 1168 if (lease_break_time > 0) { 1169 break_time = jiffies + lease_break_time * HZ; 1170 if (break_time == 0) 1171 break_time++; /* so that 0 means no break time */ 1172 } 1173 1174 for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) { 1175 if (fl->fl_type != future) { 1176 fl->fl_type = future; 1177 fl->fl_break_time = break_time; 1178 /* lease must have lmops break callback */ 1179 fl->fl_lmops->fl_break(fl); 1180 } 1181 } 1182 1183 if (i_have_this_lease || (mode & O_NONBLOCK)) { 1184 error = -EWOULDBLOCK; 1185 goto out; 1186 } 1187 1188 restart: 1189 break_time = flock->fl_break_time; 1190 if (break_time != 0) { 1191 break_time -= jiffies; 1192 if (break_time == 0) 1193 break_time++; 1194 } 1195 error = locks_block_on_timeout(flock, new_fl, break_time); 1196 if (error >= 0) { 1197 if (error == 0) 1198 time_out_leases(inode); 1199 /* Wait for the next lease that has not been broken yet */ 1200 for (flock = inode->i_flock; flock && IS_LEASE(flock); 1201 flock = flock->fl_next) { 1202 if (flock->fl_type & F_INPROGRESS) 1203 goto restart; 1204 } 1205 error = 0; 1206 } 1207 1208 out: 1209 unlock_kernel(); 1210 if (!alloc_err) 1211 locks_free_lock(new_fl); 1212 return error; 1213 } 1214 1215 EXPORT_SYMBOL(__break_lease); 1216 1217 /** 1218 * lease_get_mtime 1219 * @inode: the inode 1220 * @time: pointer to a timespec which will contain the last modified time 1221 * 1222 * This is to force NFS clients to flush their caches for files with 1223 * exclusive leases. The justification is that if someone has an 1224 * exclusive lease, then they could be modifiying it. 1225 */ 1226 void lease_get_mtime(struct inode *inode, struct timespec *time) 1227 { 1228 struct file_lock *flock = inode->i_flock; 1229 if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK)) 1230 *time = current_fs_time(inode->i_sb); 1231 else 1232 *time = inode->i_mtime; 1233 } 1234 1235 EXPORT_SYMBOL(lease_get_mtime); 1236 1237 /** 1238 * fcntl_getlease - Enquire what lease is currently active 1239 * @filp: the file 1240 * 1241 * The value returned by this function will be one of 1242 * (if no lease break is pending): 1243 * 1244 * %F_RDLCK to indicate a shared lease is held. 1245 * 1246 * %F_WRLCK to indicate an exclusive lease is held. 1247 * 1248 * %F_UNLCK to indicate no lease is held. 1249 * 1250 * (if a lease break is pending): 1251 * 1252 * %F_RDLCK to indicate an exclusive lease needs to be 1253 * changed to a shared lease (or removed). 1254 * 1255 * %F_UNLCK to indicate the lease needs to be removed. 1256 * 1257 * XXX: sfr & willy disagree over whether F_INPROGRESS 1258 * should be returned to userspace. 1259 */ 1260 int fcntl_getlease(struct file *filp) 1261 { 1262 struct file_lock *fl; 1263 int type = F_UNLCK; 1264 1265 lock_kernel(); 1266 time_out_leases(filp->f_dentry->d_inode); 1267 for (fl = filp->f_dentry->d_inode->i_flock; fl && IS_LEASE(fl); 1268 fl = fl->fl_next) { 1269 if (fl->fl_file == filp) { 1270 type = fl->fl_type & ~F_INPROGRESS; 1271 break; 1272 } 1273 } 1274 unlock_kernel(); 1275 return type; 1276 } 1277 1278 /** 1279 * __setlease - sets a lease on an open file 1280 * @filp: file pointer 1281 * @arg: type of lease to obtain 1282 * @flp: input - file_lock to use, output - file_lock inserted 1283 * 1284 * The (input) flp->fl_lmops->fl_break function is required 1285 * by break_lease(). 1286 * 1287 * Called with kernel lock held. 1288 */ 1289 static int __setlease(struct file *filp, long arg, struct file_lock **flp) 1290 { 1291 struct file_lock *fl, **before, **my_before = NULL, *lease; 1292 struct dentry *dentry = filp->f_dentry; 1293 struct inode *inode = dentry->d_inode; 1294 int error, rdlease_count = 0, wrlease_count = 0; 1295 1296 time_out_leases(inode); 1297 1298 error = -EINVAL; 1299 if (!flp || !(*flp) || !(*flp)->fl_lmops || !(*flp)->fl_lmops->fl_break) 1300 goto out; 1301 1302 lease = *flp; 1303 1304 error = -EAGAIN; 1305 if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0)) 1306 goto out; 1307 if ((arg == F_WRLCK) 1308 && ((atomic_read(&dentry->d_count) > 1) 1309 || (atomic_read(&inode->i_count) > 1))) 1310 goto out; 1311 1312 /* 1313 * At this point, we know that if there is an exclusive 1314 * lease on this file, then we hold it on this filp 1315 * (otherwise our open of this file would have blocked). 1316 * And if we are trying to acquire an exclusive lease, 1317 * then the file is not open by anyone (including us) 1318 * except for this filp. 1319 */ 1320 for (before = &inode->i_flock; 1321 ((fl = *before) != NULL) && IS_LEASE(fl); 1322 before = &fl->fl_next) { 1323 if (lease->fl_lmops->fl_mylease(fl, lease)) 1324 my_before = before; 1325 else if (fl->fl_type == (F_INPROGRESS | F_UNLCK)) 1326 /* 1327 * Someone is in the process of opening this 1328 * file for writing so we may not take an 1329 * exclusive lease on it. 1330 */ 1331 wrlease_count++; 1332 else 1333 rdlease_count++; 1334 } 1335 1336 if ((arg == F_RDLCK && (wrlease_count > 0)) || 1337 (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0))) 1338 goto out; 1339 1340 if (my_before != NULL) { 1341 error = lease->fl_lmops->fl_change(my_before, arg); 1342 goto out; 1343 } 1344 1345 error = 0; 1346 if (arg == F_UNLCK) 1347 goto out; 1348 1349 error = -EINVAL; 1350 if (!leases_enable) 1351 goto out; 1352 1353 error = lease_alloc(filp, arg, &fl); 1354 if (error) 1355 goto out; 1356 1357 locks_copy_lock(fl, lease); 1358 1359 locks_insert_lock(before, fl); 1360 1361 *flp = fl; 1362 out: 1363 return error; 1364 } 1365 1366 /** 1367 * setlease - sets a lease on an open file 1368 * @filp: file pointer 1369 * @arg: type of lease to obtain 1370 * @lease: file_lock to use 1371 * 1372 * Call this to establish a lease on the file. 1373 * The fl_lmops fl_break function is required by break_lease 1374 */ 1375 1376 int setlease(struct file *filp, long arg, struct file_lock **lease) 1377 { 1378 struct dentry *dentry = filp->f_dentry; 1379 struct inode *inode = dentry->d_inode; 1380 int error; 1381 1382 if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE)) 1383 return -EACCES; 1384 if (!S_ISREG(inode->i_mode)) 1385 return -EINVAL; 1386 error = security_file_lock(filp, arg); 1387 if (error) 1388 return error; 1389 1390 lock_kernel(); 1391 error = __setlease(filp, arg, lease); 1392 unlock_kernel(); 1393 1394 return error; 1395 } 1396 1397 EXPORT_SYMBOL(setlease); 1398 1399 /** 1400 * fcntl_setlease - sets a lease on an open file 1401 * @fd: open file descriptor 1402 * @filp: file pointer 1403 * @arg: type of lease to obtain 1404 * 1405 * Call this fcntl to establish a lease on the file. 1406 * Note that you also need to call %F_SETSIG to 1407 * receive a signal when the lease is broken. 1408 */ 1409 int fcntl_setlease(unsigned int fd, struct file *filp, long arg) 1410 { 1411 struct file_lock fl, *flp = &fl; 1412 struct dentry *dentry = filp->f_dentry; 1413 struct inode *inode = dentry->d_inode; 1414 int error; 1415 1416 if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE)) 1417 return -EACCES; 1418 if (!S_ISREG(inode->i_mode)) 1419 return -EINVAL; 1420 error = security_file_lock(filp, arg); 1421 if (error) 1422 return error; 1423 1424 locks_init_lock(&fl); 1425 error = lease_init(filp, arg, &fl); 1426 if (error) 1427 return error; 1428 1429 lock_kernel(); 1430 1431 error = __setlease(filp, arg, &flp); 1432 if (error || arg == F_UNLCK) 1433 goto out_unlock; 1434 1435 error = fasync_helper(fd, filp, 1, &flp->fl_fasync); 1436 if (error < 0) { 1437 /* remove lease just inserted by __setlease */ 1438 flp->fl_type = F_UNLCK | F_INPROGRESS; 1439 flp->fl_break_time = jiffies- 10; 1440 time_out_leases(inode); 1441 goto out_unlock; 1442 } 1443 1444 error = f_setown(filp, current->pid, 0); 1445 out_unlock: 1446 unlock_kernel(); 1447 return error; 1448 } 1449 1450 /** 1451 * flock_lock_file_wait - Apply a FLOCK-style lock to a file 1452 * @filp: The file to apply the lock to 1453 * @fl: The lock to be applied 1454 * 1455 * Add a FLOCK style lock to a file. 1456 */ 1457 int flock_lock_file_wait(struct file *filp, struct file_lock *fl) 1458 { 1459 int error; 1460 might_sleep(); 1461 for (;;) { 1462 error = flock_lock_file(filp, fl); 1463 if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP)) 1464 break; 1465 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next); 1466 if (!error) 1467 continue; 1468 1469 locks_delete_block(fl); 1470 break; 1471 } 1472 return error; 1473 } 1474 1475 EXPORT_SYMBOL(flock_lock_file_wait); 1476 1477 /** 1478 * sys_flock: - flock() system call. 1479 * @fd: the file descriptor to lock. 1480 * @cmd: the type of lock to apply. 1481 * 1482 * Apply a %FL_FLOCK style lock to an open file descriptor. 1483 * The @cmd can be one of 1484 * 1485 * %LOCK_SH -- a shared lock. 1486 * 1487 * %LOCK_EX -- an exclusive lock. 1488 * 1489 * %LOCK_UN -- remove an existing lock. 1490 * 1491 * %LOCK_MAND -- a `mandatory' flock. This exists to emulate Windows Share Modes. 1492 * 1493 * %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other 1494 * processes read and write access respectively. 1495 */ 1496 asmlinkage long sys_flock(unsigned int fd, unsigned int cmd) 1497 { 1498 struct file *filp; 1499 struct file_lock *lock; 1500 int can_sleep, unlock; 1501 int error; 1502 1503 error = -EBADF; 1504 filp = fget(fd); 1505 if (!filp) 1506 goto out; 1507 1508 can_sleep = !(cmd & LOCK_NB); 1509 cmd &= ~LOCK_NB; 1510 unlock = (cmd == LOCK_UN); 1511 1512 if (!unlock && !(cmd & LOCK_MAND) && !(filp->f_mode & 3)) 1513 goto out_putf; 1514 1515 error = flock_make_lock(filp, &lock, cmd); 1516 if (error) 1517 goto out_putf; 1518 if (can_sleep) 1519 lock->fl_flags |= FL_SLEEP; 1520 1521 error = security_file_lock(filp, cmd); 1522 if (error) 1523 goto out_free; 1524 1525 if (filp->f_op && filp->f_op->flock) 1526 error = filp->f_op->flock(filp, 1527 (can_sleep) ? F_SETLKW : F_SETLK, 1528 lock); 1529 else 1530 error = flock_lock_file_wait(filp, lock); 1531 1532 out_free: 1533 if (list_empty(&lock->fl_link)) { 1534 locks_free_lock(lock); 1535 } 1536 1537 out_putf: 1538 fput(filp); 1539 out: 1540 return error; 1541 } 1542 1543 /* Report the first existing lock that would conflict with l. 1544 * This implements the F_GETLK command of fcntl(). 1545 */ 1546 int fcntl_getlk(struct file *filp, struct flock __user *l) 1547 { 1548 struct file_lock *fl, file_lock; 1549 struct flock flock; 1550 int error; 1551 1552 error = -EFAULT; 1553 if (copy_from_user(&flock, l, sizeof(flock))) 1554 goto out; 1555 error = -EINVAL; 1556 if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK)) 1557 goto out; 1558 1559 error = flock_to_posix_lock(filp, &file_lock, &flock); 1560 if (error) 1561 goto out; 1562 1563 if (filp->f_op && filp->f_op->lock) { 1564 error = filp->f_op->lock(filp, F_GETLK, &file_lock); 1565 if (file_lock.fl_ops && file_lock.fl_ops->fl_release_private) 1566 file_lock.fl_ops->fl_release_private(&file_lock); 1567 if (error < 0) 1568 goto out; 1569 else 1570 fl = (file_lock.fl_type == F_UNLCK ? NULL : &file_lock); 1571 } else { 1572 fl = posix_test_lock(filp, &file_lock); 1573 } 1574 1575 flock.l_type = F_UNLCK; 1576 if (fl != NULL) { 1577 flock.l_pid = fl->fl_pid; 1578 #if BITS_PER_LONG == 32 1579 /* 1580 * Make sure we can represent the posix lock via 1581 * legacy 32bit flock. 1582 */ 1583 error = -EOVERFLOW; 1584 if (fl->fl_start > OFFT_OFFSET_MAX) 1585 goto out; 1586 if ((fl->fl_end != OFFSET_MAX) 1587 && (fl->fl_end > OFFT_OFFSET_MAX)) 1588 goto out; 1589 #endif 1590 flock.l_start = fl->fl_start; 1591 flock.l_len = fl->fl_end == OFFSET_MAX ? 0 : 1592 fl->fl_end - fl->fl_start + 1; 1593 flock.l_whence = 0; 1594 flock.l_type = fl->fl_type; 1595 } 1596 error = -EFAULT; 1597 if (!copy_to_user(l, &flock, sizeof(flock))) 1598 error = 0; 1599 out: 1600 return error; 1601 } 1602 1603 /* Apply the lock described by l to an open file descriptor. 1604 * This implements both the F_SETLK and F_SETLKW commands of fcntl(). 1605 */ 1606 int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd, 1607 struct flock __user *l) 1608 { 1609 struct file_lock *file_lock = locks_alloc_lock(); 1610 struct flock flock; 1611 struct inode *inode; 1612 int error; 1613 1614 if (file_lock == NULL) 1615 return -ENOLCK; 1616 1617 /* 1618 * This might block, so we do it before checking the inode. 1619 */ 1620 error = -EFAULT; 1621 if (copy_from_user(&flock, l, sizeof(flock))) 1622 goto out; 1623 1624 inode = filp->f_dentry->d_inode; 1625 1626 /* Don't allow mandatory locks on files that may be memory mapped 1627 * and shared. 1628 */ 1629 if (IS_MANDLOCK(inode) && 1630 (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID && 1631 mapping_writably_mapped(filp->f_mapping)) { 1632 error = -EAGAIN; 1633 goto out; 1634 } 1635 1636 again: 1637 error = flock_to_posix_lock(filp, file_lock, &flock); 1638 if (error) 1639 goto out; 1640 if (cmd == F_SETLKW) { 1641 file_lock->fl_flags |= FL_SLEEP; 1642 } 1643 1644 error = -EBADF; 1645 switch (flock.l_type) { 1646 case F_RDLCK: 1647 if (!(filp->f_mode & FMODE_READ)) 1648 goto out; 1649 break; 1650 case F_WRLCK: 1651 if (!(filp->f_mode & FMODE_WRITE)) 1652 goto out; 1653 break; 1654 case F_UNLCK: 1655 break; 1656 default: 1657 error = -EINVAL; 1658 goto out; 1659 } 1660 1661 error = security_file_lock(filp, file_lock->fl_type); 1662 if (error) 1663 goto out; 1664 1665 if (filp->f_op && filp->f_op->lock != NULL) 1666 error = filp->f_op->lock(filp, cmd, file_lock); 1667 else { 1668 for (;;) { 1669 error = __posix_lock_file(inode, file_lock); 1670 if ((error != -EAGAIN) || (cmd == F_SETLK)) 1671 break; 1672 error = wait_event_interruptible(file_lock->fl_wait, 1673 !file_lock->fl_next); 1674 if (!error) 1675 continue; 1676 1677 locks_delete_block(file_lock); 1678 break; 1679 } 1680 } 1681 1682 /* 1683 * Attempt to detect a close/fcntl race and recover by 1684 * releasing the lock that was just acquired. 1685 */ 1686 if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) { 1687 flock.l_type = F_UNLCK; 1688 goto again; 1689 } 1690 1691 out: 1692 locks_free_lock(file_lock); 1693 return error; 1694 } 1695 1696 #if BITS_PER_LONG == 32 1697 /* Report the first existing lock that would conflict with l. 1698 * This implements the F_GETLK command of fcntl(). 1699 */ 1700 int fcntl_getlk64(struct file *filp, struct flock64 __user *l) 1701 { 1702 struct file_lock *fl, file_lock; 1703 struct flock64 flock; 1704 int error; 1705 1706 error = -EFAULT; 1707 if (copy_from_user(&flock, l, sizeof(flock))) 1708 goto out; 1709 error = -EINVAL; 1710 if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK)) 1711 goto out; 1712 1713 error = flock64_to_posix_lock(filp, &file_lock, &flock); 1714 if (error) 1715 goto out; 1716 1717 if (filp->f_op && filp->f_op->lock) { 1718 error = filp->f_op->lock(filp, F_GETLK, &file_lock); 1719 if (file_lock.fl_ops && file_lock.fl_ops->fl_release_private) 1720 file_lock.fl_ops->fl_release_private(&file_lock); 1721 if (error < 0) 1722 goto out; 1723 else 1724 fl = (file_lock.fl_type == F_UNLCK ? NULL : &file_lock); 1725 } else { 1726 fl = posix_test_lock(filp, &file_lock); 1727 } 1728 1729 flock.l_type = F_UNLCK; 1730 if (fl != NULL) { 1731 flock.l_pid = fl->fl_pid; 1732 flock.l_start = fl->fl_start; 1733 flock.l_len = fl->fl_end == OFFSET_MAX ? 0 : 1734 fl->fl_end - fl->fl_start + 1; 1735 flock.l_whence = 0; 1736 flock.l_type = fl->fl_type; 1737 } 1738 error = -EFAULT; 1739 if (!copy_to_user(l, &flock, sizeof(flock))) 1740 error = 0; 1741 1742 out: 1743 return error; 1744 } 1745 1746 /* Apply the lock described by l to an open file descriptor. 1747 * This implements both the F_SETLK and F_SETLKW commands of fcntl(). 1748 */ 1749 int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd, 1750 struct flock64 __user *l) 1751 { 1752 struct file_lock *file_lock = locks_alloc_lock(); 1753 struct flock64 flock; 1754 struct inode *inode; 1755 int error; 1756 1757 if (file_lock == NULL) 1758 return -ENOLCK; 1759 1760 /* 1761 * This might block, so we do it before checking the inode. 1762 */ 1763 error = -EFAULT; 1764 if (copy_from_user(&flock, l, sizeof(flock))) 1765 goto out; 1766 1767 inode = filp->f_dentry->d_inode; 1768 1769 /* Don't allow mandatory locks on files that may be memory mapped 1770 * and shared. 1771 */ 1772 if (IS_MANDLOCK(inode) && 1773 (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID && 1774 mapping_writably_mapped(filp->f_mapping)) { 1775 error = -EAGAIN; 1776 goto out; 1777 } 1778 1779 again: 1780 error = flock64_to_posix_lock(filp, file_lock, &flock); 1781 if (error) 1782 goto out; 1783 if (cmd == F_SETLKW64) { 1784 file_lock->fl_flags |= FL_SLEEP; 1785 } 1786 1787 error = -EBADF; 1788 switch (flock.l_type) { 1789 case F_RDLCK: 1790 if (!(filp->f_mode & FMODE_READ)) 1791 goto out; 1792 break; 1793 case F_WRLCK: 1794 if (!(filp->f_mode & FMODE_WRITE)) 1795 goto out; 1796 break; 1797 case F_UNLCK: 1798 break; 1799 default: 1800 error = -EINVAL; 1801 goto out; 1802 } 1803 1804 error = security_file_lock(filp, file_lock->fl_type); 1805 if (error) 1806 goto out; 1807 1808 if (filp->f_op && filp->f_op->lock != NULL) 1809 error = filp->f_op->lock(filp, cmd, file_lock); 1810 else { 1811 for (;;) { 1812 error = __posix_lock_file(inode, file_lock); 1813 if ((error != -EAGAIN) || (cmd == F_SETLK64)) 1814 break; 1815 error = wait_event_interruptible(file_lock->fl_wait, 1816 !file_lock->fl_next); 1817 if (!error) 1818 continue; 1819 1820 locks_delete_block(file_lock); 1821 break; 1822 } 1823 } 1824 1825 /* 1826 * Attempt to detect a close/fcntl race and recover by 1827 * releasing the lock that was just acquired. 1828 */ 1829 if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) { 1830 flock.l_type = F_UNLCK; 1831 goto again; 1832 } 1833 1834 out: 1835 locks_free_lock(file_lock); 1836 return error; 1837 } 1838 #endif /* BITS_PER_LONG == 32 */ 1839 1840 /* 1841 * This function is called when the file is being removed 1842 * from the task's fd array. POSIX locks belonging to this task 1843 * are deleted at this time. 1844 */ 1845 void locks_remove_posix(struct file *filp, fl_owner_t owner) 1846 { 1847 struct file_lock lock, **before; 1848 1849 /* 1850 * If there are no locks held on this file, we don't need to call 1851 * posix_lock_file(). Another process could be setting a lock on this 1852 * file at the same time, but we wouldn't remove that lock anyway. 1853 */ 1854 before = &filp->f_dentry->d_inode->i_flock; 1855 if (*before == NULL) 1856 return; 1857 1858 lock.fl_type = F_UNLCK; 1859 lock.fl_flags = FL_POSIX; 1860 lock.fl_start = 0; 1861 lock.fl_end = OFFSET_MAX; 1862 lock.fl_owner = owner; 1863 lock.fl_pid = current->tgid; 1864 lock.fl_file = filp; 1865 lock.fl_ops = NULL; 1866 lock.fl_lmops = NULL; 1867 1868 if (filp->f_op && filp->f_op->lock != NULL) { 1869 filp->f_op->lock(filp, F_SETLK, &lock); 1870 goto out; 1871 } 1872 1873 /* Can't use posix_lock_file here; we need to remove it no matter 1874 * which pid we have. 1875 */ 1876 lock_kernel(); 1877 while (*before != NULL) { 1878 struct file_lock *fl = *before; 1879 if (IS_POSIX(fl) && posix_same_owner(fl, &lock)) { 1880 locks_delete_lock(before); 1881 continue; 1882 } 1883 before = &fl->fl_next; 1884 } 1885 unlock_kernel(); 1886 out: 1887 if (lock.fl_ops && lock.fl_ops->fl_release_private) 1888 lock.fl_ops->fl_release_private(&lock); 1889 } 1890 1891 EXPORT_SYMBOL(locks_remove_posix); 1892 1893 /* 1894 * This function is called on the last close of an open file. 1895 */ 1896 void locks_remove_flock(struct file *filp) 1897 { 1898 struct inode * inode = filp->f_dentry->d_inode; 1899 struct file_lock *fl; 1900 struct file_lock **before; 1901 1902 if (!inode->i_flock) 1903 return; 1904 1905 if (filp->f_op && filp->f_op->flock) { 1906 struct file_lock fl = { 1907 .fl_pid = current->tgid, 1908 .fl_file = filp, 1909 .fl_flags = FL_FLOCK, 1910 .fl_type = F_UNLCK, 1911 .fl_end = OFFSET_MAX, 1912 }; 1913 filp->f_op->flock(filp, F_SETLKW, &fl); 1914 if (fl.fl_ops && fl.fl_ops->fl_release_private) 1915 fl.fl_ops->fl_release_private(&fl); 1916 } 1917 1918 lock_kernel(); 1919 before = &inode->i_flock; 1920 1921 while ((fl = *before) != NULL) { 1922 if (fl->fl_file == filp) { 1923 if (IS_FLOCK(fl)) { 1924 locks_delete_lock(before); 1925 continue; 1926 } 1927 if (IS_LEASE(fl)) { 1928 lease_modify(before, F_UNLCK); 1929 continue; 1930 } 1931 /* What? */ 1932 BUG(); 1933 } 1934 before = &fl->fl_next; 1935 } 1936 unlock_kernel(); 1937 } 1938 1939 /** 1940 * posix_block_lock - blocks waiting for a file lock 1941 * @blocker: the lock which is blocking 1942 * @waiter: the lock which conflicts and has to wait 1943 * 1944 * lockd needs to block waiting for locks. 1945 */ 1946 void 1947 posix_block_lock(struct file_lock *blocker, struct file_lock *waiter) 1948 { 1949 locks_insert_block(blocker, waiter); 1950 } 1951 1952 EXPORT_SYMBOL(posix_block_lock); 1953 1954 /** 1955 * posix_unblock_lock - stop waiting for a file lock 1956 * @filp: how the file was opened 1957 * @waiter: the lock which was waiting 1958 * 1959 * lockd needs to block waiting for locks. 1960 */ 1961 void 1962 posix_unblock_lock(struct file *filp, struct file_lock *waiter) 1963 { 1964 /* 1965 * A remote machine may cancel the lock request after it's been 1966 * granted locally. If that happens, we need to delete the lock. 1967 */ 1968 lock_kernel(); 1969 if (waiter->fl_next) { 1970 __locks_delete_block(waiter); 1971 unlock_kernel(); 1972 } else { 1973 unlock_kernel(); 1974 waiter->fl_type = F_UNLCK; 1975 posix_lock_file(filp, waiter); 1976 } 1977 } 1978 1979 EXPORT_SYMBOL(posix_unblock_lock); 1980 1981 static void lock_get_status(char* out, struct file_lock *fl, int id, char *pfx) 1982 { 1983 struct inode *inode = NULL; 1984 1985 if (fl->fl_file != NULL) 1986 inode = fl->fl_file->f_dentry->d_inode; 1987 1988 out += sprintf(out, "%d:%s ", id, pfx); 1989 if (IS_POSIX(fl)) { 1990 out += sprintf(out, "%6s %s ", 1991 (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ", 1992 (inode == NULL) ? "*NOINODE*" : 1993 (IS_MANDLOCK(inode) && 1994 (inode->i_mode & (S_IXGRP | S_ISGID)) == S_ISGID) ? 1995 "MANDATORY" : "ADVISORY "); 1996 } else if (IS_FLOCK(fl)) { 1997 if (fl->fl_type & LOCK_MAND) { 1998 out += sprintf(out, "FLOCK MSNFS "); 1999 } else { 2000 out += sprintf(out, "FLOCK ADVISORY "); 2001 } 2002 } else if (IS_LEASE(fl)) { 2003 out += sprintf(out, "LEASE "); 2004 if (fl->fl_type & F_INPROGRESS) 2005 out += sprintf(out, "BREAKING "); 2006 else if (fl->fl_file) 2007 out += sprintf(out, "ACTIVE "); 2008 else 2009 out += sprintf(out, "BREAKER "); 2010 } else { 2011 out += sprintf(out, "UNKNOWN UNKNOWN "); 2012 } 2013 if (fl->fl_type & LOCK_MAND) { 2014 out += sprintf(out, "%s ", 2015 (fl->fl_type & LOCK_READ) 2016 ? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ " 2017 : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE "); 2018 } else { 2019 out += sprintf(out, "%s ", 2020 (fl->fl_type & F_INPROGRESS) 2021 ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ " 2022 : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ "); 2023 } 2024 if (inode) { 2025 #ifdef WE_CAN_BREAK_LSLK_NOW 2026 out += sprintf(out, "%d %s:%ld ", fl->fl_pid, 2027 inode->i_sb->s_id, inode->i_ino); 2028 #else 2029 /* userspace relies on this representation of dev_t ;-( */ 2030 out += sprintf(out, "%d %02x:%02x:%ld ", fl->fl_pid, 2031 MAJOR(inode->i_sb->s_dev), 2032 MINOR(inode->i_sb->s_dev), inode->i_ino); 2033 #endif 2034 } else { 2035 out += sprintf(out, "%d <none>:0 ", fl->fl_pid); 2036 } 2037 if (IS_POSIX(fl)) { 2038 if (fl->fl_end == OFFSET_MAX) 2039 out += sprintf(out, "%Ld EOF\n", fl->fl_start); 2040 else 2041 out += sprintf(out, "%Ld %Ld\n", fl->fl_start, 2042 fl->fl_end); 2043 } else { 2044 out += sprintf(out, "0 EOF\n"); 2045 } 2046 } 2047 2048 static void move_lock_status(char **p, off_t* pos, off_t offset) 2049 { 2050 int len; 2051 len = strlen(*p); 2052 if(*pos >= offset) { 2053 /* the complete line is valid */ 2054 *p += len; 2055 *pos += len; 2056 return; 2057 } 2058 if(*pos+len > offset) { 2059 /* use the second part of the line */ 2060 int i = offset-*pos; 2061 memmove(*p,*p+i,len-i); 2062 *p += len-i; 2063 *pos += len; 2064 return; 2065 } 2066 /* discard the complete line */ 2067 *pos += len; 2068 } 2069 2070 /** 2071 * get_locks_status - reports lock usage in /proc/locks 2072 * @buffer: address in userspace to write into 2073 * @start: ? 2074 * @offset: how far we are through the buffer 2075 * @length: how much to read 2076 */ 2077 2078 int get_locks_status(char *buffer, char **start, off_t offset, int length) 2079 { 2080 struct list_head *tmp; 2081 char *q = buffer; 2082 off_t pos = 0; 2083 int i = 0; 2084 2085 lock_kernel(); 2086 list_for_each(tmp, &file_lock_list) { 2087 struct list_head *btmp; 2088 struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link); 2089 lock_get_status(q, fl, ++i, ""); 2090 move_lock_status(&q, &pos, offset); 2091 2092 if(pos >= offset+length) 2093 goto done; 2094 2095 list_for_each(btmp, &fl->fl_block) { 2096 struct file_lock *bfl = list_entry(btmp, 2097 struct file_lock, fl_block); 2098 lock_get_status(q, bfl, i, " ->"); 2099 move_lock_status(&q, &pos, offset); 2100 2101 if(pos >= offset+length) 2102 goto done; 2103 } 2104 } 2105 done: 2106 unlock_kernel(); 2107 *start = buffer; 2108 if(q-buffer < length) 2109 return (q-buffer); 2110 return length; 2111 } 2112 2113 /** 2114 * lock_may_read - checks that the region is free of locks 2115 * @inode: the inode that is being read 2116 * @start: the first byte to read 2117 * @len: the number of bytes to read 2118 * 2119 * Emulates Windows locking requirements. Whole-file 2120 * mandatory locks (share modes) can prohibit a read and 2121 * byte-range POSIX locks can prohibit a read if they overlap. 2122 * 2123 * N.B. this function is only ever called 2124 * from knfsd and ownership of locks is never checked. 2125 */ 2126 int lock_may_read(struct inode *inode, loff_t start, unsigned long len) 2127 { 2128 struct file_lock *fl; 2129 int result = 1; 2130 lock_kernel(); 2131 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { 2132 if (IS_POSIX(fl)) { 2133 if (fl->fl_type == F_RDLCK) 2134 continue; 2135 if ((fl->fl_end < start) || (fl->fl_start > (start + len))) 2136 continue; 2137 } else if (IS_FLOCK(fl)) { 2138 if (!(fl->fl_type & LOCK_MAND)) 2139 continue; 2140 if (fl->fl_type & LOCK_READ) 2141 continue; 2142 } else 2143 continue; 2144 result = 0; 2145 break; 2146 } 2147 unlock_kernel(); 2148 return result; 2149 } 2150 2151 EXPORT_SYMBOL(lock_may_read); 2152 2153 /** 2154 * lock_may_write - checks that the region is free of locks 2155 * @inode: the inode that is being written 2156 * @start: the first byte to write 2157 * @len: the number of bytes to write 2158 * 2159 * Emulates Windows locking requirements. Whole-file 2160 * mandatory locks (share modes) can prohibit a write and 2161 * byte-range POSIX locks can prohibit a write if they overlap. 2162 * 2163 * N.B. this function is only ever called 2164 * from knfsd and ownership of locks is never checked. 2165 */ 2166 int lock_may_write(struct inode *inode, loff_t start, unsigned long len) 2167 { 2168 struct file_lock *fl; 2169 int result = 1; 2170 lock_kernel(); 2171 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { 2172 if (IS_POSIX(fl)) { 2173 if ((fl->fl_end < start) || (fl->fl_start > (start + len))) 2174 continue; 2175 } else if (IS_FLOCK(fl)) { 2176 if (!(fl->fl_type & LOCK_MAND)) 2177 continue; 2178 if (fl->fl_type & LOCK_WRITE) 2179 continue; 2180 } else 2181 continue; 2182 result = 0; 2183 break; 2184 } 2185 unlock_kernel(); 2186 return result; 2187 } 2188 2189 EXPORT_SYMBOL(lock_may_write); 2190 2191 static inline void __steal_locks(struct file *file, fl_owner_t from) 2192 { 2193 struct inode *inode = file->f_dentry->d_inode; 2194 struct file_lock *fl = inode->i_flock; 2195 2196 while (fl) { 2197 if (fl->fl_file == file && fl->fl_owner == from) 2198 fl->fl_owner = current->files; 2199 fl = fl->fl_next; 2200 } 2201 } 2202 2203 /* When getting ready for executing a binary, we make sure that current 2204 * has a files_struct on its own. Before dropping the old files_struct, 2205 * we take over ownership of all locks for all file descriptors we own. 2206 * Note that we may accidentally steal a lock for a file that a sibling 2207 * has created since the unshare_files() call. 2208 */ 2209 void steal_locks(fl_owner_t from) 2210 { 2211 struct files_struct *files = current->files; 2212 int i, j; 2213 struct fdtable *fdt; 2214 2215 if (from == files) 2216 return; 2217 2218 lock_kernel(); 2219 j = 0; 2220 rcu_read_lock(); 2221 fdt = files_fdtable(files); 2222 for (;;) { 2223 unsigned long set; 2224 i = j * __NFDBITS; 2225 if (i >= fdt->max_fdset || i >= fdt->max_fds) 2226 break; 2227 set = fdt->open_fds->fds_bits[j++]; 2228 while (set) { 2229 if (set & 1) { 2230 struct file *file = fdt->fd[i]; 2231 if (file) 2232 __steal_locks(file, from); 2233 } 2234 i++; 2235 set >>= 1; 2236 } 2237 } 2238 rcu_read_unlock(); 2239 unlock_kernel(); 2240 } 2241 EXPORT_SYMBOL(steal_locks); 2242 2243 static int __init filelock_init(void) 2244 { 2245 filelock_cache = kmem_cache_create("file_lock_cache", 2246 sizeof(struct file_lock), 0, SLAB_PANIC, 2247 init_once, NULL); 2248 return 0; 2249 } 2250 2251 core_initcall(filelock_init); 2252