/*
 * linux/fs/locks.c
 *
 * Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
 * Doug Evans (dje@spiff.uucp), August 07, 1992
 *
 * Deadlock detection added.
 * FIXME: one thing isn't handled yet:
 *	- mandatory locks (requires lots of changes elsewhere)
 * Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
 *
 * Miscellaneous edits, and a total rewrite of posix_lock_file() code.
 * Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
 *
 * Converted file_lock_table to a linked list from an array, which eliminates
 * the limits on how many active file locks are open.
 * Chad Page (pageone@netcom.com), November 27, 1994
 *
 * Removed dependency on file descriptors. dup()'ed file descriptors now
 * get the same locks as the original file descriptors, and a close() on
 * any file descriptor removes ALL the locks on the file for the current
 * process. Since locks still depend on the process id, locks are inherited
 * after an exec() but not after a fork(). This agrees with POSIX, and both
 * BSD and SVR4 practice.
 * Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
 *
 * Scrapped free list which is redundant now that we allocate locks
 * dynamically with kmalloc()/kfree().
 * Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
 *
 * Implemented two lock personalities - FL_FLOCK and FL_POSIX.
 *
 * FL_POSIX locks are created with calls to fcntl() and lockf() through the
 * fcntl() system call. They have the semantics described above.
 *
 * FL_FLOCK locks are created with calls to flock(), through the flock()
 * system call, which is new. Old C libraries implement flock() via fcntl()
 * and will continue to use the old, broken implementation.
 *
 * FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
 * with a file pointer (filp). As a result they can be shared by a parent
 * process and its children after a fork(). They are removed when the last
 * file descriptor referring to the file pointer is closed (unless explicitly
 * unlocked).
 *
 * FL_FLOCK locks never deadlock; an existing lock is always removed before
 * upgrading from shared to exclusive (or vice versa). When this happens
 * any processes blocked by the current lock are woken up and allowed to
 * run before the new lock is applied.
 * Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
 *
 * Removed some race conditions in flock_lock_file(), marked other possible
 * races. Just grep for FIXME to see them.
 * Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
 *
 * Addressed Dmitry's concerns. Deadlock checking no longer recursive.
 * Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
 * once we've checked for blocking and deadlocking.
 * Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
 *
 * Initial implementation of mandatory locks. SunOS turned out to be
 * a rotten model, so I implemented the "obvious" semantics.
 * See 'Documentation/mandatory.txt' for details.
 * Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
 *
 * Don't allow mandatory locks on mmap()'ed files. Added simple functions to
 * check if a file has mandatory locks, used by mmap(), open() and creat() to
 * see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
 * Manual, Section 2.
 * Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
 *
 * Tidied up block list handling. Added '/proc/locks' interface.
 * Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
 *
 * Fixed deadlock condition for pathological code that mixes calls to
 * flock() and fcntl().
 * Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
 *
 * Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
 * for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
 * guarantee sensible behaviour in the case where file system modules might
 * be compiled with different options than the kernel itself.
 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 * Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
 * (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 * Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
 * locks. Changed process synchronisation to avoid dereferencing locks that
 * have already been freed.
 * Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
 *
 * Made the block list a circular list to minimise searching in the list.
 * Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
 *
 * Made mandatory locking a mount option. Default is not to allow mandatory
 * locking.
 * Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
 *
 * Some adaptations for NFS support.
 * Olaf Kirch (okir@monad.swb.de), Dec 1996.
 *
 * Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
 * Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
 *
 * Use slab allocator instead of kmalloc/kfree.
 * Use generic list implementation from <linux/list.h>.
 * Sped up posix_locks_deadlock by only considering blocked locks.
 * Matthew Wilcox <willy@debian.org>, March, 2000.
 *
 * Leases and LOCK_MAND
 * Matthew Wilcox <willy@debian.org>, June, 2000.
 * Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
 */

#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>

#include <asm/semaphore.h>
#include <asm/uaccess.h>

#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & FL_LEASE)

int leases_enable = 1;
int lease_break_time = 45;

#define for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)

LIST_HEAD(file_lock_list);

EXPORT_SYMBOL(file_lock_list);

static LIST_HEAD(blocked_list);

static kmem_cache_t *filelock_cache;

/* Allocate an empty lock structure. */
static struct file_lock *locks_alloc_lock(void)
{
	return kmem_cache_alloc(filelock_cache, SLAB_KERNEL);
}

/* Free a lock which is not in use.
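 * The lock must no longer be linked anywhere: its wait queue must be empty
 * and fl_block/fl_link must be unlinked, otherwise the sanity checks below
 * BUG()/panic().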
 */
static inline void locks_free_lock(struct file_lock *fl)
{
	if (fl == NULL) {
		BUG();
		return;
	}
	if (waitqueue_active(&fl->fl_wait))
		panic("Attempting to free lock with active wait queue");

	if (!list_empty(&fl->fl_block))
		panic("Attempting to free lock with active block list");

	if (!list_empty(&fl->fl_link))
		panic("Attempting to free lock on active lock list");

	if (fl->fl_ops) {
		if (fl->fl_ops->fl_release_private)
			fl->fl_ops->fl_release_private(fl);
		fl->fl_ops = NULL;
	}

	if (fl->fl_lmops) {
		if (fl->fl_lmops->fl_release_private)
			fl->fl_lmops->fl_release_private(fl);
		fl->fl_lmops = NULL;
	}

	kmem_cache_free(filelock_cache, fl);
}

void locks_init_lock(struct file_lock *fl)
{
	INIT_LIST_HEAD(&fl->fl_link);
	INIT_LIST_HEAD(&fl->fl_block);
	init_waitqueue_head(&fl->fl_wait);
	fl->fl_next = NULL;
	fl->fl_fasync = NULL;
	fl->fl_owner = NULL;
	fl->fl_pid = 0;
	fl->fl_file = NULL;
	fl->fl_flags = 0;
	fl->fl_type = 0;
	fl->fl_start = fl->fl_end = 0;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;
}

EXPORT_SYMBOL(locks_init_lock);

/*
 * Initialises the fields of the file lock which are invariant for
 * free file_locks.
 */
static void init_once(void *foo, kmem_cache_t *cache, unsigned long flags)
{
	struct file_lock *lock = (struct file_lock *) foo;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) !=
			SLAB_CTOR_CONSTRUCTOR)
		return;

	locks_init_lock(lock);
}

/*
 * Initialize a new lock from an existing file_lock structure.
 */
void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
	new->fl_file = fl->fl_file;
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_ops = fl->fl_ops;
	new->fl_lmops = fl->fl_lmops;
	if (fl->fl_ops && fl->fl_ops->fl_copy_lock)
		fl->fl_ops->fl_copy_lock(new, fl);
	if (fl->fl_lmops && fl->fl_lmops->fl_copy_lock)
		fl->fl_lmops->fl_copy_lock(new, fl);
}

EXPORT_SYMBOL(locks_copy_lock);

static inline int flock_translate_cmd(int cmd) {
	if (cmd & LOCK_MAND)
		return cmd & (LOCK_MAND | LOCK_RW);
	switch (cmd) {
	case LOCK_SH:
		return F_RDLCK;
	case LOCK_EX:
		return F_WRLCK;
	case LOCK_UN:
		return F_UNLCK;
	}
	return -EINVAL;
}

/* Fill in a file_lock structure with an appropriate FLOCK lock. */
static int flock_make_lock(struct file *filp, struct file_lock **lock,
		unsigned int cmd)
{
	struct file_lock *fl;
	int type = flock_translate_cmd(cmd);
	if (type < 0)
		return type;

	fl = locks_alloc_lock();
	if (fl == NULL)
		return -ENOMEM;

	fl->fl_file = filp;
	fl->fl_pid = current->tgid;
	fl->fl_flags = FL_FLOCK;
	fl->fl_type = type;
	fl->fl_end = OFFSET_MAX;

	*lock = fl;
	return 0;
}

static int assign_type(struct file_lock *fl, int type)
{
	switch (type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 * style lock.
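 *
 * Illustrative example (not taken from the original comment): with
 * l_whence = SEEK_SET, l_start = 100 and l_len = -10, the POSIX-2001
 * rules applied below give the byte range 90..99, i.e. fl_start = 90
 * and fl_end = 99.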
296 */ 297 static int flock_to_posix_lock(struct file *filp, struct file_lock *fl, 298 struct flock *l) 299 { 300 off_t start, end; 301 302 switch (l->l_whence) { 303 case 0: /*SEEK_SET*/ 304 start = 0; 305 break; 306 case 1: /*SEEK_CUR*/ 307 start = filp->f_pos; 308 break; 309 case 2: /*SEEK_END*/ 310 start = i_size_read(filp->f_dentry->d_inode); 311 break; 312 default: 313 return -EINVAL; 314 } 315 316 /* POSIX-1996 leaves the case l->l_len < 0 undefined; 317 POSIX-2001 defines it. */ 318 start += l->l_start; 319 end = start + l->l_len - 1; 320 if (l->l_len < 0) { 321 end = start - 1; 322 start += l->l_len; 323 } 324 325 if (start < 0) 326 return -EINVAL; 327 if (l->l_len > 0 && end < 0) 328 return -EOVERFLOW; 329 330 fl->fl_start = start; /* we record the absolute position */ 331 fl->fl_end = end; 332 if (l->l_len == 0) 333 fl->fl_end = OFFSET_MAX; 334 335 fl->fl_owner = current->files; 336 fl->fl_pid = current->tgid; 337 fl->fl_file = filp; 338 fl->fl_flags = FL_POSIX; 339 fl->fl_ops = NULL; 340 fl->fl_lmops = NULL; 341 342 return assign_type(fl, l->l_type); 343 } 344 345 #if BITS_PER_LONG == 32 346 static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl, 347 struct flock64 *l) 348 { 349 loff_t start; 350 351 switch (l->l_whence) { 352 case 0: /*SEEK_SET*/ 353 start = 0; 354 break; 355 case 1: /*SEEK_CUR*/ 356 start = filp->f_pos; 357 break; 358 case 2: /*SEEK_END*/ 359 start = i_size_read(filp->f_dentry->d_inode); 360 break; 361 default: 362 return -EINVAL; 363 } 364 365 if (((start += l->l_start) < 0) || (l->l_len < 0)) 366 return -EINVAL; 367 fl->fl_end = start + l->l_len - 1; 368 if (l->l_len > 0 && fl->fl_end < 0) 369 return -EOVERFLOW; 370 fl->fl_start = start; /* we record the absolute position */ 371 if (l->l_len == 0) 372 fl->fl_end = OFFSET_MAX; 373 374 fl->fl_owner = current->files; 375 fl->fl_pid = current->tgid; 376 fl->fl_file = filp; 377 fl->fl_flags = FL_POSIX; 378 fl->fl_ops = NULL; 379 fl->fl_lmops = NULL; 380 381 switch (l->l_type) { 382 case F_RDLCK: 383 case F_WRLCK: 384 case F_UNLCK: 385 fl->fl_type = l->l_type; 386 break; 387 default: 388 return -EINVAL; 389 } 390 391 return (0); 392 } 393 #endif 394 395 /* default lease lock manager operations */ 396 static void lease_break_callback(struct file_lock *fl) 397 { 398 kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG); 399 } 400 401 static void lease_release_private_callback(struct file_lock *fl) 402 { 403 if (!fl->fl_file) 404 return; 405 406 f_delown(fl->fl_file); 407 fl->fl_file->f_owner.signum = 0; 408 } 409 410 static int lease_mylease_callback(struct file_lock *fl, struct file_lock *try) 411 { 412 return fl->fl_file == try->fl_file; 413 } 414 415 static struct lock_manager_operations lease_manager_ops = { 416 .fl_break = lease_break_callback, 417 .fl_release_private = lease_release_private_callback, 418 .fl_mylease = lease_mylease_callback, 419 .fl_change = lease_modify, 420 }; 421 422 /* 423 * Initialize a lease, use the default lock manager operations 424 */ 425 static int lease_init(struct file *filp, int type, struct file_lock *fl) 426 { 427 fl->fl_owner = current->files; 428 fl->fl_pid = current->tgid; 429 430 fl->fl_file = filp; 431 fl->fl_flags = FL_LEASE; 432 if (assign_type(fl, type) != 0) { 433 locks_free_lock(fl); 434 return -EINVAL; 435 } 436 fl->fl_start = 0; 437 fl->fl_end = OFFSET_MAX; 438 fl->fl_ops = NULL; 439 fl->fl_lmops = &lease_manager_ops; 440 return 0; 441 } 442 443 /* Allocate a file_lock initialised to this type of lease */ 444 static int lease_alloc(struct file *filp, int 
{
	struct file_lock *fl = locks_alloc_lock();
	int error;

	if (fl == NULL)
		return -ENOMEM;

	error = lease_init(filp, type, fl);
	if (error)
		return error;
	*flp = fl;
	return 0;
}

/* Check if two locks overlap each other.
 */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
	return ((fl1->fl_end >= fl2->fl_start) &&
		(fl2->fl_end >= fl1->fl_start));
}

/*
 * Check whether two locks have the same owner.
 */
static inline int
posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	if (fl1->fl_lmops && fl1->fl_lmops->fl_compare_owner)
		return fl2->fl_lmops == fl1->fl_lmops &&
			fl1->fl_lmops->fl_compare_owner(fl1, fl2);
	return fl1->fl_owner == fl2->fl_owner;
}

/* Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
 */
static inline void __locks_delete_block(struct file_lock *waiter)
{
	list_del_init(&waiter->fl_block);
	list_del_init(&waiter->fl_link);
	waiter->fl_next = NULL;
}

/* Remove waiter from its block list, taking and dropping the kernel lock.
 */
static void locks_delete_block(struct file_lock *waiter)
{
	lock_kernel();
	__locks_delete_block(waiter);
	unlock_kernel();
}

/* Insert waiter into blocker's block list.
 * We use a circular list so that processes can be easily woken up in
 * the order they blocked. The documentation doesn't require this but
 * it seems like the reasonable thing to do.
 */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter)
{
	if (!list_empty(&waiter->fl_block)) {
		printk(KERN_ERR "locks_insert_block: removing duplicated lock "
			"(pid=%d %Ld-%Ld type=%d)\n", waiter->fl_pid,
			waiter->fl_start, waiter->fl_end, waiter->fl_type);
		__locks_delete_block(waiter);
	}
	list_add_tail(&waiter->fl_block, &blocker->fl_block);
	waiter->fl_next = blocker;
	if (IS_POSIX(blocker))
		list_add(&waiter->fl_link, &blocked_list);
}

/* Wake up processes blocked waiting for blocker.
 * If told to wait then schedule the processes until the block list
 * is empty, otherwise empty the block list ourselves.
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	while (!list_empty(&blocker->fl_block)) {
		struct file_lock *waiter = list_entry(blocker->fl_block.next,
				struct file_lock, fl_block);
		__locks_delete_block(waiter);
		if (waiter->fl_lmops && waiter->fl_lmops->fl_notify)
			waiter->fl_lmops->fl_notify(waiter);
		else
			wake_up(&waiter->fl_wait);
	}
}

/* Insert file lock fl into an inode's lock list at the position indicated
 * by pos. At the same time add the lock to the global file lock list.
 */
static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
{
	list_add(&fl->fl_link, &file_lock_list);

	/* insert into file's list */
	fl->fl_next = *pos;
	*pos = fl;

	if (fl->fl_ops && fl->fl_ops->fl_insert)
		fl->fl_ops->fl_insert(fl);
}

/*
 * Delete a lock and then free it.
 * Wake up processes that are blocked waiting for this lock,
 * notify the FS that the lock has been cleared and
 * finally free the lock.
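 * The caller is expected to hold the kernel lock.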
555 */ 556 static void locks_delete_lock(struct file_lock **thisfl_p) 557 { 558 struct file_lock *fl = *thisfl_p; 559 560 *thisfl_p = fl->fl_next; 561 fl->fl_next = NULL; 562 list_del_init(&fl->fl_link); 563 564 fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync); 565 if (fl->fl_fasync != NULL) { 566 printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync); 567 fl->fl_fasync = NULL; 568 } 569 570 if (fl->fl_ops && fl->fl_ops->fl_remove) 571 fl->fl_ops->fl_remove(fl); 572 573 locks_wake_up_blocks(fl); 574 locks_free_lock(fl); 575 } 576 577 /* Determine if lock sys_fl blocks lock caller_fl. Common functionality 578 * checks for shared/exclusive status of overlapping locks. 579 */ 580 static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl) 581 { 582 if (sys_fl->fl_type == F_WRLCK) 583 return 1; 584 if (caller_fl->fl_type == F_WRLCK) 585 return 1; 586 return 0; 587 } 588 589 /* Determine if lock sys_fl blocks lock caller_fl. POSIX specific 590 * checking before calling the locks_conflict(). 591 */ 592 static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl) 593 { 594 /* POSIX locks owned by the same process do not conflict with 595 * each other. 596 */ 597 if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl)) 598 return (0); 599 600 /* Check whether they overlap */ 601 if (!locks_overlap(caller_fl, sys_fl)) 602 return 0; 603 604 return (locks_conflict(caller_fl, sys_fl)); 605 } 606 607 /* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific 608 * checking before calling the locks_conflict(). 609 */ 610 static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl) 611 { 612 /* FLOCK locks referring to the same filp do not conflict with 613 * each other. 614 */ 615 if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file)) 616 return (0); 617 if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND)) 618 return 0; 619 620 return (locks_conflict(caller_fl, sys_fl)); 621 } 622 623 static int interruptible_sleep_on_locked(wait_queue_head_t *fl_wait, int timeout) 624 { 625 int result = 0; 626 DECLARE_WAITQUEUE(wait, current); 627 628 __set_current_state(TASK_INTERRUPTIBLE); 629 add_wait_queue(fl_wait, &wait); 630 if (timeout == 0) 631 schedule(); 632 else 633 result = schedule_timeout(timeout); 634 if (signal_pending(current)) 635 result = -ERESTARTSYS; 636 remove_wait_queue(fl_wait, &wait); 637 __set_current_state(TASK_RUNNING); 638 return result; 639 } 640 641 static int locks_block_on_timeout(struct file_lock *blocker, struct file_lock *waiter, int time) 642 { 643 int result; 644 locks_insert_block(blocker, waiter); 645 result = interruptible_sleep_on_locked(&waiter->fl_wait, time); 646 __locks_delete_block(waiter); 647 return result; 648 } 649 650 struct file_lock * 651 posix_test_lock(struct file *filp, struct file_lock *fl) 652 { 653 struct file_lock *cfl; 654 655 lock_kernel(); 656 for (cfl = filp->f_dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) { 657 if (!IS_POSIX(cfl)) 658 continue; 659 if (posix_locks_conflict(cfl, fl)) 660 break; 661 } 662 unlock_kernel(); 663 664 return (cfl); 665 } 666 667 EXPORT_SYMBOL(posix_test_lock); 668 669 /* This function tests for deadlock condition before putting a process to 670 * sleep. The detection scheme is no longer recursive. Recursive was neat, 671 * but dangerous - we risked stack corruption if the lock data was bad, or 672 * if the recursion was too deep for any other reason. 
673 * 674 * We rely on the fact that a task can only be on one lock's wait queue 675 * at a time. When we find blocked_task on a wait queue we can re-search 676 * with blocked_task equal to that queue's owner, until either blocked_task 677 * isn't found, or blocked_task is found on a queue owned by my_task. 678 * 679 * Note: the above assumption may not be true when handling lock requests 680 * from a broken NFS client. But broken NFS clients have a lot more to 681 * worry about than proper deadlock detection anyway... --okir 682 */ 683 int posix_locks_deadlock(struct file_lock *caller_fl, 684 struct file_lock *block_fl) 685 { 686 struct list_head *tmp; 687 688 next_task: 689 if (posix_same_owner(caller_fl, block_fl)) 690 return 1; 691 list_for_each(tmp, &blocked_list) { 692 struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link); 693 if (posix_same_owner(fl, block_fl)) { 694 fl = fl->fl_next; 695 block_fl = fl; 696 goto next_task; 697 } 698 } 699 return 0; 700 } 701 702 EXPORT_SYMBOL(posix_locks_deadlock); 703 704 /* Try to create a FLOCK lock on filp. We always insert new FLOCK locks 705 * at the head of the list, but that's secret knowledge known only to 706 * flock_lock_file and posix_lock_file. 707 */ 708 static int flock_lock_file(struct file *filp, struct file_lock *new_fl) 709 { 710 struct file_lock **before; 711 struct inode * inode = filp->f_dentry->d_inode; 712 int error = 0; 713 int found = 0; 714 715 lock_kernel(); 716 for_each_lock(inode, before) { 717 struct file_lock *fl = *before; 718 if (IS_POSIX(fl)) 719 break; 720 if (IS_LEASE(fl)) 721 continue; 722 if (filp != fl->fl_file) 723 continue; 724 if (new_fl->fl_type == fl->fl_type) 725 goto out; 726 found = 1; 727 locks_delete_lock(before); 728 break; 729 } 730 unlock_kernel(); 731 732 if (new_fl->fl_type == F_UNLCK) 733 return 0; 734 735 /* 736 * If a higher-priority process was blocked on the old file lock, 737 * give it the opportunity to lock the file. 738 */ 739 if (found) 740 cond_resched(); 741 742 lock_kernel(); 743 for_each_lock(inode, before) { 744 struct file_lock *fl = *before; 745 if (IS_POSIX(fl)) 746 break; 747 if (IS_LEASE(fl)) 748 continue; 749 if (!flock_locks_conflict(new_fl, fl)) 750 continue; 751 error = -EAGAIN; 752 if (new_fl->fl_flags & FL_SLEEP) { 753 locks_insert_block(fl, new_fl); 754 } 755 goto out; 756 } 757 locks_insert_lock(&inode->i_flock, new_fl); 758 error = 0; 759 760 out: 761 unlock_kernel(); 762 return error; 763 } 764 765 EXPORT_SYMBOL(posix_lock_file); 766 767 static int __posix_lock_file(struct inode *inode, struct file_lock *request) 768 { 769 struct file_lock *fl; 770 struct file_lock *new_fl, *new_fl2; 771 struct file_lock *left = NULL; 772 struct file_lock *right = NULL; 773 struct file_lock **before; 774 int error, added = 0; 775 776 /* 777 * We may need two file_lock structures for this operation, 778 * so we get them in advance to avoid races. 779 */ 780 new_fl = locks_alloc_lock(); 781 new_fl2 = locks_alloc_lock(); 782 783 lock_kernel(); 784 if (request->fl_type != F_UNLCK) { 785 for_each_lock(inode, before) { 786 struct file_lock *fl = *before; 787 if (!IS_POSIX(fl)) 788 continue; 789 if (!posix_locks_conflict(request, fl)) 790 continue; 791 error = -EAGAIN; 792 if (!(request->fl_flags & FL_SLEEP)) 793 goto out; 794 error = -EDEADLK; 795 if (posix_locks_deadlock(request, fl)) 796 goto out; 797 error = -EAGAIN; 798 locks_insert_block(fl, request); 799 goto out; 800 } 801 } 802 803 /* If we're just looking for a conflict, we're done. 
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	error = -ENOLCK; /* "no luck" */
	if (!(new_fl && new_fl2))
		goto out;

	/*
	 * We've allocated the new locks in advance, so there are no
	 * errors possible (and no blocking operations) from here on.
	 *
	 * Find the first old lock with the same owner as the new lock.
	 */

	before = &inode->i_flock;

	/* First skip locks owned by other processes. */
	while ((fl = *before) && (!IS_POSIX(fl) ||
				  !posix_same_owner(request, fl))) {
		before = &fl->fl_next;
	}

	/* Process locks with this owner. */
	while ((fl = *before) && posix_same_owner(request, fl)) {
		/* Detect adjacent or overlapping regions (if same lock type)
		 */
		if (request->fl_type == fl->fl_type) {
			if (fl->fl_end < request->fl_start - 1)
				goto next_lock;
			/* If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
			if (fl->fl_start > request->fl_end + 1)
				break;

			/* If we come here, the new and old lock are of the
			 * same type and adjacent or overlapping. Make one
			 * lock yielding from the lower start address of both
			 * locks to the higher end address.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock(before);
				continue;
			}
			request = fl;
			added = 1;
		}
		else {
			/* Processing for different lock types is a bit
			 * more complex.
			 */
			if (fl->fl_end < request->fl_start)
				goto next_lock;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = 1;
			if (fl->fl_start < request->fl_start)
				left = fl;
			/* If the next lock in the list has a higher end
			 * address than the new one, insert the new one here.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/* The new lock completely replaces an old
				 * one (This may happen several times).
				 */
				if (added) {
					locks_delete_lock(before);
					continue;
				}
				/* Replace the old lock with the new one.
				 * Wake up anybody waiting for the old one,
				 * as the change in lock type might satisfy
				 * their needs.
				 */
				locks_wake_up_blocks(fl);
				fl->fl_start = request->fl_start;
				fl->fl_end = request->fl_end;
				fl->fl_type = request->fl_type;
				fl->fl_u = request->fl_u;
				request = fl;
				added = 1;
			}
		}
		/* Go on to next lock.
		 */
	next_lock:
		before = &fl->fl_next;
	}

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK)
			goto out;
		locks_copy_lock(new_fl, request);
		locks_insert_lock(before, new_fl);
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* The new lock breaks the old one in two pieces,
			 * so we have to use the second new lock.
			 */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock(before, left);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
out:
	unlock_kernel();
	/*
	 * Free any unused locks.
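	 * (new_fl2 is only consumed when an existing lock had to be split in
	 * two, e.g. unlocking bytes 40..59 out of a lock covering 0..99
	 * leaves two locks, 0..39 and 60..99.)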
936 */ 937 if (new_fl) 938 locks_free_lock(new_fl); 939 if (new_fl2) 940 locks_free_lock(new_fl2); 941 return error; 942 } 943 944 /** 945 * posix_lock_file - Apply a POSIX-style lock to a file 946 * @filp: The file to apply the lock to 947 * @fl: The lock to be applied 948 * 949 * Add a POSIX style lock to a file. 950 * We merge adjacent & overlapping locks whenever possible. 951 * POSIX locks are sorted by owner task, then by starting address 952 */ 953 int posix_lock_file(struct file *filp, struct file_lock *fl) 954 { 955 return __posix_lock_file(filp->f_dentry->d_inode, fl); 956 } 957 958 /** 959 * posix_lock_file_wait - Apply a POSIX-style lock to a file 960 * @filp: The file to apply the lock to 961 * @fl: The lock to be applied 962 * 963 * Add a POSIX style lock to a file. 964 * We merge adjacent & overlapping locks whenever possible. 965 * POSIX locks are sorted by owner task, then by starting address 966 */ 967 int posix_lock_file_wait(struct file *filp, struct file_lock *fl) 968 { 969 int error; 970 might_sleep (); 971 for (;;) { 972 error = __posix_lock_file(filp->f_dentry->d_inode, fl); 973 if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP)) 974 break; 975 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next); 976 if (!error) 977 continue; 978 979 locks_delete_block(fl); 980 break; 981 } 982 return error; 983 } 984 EXPORT_SYMBOL(posix_lock_file_wait); 985 986 /** 987 * locks_mandatory_locked - Check for an active lock 988 * @inode: the file to check 989 * 990 * Searches the inode's list of locks to find any POSIX locks which conflict. 991 * This function is called from locks_verify_locked() only. 992 */ 993 int locks_mandatory_locked(struct inode *inode) 994 { 995 fl_owner_t owner = current->files; 996 struct file_lock *fl; 997 998 /* 999 * Search the lock list for this inode for any POSIX locks. 1000 */ 1001 lock_kernel(); 1002 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { 1003 if (!IS_POSIX(fl)) 1004 continue; 1005 if (fl->fl_owner != owner) 1006 break; 1007 } 1008 unlock_kernel(); 1009 return fl ? -EAGAIN : 0; 1010 } 1011 1012 /** 1013 * locks_mandatory_area - Check for a conflicting lock 1014 * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ 1015 * for shared 1016 * @inode: the file to check 1017 * @filp: how the file was opened (if it was) 1018 * @offset: start of area to check 1019 * @count: length of area to check 1020 * 1021 * Searches the inode's list of locks to find any POSIX locks which conflict. 1022 * This function is called from rw_verify_area() and 1023 * locks_verify_truncate(). 1024 */ 1025 int locks_mandatory_area(int read_write, struct inode *inode, 1026 struct file *filp, loff_t offset, 1027 size_t count) 1028 { 1029 struct file_lock fl; 1030 int error; 1031 1032 locks_init_lock(&fl); 1033 fl.fl_owner = current->files; 1034 fl.fl_pid = current->tgid; 1035 fl.fl_file = filp; 1036 fl.fl_flags = FL_POSIX | FL_ACCESS; 1037 if (filp && !(filp->f_flags & O_NONBLOCK)) 1038 fl.fl_flags |= FL_SLEEP; 1039 fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK; 1040 fl.fl_start = offset; 1041 fl.fl_end = offset + count - 1; 1042 1043 for (;;) { 1044 error = __posix_lock_file(inode, &fl); 1045 if (error != -EAGAIN) 1046 break; 1047 if (!(fl.fl_flags & FL_SLEEP)) 1048 break; 1049 error = wait_event_interruptible(fl.fl_wait, !fl.fl_next); 1050 if (!error) { 1051 /* 1052 * If we've been sleeping someone might have 1053 * changed the permissions behind our back. 
1054 */ 1055 if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID) 1056 continue; 1057 } 1058 1059 locks_delete_block(&fl); 1060 break; 1061 } 1062 1063 return error; 1064 } 1065 1066 EXPORT_SYMBOL(locks_mandatory_area); 1067 1068 /* We already had a lease on this file; just change its type */ 1069 int lease_modify(struct file_lock **before, int arg) 1070 { 1071 struct file_lock *fl = *before; 1072 int error = assign_type(fl, arg); 1073 1074 if (error) 1075 return error; 1076 locks_wake_up_blocks(fl); 1077 if (arg == F_UNLCK) 1078 locks_delete_lock(before); 1079 return 0; 1080 } 1081 1082 EXPORT_SYMBOL(lease_modify); 1083 1084 static void time_out_leases(struct inode *inode) 1085 { 1086 struct file_lock **before; 1087 struct file_lock *fl; 1088 1089 before = &inode->i_flock; 1090 while ((fl = *before) && IS_LEASE(fl) && (fl->fl_type & F_INPROGRESS)) { 1091 if ((fl->fl_break_time == 0) 1092 || time_before(jiffies, fl->fl_break_time)) { 1093 before = &fl->fl_next; 1094 continue; 1095 } 1096 printk(KERN_INFO "lease broken - owner pid = %d\n", fl->fl_pid); 1097 lease_modify(before, fl->fl_type & ~F_INPROGRESS); 1098 if (fl == *before) /* lease_modify may have freed fl */ 1099 before = &fl->fl_next; 1100 } 1101 } 1102 1103 /** 1104 * __break_lease - revoke all outstanding leases on file 1105 * @inode: the inode of the file to return 1106 * @mode: the open mode (read or write) 1107 * 1108 * break_lease (inlined for speed) has checked there already 1109 * is a lease on this file. Leases are broken on a call to open() 1110 * or truncate(). This function can sleep unless you 1111 * specified %O_NONBLOCK to your open(). 1112 */ 1113 int __break_lease(struct inode *inode, unsigned int mode) 1114 { 1115 int error = 0, future; 1116 struct file_lock *new_fl, *flock; 1117 struct file_lock *fl; 1118 int alloc_err; 1119 unsigned long break_time; 1120 int i_have_this_lease = 0; 1121 1122 alloc_err = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK, 1123 &new_fl); 1124 1125 lock_kernel(); 1126 1127 time_out_leases(inode); 1128 1129 flock = inode->i_flock; 1130 if ((flock == NULL) || !IS_LEASE(flock)) 1131 goto out; 1132 1133 for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) 1134 if (fl->fl_owner == current->files) 1135 i_have_this_lease = 1; 1136 1137 if (mode & FMODE_WRITE) { 1138 /* If we want write access, we have to revoke any lease. */ 1139 future = F_UNLCK | F_INPROGRESS; 1140 } else if (flock->fl_type & F_INPROGRESS) { 1141 /* If the lease is already being broken, we just leave it */ 1142 future = flock->fl_type; 1143 } else if (flock->fl_type & F_WRLCK) { 1144 /* Downgrade the exclusive lease to a read-only lease. */ 1145 future = F_RDLCK | F_INPROGRESS; 1146 } else { 1147 /* the existing lease was read-only, so we can read too. 
		goto out;
	}

	if (alloc_err && !i_have_this_lease && ((mode & O_NONBLOCK) == 0)) {
		error = alloc_err;
		goto out;
	}

	break_time = 0;
	if (lease_break_time > 0) {
		break_time = jiffies + lease_break_time * HZ;
		if (break_time == 0)
			break_time++;	/* so that 0 means no break time */
	}

	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
		if (fl->fl_type != future) {
			fl->fl_type = future;
			fl->fl_break_time = break_time;
			/* lease must have lmops break callback */
			fl->fl_lmops->fl_break(fl);
		}
	}

	if (i_have_this_lease || (mode & O_NONBLOCK)) {
		error = -EWOULDBLOCK;
		goto out;
	}

restart:
	break_time = flock->fl_break_time;
	if (break_time != 0) {
		break_time -= jiffies;
		if (break_time == 0)
			break_time++;
	}
	error = locks_block_on_timeout(flock, new_fl, break_time);
	if (error >= 0) {
		if (error == 0)
			time_out_leases(inode);
		/* Wait for the next lease that has not been broken yet */
		for (flock = inode->i_flock; flock && IS_LEASE(flock);
				flock = flock->fl_next) {
			if (flock->fl_type & F_INPROGRESS)
				goto restart;
		}
		error = 0;
	}

out:
	unlock_kernel();
	if (!alloc_err)
		locks_free_lock(new_fl);
	return error;
}

EXPORT_SYMBOL(__break_lease);

/**
 * lease_get_mtime
 * @inode: the inode
 * @time: pointer to a timespec which will contain the last modified time
 *
 * This is to force NFS clients to flush their caches for files with
 * exclusive leases. The justification is that if someone has an
 * exclusive lease, then they could be modifying it.
 */
void lease_get_mtime(struct inode *inode, struct timespec *time)
{
	struct file_lock *flock = inode->i_flock;
	if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK))
		*time = current_fs_time(inode->i_sb);
	else
		*time = inode->i_mtime;
}

EXPORT_SYMBOL(lease_get_mtime);

/**
 * fcntl_getlease - Enquire what lease is currently active
 * @filp: the file
 *
 * The value returned by this function will be one of
 * (if no lease break is pending):
 *
 * %F_RDLCK to indicate a shared lease is held.
 *
 * %F_WRLCK to indicate an exclusive lease is held.
 *
 * %F_UNLCK to indicate no lease is held.
 *
 * (if a lease break is pending):
 *
 * %F_RDLCK to indicate an exclusive lease needs to be
 *		changed to a shared lease (or removed).
 *
 * %F_UNLCK to indicate the lease needs to be removed.
 *
 * XXX: sfr & willy disagree over whether F_INPROGRESS
 * should be returned to userspace.
 */
int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	int type = F_UNLCK;

	lock_kernel();
	time_out_leases(filp->f_dentry->d_inode);
	for (fl = filp->f_dentry->d_inode->i_flock; fl && IS_LEASE(fl);
			fl = fl->fl_next) {
		if (fl->fl_file == filp) {
			type = fl->fl_type & ~F_INPROGRESS;
			break;
		}
	}
	unlock_kernel();
	return type;
}

/**
 * __setlease - sets a lease on an open file
 * @filp: file pointer
 * @arg: type of lease to obtain
 * @flp: input - file_lock to use, output - file_lock inserted
 *
 * The (input) flp->fl_lmops->fl_break function is required
 * by break_lease().
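 *
 * An exclusive (F_WRLCK) lease is only granted while this filp is the
 * sole reference to the file; the dentry and inode reference counts are
 * checked below to make sure nobody else has the file open.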
1275 * 1276 * Called with kernel lock held. 1277 */ 1278 static int __setlease(struct file *filp, long arg, struct file_lock **flp) 1279 { 1280 struct file_lock *fl, **before, **my_before = NULL, *lease; 1281 struct dentry *dentry = filp->f_dentry; 1282 struct inode *inode = dentry->d_inode; 1283 int error, rdlease_count = 0, wrlease_count = 0; 1284 1285 time_out_leases(inode); 1286 1287 error = -EINVAL; 1288 if (!flp || !(*flp) || !(*flp)->fl_lmops || !(*flp)->fl_lmops->fl_break) 1289 goto out; 1290 1291 lease = *flp; 1292 1293 error = -EAGAIN; 1294 if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0)) 1295 goto out; 1296 if ((arg == F_WRLCK) 1297 && ((atomic_read(&dentry->d_count) > 1) 1298 || (atomic_read(&inode->i_count) > 1))) 1299 goto out; 1300 1301 /* 1302 * At this point, we know that if there is an exclusive 1303 * lease on this file, then we hold it on this filp 1304 * (otherwise our open of this file would have blocked). 1305 * And if we are trying to acquire an exclusive lease, 1306 * then the file is not open by anyone (including us) 1307 * except for this filp. 1308 */ 1309 for (before = &inode->i_flock; 1310 ((fl = *before) != NULL) && IS_LEASE(fl); 1311 before = &fl->fl_next) { 1312 if (lease->fl_lmops->fl_mylease(fl, lease)) 1313 my_before = before; 1314 else if (fl->fl_type == (F_INPROGRESS | F_UNLCK)) 1315 /* 1316 * Someone is in the process of opening this 1317 * file for writing so we may not take an 1318 * exclusive lease on it. 1319 */ 1320 wrlease_count++; 1321 else 1322 rdlease_count++; 1323 } 1324 1325 if ((arg == F_RDLCK && (wrlease_count > 0)) || 1326 (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0))) 1327 goto out; 1328 1329 if (my_before != NULL) { 1330 error = lease->fl_lmops->fl_change(my_before, arg); 1331 goto out; 1332 } 1333 1334 error = 0; 1335 if (arg == F_UNLCK) 1336 goto out; 1337 1338 error = -EINVAL; 1339 if (!leases_enable) 1340 goto out; 1341 1342 error = lease_alloc(filp, arg, &fl); 1343 if (error) 1344 goto out; 1345 1346 locks_copy_lock(fl, lease); 1347 1348 locks_insert_lock(before, fl); 1349 1350 *flp = fl; 1351 out: 1352 return error; 1353 } 1354 1355 /** 1356 * setlease - sets a lease on an open file 1357 * @filp: file pointer 1358 * @arg: type of lease to obtain 1359 * @lease: file_lock to use 1360 * 1361 * Call this to establish a lease on the file. 1362 * The fl_lmops fl_break function is required by break_lease 1363 */ 1364 1365 int setlease(struct file *filp, long arg, struct file_lock **lease) 1366 { 1367 struct dentry *dentry = filp->f_dentry; 1368 struct inode *inode = dentry->d_inode; 1369 int error; 1370 1371 if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE)) 1372 return -EACCES; 1373 if (!S_ISREG(inode->i_mode)) 1374 return -EINVAL; 1375 error = security_file_lock(filp, arg); 1376 if (error) 1377 return error; 1378 1379 lock_kernel(); 1380 error = __setlease(filp, arg, lease); 1381 unlock_kernel(); 1382 1383 return error; 1384 } 1385 1386 EXPORT_SYMBOL(setlease); 1387 1388 /** 1389 * fcntl_setlease - sets a lease on an open file 1390 * @fd: open file descriptor 1391 * @filp: file pointer 1392 * @arg: type of lease to obtain 1393 * 1394 * Call this fcntl to establish a lease on the file. 1395 * Note that you also need to call %F_SETSIG to 1396 * receive a signal when the lease is broken. 
1397 */ 1398 int fcntl_setlease(unsigned int fd, struct file *filp, long arg) 1399 { 1400 struct file_lock fl, *flp = &fl; 1401 struct dentry *dentry = filp->f_dentry; 1402 struct inode *inode = dentry->d_inode; 1403 int error; 1404 1405 if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE)) 1406 return -EACCES; 1407 if (!S_ISREG(inode->i_mode)) 1408 return -EINVAL; 1409 error = security_file_lock(filp, arg); 1410 if (error) 1411 return error; 1412 1413 locks_init_lock(&fl); 1414 error = lease_init(filp, arg, &fl); 1415 if (error) 1416 return error; 1417 1418 lock_kernel(); 1419 1420 error = __setlease(filp, arg, &flp); 1421 if (error) 1422 goto out_unlock; 1423 1424 error = fasync_helper(fd, filp, 1, &flp->fl_fasync); 1425 if (error < 0) { 1426 /* remove lease just inserted by __setlease */ 1427 flp->fl_type = F_UNLCK | F_INPROGRESS; 1428 flp->fl_break_time = jiffies- 10; 1429 time_out_leases(inode); 1430 goto out_unlock; 1431 } 1432 1433 error = f_setown(filp, current->pid, 0); 1434 out_unlock: 1435 unlock_kernel(); 1436 return error; 1437 } 1438 1439 /** 1440 * flock_lock_file_wait - Apply a FLOCK-style lock to a file 1441 * @filp: The file to apply the lock to 1442 * @fl: The lock to be applied 1443 * 1444 * Add a FLOCK style lock to a file. 1445 */ 1446 int flock_lock_file_wait(struct file *filp, struct file_lock *fl) 1447 { 1448 int error; 1449 might_sleep(); 1450 for (;;) { 1451 error = flock_lock_file(filp, fl); 1452 if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP)) 1453 break; 1454 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next); 1455 if (!error) 1456 continue; 1457 1458 locks_delete_block(fl); 1459 break; 1460 } 1461 return error; 1462 } 1463 1464 EXPORT_SYMBOL(flock_lock_file_wait); 1465 1466 /** 1467 * sys_flock: - flock() system call. 1468 * @fd: the file descriptor to lock. 1469 * @cmd: the type of lock to apply. 1470 * 1471 * Apply a %FL_FLOCK style lock to an open file descriptor. 1472 * The @cmd can be one of 1473 * 1474 * %LOCK_SH -- a shared lock. 1475 * 1476 * %LOCK_EX -- an exclusive lock. 1477 * 1478 * %LOCK_UN -- remove an existing lock. 1479 * 1480 * %LOCK_MAND -- a `mandatory' flock. This exists to emulate Windows Share Modes. 1481 * 1482 * %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other 1483 * processes read and write access respectively. 1484 */ 1485 asmlinkage long sys_flock(unsigned int fd, unsigned int cmd) 1486 { 1487 struct file *filp; 1488 struct file_lock *lock; 1489 int can_sleep, unlock; 1490 int error; 1491 1492 error = -EBADF; 1493 filp = fget(fd); 1494 if (!filp) 1495 goto out; 1496 1497 can_sleep = !(cmd & LOCK_NB); 1498 cmd &= ~LOCK_NB; 1499 unlock = (cmd == LOCK_UN); 1500 1501 if (!unlock && !(cmd & LOCK_MAND) && !(filp->f_mode & 3)) 1502 goto out_putf; 1503 1504 error = flock_make_lock(filp, &lock, cmd); 1505 if (error) 1506 goto out_putf; 1507 if (can_sleep) 1508 lock->fl_flags |= FL_SLEEP; 1509 1510 error = security_file_lock(filp, cmd); 1511 if (error) 1512 goto out_free; 1513 1514 if (filp->f_op && filp->f_op->flock) 1515 error = filp->f_op->flock(filp, 1516 (can_sleep) ? F_SETLKW : F_SETLK, 1517 lock); 1518 else 1519 error = flock_lock_file_wait(filp, lock); 1520 1521 out_free: 1522 if (list_empty(&lock->fl_link)) { 1523 locks_free_lock(lock); 1524 } 1525 1526 out_putf: 1527 fput(filp); 1528 out: 1529 return error; 1530 } 1531 1532 /* Report the first existing lock that would conflict with l. 1533 * This implements the F_GETLK command of fcntl(). 
1534 */ 1535 int fcntl_getlk(struct file *filp, struct flock __user *l) 1536 { 1537 struct file_lock *fl, file_lock; 1538 struct flock flock; 1539 int error; 1540 1541 error = -EFAULT; 1542 if (copy_from_user(&flock, l, sizeof(flock))) 1543 goto out; 1544 error = -EINVAL; 1545 if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK)) 1546 goto out; 1547 1548 error = flock_to_posix_lock(filp, &file_lock, &flock); 1549 if (error) 1550 goto out; 1551 1552 if (filp->f_op && filp->f_op->lock) { 1553 error = filp->f_op->lock(filp, F_GETLK, &file_lock); 1554 if (file_lock.fl_ops && file_lock.fl_ops->fl_release_private) 1555 file_lock.fl_ops->fl_release_private(&file_lock); 1556 if (error < 0) 1557 goto out; 1558 else 1559 fl = (file_lock.fl_type == F_UNLCK ? NULL : &file_lock); 1560 } else { 1561 fl = posix_test_lock(filp, &file_lock); 1562 } 1563 1564 flock.l_type = F_UNLCK; 1565 if (fl != NULL) { 1566 flock.l_pid = fl->fl_pid; 1567 #if BITS_PER_LONG == 32 1568 /* 1569 * Make sure we can represent the posix lock via 1570 * legacy 32bit flock. 1571 */ 1572 error = -EOVERFLOW; 1573 if (fl->fl_start > OFFT_OFFSET_MAX) 1574 goto out; 1575 if ((fl->fl_end != OFFSET_MAX) 1576 && (fl->fl_end > OFFT_OFFSET_MAX)) 1577 goto out; 1578 #endif 1579 flock.l_start = fl->fl_start; 1580 flock.l_len = fl->fl_end == OFFSET_MAX ? 0 : 1581 fl->fl_end - fl->fl_start + 1; 1582 flock.l_whence = 0; 1583 flock.l_type = fl->fl_type; 1584 } 1585 error = -EFAULT; 1586 if (!copy_to_user(l, &flock, sizeof(flock))) 1587 error = 0; 1588 out: 1589 return error; 1590 } 1591 1592 /* Apply the lock described by l to an open file descriptor. 1593 * This implements both the F_SETLK and F_SETLKW commands of fcntl(). 1594 */ 1595 int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd, 1596 struct flock __user *l) 1597 { 1598 struct file_lock *file_lock = locks_alloc_lock(); 1599 struct flock flock; 1600 struct inode *inode; 1601 int error; 1602 1603 if (file_lock == NULL) 1604 return -ENOLCK; 1605 1606 /* 1607 * This might block, so we do it before checking the inode. 1608 */ 1609 error = -EFAULT; 1610 if (copy_from_user(&flock, l, sizeof(flock))) 1611 goto out; 1612 1613 inode = filp->f_dentry->d_inode; 1614 1615 /* Don't allow mandatory locks on files that may be memory mapped 1616 * and shared. 
1617 */ 1618 if (IS_MANDLOCK(inode) && 1619 (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID && 1620 mapping_writably_mapped(filp->f_mapping)) { 1621 error = -EAGAIN; 1622 goto out; 1623 } 1624 1625 again: 1626 error = flock_to_posix_lock(filp, file_lock, &flock); 1627 if (error) 1628 goto out; 1629 if (cmd == F_SETLKW) { 1630 file_lock->fl_flags |= FL_SLEEP; 1631 } 1632 1633 error = -EBADF; 1634 switch (flock.l_type) { 1635 case F_RDLCK: 1636 if (!(filp->f_mode & FMODE_READ)) 1637 goto out; 1638 break; 1639 case F_WRLCK: 1640 if (!(filp->f_mode & FMODE_WRITE)) 1641 goto out; 1642 break; 1643 case F_UNLCK: 1644 break; 1645 default: 1646 error = -EINVAL; 1647 goto out; 1648 } 1649 1650 error = security_file_lock(filp, file_lock->fl_type); 1651 if (error) 1652 goto out; 1653 1654 if (filp->f_op && filp->f_op->lock != NULL) 1655 error = filp->f_op->lock(filp, cmd, file_lock); 1656 else { 1657 for (;;) { 1658 error = __posix_lock_file(inode, file_lock); 1659 if ((error != -EAGAIN) || (cmd == F_SETLK)) 1660 break; 1661 error = wait_event_interruptible(file_lock->fl_wait, 1662 !file_lock->fl_next); 1663 if (!error) 1664 continue; 1665 1666 locks_delete_block(file_lock); 1667 break; 1668 } 1669 } 1670 1671 /* 1672 * Attempt to detect a close/fcntl race and recover by 1673 * releasing the lock that was just acquired. 1674 */ 1675 if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) { 1676 flock.l_type = F_UNLCK; 1677 goto again; 1678 } 1679 1680 out: 1681 locks_free_lock(file_lock); 1682 return error; 1683 } 1684 1685 #if BITS_PER_LONG == 32 1686 /* Report the first existing lock that would conflict with l. 1687 * This implements the F_GETLK command of fcntl(). 1688 */ 1689 int fcntl_getlk64(struct file *filp, struct flock64 __user *l) 1690 { 1691 struct file_lock *fl, file_lock; 1692 struct flock64 flock; 1693 int error; 1694 1695 error = -EFAULT; 1696 if (copy_from_user(&flock, l, sizeof(flock))) 1697 goto out; 1698 error = -EINVAL; 1699 if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK)) 1700 goto out; 1701 1702 error = flock64_to_posix_lock(filp, &file_lock, &flock); 1703 if (error) 1704 goto out; 1705 1706 if (filp->f_op && filp->f_op->lock) { 1707 error = filp->f_op->lock(filp, F_GETLK, &file_lock); 1708 if (file_lock.fl_ops && file_lock.fl_ops->fl_release_private) 1709 file_lock.fl_ops->fl_release_private(&file_lock); 1710 if (error < 0) 1711 goto out; 1712 else 1713 fl = (file_lock.fl_type == F_UNLCK ? NULL : &file_lock); 1714 } else { 1715 fl = posix_test_lock(filp, &file_lock); 1716 } 1717 1718 flock.l_type = F_UNLCK; 1719 if (fl != NULL) { 1720 flock.l_pid = fl->fl_pid; 1721 flock.l_start = fl->fl_start; 1722 flock.l_len = fl->fl_end == OFFSET_MAX ? 0 : 1723 fl->fl_end - fl->fl_start + 1; 1724 flock.l_whence = 0; 1725 flock.l_type = fl->fl_type; 1726 } 1727 error = -EFAULT; 1728 if (!copy_to_user(l, &flock, sizeof(flock))) 1729 error = 0; 1730 1731 out: 1732 return error; 1733 } 1734 1735 /* Apply the lock described by l to an open file descriptor. 1736 * This implements both the F_SETLK and F_SETLKW commands of fcntl(). 1737 */ 1738 int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd, 1739 struct flock64 __user *l) 1740 { 1741 struct file_lock *file_lock = locks_alloc_lock(); 1742 struct flock64 flock; 1743 struct inode *inode; 1744 int error; 1745 1746 if (file_lock == NULL) 1747 return -ENOLCK; 1748 1749 /* 1750 * This might block, so we do it before checking the inode. 
1751 */ 1752 error = -EFAULT; 1753 if (copy_from_user(&flock, l, sizeof(flock))) 1754 goto out; 1755 1756 inode = filp->f_dentry->d_inode; 1757 1758 /* Don't allow mandatory locks on files that may be memory mapped 1759 * and shared. 1760 */ 1761 if (IS_MANDLOCK(inode) && 1762 (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID && 1763 mapping_writably_mapped(filp->f_mapping)) { 1764 error = -EAGAIN; 1765 goto out; 1766 } 1767 1768 again: 1769 error = flock64_to_posix_lock(filp, file_lock, &flock); 1770 if (error) 1771 goto out; 1772 if (cmd == F_SETLKW64) { 1773 file_lock->fl_flags |= FL_SLEEP; 1774 } 1775 1776 error = -EBADF; 1777 switch (flock.l_type) { 1778 case F_RDLCK: 1779 if (!(filp->f_mode & FMODE_READ)) 1780 goto out; 1781 break; 1782 case F_WRLCK: 1783 if (!(filp->f_mode & FMODE_WRITE)) 1784 goto out; 1785 break; 1786 case F_UNLCK: 1787 break; 1788 default: 1789 error = -EINVAL; 1790 goto out; 1791 } 1792 1793 error = security_file_lock(filp, file_lock->fl_type); 1794 if (error) 1795 goto out; 1796 1797 if (filp->f_op && filp->f_op->lock != NULL) 1798 error = filp->f_op->lock(filp, cmd, file_lock); 1799 else { 1800 for (;;) { 1801 error = __posix_lock_file(inode, file_lock); 1802 if ((error != -EAGAIN) || (cmd == F_SETLK64)) 1803 break; 1804 error = wait_event_interruptible(file_lock->fl_wait, 1805 !file_lock->fl_next); 1806 if (!error) 1807 continue; 1808 1809 locks_delete_block(file_lock); 1810 break; 1811 } 1812 } 1813 1814 /* 1815 * Attempt to detect a close/fcntl race and recover by 1816 * releasing the lock that was just acquired. 1817 */ 1818 if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) { 1819 flock.l_type = F_UNLCK; 1820 goto again; 1821 } 1822 1823 out: 1824 locks_free_lock(file_lock); 1825 return error; 1826 } 1827 #endif /* BITS_PER_LONG == 32 */ 1828 1829 /* 1830 * This function is called when the file is being removed 1831 * from the task's fd array. POSIX locks belonging to this task 1832 * are deleted at this time. 1833 */ 1834 void locks_remove_posix(struct file *filp, fl_owner_t owner) 1835 { 1836 struct file_lock lock, **before; 1837 1838 /* 1839 * If there are no locks held on this file, we don't need to call 1840 * posix_lock_file(). Another process could be setting a lock on this 1841 * file at the same time, but we wouldn't remove that lock anyway. 1842 */ 1843 before = &filp->f_dentry->d_inode->i_flock; 1844 if (*before == NULL) 1845 return; 1846 1847 lock.fl_type = F_UNLCK; 1848 lock.fl_flags = FL_POSIX; 1849 lock.fl_start = 0; 1850 lock.fl_end = OFFSET_MAX; 1851 lock.fl_owner = owner; 1852 lock.fl_pid = current->tgid; 1853 lock.fl_file = filp; 1854 lock.fl_ops = NULL; 1855 lock.fl_lmops = NULL; 1856 1857 if (filp->f_op && filp->f_op->lock != NULL) { 1858 filp->f_op->lock(filp, F_SETLK, &lock); 1859 goto out; 1860 } 1861 1862 /* Can't use posix_lock_file here; we need to remove it no matter 1863 * which pid we have. 1864 */ 1865 lock_kernel(); 1866 while (*before != NULL) { 1867 struct file_lock *fl = *before; 1868 if (IS_POSIX(fl) && posix_same_owner(fl, &lock)) { 1869 locks_delete_lock(before); 1870 continue; 1871 } 1872 before = &fl->fl_next; 1873 } 1874 unlock_kernel(); 1875 out: 1876 if (lock.fl_ops && lock.fl_ops->fl_release_private) 1877 lock.fl_ops->fl_release_private(&lock); 1878 } 1879 1880 EXPORT_SYMBOL(locks_remove_posix); 1881 1882 /* 1883 * This function is called on the last close of an open file. 
1884 */ 1885 void locks_remove_flock(struct file *filp) 1886 { 1887 struct inode * inode = filp->f_dentry->d_inode; 1888 struct file_lock *fl; 1889 struct file_lock **before; 1890 1891 if (!inode->i_flock) 1892 return; 1893 1894 if (filp->f_op && filp->f_op->flock) { 1895 struct file_lock fl = { 1896 .fl_pid = current->tgid, 1897 .fl_file = filp, 1898 .fl_flags = FL_FLOCK, 1899 .fl_type = F_UNLCK, 1900 .fl_end = OFFSET_MAX, 1901 }; 1902 filp->f_op->flock(filp, F_SETLKW, &fl); 1903 if (fl.fl_ops && fl.fl_ops->fl_release_private) 1904 fl.fl_ops->fl_release_private(&fl); 1905 } 1906 1907 lock_kernel(); 1908 before = &inode->i_flock; 1909 1910 while ((fl = *before) != NULL) { 1911 if (fl->fl_file == filp) { 1912 if (IS_FLOCK(fl)) { 1913 locks_delete_lock(before); 1914 continue; 1915 } 1916 if (IS_LEASE(fl)) { 1917 lease_modify(before, F_UNLCK); 1918 continue; 1919 } 1920 /* What? */ 1921 BUG(); 1922 } 1923 before = &fl->fl_next; 1924 } 1925 unlock_kernel(); 1926 } 1927 1928 /** 1929 * posix_block_lock - blocks waiting for a file lock 1930 * @blocker: the lock which is blocking 1931 * @waiter: the lock which conflicts and has to wait 1932 * 1933 * lockd needs to block waiting for locks. 1934 */ 1935 void 1936 posix_block_lock(struct file_lock *blocker, struct file_lock *waiter) 1937 { 1938 locks_insert_block(blocker, waiter); 1939 } 1940 1941 EXPORT_SYMBOL(posix_block_lock); 1942 1943 /** 1944 * posix_unblock_lock - stop waiting for a file lock 1945 * @filp: how the file was opened 1946 * @waiter: the lock which was waiting 1947 * 1948 * lockd needs to block waiting for locks. 1949 */ 1950 void 1951 posix_unblock_lock(struct file *filp, struct file_lock *waiter) 1952 { 1953 /* 1954 * A remote machine may cancel the lock request after it's been 1955 * granted locally. If that happens, we need to delete the lock. 1956 */ 1957 lock_kernel(); 1958 if (waiter->fl_next) { 1959 __locks_delete_block(waiter); 1960 unlock_kernel(); 1961 } else { 1962 unlock_kernel(); 1963 waiter->fl_type = F_UNLCK; 1964 posix_lock_file(filp, waiter); 1965 } 1966 } 1967 1968 EXPORT_SYMBOL(posix_unblock_lock); 1969 1970 static void lock_get_status(char* out, struct file_lock *fl, int id, char *pfx) 1971 { 1972 struct inode *inode = NULL; 1973 1974 if (fl->fl_file != NULL) 1975 inode = fl->fl_file->f_dentry->d_inode; 1976 1977 out += sprintf(out, "%d:%s ", id, pfx); 1978 if (IS_POSIX(fl)) { 1979 out += sprintf(out, "%6s %s ", 1980 (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ", 1981 (inode == NULL) ? "*NOINODE*" : 1982 (IS_MANDLOCK(inode) && 1983 (inode->i_mode & (S_IXGRP | S_ISGID)) == S_ISGID) ? 1984 "MANDATORY" : "ADVISORY "); 1985 } else if (IS_FLOCK(fl)) { 1986 if (fl->fl_type & LOCK_MAND) { 1987 out += sprintf(out, "FLOCK MSNFS "); 1988 } else { 1989 out += sprintf(out, "FLOCK ADVISORY "); 1990 } 1991 } else if (IS_LEASE(fl)) { 1992 out += sprintf(out, "LEASE "); 1993 if (fl->fl_type & F_INPROGRESS) 1994 out += sprintf(out, "BREAKING "); 1995 else if (fl->fl_file) 1996 out += sprintf(out, "ACTIVE "); 1997 else 1998 out += sprintf(out, "BREAKER "); 1999 } else { 2000 out += sprintf(out, "UNKNOWN UNKNOWN "); 2001 } 2002 if (fl->fl_type & LOCK_MAND) { 2003 out += sprintf(out, "%s ", 2004 (fl->fl_type & LOCK_READ) 2005 ? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ " 2006 : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE "); 2007 } else { 2008 out += sprintf(out, "%s ", 2009 (fl->fl_type & F_INPROGRESS) 2010 ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ " 2011 : (fl->fl_type & F_WRLCK) ? 
"WRITE" : "READ "); 2012 } 2013 if (inode) { 2014 #ifdef WE_CAN_BREAK_LSLK_NOW 2015 out += sprintf(out, "%d %s:%ld ", fl->fl_pid, 2016 inode->i_sb->s_id, inode->i_ino); 2017 #else 2018 /* userspace relies on this representation of dev_t ;-( */ 2019 out += sprintf(out, "%d %02x:%02x:%ld ", fl->fl_pid, 2020 MAJOR(inode->i_sb->s_dev), 2021 MINOR(inode->i_sb->s_dev), inode->i_ino); 2022 #endif 2023 } else { 2024 out += sprintf(out, "%d <none>:0 ", fl->fl_pid); 2025 } 2026 if (IS_POSIX(fl)) { 2027 if (fl->fl_end == OFFSET_MAX) 2028 out += sprintf(out, "%Ld EOF\n", fl->fl_start); 2029 else 2030 out += sprintf(out, "%Ld %Ld\n", fl->fl_start, 2031 fl->fl_end); 2032 } else { 2033 out += sprintf(out, "0 EOF\n"); 2034 } 2035 } 2036 2037 static void move_lock_status(char **p, off_t* pos, off_t offset) 2038 { 2039 int len; 2040 len = strlen(*p); 2041 if(*pos >= offset) { 2042 /* the complete line is valid */ 2043 *p += len; 2044 *pos += len; 2045 return; 2046 } 2047 if(*pos+len > offset) { 2048 /* use the second part of the line */ 2049 int i = offset-*pos; 2050 memmove(*p,*p+i,len-i); 2051 *p += len-i; 2052 *pos += len; 2053 return; 2054 } 2055 /* discard the complete line */ 2056 *pos += len; 2057 } 2058 2059 /** 2060 * get_locks_status - reports lock usage in /proc/locks 2061 * @buffer: address in userspace to write into 2062 * @start: ? 2063 * @offset: how far we are through the buffer 2064 * @length: how much to read 2065 */ 2066 2067 int get_locks_status(char *buffer, char **start, off_t offset, int length) 2068 { 2069 struct list_head *tmp; 2070 char *q = buffer; 2071 off_t pos = 0; 2072 int i = 0; 2073 2074 lock_kernel(); 2075 list_for_each(tmp, &file_lock_list) { 2076 struct list_head *btmp; 2077 struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link); 2078 lock_get_status(q, fl, ++i, ""); 2079 move_lock_status(&q, &pos, offset); 2080 2081 if(pos >= offset+length) 2082 goto done; 2083 2084 list_for_each(btmp, &fl->fl_block) { 2085 struct file_lock *bfl = list_entry(btmp, 2086 struct file_lock, fl_block); 2087 lock_get_status(q, bfl, i, " ->"); 2088 move_lock_status(&q, &pos, offset); 2089 2090 if(pos >= offset+length) 2091 goto done; 2092 } 2093 } 2094 done: 2095 unlock_kernel(); 2096 *start = buffer; 2097 if(q-buffer < length) 2098 return (q-buffer); 2099 return length; 2100 } 2101 2102 /** 2103 * lock_may_read - checks that the region is free of locks 2104 * @inode: the inode that is being read 2105 * @start: the first byte to read 2106 * @len: the number of bytes to read 2107 * 2108 * Emulates Windows locking requirements. Whole-file 2109 * mandatory locks (share modes) can prohibit a read and 2110 * byte-range POSIX locks can prohibit a read if they overlap. 2111 * 2112 * N.B. this function is only ever called 2113 * from knfsd and ownership of locks is never checked. 
2114 */ 2115 int lock_may_read(struct inode *inode, loff_t start, unsigned long len) 2116 { 2117 struct file_lock *fl; 2118 int result = 1; 2119 lock_kernel(); 2120 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { 2121 if (IS_POSIX(fl)) { 2122 if (fl->fl_type == F_RDLCK) 2123 continue; 2124 if ((fl->fl_end < start) || (fl->fl_start > (start + len))) 2125 continue; 2126 } else if (IS_FLOCK(fl)) { 2127 if (!(fl->fl_type & LOCK_MAND)) 2128 continue; 2129 if (fl->fl_type & LOCK_READ) 2130 continue; 2131 } else 2132 continue; 2133 result = 0; 2134 break; 2135 } 2136 unlock_kernel(); 2137 return result; 2138 } 2139 2140 EXPORT_SYMBOL(lock_may_read); 2141 2142 /** 2143 * lock_may_write - checks that the region is free of locks 2144 * @inode: the inode that is being written 2145 * @start: the first byte to write 2146 * @len: the number of bytes to write 2147 * 2148 * Emulates Windows locking requirements. Whole-file 2149 * mandatory locks (share modes) can prohibit a write and 2150 * byte-range POSIX locks can prohibit a write if they overlap. 2151 * 2152 * N.B. this function is only ever called 2153 * from knfsd and ownership of locks is never checked. 2154 */ 2155 int lock_may_write(struct inode *inode, loff_t start, unsigned long len) 2156 { 2157 struct file_lock *fl; 2158 int result = 1; 2159 lock_kernel(); 2160 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { 2161 if (IS_POSIX(fl)) { 2162 if ((fl->fl_end < start) || (fl->fl_start > (start + len))) 2163 continue; 2164 } else if (IS_FLOCK(fl)) { 2165 if (!(fl->fl_type & LOCK_MAND)) 2166 continue; 2167 if (fl->fl_type & LOCK_WRITE) 2168 continue; 2169 } else 2170 continue; 2171 result = 0; 2172 break; 2173 } 2174 unlock_kernel(); 2175 return result; 2176 } 2177 2178 EXPORT_SYMBOL(lock_may_write); 2179 2180 static inline void __steal_locks(struct file *file, fl_owner_t from) 2181 { 2182 struct inode *inode = file->f_dentry->d_inode; 2183 struct file_lock *fl = inode->i_flock; 2184 2185 while (fl) { 2186 if (fl->fl_file == file && fl->fl_owner == from) 2187 fl->fl_owner = current->files; 2188 fl = fl->fl_next; 2189 } 2190 } 2191 2192 /* When getting ready for executing a binary, we make sure that current 2193 * has a files_struct on its own. Before dropping the old files_struct, 2194 * we take over ownership of all locks for all file descriptors we own. 2195 * Note that we may accidentally steal a lock for a file that a sibling 2196 * has created since the unshare_files() call. 2197 */ 2198 void steal_locks(fl_owner_t from) 2199 { 2200 struct files_struct *files = current->files; 2201 int i, j; 2202 struct fdtable *fdt; 2203 2204 if (from == files) 2205 return; 2206 2207 lock_kernel(); 2208 j = 0; 2209 rcu_read_lock(); 2210 fdt = files_fdtable(files); 2211 for (;;) { 2212 unsigned long set; 2213 i = j * __NFDBITS; 2214 if (i >= fdt->max_fdset || i >= fdt->max_fds) 2215 break; 2216 set = fdt->open_fds->fds_bits[j++]; 2217 while (set) { 2218 if (set & 1) { 2219 struct file *file = fdt->fd[i]; 2220 if (file) 2221 __steal_locks(file, from); 2222 } 2223 i++; 2224 set >>= 1; 2225 } 2226 } 2227 rcu_read_unlock(); 2228 unlock_kernel(); 2229 } 2230 EXPORT_SYMBOL(steal_locks); 2231 2232 static int __init filelock_init(void) 2233 { 2234 filelock_cache = kmem_cache_create("file_lock_cache", 2235 sizeof(struct file_lock), 0, SLAB_PANIC, 2236 init_once, NULL); 2237 return 0; 2238 } 2239 2240 core_initcall(filelock_init); 2241