1 /* 2 * linux/fs/locks.c 3 * 4 * Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls. 5 * Doug Evans (dje@spiff.uucp), August 07, 1992 6 * 7 * Deadlock detection added. 8 * FIXME: one thing isn't handled yet: 9 * - mandatory locks (requires lots of changes elsewhere) 10 * Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994. 11 * 12 * Miscellaneous edits, and a total rewrite of posix_lock_file() code. 13 * Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994 14 * 15 * Converted file_lock_table to a linked list from an array, which eliminates 16 * the limits on how many active file locks are open. 17 * Chad Page (pageone@netcom.com), November 27, 1994 18 * 19 * Removed dependency on file descriptors. dup()'ed file descriptors now 20 * get the same locks as the original file descriptors, and a close() on 21 * any file descriptor removes ALL the locks on the file for the current 22 * process. Since locks still depend on the process id, locks are inherited 23 * after an exec() but not after a fork(). This agrees with POSIX, and both 24 * BSD and SVR4 practice. 25 * Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995 26 * 27 * Scrapped free list which is redundant now that we allocate locks 28 * dynamically with kmalloc()/kfree(). 29 * Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995 30 * 31 * Implemented two lock personalities - FL_FLOCK and FL_POSIX. 32 * 33 * FL_POSIX locks are created with calls to fcntl() and lockf() through the 34 * fcntl() system call. They have the semantics described above. 35 * 36 * FL_FLOCK locks are created with calls to flock(), through the flock() 37 * system call, which is new. Old C libraries implement flock() via fcntl() 38 * and will continue to use the old, broken implementation. 39 * 40 * FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated 41 * with a file pointer (filp). As a result they can be shared by a parent 42 * process and its children after a fork(). They are removed when the last 43 * file descriptor referring to the file pointer is closed (unless explicitly 44 * unlocked). 45 * 46 * FL_FLOCK locks never deadlock, an existing lock is always removed before 47 * upgrading from shared to exclusive (or vice versa). When this happens 48 * any processes blocked by the current lock are woken up and allowed to 49 * run before the new lock is applied. 50 * Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995 51 * 52 * Removed some race conditions in flock_lock_file(), marked other possible 53 * races. Just grep for FIXME to see them. 54 * Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996. 55 * 56 * Addressed Dmitry's concerns. Deadlock checking no longer recursive. 57 * Lock allocation changed to GFP_ATOMIC as we can't afford to sleep 58 * once we've checked for blocking and deadlocking. 59 * Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996. 60 * 61 * Initial implementation of mandatory locks. SunOS turned out to be 62 * a rotten model, so I implemented the "obvious" semantics. 63 * See 'Documentation/mandatory.txt' for details. 64 * Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996. 65 * 66 * Don't allow mandatory locks on mmap()'ed files. Added simple functions to 67 * check if a file has mandatory locks, used by mmap(), open() and creat() to 68 * see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference 69 * Manual, Section 2. 70 * Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996. 71 * 72 * Tidied up block list handling. Added '/proc/locks' interface. 73 * Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996. 74 * 75 * Fixed deadlock condition for pathological code that mixes calls to 76 * flock() and fcntl(). 77 * Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996. 78 * 79 * Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use 80 * for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to 81 * guarantee sensible behaviour in the case where file system modules might 82 * be compiled with different options than the kernel itself. 83 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996. 84 * 85 * Added a couple of missing wake_up() calls. Thanks to Thomas Meckel 86 * (Thomas.Meckel@mni.fh-giessen.de) for spotting this. 87 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996. 88 * 89 * Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK 90 * locks. Changed process synchronisation to avoid dereferencing locks that 91 * have already been freed. 92 * Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996. 93 * 94 * Made the block list a circular list to minimise searching in the list. 95 * Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996. 96 * 97 * Made mandatory locking a mount option. Default is not to allow mandatory 98 * locking. 99 * Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996. 100 * 101 * Some adaptations for NFS support. 102 * Olaf Kirch (okir@monad.swb.de), Dec 1996, 103 * 104 * Fixed /proc/locks interface so that we can't overrun the buffer we are handed. 105 * Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997. 106 * 107 * Use slab allocator instead of kmalloc/kfree. 108 * Use generic list implementation from <linux/list.h>. 109 * Sped up posix_locks_deadlock by only considering blocked locks. 110 * Matthew Wilcox <willy@debian.org>, March, 2000. 111 * 112 * Leases and LOCK_MAND 113 * Matthew Wilcox <willy@debian.org>, June, 2000. 114 * Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000. 115 */ 116 117 #include <linux/capability.h> 118 #include <linux/file.h> 119 #include <linux/fs.h> 120 #include <linux/init.h> 121 #include <linux/module.h> 122 #include <linux/security.h> 123 #include <linux/slab.h> 124 #include <linux/smp_lock.h> 125 #include <linux/syscalls.h> 126 #include <linux/time.h> 127 128 #include <asm/semaphore.h> 129 #include <asm/uaccess.h> 130 131 #define IS_POSIX(fl) (fl->fl_flags & FL_POSIX) 132 #define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK) 133 #define IS_LEASE(fl) (fl->fl_flags & FL_LEASE) 134 135 int leases_enable = 1; 136 int lease_break_time = 45; 137 138 #define for_each_lock(inode, lockp) \ 139 for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next) 140 141 LIST_HEAD(file_lock_list); 142 143 EXPORT_SYMBOL(file_lock_list); 144 145 static LIST_HEAD(blocked_list); 146 147 static kmem_cache_t *filelock_cache; 148 149 /* Allocate an empty lock structure. */ 150 static struct file_lock *locks_alloc_lock(void) 151 { 152 return kmem_cache_alloc(filelock_cache, SLAB_KERNEL); 153 } 154 155 /* Free a lock which is not in use. */ 156 static inline void locks_free_lock(struct file_lock *fl) 157 { 158 if (fl == NULL) { 159 BUG(); 160 return; 161 } 162 if (waitqueue_active(&fl->fl_wait)) 163 panic("Attempting to free lock with active wait queue"); 164 165 if (!list_empty(&fl->fl_block)) 166 panic("Attempting to free lock with active block list"); 167 168 if (!list_empty(&fl->fl_link)) 169 panic("Attempting to free lock on active lock list"); 170 171 if (fl->fl_ops) { 172 if (fl->fl_ops->fl_release_private) 173 fl->fl_ops->fl_release_private(fl); 174 fl->fl_ops = NULL; 175 } 176 177 if (fl->fl_lmops) { 178 if (fl->fl_lmops->fl_release_private) 179 fl->fl_lmops->fl_release_private(fl); 180 fl->fl_lmops = NULL; 181 } 182 183 kmem_cache_free(filelock_cache, fl); 184 } 185 186 void locks_init_lock(struct file_lock *fl) 187 { 188 INIT_LIST_HEAD(&fl->fl_link); 189 INIT_LIST_HEAD(&fl->fl_block); 190 init_waitqueue_head(&fl->fl_wait); 191 fl->fl_next = NULL; 192 fl->fl_fasync = NULL; 193 fl->fl_owner = NULL; 194 fl->fl_pid = 0; 195 fl->fl_file = NULL; 196 fl->fl_flags = 0; 197 fl->fl_type = 0; 198 fl->fl_start = fl->fl_end = 0; 199 fl->fl_ops = NULL; 200 fl->fl_lmops = NULL; 201 } 202 203 EXPORT_SYMBOL(locks_init_lock); 204 205 /* 206 * Initialises the fields of the file lock which are invariant for 207 * free file_locks. 208 */ 209 static void init_once(void *foo, kmem_cache_t *cache, unsigned long flags) 210 { 211 struct file_lock *lock = (struct file_lock *) foo; 212 213 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) != 214 SLAB_CTOR_CONSTRUCTOR) 215 return; 216 217 locks_init_lock(lock); 218 } 219 220 /* 221 * Initialize a new lock from an existing file_lock structure. 222 */ 223 void locks_copy_lock(struct file_lock *new, struct file_lock *fl) 224 { 225 new->fl_owner = fl->fl_owner; 226 new->fl_pid = fl->fl_pid; 227 new->fl_file = fl->fl_file; 228 new->fl_flags = fl->fl_flags; 229 new->fl_type = fl->fl_type; 230 new->fl_start = fl->fl_start; 231 new->fl_end = fl->fl_end; 232 new->fl_ops = fl->fl_ops; 233 new->fl_lmops = fl->fl_lmops; 234 if (fl->fl_ops && fl->fl_ops->fl_copy_lock) 235 fl->fl_ops->fl_copy_lock(new, fl); 236 if (fl->fl_lmops && fl->fl_lmops->fl_copy_lock) 237 fl->fl_lmops->fl_copy_lock(new, fl); 238 } 239 240 EXPORT_SYMBOL(locks_copy_lock); 241 242 static inline int flock_translate_cmd(int cmd) { 243 if (cmd & LOCK_MAND) 244 return cmd & (LOCK_MAND | LOCK_RW); 245 switch (cmd) { 246 case LOCK_SH: 247 return F_RDLCK; 248 case LOCK_EX: 249 return F_WRLCK; 250 case LOCK_UN: 251 return F_UNLCK; 252 } 253 return -EINVAL; 254 } 255 256 /* Fill in a file_lock structure with an appropriate FLOCK lock. */ 257 static int flock_make_lock(struct file *filp, struct file_lock **lock, 258 unsigned int cmd) 259 { 260 struct file_lock *fl; 261 int type = flock_translate_cmd(cmd); 262 if (type < 0) 263 return type; 264 265 fl = locks_alloc_lock(); 266 if (fl == NULL) 267 return -ENOMEM; 268 269 fl->fl_file = filp; 270 fl->fl_pid = current->tgid; 271 fl->fl_flags = FL_FLOCK; 272 fl->fl_type = type; 273 fl->fl_end = OFFSET_MAX; 274 275 *lock = fl; 276 return 0; 277 } 278 279 static int assign_type(struct file_lock *fl, int type) 280 { 281 switch (type) { 282 case F_RDLCK: 283 case F_WRLCK: 284 case F_UNLCK: 285 fl->fl_type = type; 286 break; 287 default: 288 return -EINVAL; 289 } 290 return 0; 291 } 292 293 /* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX 294 * style lock. 295 */ 296 static int flock_to_posix_lock(struct file *filp, struct file_lock *fl, 297 struct flock *l) 298 { 299 off_t start, end; 300 301 switch (l->l_whence) { 302 case 0: /*SEEK_SET*/ 303 start = 0; 304 break; 305 case 1: /*SEEK_CUR*/ 306 start = filp->f_pos; 307 break; 308 case 2: /*SEEK_END*/ 309 start = i_size_read(filp->f_dentry->d_inode); 310 break; 311 default: 312 return -EINVAL; 313 } 314 315 /* POSIX-1996 leaves the case l->l_len < 0 undefined; 316 POSIX-2001 defines it. */ 317 start += l->l_start; 318 end = start + l->l_len - 1; 319 if (l->l_len < 0) { 320 end = start - 1; 321 start += l->l_len; 322 } 323 324 if (start < 0) 325 return -EINVAL; 326 if (l->l_len > 0 && end < 0) 327 return -EOVERFLOW; 328 329 fl->fl_start = start; /* we record the absolute position */ 330 fl->fl_end = end; 331 if (l->l_len == 0) 332 fl->fl_end = OFFSET_MAX; 333 334 fl->fl_owner = current->files; 335 fl->fl_pid = current->tgid; 336 fl->fl_file = filp; 337 fl->fl_flags = FL_POSIX; 338 fl->fl_ops = NULL; 339 fl->fl_lmops = NULL; 340 341 return assign_type(fl, l->l_type); 342 } 343 344 #if BITS_PER_LONG == 32 345 static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl, 346 struct flock64 *l) 347 { 348 loff_t start; 349 350 switch (l->l_whence) { 351 case 0: /*SEEK_SET*/ 352 start = 0; 353 break; 354 case 1: /*SEEK_CUR*/ 355 start = filp->f_pos; 356 break; 357 case 2: /*SEEK_END*/ 358 start = i_size_read(filp->f_dentry->d_inode); 359 break; 360 default: 361 return -EINVAL; 362 } 363 364 if (((start += l->l_start) < 0) || (l->l_len < 0)) 365 return -EINVAL; 366 fl->fl_end = start + l->l_len - 1; 367 if (l->l_len > 0 && fl->fl_end < 0) 368 return -EOVERFLOW; 369 fl->fl_start = start; /* we record the absolute position */ 370 if (l->l_len == 0) 371 fl->fl_end = OFFSET_MAX; 372 373 fl->fl_owner = current->files; 374 fl->fl_pid = current->tgid; 375 fl->fl_file = filp; 376 fl->fl_flags = FL_POSIX; 377 fl->fl_ops = NULL; 378 fl->fl_lmops = NULL; 379 380 switch (l->l_type) { 381 case F_RDLCK: 382 case F_WRLCK: 383 case F_UNLCK: 384 fl->fl_type = l->l_type; 385 break; 386 default: 387 return -EINVAL; 388 } 389 390 return (0); 391 } 392 #endif 393 394 /* default lease lock manager operations */ 395 static void lease_break_callback(struct file_lock *fl) 396 { 397 kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG); 398 } 399 400 static void lease_release_private_callback(struct file_lock *fl) 401 { 402 if (!fl->fl_file) 403 return; 404 405 f_delown(fl->fl_file); 406 fl->fl_file->f_owner.signum = 0; 407 } 408 409 static int lease_mylease_callback(struct file_lock *fl, struct file_lock *try) 410 { 411 return fl->fl_file == try->fl_file; 412 } 413 414 static struct lock_manager_operations lease_manager_ops = { 415 .fl_break = lease_break_callback, 416 .fl_release_private = lease_release_private_callback, 417 .fl_mylease = lease_mylease_callback, 418 .fl_change = lease_modify, 419 }; 420 421 /* 422 * Initialize a lease, use the default lock manager operations 423 */ 424 static int lease_init(struct file *filp, int type, struct file_lock *fl) 425 { 426 fl->fl_owner = current->files; 427 fl->fl_pid = current->tgid; 428 429 fl->fl_file = filp; 430 fl->fl_flags = FL_LEASE; 431 if (assign_type(fl, type) != 0) { 432 locks_free_lock(fl); 433 return -EINVAL; 434 } 435 fl->fl_start = 0; 436 fl->fl_end = OFFSET_MAX; 437 fl->fl_ops = NULL; 438 fl->fl_lmops = &lease_manager_ops; 439 return 0; 440 } 441 442 /* Allocate a file_lock initialised to this type of lease */ 443 static int lease_alloc(struct file *filp, int type, struct file_lock **flp) 444 { 445 struct file_lock *fl = locks_alloc_lock(); 446 int error; 447 448 if (fl == NULL) 449 return -ENOMEM; 450 451 error = lease_init(filp, type, fl); 452 if (error) 453 return error; 454 *flp = fl; 455 return 0; 456 } 457 458 /* Check if two locks overlap each other. 459 */ 460 static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2) 461 { 462 return ((fl1->fl_end >= fl2->fl_start) && 463 (fl2->fl_end >= fl1->fl_start)); 464 } 465 466 /* 467 * Check whether two locks have the same owner. 468 */ 469 static inline int 470 posix_same_owner(struct file_lock *fl1, struct file_lock *fl2) 471 { 472 if (fl1->fl_lmops && fl1->fl_lmops->fl_compare_owner) 473 return fl2->fl_lmops == fl1->fl_lmops && 474 fl1->fl_lmops->fl_compare_owner(fl1, fl2); 475 return fl1->fl_owner == fl2->fl_owner; 476 } 477 478 /* Remove waiter from blocker's block list. 479 * When blocker ends up pointing to itself then the list is empty. 480 */ 481 static inline void __locks_delete_block(struct file_lock *waiter) 482 { 483 list_del_init(&waiter->fl_block); 484 list_del_init(&waiter->fl_link); 485 waiter->fl_next = NULL; 486 } 487 488 /* 489 */ 490 static void locks_delete_block(struct file_lock *waiter) 491 { 492 lock_kernel(); 493 __locks_delete_block(waiter); 494 unlock_kernel(); 495 } 496 497 /* Insert waiter into blocker's block list. 498 * We use a circular list so that processes can be easily woken up in 499 * the order they blocked. The documentation doesn't require this but 500 * it seems like the reasonable thing to do. 501 */ 502 static void locks_insert_block(struct file_lock *blocker, 503 struct file_lock *waiter) 504 { 505 if (!list_empty(&waiter->fl_block)) { 506 printk(KERN_ERR "locks_insert_block: removing duplicated lock " 507 "(pid=%d %Ld-%Ld type=%d)\n", waiter->fl_pid, 508 waiter->fl_start, waiter->fl_end, waiter->fl_type); 509 __locks_delete_block(waiter); 510 } 511 list_add_tail(&waiter->fl_block, &blocker->fl_block); 512 waiter->fl_next = blocker; 513 if (IS_POSIX(blocker)) 514 list_add(&waiter->fl_link, &blocked_list); 515 } 516 517 /* Wake up processes blocked waiting for blocker. 518 * If told to wait then schedule the processes until the block list 519 * is empty, otherwise empty the block list ourselves. 520 */ 521 static void locks_wake_up_blocks(struct file_lock *blocker) 522 { 523 while (!list_empty(&blocker->fl_block)) { 524 struct file_lock *waiter = list_entry(blocker->fl_block.next, 525 struct file_lock, fl_block); 526 __locks_delete_block(waiter); 527 if (waiter->fl_lmops && waiter->fl_lmops->fl_notify) 528 waiter->fl_lmops->fl_notify(waiter); 529 else 530 wake_up(&waiter->fl_wait); 531 } 532 } 533 534 /* Insert file lock fl into an inode's lock list at the position indicated 535 * by pos. At the same time add the lock to the global file lock list. 536 */ 537 static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl) 538 { 539 list_add(&fl->fl_link, &file_lock_list); 540 541 /* insert into file's list */ 542 fl->fl_next = *pos; 543 *pos = fl; 544 545 if (fl->fl_ops && fl->fl_ops->fl_insert) 546 fl->fl_ops->fl_insert(fl); 547 } 548 549 /* 550 * Delete a lock and then free it. 551 * Wake up processes that are blocked waiting for this lock, 552 * notify the FS that the lock has been cleared and 553 * finally free the lock. 554 */ 555 static void locks_delete_lock(struct file_lock **thisfl_p) 556 { 557 struct file_lock *fl = *thisfl_p; 558 559 *thisfl_p = fl->fl_next; 560 fl->fl_next = NULL; 561 list_del_init(&fl->fl_link); 562 563 fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync); 564 if (fl->fl_fasync != NULL) { 565 printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync); 566 fl->fl_fasync = NULL; 567 } 568 569 if (fl->fl_ops && fl->fl_ops->fl_remove) 570 fl->fl_ops->fl_remove(fl); 571 572 locks_wake_up_blocks(fl); 573 locks_free_lock(fl); 574 } 575 576 /* Determine if lock sys_fl blocks lock caller_fl. Common functionality 577 * checks for shared/exclusive status of overlapping locks. 578 */ 579 static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl) 580 { 581 if (sys_fl->fl_type == F_WRLCK) 582 return 1; 583 if (caller_fl->fl_type == F_WRLCK) 584 return 1; 585 return 0; 586 } 587 588 /* Determine if lock sys_fl blocks lock caller_fl. POSIX specific 589 * checking before calling the locks_conflict(). 590 */ 591 static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl) 592 { 593 /* POSIX locks owned by the same process do not conflict with 594 * each other. 595 */ 596 if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl)) 597 return (0); 598 599 /* Check whether they overlap */ 600 if (!locks_overlap(caller_fl, sys_fl)) 601 return 0; 602 603 return (locks_conflict(caller_fl, sys_fl)); 604 } 605 606 /* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific 607 * checking before calling the locks_conflict(). 608 */ 609 static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl) 610 { 611 /* FLOCK locks referring to the same filp do not conflict with 612 * each other. 613 */ 614 if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file)) 615 return (0); 616 if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND)) 617 return 0; 618 619 return (locks_conflict(caller_fl, sys_fl)); 620 } 621 622 static int interruptible_sleep_on_locked(wait_queue_head_t *fl_wait, int timeout) 623 { 624 int result = 0; 625 DECLARE_WAITQUEUE(wait, current); 626 627 __set_current_state(TASK_INTERRUPTIBLE); 628 add_wait_queue(fl_wait, &wait); 629 if (timeout == 0) 630 schedule(); 631 else 632 result = schedule_timeout(timeout); 633 if (signal_pending(current)) 634 result = -ERESTARTSYS; 635 remove_wait_queue(fl_wait, &wait); 636 __set_current_state(TASK_RUNNING); 637 return result; 638 } 639 640 static int locks_block_on_timeout(struct file_lock *blocker, struct file_lock *waiter, int time) 641 { 642 int result; 643 locks_insert_block(blocker, waiter); 644 result = interruptible_sleep_on_locked(&waiter->fl_wait, time); 645 __locks_delete_block(waiter); 646 return result; 647 } 648 649 struct file_lock * 650 posix_test_lock(struct file *filp, struct file_lock *fl) 651 { 652 struct file_lock *cfl; 653 654 lock_kernel(); 655 for (cfl = filp->f_dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) { 656 if (!IS_POSIX(cfl)) 657 continue; 658 if (posix_locks_conflict(cfl, fl)) 659 break; 660 } 661 unlock_kernel(); 662 663 return (cfl); 664 } 665 666 EXPORT_SYMBOL(posix_test_lock); 667 668 /* This function tests for deadlock condition before putting a process to 669 * sleep. The detection scheme is no longer recursive. Recursive was neat, 670 * but dangerous - we risked stack corruption if the lock data was bad, or 671 * if the recursion was too deep for any other reason. 672 * 673 * We rely on the fact that a task can only be on one lock's wait queue 674 * at a time. When we find blocked_task on a wait queue we can re-search 675 * with blocked_task equal to that queue's owner, until either blocked_task 676 * isn't found, or blocked_task is found on a queue owned by my_task. 677 * 678 * Note: the above assumption may not be true when handling lock requests 679 * from a broken NFS client. But broken NFS clients have a lot more to 680 * worry about than proper deadlock detection anyway... --okir 681 */ 682 int posix_locks_deadlock(struct file_lock *caller_fl, 683 struct file_lock *block_fl) 684 { 685 struct list_head *tmp; 686 687 next_task: 688 if (posix_same_owner(caller_fl, block_fl)) 689 return 1; 690 list_for_each(tmp, &blocked_list) { 691 struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link); 692 if (posix_same_owner(fl, block_fl)) { 693 fl = fl->fl_next; 694 block_fl = fl; 695 goto next_task; 696 } 697 } 698 return 0; 699 } 700 701 EXPORT_SYMBOL(posix_locks_deadlock); 702 703 /* Try to create a FLOCK lock on filp. We always insert new FLOCK locks 704 * at the head of the list, but that's secret knowledge known only to 705 * flock_lock_file and posix_lock_file. 706 */ 707 static int flock_lock_file(struct file *filp, struct file_lock *new_fl) 708 { 709 struct file_lock **before; 710 struct inode * inode = filp->f_dentry->d_inode; 711 int error = 0; 712 int found = 0; 713 714 lock_kernel(); 715 for_each_lock(inode, before) { 716 struct file_lock *fl = *before; 717 if (IS_POSIX(fl)) 718 break; 719 if (IS_LEASE(fl)) 720 continue; 721 if (filp != fl->fl_file) 722 continue; 723 if (new_fl->fl_type == fl->fl_type) 724 goto out; 725 found = 1; 726 locks_delete_lock(before); 727 break; 728 } 729 unlock_kernel(); 730 731 if (new_fl->fl_type == F_UNLCK) 732 return 0; 733 734 /* 735 * If a higher-priority process was blocked on the old file lock, 736 * give it the opportunity to lock the file. 737 */ 738 if (found) 739 cond_resched(); 740 741 lock_kernel(); 742 for_each_lock(inode, before) { 743 struct file_lock *fl = *before; 744 if (IS_POSIX(fl)) 745 break; 746 if (IS_LEASE(fl)) 747 continue; 748 if (!flock_locks_conflict(new_fl, fl)) 749 continue; 750 error = -EAGAIN; 751 if (new_fl->fl_flags & FL_SLEEP) { 752 locks_insert_block(fl, new_fl); 753 } 754 goto out; 755 } 756 locks_insert_lock(&inode->i_flock, new_fl); 757 error = 0; 758 759 out: 760 unlock_kernel(); 761 return error; 762 } 763 764 EXPORT_SYMBOL(posix_lock_file); 765 766 static int __posix_lock_file(struct inode *inode, struct file_lock *request) 767 { 768 struct file_lock *fl; 769 struct file_lock *new_fl, *new_fl2; 770 struct file_lock *left = NULL; 771 struct file_lock *right = NULL; 772 struct file_lock **before; 773 int error, added = 0; 774 775 /* 776 * We may need two file_lock structures for this operation, 777 * so we get them in advance to avoid races. 778 */ 779 new_fl = locks_alloc_lock(); 780 new_fl2 = locks_alloc_lock(); 781 782 lock_kernel(); 783 if (request->fl_type != F_UNLCK) { 784 for_each_lock(inode, before) { 785 struct file_lock *fl = *before; 786 if (!IS_POSIX(fl)) 787 continue; 788 if (!posix_locks_conflict(request, fl)) 789 continue; 790 error = -EAGAIN; 791 if (!(request->fl_flags & FL_SLEEP)) 792 goto out; 793 error = -EDEADLK; 794 if (posix_locks_deadlock(request, fl)) 795 goto out; 796 error = -EAGAIN; 797 locks_insert_block(fl, request); 798 goto out; 799 } 800 } 801 802 /* If we're just looking for a conflict, we're done. */ 803 error = 0; 804 if (request->fl_flags & FL_ACCESS) 805 goto out; 806 807 error = -ENOLCK; /* "no luck" */ 808 if (!(new_fl && new_fl2)) 809 goto out; 810 811 /* 812 * We've allocated the new locks in advance, so there are no 813 * errors possible (and no blocking operations) from here on. 814 * 815 * Find the first old lock with the same owner as the new lock. 816 */ 817 818 before = &inode->i_flock; 819 820 /* First skip locks owned by other processes. */ 821 while ((fl = *before) && (!IS_POSIX(fl) || 822 !posix_same_owner(request, fl))) { 823 before = &fl->fl_next; 824 } 825 826 /* Process locks with this owner. */ 827 while ((fl = *before) && posix_same_owner(request, fl)) { 828 /* Detect adjacent or overlapping regions (if same lock type) 829 */ 830 if (request->fl_type == fl->fl_type) { 831 if (fl->fl_end < request->fl_start - 1) 832 goto next_lock; 833 /* If the next lock in the list has entirely bigger 834 * addresses than the new one, insert the lock here. 835 */ 836 if (fl->fl_start > request->fl_end + 1) 837 break; 838 839 /* If we come here, the new and old lock are of the 840 * same type and adjacent or overlapping. Make one 841 * lock yielding from the lower start address of both 842 * locks to the higher end address. 843 */ 844 if (fl->fl_start > request->fl_start) 845 fl->fl_start = request->fl_start; 846 else 847 request->fl_start = fl->fl_start; 848 if (fl->fl_end < request->fl_end) 849 fl->fl_end = request->fl_end; 850 else 851 request->fl_end = fl->fl_end; 852 if (added) { 853 locks_delete_lock(before); 854 continue; 855 } 856 request = fl; 857 added = 1; 858 } 859 else { 860 /* Processing for different lock types is a bit 861 * more complex. 862 */ 863 if (fl->fl_end < request->fl_start) 864 goto next_lock; 865 if (fl->fl_start > request->fl_end) 866 break; 867 if (request->fl_type == F_UNLCK) 868 added = 1; 869 if (fl->fl_start < request->fl_start) 870 left = fl; 871 /* If the next lock in the list has a higher end 872 * address than the new one, insert the new one here. 873 */ 874 if (fl->fl_end > request->fl_end) { 875 right = fl; 876 break; 877 } 878 if (fl->fl_start >= request->fl_start) { 879 /* The new lock completely replaces an old 880 * one (This may happen several times). 881 */ 882 if (added) { 883 locks_delete_lock(before); 884 continue; 885 } 886 /* Replace the old lock with the new one. 887 * Wake up anybody waiting for the old one, 888 * as the change in lock type might satisfy 889 * their needs. 890 */ 891 locks_wake_up_blocks(fl); 892 fl->fl_start = request->fl_start; 893 fl->fl_end = request->fl_end; 894 fl->fl_type = request->fl_type; 895 fl->fl_u = request->fl_u; 896 request = fl; 897 added = 1; 898 } 899 } 900 /* Go on to next lock. 901 */ 902 next_lock: 903 before = &fl->fl_next; 904 } 905 906 error = 0; 907 if (!added) { 908 if (request->fl_type == F_UNLCK) 909 goto out; 910 locks_copy_lock(new_fl, request); 911 locks_insert_lock(before, new_fl); 912 new_fl = NULL; 913 } 914 if (right) { 915 if (left == right) { 916 /* The new lock breaks the old one in two pieces, 917 * so we have to use the second new lock. 918 */ 919 left = new_fl2; 920 new_fl2 = NULL; 921 locks_copy_lock(left, right); 922 locks_insert_lock(before, left); 923 } 924 right->fl_start = request->fl_end + 1; 925 locks_wake_up_blocks(right); 926 } 927 if (left) { 928 left->fl_end = request->fl_start - 1; 929 locks_wake_up_blocks(left); 930 } 931 out: 932 unlock_kernel(); 933 /* 934 * Free any unused locks. 935 */ 936 if (new_fl) 937 locks_free_lock(new_fl); 938 if (new_fl2) 939 locks_free_lock(new_fl2); 940 return error; 941 } 942 943 /** 944 * posix_lock_file - Apply a POSIX-style lock to a file 945 * @filp: The file to apply the lock to 946 * @fl: The lock to be applied 947 * 948 * Add a POSIX style lock to a file. 949 * We merge adjacent & overlapping locks whenever possible. 950 * POSIX locks are sorted by owner task, then by starting address 951 */ 952 int posix_lock_file(struct file *filp, struct file_lock *fl) 953 { 954 return __posix_lock_file(filp->f_dentry->d_inode, fl); 955 } 956 957 /** 958 * posix_lock_file_wait - Apply a POSIX-style lock to a file 959 * @filp: The file to apply the lock to 960 * @fl: The lock to be applied 961 * 962 * Add a POSIX style lock to a file. 963 * We merge adjacent & overlapping locks whenever possible. 964 * POSIX locks are sorted by owner task, then by starting address 965 */ 966 int posix_lock_file_wait(struct file *filp, struct file_lock *fl) 967 { 968 int error; 969 might_sleep (); 970 for (;;) { 971 error = __posix_lock_file(filp->f_dentry->d_inode, fl); 972 if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP)) 973 break; 974 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next); 975 if (!error) 976 continue; 977 978 locks_delete_block(fl); 979 break; 980 } 981 return error; 982 } 983 EXPORT_SYMBOL(posix_lock_file_wait); 984 985 /** 986 * locks_mandatory_locked - Check for an active lock 987 * @inode: the file to check 988 * 989 * Searches the inode's list of locks to find any POSIX locks which conflict. 990 * This function is called from locks_verify_locked() only. 991 */ 992 int locks_mandatory_locked(struct inode *inode) 993 { 994 fl_owner_t owner = current->files; 995 struct file_lock *fl; 996 997 /* 998 * Search the lock list for this inode for any POSIX locks. 999 */ 1000 lock_kernel(); 1001 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { 1002 if (!IS_POSIX(fl)) 1003 continue; 1004 if (fl->fl_owner != owner) 1005 break; 1006 } 1007 unlock_kernel(); 1008 return fl ? -EAGAIN : 0; 1009 } 1010 1011 /** 1012 * locks_mandatory_area - Check for a conflicting lock 1013 * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ 1014 * for shared 1015 * @inode: the file to check 1016 * @filp: how the file was opened (if it was) 1017 * @offset: start of area to check 1018 * @count: length of area to check 1019 * 1020 * Searches the inode's list of locks to find any POSIX locks which conflict. 1021 * This function is called from rw_verify_area() and 1022 * locks_verify_truncate(). 1023 */ 1024 int locks_mandatory_area(int read_write, struct inode *inode, 1025 struct file *filp, loff_t offset, 1026 size_t count) 1027 { 1028 struct file_lock fl; 1029 int error; 1030 1031 locks_init_lock(&fl); 1032 fl.fl_owner = current->files; 1033 fl.fl_pid = current->tgid; 1034 fl.fl_file = filp; 1035 fl.fl_flags = FL_POSIX | FL_ACCESS; 1036 if (filp && !(filp->f_flags & O_NONBLOCK)) 1037 fl.fl_flags |= FL_SLEEP; 1038 fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK; 1039 fl.fl_start = offset; 1040 fl.fl_end = offset + count - 1; 1041 1042 for (;;) { 1043 error = __posix_lock_file(inode, &fl); 1044 if (error != -EAGAIN) 1045 break; 1046 if (!(fl.fl_flags & FL_SLEEP)) 1047 break; 1048 error = wait_event_interruptible(fl.fl_wait, !fl.fl_next); 1049 if (!error) { 1050 /* 1051 * If we've been sleeping someone might have 1052 * changed the permissions behind our back. 1053 */ 1054 if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID) 1055 continue; 1056 } 1057 1058 locks_delete_block(&fl); 1059 break; 1060 } 1061 1062 return error; 1063 } 1064 1065 EXPORT_SYMBOL(locks_mandatory_area); 1066 1067 /* We already had a lease on this file; just change its type */ 1068 int lease_modify(struct file_lock **before, int arg) 1069 { 1070 struct file_lock *fl = *before; 1071 int error = assign_type(fl, arg); 1072 1073 if (error) 1074 return error; 1075 locks_wake_up_blocks(fl); 1076 if (arg == F_UNLCK) 1077 locks_delete_lock(before); 1078 return 0; 1079 } 1080 1081 EXPORT_SYMBOL(lease_modify); 1082 1083 static void time_out_leases(struct inode *inode) 1084 { 1085 struct file_lock **before; 1086 struct file_lock *fl; 1087 1088 before = &inode->i_flock; 1089 while ((fl = *before) && IS_LEASE(fl) && (fl->fl_type & F_INPROGRESS)) { 1090 if ((fl->fl_break_time == 0) 1091 || time_before(jiffies, fl->fl_break_time)) { 1092 before = &fl->fl_next; 1093 continue; 1094 } 1095 printk(KERN_INFO "lease broken - owner pid = %d\n", fl->fl_pid); 1096 lease_modify(before, fl->fl_type & ~F_INPROGRESS); 1097 if (fl == *before) /* lease_modify may have freed fl */ 1098 before = &fl->fl_next; 1099 } 1100 } 1101 1102 /** 1103 * __break_lease - revoke all outstanding leases on file 1104 * @inode: the inode of the file to return 1105 * @mode: the open mode (read or write) 1106 * 1107 * break_lease (inlined for speed) has checked there already 1108 * is a lease on this file. Leases are broken on a call to open() 1109 * or truncate(). This function can sleep unless you 1110 * specified %O_NONBLOCK to your open(). 1111 */ 1112 int __break_lease(struct inode *inode, unsigned int mode) 1113 { 1114 int error = 0, future; 1115 struct file_lock *new_fl, *flock; 1116 struct file_lock *fl; 1117 int alloc_err; 1118 unsigned long break_time; 1119 int i_have_this_lease = 0; 1120 1121 alloc_err = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK, 1122 &new_fl); 1123 1124 lock_kernel(); 1125 1126 time_out_leases(inode); 1127 1128 flock = inode->i_flock; 1129 if ((flock == NULL) || !IS_LEASE(flock)) 1130 goto out; 1131 1132 for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) 1133 if (fl->fl_owner == current->files) 1134 i_have_this_lease = 1; 1135 1136 if (mode & FMODE_WRITE) { 1137 /* If we want write access, we have to revoke any lease. */ 1138 future = F_UNLCK | F_INPROGRESS; 1139 } else if (flock->fl_type & F_INPROGRESS) { 1140 /* If the lease is already being broken, we just leave it */ 1141 future = flock->fl_type; 1142 } else if (flock->fl_type & F_WRLCK) { 1143 /* Downgrade the exclusive lease to a read-only lease. */ 1144 future = F_RDLCK | F_INPROGRESS; 1145 } else { 1146 /* the existing lease was read-only, so we can read too. */ 1147 goto out; 1148 } 1149 1150 if (alloc_err && !i_have_this_lease && ((mode & O_NONBLOCK) == 0)) { 1151 error = alloc_err; 1152 goto out; 1153 } 1154 1155 break_time = 0; 1156 if (lease_break_time > 0) { 1157 break_time = jiffies + lease_break_time * HZ; 1158 if (break_time == 0) 1159 break_time++; /* so that 0 means no break time */ 1160 } 1161 1162 for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) { 1163 if (fl->fl_type != future) { 1164 fl->fl_type = future; 1165 fl->fl_break_time = break_time; 1166 /* lease must have lmops break callback */ 1167 fl->fl_lmops->fl_break(fl); 1168 } 1169 } 1170 1171 if (i_have_this_lease || (mode & O_NONBLOCK)) { 1172 error = -EWOULDBLOCK; 1173 goto out; 1174 } 1175 1176 restart: 1177 break_time = flock->fl_break_time; 1178 if (break_time != 0) { 1179 break_time -= jiffies; 1180 if (break_time == 0) 1181 break_time++; 1182 } 1183 error = locks_block_on_timeout(flock, new_fl, break_time); 1184 if (error >= 0) { 1185 if (error == 0) 1186 time_out_leases(inode); 1187 /* Wait for the next lease that has not been broken yet */ 1188 for (flock = inode->i_flock; flock && IS_LEASE(flock); 1189 flock = flock->fl_next) { 1190 if (flock->fl_type & F_INPROGRESS) 1191 goto restart; 1192 } 1193 error = 0; 1194 } 1195 1196 out: 1197 unlock_kernel(); 1198 if (!alloc_err) 1199 locks_free_lock(new_fl); 1200 return error; 1201 } 1202 1203 EXPORT_SYMBOL(__break_lease); 1204 1205 /** 1206 * lease_get_mtime 1207 * @inode: the inode 1208 * @time: pointer to a timespec which will contain the last modified time 1209 * 1210 * This is to force NFS clients to flush their caches for files with 1211 * exclusive leases. The justification is that if someone has an 1212 * exclusive lease, then they could be modifiying it. 1213 */ 1214 void lease_get_mtime(struct inode *inode, struct timespec *time) 1215 { 1216 struct file_lock *flock = inode->i_flock; 1217 if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK)) 1218 *time = current_fs_time(inode->i_sb); 1219 else 1220 *time = inode->i_mtime; 1221 } 1222 1223 EXPORT_SYMBOL(lease_get_mtime); 1224 1225 /** 1226 * fcntl_getlease - Enquire what lease is currently active 1227 * @filp: the file 1228 * 1229 * The value returned by this function will be one of 1230 * (if no lease break is pending): 1231 * 1232 * %F_RDLCK to indicate a shared lease is held. 1233 * 1234 * %F_WRLCK to indicate an exclusive lease is held. 1235 * 1236 * %F_UNLCK to indicate no lease is held. 1237 * 1238 * (if a lease break is pending): 1239 * 1240 * %F_RDLCK to indicate an exclusive lease needs to be 1241 * changed to a shared lease (or removed). 1242 * 1243 * %F_UNLCK to indicate the lease needs to be removed. 1244 * 1245 * XXX: sfr & willy disagree over whether F_INPROGRESS 1246 * should be returned to userspace. 1247 */ 1248 int fcntl_getlease(struct file *filp) 1249 { 1250 struct file_lock *fl; 1251 int type = F_UNLCK; 1252 1253 lock_kernel(); 1254 time_out_leases(filp->f_dentry->d_inode); 1255 for (fl = filp->f_dentry->d_inode->i_flock; fl && IS_LEASE(fl); 1256 fl = fl->fl_next) { 1257 if (fl->fl_file == filp) { 1258 type = fl->fl_type & ~F_INPROGRESS; 1259 break; 1260 } 1261 } 1262 unlock_kernel(); 1263 return type; 1264 } 1265 1266 /** 1267 * __setlease - sets a lease on an open file 1268 * @filp: file pointer 1269 * @arg: type of lease to obtain 1270 * @flp: input - file_lock to use, output - file_lock inserted 1271 * 1272 * The (input) flp->fl_lmops->fl_break function is required 1273 * by break_lease(). 1274 * 1275 * Called with kernel lock held. 1276 */ 1277 static int __setlease(struct file *filp, long arg, struct file_lock **flp) 1278 { 1279 struct file_lock *fl, **before, **my_before = NULL, *lease = *flp; 1280 struct dentry *dentry = filp->f_dentry; 1281 struct inode *inode = dentry->d_inode; 1282 int error, rdlease_count = 0, wrlease_count = 0; 1283 1284 time_out_leases(inode); 1285 1286 error = -EINVAL; 1287 if (!flp || !(*flp) || !(*flp)->fl_lmops || !(*flp)->fl_lmops->fl_break) 1288 goto out; 1289 1290 error = -EAGAIN; 1291 if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0)) 1292 goto out; 1293 if ((arg == F_WRLCK) 1294 && ((atomic_read(&dentry->d_count) > 1) 1295 || (atomic_read(&inode->i_count) > 1))) 1296 goto out; 1297 1298 /* 1299 * At this point, we know that if there is an exclusive 1300 * lease on this file, then we hold it on this filp 1301 * (otherwise our open of this file would have blocked). 1302 * And if we are trying to acquire an exclusive lease, 1303 * then the file is not open by anyone (including us) 1304 * except for this filp. 1305 */ 1306 for (before = &inode->i_flock; 1307 ((fl = *before) != NULL) && IS_LEASE(fl); 1308 before = &fl->fl_next) { 1309 if (lease->fl_lmops->fl_mylease(fl, lease)) 1310 my_before = before; 1311 else if (fl->fl_type == (F_INPROGRESS | F_UNLCK)) 1312 /* 1313 * Someone is in the process of opening this 1314 * file for writing so we may not take an 1315 * exclusive lease on it. 1316 */ 1317 wrlease_count++; 1318 else 1319 rdlease_count++; 1320 } 1321 1322 if ((arg == F_RDLCK && (wrlease_count > 0)) || 1323 (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0))) 1324 goto out; 1325 1326 if (my_before != NULL) { 1327 error = lease->fl_lmops->fl_change(my_before, arg); 1328 goto out; 1329 } 1330 1331 error = 0; 1332 if (arg == F_UNLCK) 1333 goto out; 1334 1335 error = -EINVAL; 1336 if (!leases_enable) 1337 goto out; 1338 1339 error = lease_alloc(filp, arg, &fl); 1340 if (error) 1341 goto out; 1342 1343 locks_copy_lock(fl, lease); 1344 1345 locks_insert_lock(before, fl); 1346 1347 *flp = fl; 1348 out: 1349 return error; 1350 } 1351 1352 /** 1353 * setlease - sets a lease on an open file 1354 * @filp: file pointer 1355 * @arg: type of lease to obtain 1356 * @lease: file_lock to use 1357 * 1358 * Call this to establish a lease on the file. 1359 * The fl_lmops fl_break function is required by break_lease 1360 */ 1361 1362 int setlease(struct file *filp, long arg, struct file_lock **lease) 1363 { 1364 struct dentry *dentry = filp->f_dentry; 1365 struct inode *inode = dentry->d_inode; 1366 int error; 1367 1368 if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE)) 1369 return -EACCES; 1370 if (!S_ISREG(inode->i_mode)) 1371 return -EINVAL; 1372 error = security_file_lock(filp, arg); 1373 if (error) 1374 return error; 1375 1376 lock_kernel(); 1377 error = __setlease(filp, arg, lease); 1378 unlock_kernel(); 1379 1380 return error; 1381 } 1382 1383 EXPORT_SYMBOL(setlease); 1384 1385 /** 1386 * fcntl_setlease - sets a lease on an open file 1387 * @fd: open file descriptor 1388 * @filp: file pointer 1389 * @arg: type of lease to obtain 1390 * 1391 * Call this fcntl to establish a lease on the file. 1392 * Note that you also need to call %F_SETSIG to 1393 * receive a signal when the lease is broken. 1394 */ 1395 int fcntl_setlease(unsigned int fd, struct file *filp, long arg) 1396 { 1397 struct file_lock fl, *flp = &fl; 1398 struct dentry *dentry = filp->f_dentry; 1399 struct inode *inode = dentry->d_inode; 1400 int error; 1401 1402 if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE)) 1403 return -EACCES; 1404 if (!S_ISREG(inode->i_mode)) 1405 return -EINVAL; 1406 error = security_file_lock(filp, arg); 1407 if (error) 1408 return error; 1409 1410 locks_init_lock(&fl); 1411 error = lease_init(filp, arg, &fl); 1412 if (error) 1413 return error; 1414 1415 lock_kernel(); 1416 1417 error = __setlease(filp, arg, &flp); 1418 if (error) 1419 goto out_unlock; 1420 1421 error = fasync_helper(fd, filp, 1, &flp->fl_fasync); 1422 if (error < 0) { 1423 /* remove lease just inserted by __setlease */ 1424 flp->fl_type = F_UNLCK | F_INPROGRESS; 1425 flp->fl_break_time = jiffies- 10; 1426 time_out_leases(inode); 1427 goto out_unlock; 1428 } 1429 1430 error = f_setown(filp, current->pid, 0); 1431 out_unlock: 1432 unlock_kernel(); 1433 return error; 1434 } 1435 1436 /** 1437 * flock_lock_file_wait - Apply a FLOCK-style lock to a file 1438 * @filp: The file to apply the lock to 1439 * @fl: The lock to be applied 1440 * 1441 * Add a FLOCK style lock to a file. 1442 */ 1443 int flock_lock_file_wait(struct file *filp, struct file_lock *fl) 1444 { 1445 int error; 1446 might_sleep(); 1447 for (;;) { 1448 error = flock_lock_file(filp, fl); 1449 if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP)) 1450 break; 1451 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next); 1452 if (!error) 1453 continue; 1454 1455 locks_delete_block(fl); 1456 break; 1457 } 1458 return error; 1459 } 1460 1461 EXPORT_SYMBOL(flock_lock_file_wait); 1462 1463 /** 1464 * sys_flock: - flock() system call. 1465 * @fd: the file descriptor to lock. 1466 * @cmd: the type of lock to apply. 1467 * 1468 * Apply a %FL_FLOCK style lock to an open file descriptor. 1469 * The @cmd can be one of 1470 * 1471 * %LOCK_SH -- a shared lock. 1472 * 1473 * %LOCK_EX -- an exclusive lock. 1474 * 1475 * %LOCK_UN -- remove an existing lock. 1476 * 1477 * %LOCK_MAND -- a `mandatory' flock. This exists to emulate Windows Share Modes. 1478 * 1479 * %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other 1480 * processes read and write access respectively. 1481 */ 1482 asmlinkage long sys_flock(unsigned int fd, unsigned int cmd) 1483 { 1484 struct file *filp; 1485 struct file_lock *lock; 1486 int can_sleep, unlock; 1487 int error; 1488 1489 error = -EBADF; 1490 filp = fget(fd); 1491 if (!filp) 1492 goto out; 1493 1494 can_sleep = !(cmd & LOCK_NB); 1495 cmd &= ~LOCK_NB; 1496 unlock = (cmd == LOCK_UN); 1497 1498 if (!unlock && !(cmd & LOCK_MAND) && !(filp->f_mode & 3)) 1499 goto out_putf; 1500 1501 error = flock_make_lock(filp, &lock, cmd); 1502 if (error) 1503 goto out_putf; 1504 if (can_sleep) 1505 lock->fl_flags |= FL_SLEEP; 1506 1507 error = security_file_lock(filp, cmd); 1508 if (error) 1509 goto out_free; 1510 1511 if (filp->f_op && filp->f_op->flock) 1512 error = filp->f_op->flock(filp, 1513 (can_sleep) ? F_SETLKW : F_SETLK, 1514 lock); 1515 else 1516 error = flock_lock_file_wait(filp, lock); 1517 1518 out_free: 1519 if (list_empty(&lock->fl_link)) { 1520 locks_free_lock(lock); 1521 } 1522 1523 out_putf: 1524 fput(filp); 1525 out: 1526 return error; 1527 } 1528 1529 /* Report the first existing lock that would conflict with l. 1530 * This implements the F_GETLK command of fcntl(). 1531 */ 1532 int fcntl_getlk(struct file *filp, struct flock __user *l) 1533 { 1534 struct file_lock *fl, file_lock; 1535 struct flock flock; 1536 int error; 1537 1538 error = -EFAULT; 1539 if (copy_from_user(&flock, l, sizeof(flock))) 1540 goto out; 1541 error = -EINVAL; 1542 if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK)) 1543 goto out; 1544 1545 error = flock_to_posix_lock(filp, &file_lock, &flock); 1546 if (error) 1547 goto out; 1548 1549 if (filp->f_op && filp->f_op->lock) { 1550 error = filp->f_op->lock(filp, F_GETLK, &file_lock); 1551 if (file_lock.fl_ops && file_lock.fl_ops->fl_release_private) 1552 file_lock.fl_ops->fl_release_private(&file_lock); 1553 if (error < 0) 1554 goto out; 1555 else 1556 fl = (file_lock.fl_type == F_UNLCK ? NULL : &file_lock); 1557 } else { 1558 fl = posix_test_lock(filp, &file_lock); 1559 } 1560 1561 flock.l_type = F_UNLCK; 1562 if (fl != NULL) { 1563 flock.l_pid = fl->fl_pid; 1564 #if BITS_PER_LONG == 32 1565 /* 1566 * Make sure we can represent the posix lock via 1567 * legacy 32bit flock. 1568 */ 1569 error = -EOVERFLOW; 1570 if (fl->fl_start > OFFT_OFFSET_MAX) 1571 goto out; 1572 if ((fl->fl_end != OFFSET_MAX) 1573 && (fl->fl_end > OFFT_OFFSET_MAX)) 1574 goto out; 1575 #endif 1576 flock.l_start = fl->fl_start; 1577 flock.l_len = fl->fl_end == OFFSET_MAX ? 0 : 1578 fl->fl_end - fl->fl_start + 1; 1579 flock.l_whence = 0; 1580 flock.l_type = fl->fl_type; 1581 } 1582 error = -EFAULT; 1583 if (!copy_to_user(l, &flock, sizeof(flock))) 1584 error = 0; 1585 out: 1586 return error; 1587 } 1588 1589 /* Apply the lock described by l to an open file descriptor. 1590 * This implements both the F_SETLK and F_SETLKW commands of fcntl(). 1591 */ 1592 int fcntl_setlk(struct file *filp, unsigned int cmd, struct flock __user *l) 1593 { 1594 struct file_lock *file_lock = locks_alloc_lock(); 1595 struct flock flock; 1596 struct inode *inode; 1597 int error; 1598 1599 if (file_lock == NULL) 1600 return -ENOLCK; 1601 1602 /* 1603 * This might block, so we do it before checking the inode. 1604 */ 1605 error = -EFAULT; 1606 if (copy_from_user(&flock, l, sizeof(flock))) 1607 goto out; 1608 1609 inode = filp->f_dentry->d_inode; 1610 1611 /* Don't allow mandatory locks on files that may be memory mapped 1612 * and shared. 1613 */ 1614 if (IS_MANDLOCK(inode) && 1615 (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID && 1616 mapping_writably_mapped(filp->f_mapping)) { 1617 error = -EAGAIN; 1618 goto out; 1619 } 1620 1621 error = flock_to_posix_lock(filp, file_lock, &flock); 1622 if (error) 1623 goto out; 1624 if (cmd == F_SETLKW) { 1625 file_lock->fl_flags |= FL_SLEEP; 1626 } 1627 1628 error = -EBADF; 1629 switch (flock.l_type) { 1630 case F_RDLCK: 1631 if (!(filp->f_mode & FMODE_READ)) 1632 goto out; 1633 break; 1634 case F_WRLCK: 1635 if (!(filp->f_mode & FMODE_WRITE)) 1636 goto out; 1637 break; 1638 case F_UNLCK: 1639 break; 1640 default: 1641 error = -EINVAL; 1642 goto out; 1643 } 1644 1645 error = security_file_lock(filp, file_lock->fl_type); 1646 if (error) 1647 goto out; 1648 1649 if (filp->f_op && filp->f_op->lock != NULL) { 1650 error = filp->f_op->lock(filp, cmd, file_lock); 1651 goto out; 1652 } 1653 1654 for (;;) { 1655 error = __posix_lock_file(inode, file_lock); 1656 if ((error != -EAGAIN) || (cmd == F_SETLK)) 1657 break; 1658 error = wait_event_interruptible(file_lock->fl_wait, 1659 !file_lock->fl_next); 1660 if (!error) 1661 continue; 1662 1663 locks_delete_block(file_lock); 1664 break; 1665 } 1666 1667 out: 1668 locks_free_lock(file_lock); 1669 return error; 1670 } 1671 1672 #if BITS_PER_LONG == 32 1673 /* Report the first existing lock that would conflict with l. 1674 * This implements the F_GETLK command of fcntl(). 1675 */ 1676 int fcntl_getlk64(struct file *filp, struct flock64 __user *l) 1677 { 1678 struct file_lock *fl, file_lock; 1679 struct flock64 flock; 1680 int error; 1681 1682 error = -EFAULT; 1683 if (copy_from_user(&flock, l, sizeof(flock))) 1684 goto out; 1685 error = -EINVAL; 1686 if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK)) 1687 goto out; 1688 1689 error = flock64_to_posix_lock(filp, &file_lock, &flock); 1690 if (error) 1691 goto out; 1692 1693 if (filp->f_op && filp->f_op->lock) { 1694 error = filp->f_op->lock(filp, F_GETLK, &file_lock); 1695 if (file_lock.fl_ops && file_lock.fl_ops->fl_release_private) 1696 file_lock.fl_ops->fl_release_private(&file_lock); 1697 if (error < 0) 1698 goto out; 1699 else 1700 fl = (file_lock.fl_type == F_UNLCK ? NULL : &file_lock); 1701 } else { 1702 fl = posix_test_lock(filp, &file_lock); 1703 } 1704 1705 flock.l_type = F_UNLCK; 1706 if (fl != NULL) { 1707 flock.l_pid = fl->fl_pid; 1708 flock.l_start = fl->fl_start; 1709 flock.l_len = fl->fl_end == OFFSET_MAX ? 0 : 1710 fl->fl_end - fl->fl_start + 1; 1711 flock.l_whence = 0; 1712 flock.l_type = fl->fl_type; 1713 } 1714 error = -EFAULT; 1715 if (!copy_to_user(l, &flock, sizeof(flock))) 1716 error = 0; 1717 1718 out: 1719 return error; 1720 } 1721 1722 /* Apply the lock described by l to an open file descriptor. 1723 * This implements both the F_SETLK and F_SETLKW commands of fcntl(). 1724 */ 1725 int fcntl_setlk64(struct file *filp, unsigned int cmd, struct flock64 __user *l) 1726 { 1727 struct file_lock *file_lock = locks_alloc_lock(); 1728 struct flock64 flock; 1729 struct inode *inode; 1730 int error; 1731 1732 if (file_lock == NULL) 1733 return -ENOLCK; 1734 1735 /* 1736 * This might block, so we do it before checking the inode. 1737 */ 1738 error = -EFAULT; 1739 if (copy_from_user(&flock, l, sizeof(flock))) 1740 goto out; 1741 1742 inode = filp->f_dentry->d_inode; 1743 1744 /* Don't allow mandatory locks on files that may be memory mapped 1745 * and shared. 1746 */ 1747 if (IS_MANDLOCK(inode) && 1748 (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID && 1749 mapping_writably_mapped(filp->f_mapping)) { 1750 error = -EAGAIN; 1751 goto out; 1752 } 1753 1754 error = flock64_to_posix_lock(filp, file_lock, &flock); 1755 if (error) 1756 goto out; 1757 if (cmd == F_SETLKW64) { 1758 file_lock->fl_flags |= FL_SLEEP; 1759 } 1760 1761 error = -EBADF; 1762 switch (flock.l_type) { 1763 case F_RDLCK: 1764 if (!(filp->f_mode & FMODE_READ)) 1765 goto out; 1766 break; 1767 case F_WRLCK: 1768 if (!(filp->f_mode & FMODE_WRITE)) 1769 goto out; 1770 break; 1771 case F_UNLCK: 1772 break; 1773 default: 1774 error = -EINVAL; 1775 goto out; 1776 } 1777 1778 error = security_file_lock(filp, file_lock->fl_type); 1779 if (error) 1780 goto out; 1781 1782 if (filp->f_op && filp->f_op->lock != NULL) { 1783 error = filp->f_op->lock(filp, cmd, file_lock); 1784 goto out; 1785 } 1786 1787 for (;;) { 1788 error = __posix_lock_file(inode, file_lock); 1789 if ((error != -EAGAIN) || (cmd == F_SETLK64)) 1790 break; 1791 error = wait_event_interruptible(file_lock->fl_wait, 1792 !file_lock->fl_next); 1793 if (!error) 1794 continue; 1795 1796 locks_delete_block(file_lock); 1797 break; 1798 } 1799 1800 out: 1801 locks_free_lock(file_lock); 1802 return error; 1803 } 1804 #endif /* BITS_PER_LONG == 32 */ 1805 1806 /* 1807 * This function is called when the file is being removed 1808 * from the task's fd array. POSIX locks belonging to this task 1809 * are deleted at this time. 1810 */ 1811 void locks_remove_posix(struct file *filp, fl_owner_t owner) 1812 { 1813 struct file_lock lock, **before; 1814 1815 /* 1816 * If there are no locks held on this file, we don't need to call 1817 * posix_lock_file(). Another process could be setting a lock on this 1818 * file at the same time, but we wouldn't remove that lock anyway. 1819 */ 1820 before = &filp->f_dentry->d_inode->i_flock; 1821 if (*before == NULL) 1822 return; 1823 1824 lock.fl_type = F_UNLCK; 1825 lock.fl_flags = FL_POSIX; 1826 lock.fl_start = 0; 1827 lock.fl_end = OFFSET_MAX; 1828 lock.fl_owner = owner; 1829 lock.fl_pid = current->tgid; 1830 lock.fl_file = filp; 1831 lock.fl_ops = NULL; 1832 lock.fl_lmops = NULL; 1833 1834 if (filp->f_op && filp->f_op->lock != NULL) { 1835 filp->f_op->lock(filp, F_SETLK, &lock); 1836 goto out; 1837 } 1838 1839 /* Can't use posix_lock_file here; we need to remove it no matter 1840 * which pid we have. 1841 */ 1842 lock_kernel(); 1843 while (*before != NULL) { 1844 struct file_lock *fl = *before; 1845 if (IS_POSIX(fl) && posix_same_owner(fl, &lock)) { 1846 locks_delete_lock(before); 1847 continue; 1848 } 1849 before = &fl->fl_next; 1850 } 1851 unlock_kernel(); 1852 out: 1853 if (lock.fl_ops && lock.fl_ops->fl_release_private) 1854 lock.fl_ops->fl_release_private(&lock); 1855 } 1856 1857 EXPORT_SYMBOL(locks_remove_posix); 1858 1859 /* 1860 * This function is called on the last close of an open file. 1861 */ 1862 void locks_remove_flock(struct file *filp) 1863 { 1864 struct inode * inode = filp->f_dentry->d_inode; 1865 struct file_lock *fl; 1866 struct file_lock **before; 1867 1868 if (!inode->i_flock) 1869 return; 1870 1871 if (filp->f_op && filp->f_op->flock) { 1872 struct file_lock fl = { 1873 .fl_pid = current->tgid, 1874 .fl_file = filp, 1875 .fl_flags = FL_FLOCK, 1876 .fl_type = F_UNLCK, 1877 .fl_end = OFFSET_MAX, 1878 }; 1879 filp->f_op->flock(filp, F_SETLKW, &fl); 1880 if (fl.fl_ops && fl.fl_ops->fl_release_private) 1881 fl.fl_ops->fl_release_private(&fl); 1882 } 1883 1884 lock_kernel(); 1885 before = &inode->i_flock; 1886 1887 while ((fl = *before) != NULL) { 1888 if (fl->fl_file == filp) { 1889 /* 1890 * We might have a POSIX lock that was created at the same time 1891 * the filp was closed for the last time. Just remove that too, 1892 * regardless of ownership, since nobody can own it. 1893 */ 1894 if (IS_FLOCK(fl) || IS_POSIX(fl)) { 1895 locks_delete_lock(before); 1896 continue; 1897 } 1898 if (IS_LEASE(fl)) { 1899 lease_modify(before, F_UNLCK); 1900 continue; 1901 } 1902 /* What? */ 1903 BUG(); 1904 } 1905 before = &fl->fl_next; 1906 } 1907 unlock_kernel(); 1908 } 1909 1910 /** 1911 * posix_block_lock - blocks waiting for a file lock 1912 * @blocker: the lock which is blocking 1913 * @waiter: the lock which conflicts and has to wait 1914 * 1915 * lockd needs to block waiting for locks. 1916 */ 1917 void 1918 posix_block_lock(struct file_lock *blocker, struct file_lock *waiter) 1919 { 1920 locks_insert_block(blocker, waiter); 1921 } 1922 1923 EXPORT_SYMBOL(posix_block_lock); 1924 1925 /** 1926 * posix_unblock_lock - stop waiting for a file lock 1927 * @filp: how the file was opened 1928 * @waiter: the lock which was waiting 1929 * 1930 * lockd needs to block waiting for locks. 1931 */ 1932 void 1933 posix_unblock_lock(struct file *filp, struct file_lock *waiter) 1934 { 1935 /* 1936 * A remote machine may cancel the lock request after it's been 1937 * granted locally. If that happens, we need to delete the lock. 1938 */ 1939 lock_kernel(); 1940 if (waiter->fl_next) { 1941 __locks_delete_block(waiter); 1942 unlock_kernel(); 1943 } else { 1944 unlock_kernel(); 1945 waiter->fl_type = F_UNLCK; 1946 posix_lock_file(filp, waiter); 1947 } 1948 } 1949 1950 EXPORT_SYMBOL(posix_unblock_lock); 1951 1952 static void lock_get_status(char* out, struct file_lock *fl, int id, char *pfx) 1953 { 1954 struct inode *inode = NULL; 1955 1956 if (fl->fl_file != NULL) 1957 inode = fl->fl_file->f_dentry->d_inode; 1958 1959 out += sprintf(out, "%d:%s ", id, pfx); 1960 if (IS_POSIX(fl)) { 1961 out += sprintf(out, "%6s %s ", 1962 (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ", 1963 (inode == NULL) ? "*NOINODE*" : 1964 (IS_MANDLOCK(inode) && 1965 (inode->i_mode & (S_IXGRP | S_ISGID)) == S_ISGID) ? 1966 "MANDATORY" : "ADVISORY "); 1967 } else if (IS_FLOCK(fl)) { 1968 if (fl->fl_type & LOCK_MAND) { 1969 out += sprintf(out, "FLOCK MSNFS "); 1970 } else { 1971 out += sprintf(out, "FLOCK ADVISORY "); 1972 } 1973 } else if (IS_LEASE(fl)) { 1974 out += sprintf(out, "LEASE "); 1975 if (fl->fl_type & F_INPROGRESS) 1976 out += sprintf(out, "BREAKING "); 1977 else if (fl->fl_file) 1978 out += sprintf(out, "ACTIVE "); 1979 else 1980 out += sprintf(out, "BREAKER "); 1981 } else { 1982 out += sprintf(out, "UNKNOWN UNKNOWN "); 1983 } 1984 if (fl->fl_type & LOCK_MAND) { 1985 out += sprintf(out, "%s ", 1986 (fl->fl_type & LOCK_READ) 1987 ? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ " 1988 : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE "); 1989 } else { 1990 out += sprintf(out, "%s ", 1991 (fl->fl_type & F_INPROGRESS) 1992 ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ " 1993 : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ "); 1994 } 1995 if (inode) { 1996 #ifdef WE_CAN_BREAK_LSLK_NOW 1997 out += sprintf(out, "%d %s:%ld ", fl->fl_pid, 1998 inode->i_sb->s_id, inode->i_ino); 1999 #else 2000 /* userspace relies on this representation of dev_t ;-( */ 2001 out += sprintf(out, "%d %02x:%02x:%ld ", fl->fl_pid, 2002 MAJOR(inode->i_sb->s_dev), 2003 MINOR(inode->i_sb->s_dev), inode->i_ino); 2004 #endif 2005 } else { 2006 out += sprintf(out, "%d <none>:0 ", fl->fl_pid); 2007 } 2008 if (IS_POSIX(fl)) { 2009 if (fl->fl_end == OFFSET_MAX) 2010 out += sprintf(out, "%Ld EOF\n", fl->fl_start); 2011 else 2012 out += sprintf(out, "%Ld %Ld\n", fl->fl_start, 2013 fl->fl_end); 2014 } else { 2015 out += sprintf(out, "0 EOF\n"); 2016 } 2017 } 2018 2019 static void move_lock_status(char **p, off_t* pos, off_t offset) 2020 { 2021 int len; 2022 len = strlen(*p); 2023 if(*pos >= offset) { 2024 /* the complete line is valid */ 2025 *p += len; 2026 *pos += len; 2027 return; 2028 } 2029 if(*pos+len > offset) { 2030 /* use the second part of the line */ 2031 int i = offset-*pos; 2032 memmove(*p,*p+i,len-i); 2033 *p += len-i; 2034 *pos += len; 2035 return; 2036 } 2037 /* discard the complete line */ 2038 *pos += len; 2039 } 2040 2041 /** 2042 * get_locks_status - reports lock usage in /proc/locks 2043 * @buffer: address in userspace to write into 2044 * @start: ? 2045 * @offset: how far we are through the buffer 2046 * @length: how much to read 2047 */ 2048 2049 int get_locks_status(char *buffer, char **start, off_t offset, int length) 2050 { 2051 struct list_head *tmp; 2052 char *q = buffer; 2053 off_t pos = 0; 2054 int i = 0; 2055 2056 lock_kernel(); 2057 list_for_each(tmp, &file_lock_list) { 2058 struct list_head *btmp; 2059 struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link); 2060 lock_get_status(q, fl, ++i, ""); 2061 move_lock_status(&q, &pos, offset); 2062 2063 if(pos >= offset+length) 2064 goto done; 2065 2066 list_for_each(btmp, &fl->fl_block) { 2067 struct file_lock *bfl = list_entry(btmp, 2068 struct file_lock, fl_block); 2069 lock_get_status(q, bfl, i, " ->"); 2070 move_lock_status(&q, &pos, offset); 2071 2072 if(pos >= offset+length) 2073 goto done; 2074 } 2075 } 2076 done: 2077 unlock_kernel(); 2078 *start = buffer; 2079 if(q-buffer < length) 2080 return (q-buffer); 2081 return length; 2082 } 2083 2084 /** 2085 * lock_may_read - checks that the region is free of locks 2086 * @inode: the inode that is being read 2087 * @start: the first byte to read 2088 * @len: the number of bytes to read 2089 * 2090 * Emulates Windows locking requirements. Whole-file 2091 * mandatory locks (share modes) can prohibit a read and 2092 * byte-range POSIX locks can prohibit a read if they overlap. 2093 * 2094 * N.B. this function is only ever called 2095 * from knfsd and ownership of locks is never checked. 2096 */ 2097 int lock_may_read(struct inode *inode, loff_t start, unsigned long len) 2098 { 2099 struct file_lock *fl; 2100 int result = 1; 2101 lock_kernel(); 2102 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { 2103 if (IS_POSIX(fl)) { 2104 if (fl->fl_type == F_RDLCK) 2105 continue; 2106 if ((fl->fl_end < start) || (fl->fl_start > (start + len))) 2107 continue; 2108 } else if (IS_FLOCK(fl)) { 2109 if (!(fl->fl_type & LOCK_MAND)) 2110 continue; 2111 if (fl->fl_type & LOCK_READ) 2112 continue; 2113 } else 2114 continue; 2115 result = 0; 2116 break; 2117 } 2118 unlock_kernel(); 2119 return result; 2120 } 2121 2122 EXPORT_SYMBOL(lock_may_read); 2123 2124 /** 2125 * lock_may_write - checks that the region is free of locks 2126 * @inode: the inode that is being written 2127 * @start: the first byte to write 2128 * @len: the number of bytes to write 2129 * 2130 * Emulates Windows locking requirements. Whole-file 2131 * mandatory locks (share modes) can prohibit a write and 2132 * byte-range POSIX locks can prohibit a write if they overlap. 2133 * 2134 * N.B. this function is only ever called 2135 * from knfsd and ownership of locks is never checked. 2136 */ 2137 int lock_may_write(struct inode *inode, loff_t start, unsigned long len) 2138 { 2139 struct file_lock *fl; 2140 int result = 1; 2141 lock_kernel(); 2142 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { 2143 if (IS_POSIX(fl)) { 2144 if ((fl->fl_end < start) || (fl->fl_start > (start + len))) 2145 continue; 2146 } else if (IS_FLOCK(fl)) { 2147 if (!(fl->fl_type & LOCK_MAND)) 2148 continue; 2149 if (fl->fl_type & LOCK_WRITE) 2150 continue; 2151 } else 2152 continue; 2153 result = 0; 2154 break; 2155 } 2156 unlock_kernel(); 2157 return result; 2158 } 2159 2160 EXPORT_SYMBOL(lock_may_write); 2161 2162 static inline void __steal_locks(struct file *file, fl_owner_t from) 2163 { 2164 struct inode *inode = file->f_dentry->d_inode; 2165 struct file_lock *fl = inode->i_flock; 2166 2167 while (fl) { 2168 if (fl->fl_file == file && fl->fl_owner == from) 2169 fl->fl_owner = current->files; 2170 fl = fl->fl_next; 2171 } 2172 } 2173 2174 /* When getting ready for executing a binary, we make sure that current 2175 * has a files_struct on its own. Before dropping the old files_struct, 2176 * we take over ownership of all locks for all file descriptors we own. 2177 * Note that we may accidentally steal a lock for a file that a sibling 2178 * has created since the unshare_files() call. 2179 */ 2180 void steal_locks(fl_owner_t from) 2181 { 2182 struct files_struct *files = current->files; 2183 int i, j; 2184 2185 if (from == files) 2186 return; 2187 2188 lock_kernel(); 2189 j = 0; 2190 for (;;) { 2191 unsigned long set; 2192 i = j * __NFDBITS; 2193 if (i >= files->max_fdset || i >= files->max_fds) 2194 break; 2195 set = files->open_fds->fds_bits[j++]; 2196 while (set) { 2197 if (set & 1) { 2198 struct file *file = files->fd[i]; 2199 if (file) 2200 __steal_locks(file, from); 2201 } 2202 i++; 2203 set >>= 1; 2204 } 2205 } 2206 unlock_kernel(); 2207 } 2208 EXPORT_SYMBOL(steal_locks); 2209 2210 static int __init filelock_init(void) 2211 { 2212 filelock_cache = kmem_cache_create("file_lock_cache", 2213 sizeof(struct file_lock), 0, SLAB_PANIC, 2214 init_once, NULL); 2215 return 0; 2216 } 2217 2218 core_initcall(filelock_init); 2219