/*
 *  linux/fs/locks.c
 *
 *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
 *  Doug Evans (dje@spiff.uucp), August 07, 1992
 *
 *  Deadlock detection added.
 *  FIXME: one thing isn't handled yet:
 *	- mandatory locks (requires lots of changes elsewhere)
 *  Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
 *
 *  Miscellaneous edits, and a total rewrite of posix_lock_file() code.
 *  Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
 *
 *  Converted file_lock_table to a linked list from an array, which eliminates
 *  the limits on how many active file locks are open.
 *  Chad Page (pageone@netcom.com), November 27, 1994
 *
 *  Removed dependency on file descriptors. dup()'ed file descriptors now
 *  get the same locks as the original file descriptors, and a close() on
 *  any file descriptor removes ALL the locks on the file for the current
 *  process. Since locks still depend on the process id, locks are inherited
 *  after an exec() but not after a fork(). This agrees with POSIX, and both
 *  BSD and SVR4 practice.
 *  Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
 *
 *  Scrapped free list which is redundant now that we allocate locks
 *  dynamically with kmalloc()/kfree().
 *  Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
 *
 *  Implemented two lock personalities - FL_FLOCK and FL_POSIX.
 *
 *  FL_POSIX locks are created with calls to fcntl() and lockf() through the
 *  fcntl() system call. They have the semantics described above.
 *
 *  FL_FLOCK locks are created with calls to flock(), through the flock()
 *  system call, which is new. Old C libraries implement flock() via fcntl()
 *  and will continue to use the old, broken implementation.
 *
 *  FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
 *  with a file pointer (filp). As a result they can be shared by a parent
 *  process and its children after a fork(). They are removed when the last
 *  file descriptor referring to the file pointer is closed (unless explicitly
 *  unlocked).
 *
 *  FL_FLOCK locks never deadlock, an existing lock is always removed before
 *  upgrading from shared to exclusive (or vice versa). When this happens
 *  any processes blocked by the current lock are woken up and allowed to
 *  run before the new lock is applied.
 *  Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
 *
 *  Removed some race conditions in flock_lock_file(), marked other possible
 *  races. Just grep for FIXME to see them.
 *  Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
 *
 *  Addressed Dmitry's concerns. Deadlock checking no longer recursive.
 *  Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
 *  once we've checked for blocking and deadlocking.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
 *
 *  Initial implementation of mandatory locks. SunOS turned out to be
 *  a rotten model, so I implemented the "obvious" semantics.
 *  See 'Documentation/filesystems/mandatory-locking.txt' for details.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
 *
 *  Don't allow mandatory locks on mmap()'ed files. Added simple functions to
 *  check if a file has mandatory locks, used by mmap(), open() and creat() to
 *  see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
 *  Manual, Section 2.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
 *
 *  Tidied up block list handling. Added '/proc/locks' interface.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
 *
 *  Fixed deadlock condition for pathological code that mixes calls to
 *  flock() and fcntl().
 *  Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
 *
 *  Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
 *  for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
 *  guarantee sensible behaviour in the case where file system modules might
 *  be compiled with different options than the kernel itself.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 *  Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
 *  (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 *  Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
 *  locks. Changed process synchronisation to avoid dereferencing locks that
 *  have already been freed.
 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
 *
 *  Made the block list a circular list to minimise searching in the list.
 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
 *
 *  Made mandatory locking a mount option. Default is not to allow mandatory
 *  locking.
 *  Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
 *
 *  Some adaptations for NFS support.
 *  Olaf Kirch (okir@monad.swb.de), Dec 1996,
 *
 *  Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
 *
 *  Use slab allocator instead of kmalloc/kfree.
 *  Use generic list implementation from <linux/list.h>.
 *  Sped up posix_locks_deadlock by only considering blocked locks.
 *  Matthew Wilcox <willy@debian.org>, March, 2000.
 *
 *  Leases and LOCK_MAND
 *  Matthew Wilcox <willy@debian.org>, June, 2000.
 *  Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
 *
 * Locking conflicts and dependencies:
 * If multiple threads attempt to lock the same byte (or flock the same file)
 * only one can be granted the lock, and the others must wait their turn.
 * The first lock has been "applied" or "granted", the others are "waiting"
 * and are "blocked" by the "applied" lock.
 *
 * Waiting and applied locks are all kept in trees whose properties are:
 *
 *	- the root of a tree may be an applied or waiting lock.
 *	- every other node in the tree is a waiting lock that
 *	  conflicts with every ancestor of that node.
 *
 * Every such tree begins life as a waiting singleton which obviously
 * satisfies the above properties.
 *
 * The only ways we modify trees preserve these properties:
 *
 *	1. We may add a new leaf node, but only after first verifying that it
 *	   conflicts with all of its ancestors.
 *	2. We may remove the root of a tree, creating a new singleton
 *	   tree from the root and N new trees rooted in the immediate
 *	   children.
 *	3. If the root of a tree is not currently an applied lock, we may
 *	   apply it (if possible).
 *	4. We may upgrade the root of the tree (either extend its range,
 *	   or upgrade its entire range from read to write).
 *
 * When an applied lock is modified in a way that reduces or downgrades any
 * part of its range, we remove all its children (2 above). This particularly
 * happens when a lock is unlocked.
 *
 * For each of those child trees we "wake up" the thread which is
 * waiting for the lock so it can continue handling as follows: if the
 * root of the tree applies, we do so (3). If it doesn't, it must
 * conflict with some applied lock. We remove (wake up) all of its children
 * (2), and add it as a new leaf to the tree rooted in the applied
 * lock (1). We then repeat the process recursively with those
 * children.
 *
 */
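
/*
 * Illustrative walk-through of the tree rules above (an added worked
 * example, not from the original source). Suppose lock A is applied,
 * waiters B and C both conflict with A, and D conflicts with B (and
 * hence transitively with A):
 *
 *	A (applied)
 *	|-- B (waiting)
 *	|   `-- D (waiting, conflicts with B and A)
 *	`-- C (waiting)
 *
 * When A is unlocked, rule 2 splits this into two trees rooted in B
 * (with D still beneath it) and C. Each new root's waiter is woken: if
 * B can now be applied, rule 3 applies it and D keeps sleeping beneath
 * it; if C still conflicts with the newly applied B, rule 1 re-adds C
 * as a leaf of B's tree.
 */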

#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/hashtable.h>
#include <linux/percpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/filelock.h>

#include <linux/uaccess.h>

#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
#define IS_OFDLCK(fl)	(fl->fl_flags & FL_OFDLCK)
#define IS_REMOTELCK(fl)	(fl->fl_pid <= 0)

static bool lease_breaking(struct file_lock *fl)
{
	return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
}

static int target_leasetype(struct file_lock *fl)
{
	if (fl->fl_flags & FL_UNLOCK_PENDING)
		return F_UNLCK;
	if (fl->fl_flags & FL_DOWNGRADE_PENDING)
		return F_RDLCK;
	return fl->fl_type;
}

int leases_enable = 1;
int lease_break_time = 45;

/*
 * The global file_lock_list is only used for displaying /proc/locks, so we
 * keep a list on each CPU, with each list protected by its own spinlock.
 * Global serialization is done using file_rwsem.
 *
 * Note that alterations to the list also require that the relevant flc_lock is
 * held.
 */
struct file_lock_list_struct {
	spinlock_t		lock;
	struct hlist_head	hlist;
};
static DEFINE_PER_CPU(struct file_lock_list_struct, file_lock_list);
DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);

/*
 * The blocked_hash is used to find POSIX lock loops for deadlock detection.
 * It is protected by blocked_lock_lock.
 *
 * We hash locks by lockowner in order to optimize searching for the lock a
 * particular lockowner is waiting on.
 *
 * FIXME: make this value scale via some heuristic? We generally will want more
 * buckets when we have more lockowners holding locks, but that's a little
 * difficult to determine without knowing what the workload will look like.
 */
#define BLOCKED_HASH_BITS	7
static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);

/*
 * This lock protects the blocked_hash. Generally, if you're accessing it, you
 * want to be holding this lock.
 *
 * In addition, it also protects the fl->fl_blocked_requests list, and the
 * fl->fl_blocker pointer for file_lock structures that are acting as lock
 * requests (in contrast to those that are acting as records of acquired locks).
 *
 * Note that when we acquire this lock in order to change the above fields,
 * we often hold the flc_lock as well. In certain cases, when reading the fields
 * protected by this lock, we can skip acquiring it iff we already hold the
 * flc_lock.
 */
static DEFINE_SPINLOCK(blocked_lock_lock);

static struct kmem_cache *flctx_cache __read_mostly;
static struct kmem_cache *filelock_cache __read_mostly;

static struct file_lock_context *
locks_get_lock_context(struct inode *inode, int type)
{
	struct file_lock_context *ctx;

	/* paired with cmpxchg() below */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (likely(ctx) || type == F_UNLCK)
		goto out;

	ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
	if (!ctx)
		goto out;

	spin_lock_init(&ctx->flc_lock);
	INIT_LIST_HEAD(&ctx->flc_flock);
	INIT_LIST_HEAD(&ctx->flc_posix);
	INIT_LIST_HEAD(&ctx->flc_lease);

	/*
	 * Assign the pointer if it's not already assigned. If it is, then
	 * free the context we just allocated.
	 */
	if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
		kmem_cache_free(flctx_cache, ctx);
		ctx = smp_load_acquire(&inode->i_flctx);
	}
out:
	trace_locks_get_lock_context(inode, type, ctx);
	return ctx;
}

static void
locks_dump_ctx_list(struct list_head *list, char *list_type)
{
	struct file_lock *fl;

	list_for_each_entry(fl, list, fl_list) {
		pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
	}
}

static void
locks_check_ctx_lists(struct inode *inode)
{
	struct file_lock_context *ctx = inode->i_flctx;

	if (unlikely(!list_empty(&ctx->flc_flock) ||
		     !list_empty(&ctx->flc_posix) ||
		     !list_empty(&ctx->flc_lease))) {
		pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
			MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
			inode->i_ino);
		locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
		locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
		locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
	}
}

static void
locks_check_ctx_file_list(struct file *filp, struct list_head *list,
			  char *list_type)
{
	struct file_lock *fl;
	struct inode *inode = locks_inode(filp);

	list_for_each_entry(fl, list, fl_list)
		if (fl->fl_file == filp)
			pr_warn("Leaked %s lock on dev=0x%x:0x%x ino=0x%lx "
				" fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
				list_type, MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino,
				fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
}

void
locks_free_lock_context(struct inode *inode)
{
	struct file_lock_context *ctx = inode->i_flctx;

	if (unlikely(ctx)) {
		locks_check_ctx_lists(inode);
		kmem_cache_free(flctx_cache, ctx);
	}
}

static void locks_init_lock_heads(struct file_lock *fl)
{
	INIT_HLIST_NODE(&fl->fl_link);
	INIT_LIST_HEAD(&fl->fl_list);
	INIT_LIST_HEAD(&fl->fl_blocked_requests);
	INIT_LIST_HEAD(&fl->fl_blocked_member);
	init_waitqueue_head(&fl->fl_wait);
}

/* Allocate an empty lock structure. */
struct file_lock *locks_alloc_lock(void)
{
	struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);

	if (fl)
		locks_init_lock_heads(fl);

	return fl;
}
EXPORT_SYMBOL_GPL(locks_alloc_lock);

void locks_release_private(struct file_lock *fl)
{
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_release_private)
			fl->fl_ops->fl_release_private(fl);
		fl->fl_ops = NULL;
	}

	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_put_owner) {
			fl->fl_lmops->lm_put_owner(fl->fl_owner);
			fl->fl_owner = NULL;
		}
		fl->fl_lmops = NULL;
	}
}
EXPORT_SYMBOL_GPL(locks_release_private);

/* Free a lock which is not in use. */
void locks_free_lock(struct file_lock *fl)
{
	BUG_ON(waitqueue_active(&fl->fl_wait));
	BUG_ON(!list_empty(&fl->fl_list));
	BUG_ON(!list_empty(&fl->fl_blocked_requests));
	BUG_ON(!list_empty(&fl->fl_blocked_member));
	BUG_ON(!hlist_unhashed(&fl->fl_link));

	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}
EXPORT_SYMBOL(locks_free_lock);

static void
locks_dispose_list(struct list_head *dispose)
{
	struct file_lock *fl;

	while (!list_empty(dispose)) {
		fl = list_first_entry(dispose, struct file_lock, fl_list);
		list_del_init(&fl->fl_list);
		locks_free_lock(fl);
	}
}

void locks_init_lock(struct file_lock *fl)
{
	memset(fl, 0, sizeof(struct file_lock));
	locks_init_lock_heads(fl);
}
EXPORT_SYMBOL(locks_init_lock);
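
/*
 * Usage sketch (an added illustration, not part of the original file):
 * callers that build a transient request on the stack are expected to
 * run locks_init_lock() before filling in the fields, exactly as
 * locks_mandatory_area() does further down in this file:
 *
 *	struct file_lock fl;
 *
 *	locks_init_lock(&fl);
 *	fl.fl_flags = FL_POSIX | FL_ACCESS;
 *	fl.fl_type  = F_WRLCK;
 *	...
 *
 * This guarantees the list heads and wait queue are initialised before
 * the lock is ever linked anywhere.
 */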

/*
 * Initialize a new lock from an existing file_lock structure.
 */
void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
	new->fl_file = NULL;
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_lmops = fl->fl_lmops;
	new->fl_ops = NULL;

	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_get_owner)
			fl->fl_lmops->lm_get_owner(fl->fl_owner);
	}
}
EXPORT_SYMBOL(locks_copy_conflock);

void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	/* "new" must be a freshly-initialized lock */
	WARN_ON_ONCE(new->fl_ops);

	locks_copy_conflock(new, fl);

	new->fl_file = fl->fl_file;
	new->fl_ops = fl->fl_ops;

	if (fl->fl_ops) {
		if (fl->fl_ops->fl_copy_lock)
			fl->fl_ops->fl_copy_lock(new, fl);
	}
}
EXPORT_SYMBOL(locks_copy_lock);

static void locks_move_blocks(struct file_lock *new, struct file_lock *fl)
{
	struct file_lock *f;

	/*
	 * As ctx->flc_lock is held, new requests cannot be added to
	 * ->fl_blocked_requests, so we don't need a lock to check if it
	 * is empty.
	 */
	if (list_empty(&fl->fl_blocked_requests))
		return;
	spin_lock(&blocked_lock_lock);
	list_splice_init(&fl->fl_blocked_requests, &new->fl_blocked_requests);
	list_for_each_entry(f, &new->fl_blocked_requests, fl_blocked_member)
		f->fl_blocker = new;
	spin_unlock(&blocked_lock_lock);
}

static inline int flock_translate_cmd(int cmd) {
	if (cmd & LOCK_MAND)
		return cmd & (LOCK_MAND | LOCK_RW);
	switch (cmd) {
	case LOCK_SH:
		return F_RDLCK;
	case LOCK_EX:
		return F_WRLCK;
	case LOCK_UN:
		return F_UNLCK;
	}
	return -EINVAL;
}

/* Fill in a file_lock structure with an appropriate FLOCK lock. */
static struct file_lock *
flock_make_lock(struct file *filp, unsigned int cmd, struct file_lock *fl)
{
	int type = flock_translate_cmd(cmd);

	if (type < 0)
		return ERR_PTR(type);

	if (fl == NULL) {
		fl = locks_alloc_lock();
		if (fl == NULL)
			return ERR_PTR(-ENOMEM);
	} else {
		locks_init_lock(fl);
	}

	fl->fl_file = filp;
	fl->fl_owner = filp;
	fl->fl_pid = current->tgid;
	fl->fl_flags = FL_FLOCK;
	fl->fl_type = type;
	fl->fl_end = OFFSET_MAX;

	return fl;
}

static int assign_type(struct file_lock *fl, long type)
{
	switch (type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	switch (l->l_whence) {
	case SEEK_SET:
		fl->fl_start = 0;
		break;
	case SEEK_CUR:
		fl->fl_start = filp->f_pos;
		break;
	case SEEK_END:
		fl->fl_start = i_size_read(file_inode(filp));
		break;
	default:
		return -EINVAL;
	}
	if (l->l_start > OFFSET_MAX - fl->fl_start)
		return -EOVERFLOW;
	fl->fl_start += l->l_start;
	if (fl->fl_start < 0)
		return -EINVAL;

	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
	   POSIX-2001 defines it. */
	if (l->l_len > 0) {
		if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
			return -EOVERFLOW;
		fl->fl_end = fl->fl_start + l->l_len - 1;

	} else if (l->l_len < 0) {
		if (fl->fl_start + l->l_len < 0)
			return -EINVAL;
		fl->fl_end = fl->fl_start - 1;
		fl->fl_start += l->l_len;
	} else
		fl->fl_end = OFFSET_MAX;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}

/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 * style lock.
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	struct flock64 ll = {
		.l_type = l->l_type,
		.l_whence = l->l_whence,
		.l_start = l->l_start,
		.l_len = l->l_len,
	};

	return flock64_to_posix_lock(filp, fl, &ll);
}
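
/*
 * Worked example of the l_start/l_len arithmetic above (an added
 * illustration, not in the original file). With l_whence = SEEK_SET:
 *
 *	l_start = 100, l_len =  10  =>  fl_start = 100, fl_end = 109
 *	l_start = 100, l_len = -10  =>  fl_start =  90, fl_end =  99
 *	l_start = 100, l_len =   0  =>  fl_start = 100, fl_end = OFFSET_MAX
 *
 * i.e. a negative l_len (POSIX-2001) locks the |l_len| bytes *before*
 * l_start, and a zero l_len locks through to end-of-file.
 */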

/* default lease lock manager operations */
static bool
lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
	return false;
}

static void
lease_setup(struct file_lock *fl, void **priv)
{
	struct file *filp = fl->fl_file;
	struct fasync_struct *fa = *priv;

	/*
	 * fasync_insert_entry() returns the old entry if any. If there was no
	 * old entry, then it used "priv" and inserted it into the fasync list.
	 * Clear the pointer to indicate that it shouldn't be freed.
	 */
	if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
		*priv = NULL;

	__f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);
}

static const struct lock_manager_operations lease_manager_ops = {
	.lm_break = lease_break_callback,
	.lm_change = lease_modify,
	.lm_setup = lease_setup,
};

/*
 * Initialize a lease, use the default lock manager operations
 */
static int lease_init(struct file *filp, long type, struct file_lock *fl)
{
	if (assign_type(fl, type) != 0)
		return -EINVAL;

	fl->fl_owner = filp;
	fl->fl_pid = current->tgid;

	fl->fl_file = filp;
	fl->fl_flags = FL_LEASE;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;
	fl->fl_ops = NULL;
	fl->fl_lmops = &lease_manager_ops;
	return 0;
}

/* Allocate a file_lock initialised to this type of lease */
static struct file_lock *lease_alloc(struct file *filp, long type)
{
	struct file_lock *fl = locks_alloc_lock();
	int error = -ENOMEM;

	if (fl == NULL)
		return ERR_PTR(error);

	error = lease_init(filp, type, fl);
	if (error) {
		locks_free_lock(fl);
		return ERR_PTR(error);
	}
	return fl;
}

/* Check if two locks overlap each other.
 */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
	return ((fl1->fl_end >= fl2->fl_start) &&
		(fl2->fl_end >= fl1->fl_start));
}

/*
 * Check whether two locks have the same owner.
 */
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
		return fl2->fl_lmops == fl1->fl_lmops &&
			fl1->fl_lmops->lm_compare_owner(fl1, fl2);
	return fl1->fl_owner == fl2->fl_owner;
}

/* Must be called with the flc_lock held! */
static void locks_insert_global_locks(struct file_lock *fl)
{
	struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list);

	percpu_rwsem_assert_held(&file_rwsem);

	spin_lock(&fll->lock);
	fl->fl_link_cpu = smp_processor_id();
	hlist_add_head(&fl->fl_link, &fll->hlist);
	spin_unlock(&fll->lock);
}

/* Must be called with the flc_lock held! */
static void locks_delete_global_locks(struct file_lock *fl)
{
	struct file_lock_list_struct *fll;

	percpu_rwsem_assert_held(&file_rwsem);

	/*
	 * Avoid taking lock if already unhashed. This is safe since this check
	 * is done while holding the flc_lock, and new insertions into the list
	 * also require that it be held.
	 */
	if (hlist_unhashed(&fl->fl_link))
		return;

	fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu);
	spin_lock(&fll->lock);
	hlist_del_init(&fl->fl_link);
	spin_unlock(&fll->lock);
}

static unsigned long
posix_owner_key(struct file_lock *fl)
{
	if (fl->fl_lmops && fl->fl_lmops->lm_owner_key)
		return fl->fl_lmops->lm_owner_key(fl);
	return (unsigned long)fl->fl_owner;
}

static void locks_insert_global_blocked(struct file_lock *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);

	hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
}

static void locks_delete_global_blocked(struct file_lock *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);

	hash_del(&waiter->fl_link);
}

/* Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
 *
 * Must be called with blocked_lock_lock held.
 */
static void __locks_delete_block(struct file_lock *waiter)
{
	locks_delete_global_blocked(waiter);
	list_del_init(&waiter->fl_blocked_member);
	waiter->fl_blocker = NULL;
}

static void __locks_wake_up_blocks(struct file_lock *blocker)
{
	while (!list_empty(&blocker->fl_blocked_requests)) {
		struct file_lock *waiter;

		waiter = list_first_entry(&blocker->fl_blocked_requests,
					  struct file_lock, fl_blocked_member);
		__locks_delete_block(waiter);
		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
			waiter->fl_lmops->lm_notify(waiter);
		else
			wake_up(&waiter->fl_wait);
	}
}

/**
 * locks_delete_block - stop waiting for a file lock
 * @waiter: the lock which was waiting
 *
 * lockd/nfsd need to disconnect the lock while working on it.
 */
int locks_delete_block(struct file_lock *waiter)
{
	int status = -ENOENT;

	/*
	 * If fl_blocker is NULL, it won't be set again as this thread
	 * "owns" the lock and is the only one that might try to claim
	 * the lock. So it is safe to test fl_blocker locklessly.
	 * Also if fl_blocker is NULL, this waiter is not listed on
	 * fl_blocked_requests for some lock, so no other request can
	 * be added to the list of fl_blocked_requests for this
	 * request. So if fl_blocker is NULL, it is safe to
	 * locklessly check if fl_blocked_requests is empty. If both
	 * of these checks succeed, there is no need to take the lock.
	 */
	if (waiter->fl_blocker == NULL &&
	    list_empty(&waiter->fl_blocked_requests))
		return status;
	spin_lock(&blocked_lock_lock);
	if (waiter->fl_blocker)
		status = 0;
	__locks_wake_up_blocks(waiter);
	__locks_delete_block(waiter);
	spin_unlock(&blocked_lock_lock);
	return status;
}
EXPORT_SYMBOL(locks_delete_block);

/* Insert waiter into blocker's block list.
 * We use a circular list so that processes can be easily woken up in
 * the order they blocked. The documentation doesn't require this but
 * it seems like the reasonable thing to do.
 *
 * Must be called with both the flc_lock and blocked_lock_lock held. The
 * fl_blocked_requests list itself is protected by the blocked_lock_lock,
 * but by ensuring that the flc_lock is also held on insertions we can avoid
 * taking the blocked_lock_lock in some cases when we see that the
 * fl_blocked_requests list is empty.
 *
 * Rather than just adding to the list, we check for conflicts with any existing
 * waiters, and add beneath any waiter that blocks the new waiter.
 * Thus wakeups don't happen until needed.
 */
static void __locks_insert_block(struct file_lock *blocker,
				 struct file_lock *waiter,
				 bool conflict(struct file_lock *,
					       struct file_lock *))
{
	struct file_lock *fl;
	BUG_ON(!list_empty(&waiter->fl_blocked_member));

new_blocker:
	list_for_each_entry(fl, &blocker->fl_blocked_requests, fl_blocked_member)
		if (conflict(fl, waiter)) {
			blocker = fl;
			goto new_blocker;
		}
	waiter->fl_blocker = blocker;
	list_add_tail(&waiter->fl_blocked_member, &blocker->fl_blocked_requests);
	if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
		locks_insert_global_blocked(waiter);

	/* The requests in waiter->fl_blocked_requests are known to conflict
	 * with waiter, but might not conflict with blocker, or the requests
	 * and lock which block it. So they all need to be woken.
	 */
	__locks_wake_up_blocks(waiter);
}
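
/*
 * Illustrative example of the insertion rule above (an added note, not
 * in the original file): if blocker B already has a POSIX waiter W1 for
 * bytes 0-99 and a new waiter W2 arrives for bytes 50-59, W2 conflicts
 * with W1 and is therefore queued beneath W1 rather than directly
 * beneath B. When B is released, only W1 is woken; W2 keeps sleeping
 * until W1 is in turn granted and released (or gives up), which avoids
 * a pointless wakeup of W2.
 */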

/* Must be called with flc_lock held. */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter,
			       bool conflict(struct file_lock *,
					     struct file_lock *))
{
	spin_lock(&blocked_lock_lock);
	__locks_insert_block(blocker, waiter, conflict);
	spin_unlock(&blocked_lock_lock);
}

/*
 * Wake up processes blocked waiting for blocker.
 *
 * Must be called with the inode->flc_lock held!
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	/*
	 * Avoid taking global lock if list is empty. This is safe since new
	 * blocked requests are only added to the list under the flc_lock, and
	 * the flc_lock is always held here. Note that removal from the
	 * fl_blocked_requests list does not require the flc_lock, so we must
	 * recheck list_empty() after acquiring the blocked_lock_lock.
	 */
	if (list_empty(&blocker->fl_blocked_requests))
		return;

	spin_lock(&blocked_lock_lock);
	__locks_wake_up_blocks(blocker);
	spin_unlock(&blocked_lock_lock);
}

static void
locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
{
	list_add_tail(&fl->fl_list, before);
	locks_insert_global_locks(fl);
}

static void
locks_unlink_lock_ctx(struct file_lock *fl)
{
	locks_delete_global_locks(fl);
	list_del_init(&fl->fl_list);
	locks_wake_up_blocks(fl);
}

static void
locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
{
	locks_unlink_lock_ctx(fl);
	if (dispose)
		list_add(&fl->fl_list, dispose);
	else
		locks_free_lock(fl);
}

/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 * checks for shared/exclusive status of overlapping locks.
 */
static bool locks_conflict(struct file_lock *caller_fl,
			   struct file_lock *sys_fl)
{
	if (sys_fl->fl_type == F_WRLCK)
		return true;
	if (caller_fl->fl_type == F_WRLCK)
		return true;
	return false;
}

/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
 * checking before calling the locks_conflict().
 */
static bool posix_locks_conflict(struct file_lock *caller_fl,
				 struct file_lock *sys_fl)
{
	/* POSIX locks owned by the same process do not conflict with
	 * each other.
	 */
	if (posix_same_owner(caller_fl, sys_fl))
		return false;

	/* Check whether they overlap */
	if (!locks_overlap(caller_fl, sys_fl))
		return false;

	return locks_conflict(caller_fl, sys_fl);
}

/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
 * checking before calling the locks_conflict().
 */
static bool flock_locks_conflict(struct file_lock *caller_fl,
				 struct file_lock *sys_fl)
{
	/* FLOCK locks referring to the same filp do not conflict with
	 * each other.
	 */
	if (caller_fl->fl_file == sys_fl->fl_file)
		return false;
	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
		return false;

	return locks_conflict(caller_fl, sys_fl);
}

void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;
	struct file_lock_context *ctx;
	struct inode *inode = locks_inode(filp);

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx || list_empty_careful(&ctx->flc_posix)) {
		fl->fl_type = F_UNLCK;
		return;
	}

	spin_lock(&ctx->flc_lock);
	list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
		if (posix_locks_conflict(fl, cfl)) {
			locks_copy_conflock(fl, cfl);
			goto out;
		}
	}
	fl->fl_type = F_UNLCK;
out:
	spin_unlock(&ctx->flc_lock);
	return;
}
EXPORT_SYMBOL(posix_test_lock);
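
/*
 * Userspace view of posix_test_lock() (an added illustrative sketch,
 * not part of the original file): F_GETLK fills the caller's struct
 * flock with the first conflicting lock found, or sets l_type to
 * F_UNLCK if there is none.
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,	// "could I write-lock this range?"
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 100,
 *	};
 *
 *	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
 *		printf("conflicting lock held by pid %d\n", fl.l_pid);
 */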

/*
 * Deadlock detection:
 *
 * We attempt to detect deadlocks that are due purely to posix file
 * locks.
 *
 * We assume that a task can be waiting for at most one lock at a time.
 * So for any acquired lock, the process holding that lock may be
 * waiting on at most one other lock. That lock in turn may be held by
 * someone waiting for at most one other lock. Given a requested lock
 * caller_fl which is about to wait for a conflicting lock block_fl, we
 * follow this chain of waiters to ensure we are not about to create a
 * cycle.
 *
 * Since we do this before we ever put a process to sleep on a lock, we
 * are ensured that there is never a cycle; that is what guarantees that
 * the while() loop in posix_locks_deadlock() eventually completes.
 *
 * Note: the above assumption may not be true when handling lock
 * requests from a broken NFS client. It may also fail in the presence
 * of tasks (such as posix threads) sharing the same open file table.
 * To handle those cases, we just bail out after a few iterations.
 *
 * For FL_OFDLCK locks, the owner is the filp, not the files_struct.
 * Because the owner is not even nominally tied to a thread of
 * execution, the deadlock detection below can't reasonably work well. Just
 * skip it for those.
 *
 * In principle, we could do a more limited deadlock detection on FL_OFDLCK
 * locks that just checks for the case where two tasks are attempting to
 * upgrade from read to write locks on the same inode.
 */
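
/*
 * Concrete example of the cycle detection above (an added illustration,
 * not in the original file). Suppose task 1 holds a lock on range A and
 * task 2 holds a lock on range B:
 *
 *	task 1: requests B  ->  blocks, waiting on task 2
 *	task 2: requests A  ->  would block, waiting on task 1
 *
 * When task 2's request is checked, we follow the chain of owners: A is
 * held by task 1, which is itself waiting on a lock held by task 2, the
 * requesting owner. posix_locks_deadlock() spots that the chain loops
 * back to the caller and the request fails with -EDEADLK instead of
 * being put to sleep.
 */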

#define MAX_DEADLK_ITERATIONS 10

/* Find a lock that the owner of the given block_fl is blocking on. */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
	struct file_lock *fl;

	hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
		if (posix_same_owner(fl, block_fl)) {
			while (fl->fl_blocker)
				fl = fl->fl_blocker;
			return fl;
		}
	}
	return NULL;
}

/* Must be called with the blocked_lock_lock held! */
static int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	int i = 0;

	lockdep_assert_held(&blocked_lock_lock);

	/*
	 * This deadlock detector can't reasonably detect deadlocks with
	 * FL_OFDLCK locks, since they aren't owned by a process, per-se.
	 */
	if (IS_OFDLCK(caller_fl))
		return 0;

	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;
		if (posix_same_owner(caller_fl, block_fl))
			return 1;
	}
	return 0;
}

/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
 * after any leases, but before any posix locks.
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
static int flock_lock_inode(struct inode *inode, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	struct file_lock *fl;
	struct file_lock_context *ctx;
	int error = 0;
	bool found = false;
	LIST_HEAD(dispose);

	ctx = locks_get_lock_context(inode, request->fl_type);
	if (!ctx) {
		if (request->fl_type != F_UNLCK)
			return -ENOMEM;
		return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0;
	}

	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
		new_fl = locks_alloc_lock();
		if (!new_fl)
			return -ENOMEM;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	if (request->fl_flags & FL_ACCESS)
		goto find_conflict;

	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
		if (request->fl_file != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;
		found = true;
		locks_delete_lock_ctx(fl, &dispose);
		break;
	}

	if (request->fl_type == F_UNLCK) {
		if ((request->fl_flags & FL_EXISTS) && !found)
			error = -ENOENT;
		goto out;
	}

find_conflict:
	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
		if (!flock_locks_conflict(request, fl))
			continue;
		error = -EAGAIN;
		if (!(request->fl_flags & FL_SLEEP))
			goto out;
		error = FILE_LOCK_DEFERRED;
		locks_insert_block(fl, request, flock_locks_conflict);
		goto out;
	}
	if (request->fl_flags & FL_ACCESS)
		goto out;
	locks_copy_lock(new_fl, request);
	locks_move_blocks(new_fl, request);
	locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
	new_fl = NULL;
	error = 0;

out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	if (new_fl)
		locks_free_lock(new_fl);
	locks_dispose_list(&dispose);
	trace_flock_lock_inode(inode, request, error);
	return error;
}
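
/*
 * Illustrative userspace sequence (an added example, not in the
 * original file) showing the upgrade behaviour implemented above: the
 * existing lock is deleted before the new one is acquired, so an
 * upgrade is not atomic and another process may grab the lock in
 * between, as the header comment at the top of this file notes.
 *
 *	flock(fd, LOCK_SH);	// shared lock granted
 *	flock(fd, LOCK_EX);	// old lock dropped first, then we block
 *				// until an exclusive lock is available;
 *				// another process may win it meanwhile
 */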

static int posix_lock_inode(struct inode *inode, struct file_lock *request,
			    struct file_lock *conflock)
{
	struct file_lock *fl, *tmp;
	struct file_lock *new_fl = NULL;
	struct file_lock *new_fl2 = NULL;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock_context *ctx;
	int error;
	bool added = false;
	LIST_HEAD(dispose);

	ctx = locks_get_lock_context(inode, request->fl_type);
	if (!ctx)
		return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM;

	/*
	 * We may need two file_lock structures for this operation,
	 * so we get them in advance to avoid races.
	 *
	 * In some cases we can be sure that no new locks will be needed.
	 */
	if (!(request->fl_flags & FL_ACCESS) &&
	    (request->fl_type != F_UNLCK ||
	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
		new_fl = locks_alloc_lock();
		new_fl2 = locks_alloc_lock();
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	/*
	 * New lock request. Walk all POSIX locks and look for conflicts. If
	 * there are any, either return error or put the request on the
	 * blocker's list of waiters and the global blocked_hash.
	 */
	if (request->fl_type != F_UNLCK) {
		list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
			if (!posix_locks_conflict(request, fl))
				continue;
			if (conflock)
				locks_copy_conflock(conflock, fl);
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			/*
			 * Deadlock detection and insertion into the blocked
			 * locks list must be done while holding the same lock!
			 */
			error = -EDEADLK;
			spin_lock(&blocked_lock_lock);
			/*
			 * Ensure that we don't find any locks blocked on this
			 * request during deadlock detection.
			 */
			__locks_wake_up_blocks(request);
			if (likely(!posix_locks_deadlock(request, fl))) {
				error = FILE_LOCK_DEFERRED;
				__locks_insert_block(fl, request,
						     posix_locks_conflict);
			}
			spin_unlock(&blocked_lock_lock);
			goto out;
		}
	}

	/* If we're just looking for a conflict, we're done. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	/* Find the first old lock with the same owner as the new lock */
	list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
		if (posix_same_owner(request, fl))
			break;
	}

	/* Process locks with this owner. */
	list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
		if (!posix_same_owner(request, fl))
			break;

		/* Detect adjacent or overlapping regions (if same lock type) */
		if (request->fl_type == fl->fl_type) {
			/* In all comparisons of start vs end, use
			 * "start - 1" rather than "end + 1". If end
			 * is OFFSET_MAX, end + 1 will become negative.
			 */
			if (fl->fl_end < request->fl_start - 1)
				continue;
			/* If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
			if (fl->fl_start - 1 > request->fl_end)
				break;

			/* If we come here, the new and old lock are of the
			 * same type and adjacent or overlapping. Make one
			 * lock yielding from the lower start address of both
			 * locks to the higher end address.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock_ctx(fl, &dispose);
				continue;
			}
			request = fl;
			added = true;
		} else {
			/* Processing for different lock types is a bit
			 * more complex.
			 */
			if (fl->fl_end < request->fl_start)
				continue;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = true;
			if (fl->fl_start < request->fl_start)
				left = fl;
			/* If the next lock in the list has a higher end
			 * address than the new one, insert the new one here.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/* The new lock completely replaces an old
				 * one (This may happen several times).
				 */
				if (added) {
					locks_delete_lock_ctx(fl, &dispose);
					continue;
				}
				/*
				 * Replace the old lock with new_fl, and
				 * remove the old one. It's safe to do the
				 * insert here since we know that we won't be
				 * using new_fl later, and that the lock is
				 * just replacing an existing lock.
				 */
				error = -ENOLCK;
				if (!new_fl)
					goto out;
				locks_copy_lock(new_fl, request);
				request = new_fl;
				new_fl = NULL;
				locks_insert_lock_ctx(request, &fl->fl_list);
				locks_delete_lock_ctx(fl, &dispose);
				added = true;
			}
		}
	}

	/*
	 * The above code only modifies existing locks in case of merging or
	 * replacing. If new lock(s) need to be inserted all modifications are
	 * done below this, so it's safe yet to bail out.
	 */
	error = -ENOLCK; /* "no luck" */
	if (right && left == right && !new_fl2)
		goto out;

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK) {
			if (request->fl_flags & FL_EXISTS)
				error = -ENOENT;
			goto out;
		}

		if (!new_fl) {
			error = -ENOLCK;
			goto out;
		}
		locks_copy_lock(new_fl, request);
		locks_move_blocks(new_fl, request);
		locks_insert_lock_ctx(new_fl, &fl->fl_list);
		fl = new_fl;
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* The new lock breaks the old one in two pieces,
			 * so we have to use the second new lock.
			 */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock_ctx(left, &fl->fl_list);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	/*
	 * Free any unused locks.
	 */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	locks_dispose_list(&dispose);
	trace_posix_lock_inode(inode, request, error);

	return error;
}

/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 * @conflock: Place to return a copy of the conflicting lock, if found.
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
int posix_lock_file(struct file *filp, struct file_lock *fl,
		    struct file_lock *conflock)
{
	return posix_lock_inode(locks_inode(filp), fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);
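
/*
 * Worked example of the splitting logic above (an added illustration,
 * not part of the original file). If a process holds a write lock on
 * bytes 0-99 and unlocks bytes 40-59:
 *
 *	before:	[0 ............................ 99]  F_WRLCK
 *	after:	[0 ..... 39]        [60 ....... 99]  F_WRLCK
 *
 * The original lock becomes "left" (fl_end pulled back to 39) and a
 * copy becomes "right" (fl_start pushed to 60). This left == right
 * case is the one that consumes the second preallocated lock, new_fl2.
 */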

/**
 * posix_lock_inode_wait - Apply a POSIX-style lock to a file
 * @inode: inode of file to which lock request should be applied
 * @fl: The lock to be applied
 *
 * Apply a POSIX style lock request to an inode.
 */
static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = posix_lock_inode(inode, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker);
		if (error)
			break;
	}
	locks_delete_block(fl);
	return error;
}

#ifdef CONFIG_MANDATORY_FILE_LOCKING
/**
 * locks_mandatory_locked - Check for an active lock
 * @file: the file to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from locks_verify_locked() only.
 */
int locks_mandatory_locked(struct file *file)
{
	int ret;
	struct inode *inode = locks_inode(file);
	struct file_lock_context *ctx;
	struct file_lock *fl;

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx || list_empty_careful(&ctx->flc_posix))
		return 0;

	/*
	 * Search the lock list for this inode for any POSIX locks.
	 */
	spin_lock(&ctx->flc_lock);
	ret = 0;
	list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
		if (fl->fl_owner != current->files &&
		    fl->fl_owner != file) {
			ret = -EAGAIN;
			break;
		}
	}
	spin_unlock(&ctx->flc_lock);
	return ret;
}

/**
 * locks_mandatory_area - Check for a conflicting lock
 * @inode: the file to check
 * @filp: how the file was opened (if it was)
 * @start: first byte in the file to check
 * @end: last byte in the file to check
 * @type: %F_WRLCK for a write lock, else %F_RDLCK
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 */
int locks_mandatory_area(struct inode *inode, struct file *filp, loff_t start,
			 loff_t end, unsigned char type)
{
	struct file_lock fl;
	int error;
	bool sleep = false;

	locks_init_lock(&fl);
	fl.fl_pid = current->tgid;
	fl.fl_file = filp;
	fl.fl_flags = FL_POSIX | FL_ACCESS;
	if (filp && !(filp->f_flags & O_NONBLOCK))
		sleep = true;
	fl.fl_type = type;
	fl.fl_start = start;
	fl.fl_end = end;

	for (;;) {
		if (filp) {
			fl.fl_owner = filp;
			fl.fl_flags &= ~FL_SLEEP;
			error = posix_lock_inode(inode, &fl, NULL);
			if (!error)
				break;
		}

		if (sleep)
			fl.fl_flags |= FL_SLEEP;
		fl.fl_owner = current->files;
		error = posix_lock_inode(inode, &fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl.fl_wait, !fl.fl_blocker);
		if (!error) {
			/*
			 * If we've been sleeping someone might have
			 * changed the permissions behind our back.
			 */
			if (__mandatory_lock(inode))
				continue;
		}

		break;
	}
	locks_delete_block(&fl);

	return error;
}
EXPORT_SYMBOL(locks_mandatory_area);
#endif /* CONFIG_MANDATORY_FILE_LOCKING */

static void lease_clear_pending(struct file_lock *fl, int arg)
{
	switch (arg) {
	case F_UNLCK:
		fl->fl_flags &= ~FL_UNLOCK_PENDING;
		/* fall through: */
	case F_RDLCK:
		fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
	}
}

/* We already had a lease on this file; just change its type */
int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
{
	int error = assign_type(fl, arg);

	if (error)
		return error;
	lease_clear_pending(fl, arg);
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK) {
		struct file *filp = fl->fl_file;

		f_delown(filp);
		filp->f_owner.signum = 0;
		fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
		if (fl->fl_fasync != NULL) {
			printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
			fl->fl_fasync = NULL;
		}
		locks_delete_lock_ctx(fl, dispose);
	}
	return 0;
}
EXPORT_SYMBOL(lease_modify);

static bool past_time(unsigned long then)
{
	if (!then)
		/* 0 is a special value meaning "this never expires": */
		return false;
	return time_after(jiffies, then);
}

static void time_out_leases(struct inode *inode, struct list_head *dispose)
{
	struct file_lock_context *ctx = inode->i_flctx;
	struct file_lock *fl, *tmp;

	lockdep_assert_held(&ctx->flc_lock);

	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
		trace_time_out_leases(inode, fl);
		if (past_time(fl->fl_downgrade_time))
			lease_modify(fl, F_RDLCK, dispose);
		if (past_time(fl->fl_break_time))
			lease_modify(fl, F_UNLCK, dispose);
	}
}

static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
{
	if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT))
		return false;
	if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE))
		return false;
	return locks_conflict(breaker, lease);
}

static bool
any_leases_conflict(struct inode *inode, struct file_lock *breaker)
{
	struct file_lock_context *ctx = inode->i_flctx;
	struct file_lock *fl;

	lockdep_assert_held(&ctx->flc_lock);

	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
		if (leases_conflict(fl, breaker))
			return true;
	}
	return false;
}

/**
 * __break_lease - revoke all outstanding leases on file
 * @inode: the inode of the file to return
 * @mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
 *	break all leases
 * @type: FL_LEASE: break leases and delegations; FL_DELEG: break
 *	only delegations
 *
 * break_lease (inlined for speed) has checked there already is at least
 * some kind of lock (maybe a lease) on this file. Leases are broken on
 * a call to open() or truncate(). This function can sleep unless you
 * specified %O_NONBLOCK to your open().
 */
int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
{
	int error = 0;
	struct file_lock_context *ctx;
	struct file_lock *new_fl, *fl, *tmp;
	unsigned long break_time;
	int want_write = (mode & O_ACCMODE) != O_RDONLY;
	LIST_HEAD(dispose);

	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
	if (IS_ERR(new_fl))
		return PTR_ERR(new_fl);
	new_fl->fl_flags = type;

	/* typically we will check that ctx is non-NULL before calling */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx) {
		WARN_ON_ONCE(1);
		/* don't leak new_fl on this (warned-about) early return */
		goto free_lock;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);

	time_out_leases(inode, &dispose);

	if (!any_leases_conflict(inode, new_fl))
		goto out;

	break_time = 0;
	if (lease_break_time > 0) {
		break_time = jiffies + lease_break_time * HZ;
		if (break_time == 0)
			break_time++;	/* so that 0 means no break time */
	}

	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
		if (!leases_conflict(fl, new_fl))
			continue;
		if (want_write) {
			if (fl->fl_flags & FL_UNLOCK_PENDING)
				continue;
			fl->fl_flags |= FL_UNLOCK_PENDING;
			fl->fl_break_time = break_time;
		} else {
			if (lease_breaking(fl))
				continue;
			fl->fl_flags |= FL_DOWNGRADE_PENDING;
			fl->fl_downgrade_time = break_time;
		}
		if (fl->fl_lmops->lm_break(fl))
			locks_delete_lock_ctx(fl, &dispose);
	}

	if (list_empty(&ctx->flc_lease))
		goto out;

	if (mode & O_NONBLOCK) {
		trace_break_lease_noblock(inode, new_fl);
		error = -EWOULDBLOCK;
		goto out;
	}

restart:
	fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
	break_time = fl->fl_break_time;
	if (break_time != 0)
		break_time -= jiffies;
	if (break_time == 0)
		break_time++;
	locks_insert_block(fl, new_fl, leases_conflict);
	trace_break_lease_block(inode, new_fl);
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);

	locks_dispose_list(&dispose);
	error = wait_event_interruptible_timeout(new_fl->fl_wait,
						 !new_fl->fl_blocker, break_time);

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	trace_break_lease_unblock(inode, new_fl);
	locks_delete_block(new_fl);
	if (error >= 0) {
		/*
		 * Wait for the next conflicting lease that has not been
		 * broken yet
		 */
		if (error == 0)
			time_out_leases(inode, &dispose);
		if (any_leases_conflict(inode, new_fl))
			goto restart;
		error = 0;
	}
out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	locks_dispose_list(&dispose);
free_lock:
	locks_free_lock(new_fl);
	return error;
}
EXPORT_SYMBOL(__break_lease);

/**
 * lease_get_mtime - update modified time of an inode with exclusive lease
 * @inode: the inode
 * @time: pointer to a timespec which contains the last modified time
 *
 * This is to force NFS clients to flush their caches for files with
 * exclusive leases. The justification is that if someone has an
 * exclusive lease, then they could be modifying it.
 */
void lease_get_mtime(struct inode *inode, struct timespec64 *time)
{
	bool has_lease = false;
	struct file_lock_context *ctx;
	struct file_lock *fl;

	ctx = smp_load_acquire(&inode->i_flctx);
	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
		spin_lock(&ctx->flc_lock);
		fl = list_first_entry_or_null(&ctx->flc_lease,
					      struct file_lock, fl_list);
		if (fl && (fl->fl_type == F_WRLCK))
			has_lease = true;
		spin_unlock(&ctx->flc_lock);
	}

	if (has_lease)
		*time = current_time(inode);
}
EXPORT_SYMBOL(lease_get_mtime);

/**
 * fcntl_getlease - Enquire what lease is currently active
 * @filp: the file
 *
 * The value returned by this function will be one of
 * (if no lease break is pending):
 *
 * %F_RDLCK to indicate a shared lease is held.
 *
 * %F_WRLCK to indicate an exclusive lease is held.
 *
 * %F_UNLCK to indicate no lease is held.
 *
 * (if a lease break is pending):
 *
 * %F_RDLCK to indicate an exclusive lease needs to be
 * changed to a shared lease (or removed).
 *
 * %F_UNLCK to indicate the lease needs to be removed.
 *
 * XXX: sfr & willy disagree over whether F_INPROGRESS
 * should be returned to userspace.
 */
int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	struct inode *inode = locks_inode(filp);
	struct file_lock_context *ctx;
	int type = F_UNLCK;
	LIST_HEAD(dispose);

	ctx = smp_load_acquire(&inode->i_flctx);
	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
		percpu_down_read(&file_rwsem);
		spin_lock(&ctx->flc_lock);
		time_out_leases(inode, &dispose);
		list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
			if (fl->fl_file != filp)
				continue;
			type = target_leasetype(fl);
			break;
		}
		spin_unlock(&ctx->flc_lock);
		percpu_up_read(&file_rwsem);

		locks_dispose_list(&dispose);
	}
	return type;
}
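
/*
 * Illustrative userspace counterpart (an added note, not in the
 * original file):
 *
 *	int type = fcntl(fd, F_GETLEASE);
 *
 * returns F_RDLCK, F_WRLCK or F_UNLCK as described above. While a break
 * is pending, target_leasetype() means it reports the type the lease is
 * being *reduced to*, not the type currently held.
 */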

/**
 * check_conflicting_open - see if the given dentry points to a file that has
 *			    an existing open that would conflict with the
 *			    desired lease.
 * @dentry:	dentry to check
 * @arg:	type of lease that we're trying to acquire
 * @flags:	current lock flags
 *
 * Check to see if there's an existing open fd on this file that would
 * conflict with the lease we're trying to set.
 */
static int
check_conflicting_open(const struct dentry *dentry, const long arg, int flags)
{
	int ret = 0;
	struct inode *inode = dentry->d_inode;

	if (flags & FL_LAYOUT)
		return 0;

	if ((arg == F_RDLCK) && inode_is_open_for_write(inode))
		return -EAGAIN;

	if ((arg == F_WRLCK) && ((d_count(dentry) > 1) ||
	    (atomic_read(&inode->i_count) > 1)))
		ret = -EAGAIN;

	return ret;
}

static int
generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
{
	struct file_lock *fl, *my_fl = NULL, *lease;
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct file_lock_context *ctx;
	bool is_deleg = (*flp)->fl_flags & FL_DELEG;
	int error;
	LIST_HEAD(dispose);

	lease = *flp;
	trace_generic_add_lease(inode, lease);

	/* Note that arg is never F_UNLCK here */
	ctx = locks_get_lock_context(inode, arg);
	if (!ctx)
		return -ENOMEM;

	/*
	 * In the delegation case we need mutual exclusion with
	 * a number of operations that take the i_mutex. We trylock
	 * because delegations are an optional optimization, and if
	 * there's some chance of a conflict--we'd rather not
	 * bother, maybe that's a sign this just isn't a good file to
	 * hand out a delegation on.
	 */
	if (is_deleg && !inode_trylock(inode))
		return -EAGAIN;

	if (is_deleg && arg == F_WRLCK) {
		/* Write delegations are not currently supported: */
		inode_unlock(inode);
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	time_out_leases(inode, &dispose);
	error = check_conflicting_open(dentry, arg, lease->fl_flags);
	if (error)
		goto out;

	/*
	 * At this point, we know that if there is an exclusive
	 * lease on this file, then we hold it on this filp
	 * (otherwise our open of this file would have blocked).
	 * And if we are trying to acquire an exclusive lease,
	 * then the file is not open by anyone (including us)
	 * except for this filp.
	 */
	error = -EAGAIN;
	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
		if (fl->fl_file == filp &&
		    fl->fl_owner == lease->fl_owner) {
			my_fl = fl;
			continue;
		}

		/*
		 * No exclusive leases if someone else has a lease on
		 * this file:
		 */
		if (arg == F_WRLCK)
			goto out;
		/*
		 * Modifying our existing lease is OK, but no getting a
		 * new lease if someone else is opening for write:
		 */
		if (fl->fl_flags & FL_UNLOCK_PENDING)
			goto out;
	}

	if (my_fl != NULL) {
		lease = my_fl;
		error = lease->fl_lmops->lm_change(lease, arg, &dispose);
		if (error)
			goto out;
		goto out_setup;
	}

	error = -EINVAL;
	if (!leases_enable)
		goto out;

	locks_insert_lock_ctx(lease, &ctx->flc_lease);
	/*
	 * The check in break_lease() is lockless. It's possible for another
	 * open to race in after we did the earlier check for a conflicting
	 * open but before the lease was inserted. Check again for a
	 * conflicting open and cancel the lease if there is one.
	 *
	 * We also add a barrier here to ensure that the insertion of the lock
	 * precedes these checks.
	 */
1879 */ 1880 smp_mb(); 1881 error = check_conflicting_open(dentry, arg, lease->fl_flags); 1882 if (error) { 1883 locks_unlink_lock_ctx(lease); 1884 goto out; 1885 } 1886 1887 out_setup: 1888 if (lease->fl_lmops->lm_setup) 1889 lease->fl_lmops->lm_setup(lease, priv); 1890 out: 1891 spin_unlock(&ctx->flc_lock); 1892 percpu_up_read(&file_rwsem); 1893 locks_dispose_list(&dispose); 1894 if (is_deleg) 1895 inode_unlock(inode); 1896 if (!error && !my_fl) 1897 *flp = NULL; 1898 return error; 1899 } 1900 1901 static int generic_delete_lease(struct file *filp, void *owner) 1902 { 1903 int error = -EAGAIN; 1904 struct file_lock *fl, *victim = NULL; 1905 struct inode *inode = locks_inode(filp); 1906 struct file_lock_context *ctx; 1907 LIST_HEAD(dispose); 1908 1909 ctx = smp_load_acquire(&inode->i_flctx); 1910 if (!ctx) { 1911 trace_generic_delete_lease(inode, NULL); 1912 return error; 1913 } 1914 1915 percpu_down_read(&file_rwsem); 1916 spin_lock(&ctx->flc_lock); 1917 list_for_each_entry(fl, &ctx->flc_lease, fl_list) { 1918 if (fl->fl_file == filp && 1919 fl->fl_owner == owner) { 1920 victim = fl; 1921 break; 1922 } 1923 } 1924 trace_generic_delete_lease(inode, victim); 1925 if (victim) 1926 error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose); 1927 spin_unlock(&ctx->flc_lock); 1928 percpu_up_read(&file_rwsem); 1929 locks_dispose_list(&dispose); 1930 return error; 1931 } 1932 1933 /** 1934 * generic_setlease - sets a lease on an open file 1935 * @filp: file pointer 1936 * @arg: type of lease to obtain 1937 * @flp: input - file_lock to use, output - file_lock inserted 1938 * @priv: private data for lm_setup (may be NULL if lm_setup 1939 * doesn't require it) 1940 * 1941 * The (input) flp->fl_lmops->lm_break function is required 1942 * by break_lease(). 1943 */ 1944 int generic_setlease(struct file *filp, long arg, struct file_lock **flp, 1945 void **priv) 1946 { 1947 struct inode *inode = locks_inode(filp); 1948 int error; 1949 1950 if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE)) 1951 return -EACCES; 1952 if (!S_ISREG(inode->i_mode)) 1953 return -EINVAL; 1954 error = security_file_lock(filp, arg); 1955 if (error) 1956 return error; 1957 1958 switch (arg) { 1959 case F_UNLCK: 1960 return generic_delete_lease(filp, *priv); 1961 case F_RDLCK: 1962 case F_WRLCK: 1963 if (!(*flp)->fl_lmops->lm_break) { 1964 WARN_ON_ONCE(1); 1965 return -ENOLCK; 1966 } 1967 1968 return generic_add_lease(filp, arg, flp, priv); 1969 default: 1970 return -EINVAL; 1971 } 1972 } 1973 EXPORT_SYMBOL(generic_setlease); 1974 1975 /** 1976 * vfs_setlease - sets a lease on an open file 1977 * @filp: file pointer 1978 * @arg: type of lease to obtain 1979 * @lease: file_lock to use when adding a lease 1980 * @priv: private info for lm_setup when adding a lease (may be 1981 * NULL if lm_setup doesn't require it) 1982 * 1983 * Call this to establish a lease on the file. The "lease" argument is not 1984 * used for F_UNLCK requests and may be NULL. For commands that set or alter 1985 * an existing lease, the ``(*lease)->fl_lmops->lm_break`` operation must be 1986 * set; if not, this function will return -ENOLCK (and generate a scary-looking 1987 * stack trace). 1988 * 1989 * The "priv" pointer is passed directly to the lm_setup function as-is. It 1990 * may be NULL if the lm_setup operation doesn't require it. 
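 *
 * A typical caller follows the same pattern as do_fcntl_add_lease()
 * below (sketch only):
 *
 *	fl = lease_alloc(filp, arg);
 *	...
 *	error = vfs_setlease(filp, arg, &fl, &priv);
 *	if (fl)
 *		locks_free_lock(fl);	(freed when not consumed)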
1991 */ 1992 int 1993 vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv) 1994 { 1995 if (filp->f_op->setlease) 1996 return filp->f_op->setlease(filp, arg, lease, priv); 1997 else 1998 return generic_setlease(filp, arg, lease, priv); 1999 } 2000 EXPORT_SYMBOL_GPL(vfs_setlease); 2001 2002 static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg) 2003 { 2004 struct file_lock *fl; 2005 struct fasync_struct *new; 2006 int error; 2007 2008 fl = lease_alloc(filp, arg); 2009 if (IS_ERR(fl)) 2010 return PTR_ERR(fl); 2011 2012 new = fasync_alloc(); 2013 if (!new) { 2014 locks_free_lock(fl); 2015 return -ENOMEM; 2016 } 2017 new->fa_fd = fd; 2018 2019 error = vfs_setlease(filp, arg, &fl, (void **)&new); 2020 if (fl) 2021 locks_free_lock(fl); 2022 if (new) 2023 fasync_free(new); 2024 return error; 2025 } 2026 2027 /** 2028 * fcntl_setlease - sets a lease on an open file 2029 * @fd: open file descriptor 2030 * @filp: file pointer 2031 * @arg: type of lease to obtain 2032 * 2033 * Call this fcntl to establish a lease on the file. 2034 * Note that you also need to call %F_SETSIG to 2035 * receive a signal when the lease is broken. 2036 */ 2037 int fcntl_setlease(unsigned int fd, struct file *filp, long arg) 2038 { 2039 if (arg == F_UNLCK) 2040 return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp); 2041 return do_fcntl_add_lease(fd, filp, arg); 2042 } 2043 2044 /** 2045 * flock_lock_inode_wait - Apply a FLOCK-style lock to a file 2046 * @inode: inode of the file to apply to 2047 * @fl: The lock to be applied 2048 * 2049 * Apply a FLOCK-style lock request to an inode. 2050 */ 2051 static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl) 2052 { 2053 int error; 2054 might_sleep(); 2055 for (;;) { 2056 error = flock_lock_inode(inode, fl); 2057 if (error != FILE_LOCK_DEFERRED) 2058 break; 2059 error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker); 2060 if (error) 2061 break; 2062 } 2063 locks_delete_block(fl); 2064 return error; 2065 } 2066 2067 /** 2068 * locks_lock_inode_wait - Apply a lock to an inode 2069 * @inode: inode of the file to apply to 2070 * @fl: The lock to be applied 2071 * 2072 * Apply a POSIX- or FLOCK-style lock request to an inode. 2073 */ 2074 int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl) 2075 { 2076 int res = 0; 2077 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) { 2078 case FL_POSIX: 2079 res = posix_lock_inode_wait(inode, fl); 2080 break; 2081 case FL_FLOCK: 2082 res = flock_lock_inode_wait(inode, fl); 2083 break; 2084 default: 2085 BUG(); 2086 } 2087 return res; 2088 } 2089 EXPORT_SYMBOL(locks_lock_inode_wait); 2090 2091 /** 2092 * sys_flock - flock() system call. 2093 * @fd: the file descriptor to lock. 2094 * @cmd: the type of lock to apply. 2095 * 2096 * Apply a %FL_FLOCK style lock to an open file descriptor. 2097 * The @cmd can be one of: 2098 * 2099 * - %LOCK_SH -- a shared lock. 2100 * - %LOCK_EX -- an exclusive lock. 2101 * - %LOCK_UN -- remove an existing lock. 2102 * - %LOCK_MAND -- a 'mandatory' flock. 2103 * This exists to emulate Windows Share Modes. 2104 * 2105 * %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other 2106 * processes read and write access respectively.
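 *
 * Minimal userspace usage (hypothetical snippet, not part of this file):
 *
 *	int fd = open("/tmp/f", O_RDONLY);
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1 && errno == EWOULDBLOCK)
 *		;			(another holder; try again later)
 *	...
 *	flock(fd, LOCK_UN);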
2107 */ 2108 SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd) 2109 { 2110 struct fd f = fdget(fd); 2111 struct file_lock *lock; 2112 int can_sleep, unlock; 2113 int error; 2114 2115 error = -EBADF; 2116 if (!f.file) 2117 goto out; 2118 2119 can_sleep = !(cmd & LOCK_NB); 2120 cmd &= ~LOCK_NB; 2121 unlock = (cmd == LOCK_UN); 2122 2123 if (!unlock && !(cmd & LOCK_MAND) && 2124 !(f.file->f_mode & (FMODE_READ|FMODE_WRITE))) 2125 goto out_putf; 2126 2127 lock = flock_make_lock(f.file, cmd, NULL); 2128 if (IS_ERR(lock)) { 2129 error = PTR_ERR(lock); 2130 goto out_putf; 2131 } 2132 2133 if (can_sleep) 2134 lock->fl_flags |= FL_SLEEP; 2135 2136 error = security_file_lock(f.file, lock->fl_type); 2137 if (error) 2138 goto out_free; 2139 2140 if (f.file->f_op->flock) 2141 error = f.file->f_op->flock(f.file, 2142 (can_sleep) ? F_SETLKW : F_SETLK, 2143 lock); 2144 else 2145 error = locks_lock_file_wait(f.file, lock); 2146 2147 out_free: 2148 locks_free_lock(lock); 2149 2150 out_putf: 2151 fdput(f); 2152 out: 2153 return error; 2154 } 2155 2156 /** 2157 * vfs_test_lock - test file byte range lock 2158 * @filp: The file to test lock for 2159 * @fl: The lock to test; also used to hold result 2160 * 2161 * Returns -ERRNO on failure. Indicates presence of conflicting lock by 2162 * setting fl->fl_type to something other than F_UNLCK. 2163 */ 2164 int vfs_test_lock(struct file *filp, struct file_lock *fl) 2165 { 2166 if (filp->f_op->lock) 2167 return filp->f_op->lock(filp, F_GETLK, fl); 2168 posix_test_lock(filp, fl); 2169 return 0; 2170 } 2171 EXPORT_SYMBOL_GPL(vfs_test_lock); 2172 2173 /** 2174 * locks_translate_pid - translate a file_lock's fl_pid number into a namespace 2175 * @fl: The file_lock whose fl_pid should be translated 2176 * @ns: The namespace into which the pid should be translated 2177 * 2178 * Used to translate a fl_pid into a namespace virtual pid number 2179 */ 2180 static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns) 2181 { 2182 pid_t vnr; 2183 struct pid *pid; 2184 2185 if (IS_OFDLCK(fl)) 2186 return -1; 2187 if (IS_REMOTELCK(fl)) 2188 return fl->fl_pid; 2189 /* 2190 * If the flock owner process is dead and its pid has already been 2191 * freed, the translation below won't work, but we still want to show 2192 * the flock owner's pid number in the init pidns. 2193 */ 2194 if (ns == &init_pid_ns) 2195 return (pid_t)fl->fl_pid; 2196 2197 rcu_read_lock(); 2198 pid = find_pid_ns(fl->fl_pid, &init_pid_ns); 2199 vnr = pid_nr_ns(pid, ns); 2200 rcu_read_unlock(); 2201 return vnr; 2202 } 2203 2204 static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl) 2205 { 2206 flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current)); 2207 #if BITS_PER_LONG == 32 2208 /* 2209 * Make sure we can represent the posix lock via 2210 * legacy 32bit flock. 2211 */ 2212 if (fl->fl_start > OFFT_OFFSET_MAX) 2213 return -EOVERFLOW; 2214 if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX) 2215 return -EOVERFLOW; 2216 #endif 2217 flock->l_start = fl->fl_start; 2218 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 : 2219 fl->fl_end - fl->fl_start + 1; 2220 flock->l_whence = 0; 2221 flock->l_type = fl->fl_type; 2222 return 0; 2223 } 2224 2225 #if BITS_PER_LONG == 32 2226 static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl) 2227 { 2228 flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current)); 2229 flock->l_start = fl->fl_start; 2230 flock->l_len = fl->fl_end == OFFSET_MAX ?
0 : 2231 fl->fl_end - fl->fl_start + 1; 2232 flock->l_whence = 0; 2233 flock->l_type = fl->fl_type; 2234 } 2235 #endif 2236 2237 /* Report the first existing lock that would conflict with the lock 2238 * described by flock. This implements the F_GETLK command of fcntl(). 2239 */ 2240 int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock) 2241 { 2242 struct file_lock *fl; 2243 int error; 2244 2245 fl = locks_alloc_lock(); 2246 if (fl == NULL) 2247 return -ENOMEM; 2248 error = -EINVAL; 2249 if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK) 2250 goto out; 2251 2252 error = flock_to_posix_lock(filp, fl, flock); 2253 if (error) 2254 goto out; 2255 2256 if (cmd == F_OFD_GETLK) { 2257 error = -EINVAL; 2258 if (flock->l_pid != 0) 2259 goto out; 2260 2261 cmd = F_GETLK; 2262 fl->fl_flags |= FL_OFDLCK; 2263 fl->fl_owner = filp; 2264 } 2265 2266 error = vfs_test_lock(filp, fl); 2267 if (error) 2268 goto out; 2269 2270 flock->l_type = fl->fl_type; 2271 if (fl->fl_type != F_UNLCK) { 2272 error = posix_lock_to_flock(flock, fl); 2273 if (error) 2274 goto out; 2275 } 2276 out: 2277 locks_free_lock(fl); 2278 return error; 2279 } 2280 2281 /** 2282 * vfs_lock_file - file byte range lock 2283 * @filp: The file to apply the lock to 2284 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.) 2285 * @fl: The lock to be applied 2286 * @conf: Place to return a copy of the conflicting lock, if found. 2287 * 2288 * A caller that doesn't care about the conflicting lock may pass NULL 2289 * as the final argument. 2290 * 2291 * If the filesystem defines a private ->lock() method, then @conf will 2292 * be left unchanged; so a caller that cares should initialize it to 2293 * some acceptable default. 2294 * 2295 * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX 2296 * locks, the ->lock() interface may return asynchronously, before the lock has 2297 * been granted or denied by the underlying filesystem, if (and only if) 2298 * lm_grant is set. Callers expecting ->lock() to return asynchronously 2299 * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if) 2300 * the request is for a blocking lock. When ->lock() does return asynchronously, 2301 * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock 2302 * request completes. 2303 * If the request is for a non-blocking lock, the filesystem should return 2304 * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine 2305 * with the result. If the request times out, the callback routine will return 2306 * a nonzero return code and the filesystem should release the lock. The 2307 * filesystem is also responsible for keeping a corresponding posix lock when 2308 * it grants a lock, so the VFS can find out which locks are locally held and 2309 * do the correct lock cleanup when required. 2310 * The underlying filesystem must not drop the kernel lock or call 2311 * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED 2312 * return code.
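 *
 * Sketch of the asynchronous case (hypothetical filesystem "myfs",
 * illustrative only):
 *
 *	static int myfs_lock(struct file *filp, int cmd, struct file_lock *fl)
 *	{
 *		myfs_queue_lock_request(filp, fl);	(hypothetical helper)
 *		return FILE_LOCK_DEFERRED;
 *	}
 *
 * and later, once the backend reports a result:
 *
 *	fl->fl_lmops->lm_grant(fl, result);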
2313 */ 2314 int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf) 2315 { 2316 if (filp->f_op->lock) 2317 return filp->f_op->lock(filp, cmd, fl); 2318 else 2319 return posix_lock_file(filp, fl, conf); 2320 } 2321 EXPORT_SYMBOL_GPL(vfs_lock_file); 2322 2323 static int do_lock_file_wait(struct file *filp, unsigned int cmd, 2324 struct file_lock *fl) 2325 { 2326 int error; 2327 2328 error = security_file_lock(filp, fl->fl_type); 2329 if (error) 2330 return error; 2331 2332 for (;;) { 2333 error = vfs_lock_file(filp, cmd, fl, NULL); 2334 if (error != FILE_LOCK_DEFERRED) 2335 break; 2336 error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker); 2337 if (error) 2338 break; 2339 } 2340 locks_delete_block(fl); 2341 2342 return error; 2343 } 2344 2345 /* Ensure that fl->fl_file has compatible f_mode for F_SETLK calls */ 2346 static int 2347 check_fmode_for_setlk(struct file_lock *fl) 2348 { 2349 switch (fl->fl_type) { 2350 case F_RDLCK: 2351 if (!(fl->fl_file->f_mode & FMODE_READ)) 2352 return -EBADF; 2353 break; 2354 case F_WRLCK: 2355 if (!(fl->fl_file->f_mode & FMODE_WRITE)) 2356 return -EBADF; 2357 } 2358 return 0; 2359 } 2360 2361 /* Apply the lock described by flock to an open file descriptor. 2362 * This implements both the F_SETLK and F_SETLKW commands of fcntl(). 2363 */ 2364 int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd, 2365 struct flock *flock) 2366 { 2367 struct file_lock *file_lock = locks_alloc_lock(); 2368 struct inode *inode = locks_inode(filp); 2369 struct file *f; 2370 int error; 2371 2372 if (file_lock == NULL) 2373 return -ENOLCK; 2374 2375 /* Don't allow mandatory locks on files that may be memory mapped 2376 * and shared. 2377 */ 2378 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) { 2379 error = -EAGAIN; 2380 goto out; 2381 } 2382 2383 error = flock_to_posix_lock(filp, file_lock, flock); 2384 if (error) 2385 goto out; 2386 2387 error = check_fmode_for_setlk(file_lock); 2388 if (error) 2389 goto out; 2390 2391 /* 2392 * If the cmd is requesting file-private locks, then set the 2393 * FL_OFDLCK flag and override the owner. 2394 */ 2395 switch (cmd) { 2396 case F_OFD_SETLK: 2397 error = -EINVAL; 2398 if (flock->l_pid != 0) 2399 goto out; 2400 2401 cmd = F_SETLK; 2402 file_lock->fl_flags |= FL_OFDLCK; 2403 file_lock->fl_owner = filp; 2404 break; 2405 case F_OFD_SETLKW: 2406 error = -EINVAL; 2407 if (flock->l_pid != 0) 2408 goto out; 2409 2410 cmd = F_SETLKW; 2411 file_lock->fl_flags |= FL_OFDLCK; 2412 file_lock->fl_owner = filp; 2413 /* Fallthrough */ 2414 case F_SETLKW: 2415 file_lock->fl_flags |= FL_SLEEP; 2416 } 2417 2418 error = do_lock_file_wait(filp, cmd, file_lock); 2419 2420 /* 2421 * Attempt to detect a close/fcntl race and recover by releasing the 2422 * lock that was just acquired. There is no need to do that when we're 2423 * unlocking though, or for OFD locks. 2424 */ 2425 if (!error && file_lock->fl_type != F_UNLCK && 2426 !(file_lock->fl_flags & FL_OFDLCK)) { 2427 /* 2428 * We need that spin_lock here - it prevents reordering between 2429 * update of i_flctx->flc_posix and check for it done in 2430 * close(). rcu_read_lock() wouldn't do.
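 *
 * The race being handled, as a timeline (illustrative):
 *
 *	task A: fcntl(F_SETLK)		task B: close(fd)
 *	----------------------		-----------------
 *					fd table entry cleared
 *					locks on filp released
 *	lock acquired on filp
 *	fcheck(fd) != filp
 *	release the lock, -EBADF
 *
 * Without the recheck, task A's lock would outlive the close() cleanup.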
2431 */ 2432 spin_lock(&current->files->file_lock); 2433 f = fcheck(fd); 2434 spin_unlock(&current->files->file_lock); 2435 if (f != filp) { 2436 file_lock->fl_type = F_UNLCK; 2437 error = do_lock_file_wait(filp, cmd, file_lock); 2438 WARN_ON_ONCE(error); 2439 error = -EBADF; 2440 } 2441 } 2442 out: 2443 trace_fcntl_setlk(inode, file_lock, error); 2444 locks_free_lock(file_lock); 2445 return error; 2446 } 2447 2448 #if BITS_PER_LONG == 32 2449 /* Report the first existing lock that would conflict with the lock 2450 * described by flock. This implements the F_GETLK command of fcntl(). 2451 */ 2452 int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock) 2453 { 2454 struct file_lock *fl; 2455 int error; 2456 2457 fl = locks_alloc_lock(); 2458 if (fl == NULL) 2459 return -ENOMEM; 2460 2461 error = -EINVAL; 2462 if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK) 2463 goto out; 2464 2465 error = flock64_to_posix_lock(filp, fl, flock); 2466 if (error) 2467 goto out; 2468 2469 if (cmd == F_OFD_GETLK) { 2470 error = -EINVAL; 2471 if (flock->l_pid != 0) 2472 goto out; 2473 2474 cmd = F_GETLK64; 2475 fl->fl_flags |= FL_OFDLCK; 2476 fl->fl_owner = filp; 2477 } 2478 2479 error = vfs_test_lock(filp, fl); 2480 if (error) 2481 goto out; 2482 2483 flock->l_type = fl->fl_type; 2484 if (fl->fl_type != F_UNLCK) 2485 posix_lock_to_flock64(flock, fl); 2486 2487 out: 2488 locks_free_lock(fl); 2489 return error; 2490 } 2491 2492 /* Apply the lock described by flock to an open file descriptor. 2493 * This implements both the F_SETLK and F_SETLKW commands of fcntl(). 2494 */ 2495 int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd, 2496 struct flock64 *flock) 2497 { 2498 struct file_lock *file_lock = locks_alloc_lock(); 2499 struct inode *inode = locks_inode(filp); 2500 struct file *f; 2501 int error; 2502 2503 if (file_lock == NULL) 2504 return -ENOLCK; 2505 2506 /* Don't allow mandatory locks on files that may be memory mapped 2507 * and shared. 2508 */ 2509 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) { 2510 error = -EAGAIN; 2511 goto out; 2512 } 2513 2514 error = flock64_to_posix_lock(filp, file_lock, flock); 2515 if (error) 2516 goto out; 2517 2518 error = check_fmode_for_setlk(file_lock); 2519 if (error) 2520 goto out; 2521 2522 /* 2523 * If the cmd is requesting file-private locks, then set the 2524 * FL_OFDLCK flag and override the owner. 2525 */ 2526 switch (cmd) { 2527 case F_OFD_SETLK: 2528 error = -EINVAL; 2529 if (flock->l_pid != 0) 2530 goto out; 2531 2532 cmd = F_SETLK64; 2533 file_lock->fl_flags |= FL_OFDLCK; 2534 file_lock->fl_owner = filp; 2535 break; 2536 case F_OFD_SETLKW: 2537 error = -EINVAL; 2538 if (flock->l_pid != 0) 2539 goto out; 2540 2541 cmd = F_SETLKW64; 2542 file_lock->fl_flags |= FL_OFDLCK; 2543 file_lock->fl_owner = filp; 2544 /* Fallthrough */ 2545 case F_SETLKW64: 2546 file_lock->fl_flags |= FL_SLEEP; 2547 } 2548 2549 error = do_lock_file_wait(filp, cmd, file_lock); 2550 2551 /* 2552 * Attempt to detect a close/fcntl race and recover by releasing the 2553 * lock that was just acquired. There is no need to do that when we're 2554 * unlocking though, or for OFD locks. 2555 */ 2556 if (!error && file_lock->fl_type != F_UNLCK && 2557 !(file_lock->fl_flags & FL_OFDLCK)) { 2558 /* 2559 * We need that spin_lock here - it prevents reordering between 2560 * update of i_flctx->flc_posix and check for it done in 2561 * close(). rcu_read_lock() wouldn't do.
2562 */ 2563 spin_lock(&current->files->file_lock); 2564 f = fcheck(fd); 2565 spin_unlock(&current->files->file_lock); 2566 if (f != filp) { 2567 file_lock->fl_type = F_UNLCK; 2568 error = do_lock_file_wait(filp, cmd, file_lock); 2569 WARN_ON_ONCE(error); 2570 error = -EBADF; 2571 } 2572 } 2573 out: 2574 locks_free_lock(file_lock); 2575 return error; 2576 } 2577 #endif /* BITS_PER_LONG == 32 */ 2578 2579 /* 2580 * This function is called when the file is being removed 2581 * from the task's fd array. POSIX locks belonging to this task 2582 * are deleted at this time. 2583 */ 2584 void locks_remove_posix(struct file *filp, fl_owner_t owner) 2585 { 2586 int error; 2587 struct inode *inode = locks_inode(filp); 2588 struct file_lock lock; 2589 struct file_lock_context *ctx; 2590 2591 /* 2592 * If there are no locks held on this file, we don't need to call 2593 * posix_lock_file(). Another process could be setting a lock on this 2594 * file at the same time, but we wouldn't remove that lock anyway. 2595 */ 2596 ctx = smp_load_acquire(&inode->i_flctx); 2597 if (!ctx || list_empty(&ctx->flc_posix)) 2598 return; 2599 2600 locks_init_lock(&lock); 2601 lock.fl_type = F_UNLCK; 2602 lock.fl_flags = FL_POSIX | FL_CLOSE; 2603 lock.fl_start = 0; 2604 lock.fl_end = OFFSET_MAX; 2605 lock.fl_owner = owner; 2606 lock.fl_pid = current->tgid; 2607 lock.fl_file = filp; 2608 lock.fl_ops = NULL; 2609 lock.fl_lmops = NULL; 2610 2611 error = vfs_lock_file(filp, F_SETLK, &lock, NULL); 2612 2613 if (lock.fl_ops && lock.fl_ops->fl_release_private) 2614 lock.fl_ops->fl_release_private(&lock); 2615 trace_locks_remove_posix(inode, &lock, error); 2616 } 2617 EXPORT_SYMBOL(locks_remove_posix); 2618 2619 /* The i_flctx must be valid when calling into here */ 2620 static void 2621 locks_remove_flock(struct file *filp, struct file_lock_context *flctx) 2622 { 2623 struct file_lock fl; 2624 struct inode *inode = locks_inode(filp); 2625 2626 if (list_empty(&flctx->flc_flock)) 2627 return; 2628 2629 flock_make_lock(filp, LOCK_UN, &fl); 2630 fl.fl_flags |= FL_CLOSE; 2631 2632 if (filp->f_op->flock) 2633 filp->f_op->flock(filp, F_SETLKW, &fl); 2634 else 2635 flock_lock_inode(inode, &fl); 2636 2637 if (fl.fl_ops && fl.fl_ops->fl_release_private) 2638 fl.fl_ops->fl_release_private(&fl); 2639 } 2640 2641 /* The i_flctx must be valid when calling into here */ 2642 static void 2643 locks_remove_lease(struct file *filp, struct file_lock_context *ctx) 2644 { 2645 struct file_lock *fl, *tmp; 2646 LIST_HEAD(dispose); 2647 2648 if (list_empty(&ctx->flc_lease)) 2649 return; 2650 2651 percpu_down_read(&file_rwsem); 2652 spin_lock(&ctx->flc_lock); 2653 list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) 2654 if (filp == fl->fl_file) 2655 lease_modify(fl, F_UNLCK, &dispose); 2656 spin_unlock(&ctx->flc_lock); 2657 percpu_up_read(&file_rwsem); 2658 2659 locks_dispose_list(&dispose); 2660 } 2661 2662 /* 2663 * This function is called on the last close of an open file.
2664 */ 2665 void locks_remove_file(struct file *filp) 2666 { 2667 struct file_lock_context *ctx; 2668 2669 ctx = smp_load_acquire(&locks_inode(filp)->i_flctx); 2670 if (!ctx) 2671 return; 2672 2673 /* remove any OFD locks */ 2674 locks_remove_posix(filp, filp); 2675 2676 /* remove flock locks */ 2677 locks_remove_flock(filp, ctx); 2678 2679 /* remove any leases */ 2680 locks_remove_lease(filp, ctx); 2681 2682 spin_lock(&ctx->flc_lock); 2683 locks_check_ctx_file_list(filp, &ctx->flc_posix, "POSIX"); 2684 locks_check_ctx_file_list(filp, &ctx->flc_flock, "FLOCK"); 2685 locks_check_ctx_file_list(filp, &ctx->flc_lease, "LEASE"); 2686 spin_unlock(&ctx->flc_lock); 2687 } 2688 2689 /** 2690 * vfs_cancel_lock - file byte range unblock lock 2691 * @filp: The file to apply the unblock to 2692 * @fl: The lock to be unblocked 2693 * 2694 * Used by lock managers to cancel blocked requests 2695 */ 2696 int vfs_cancel_lock(struct file *filp, struct file_lock *fl) 2697 { 2698 if (filp->f_op->lock) 2699 return filp->f_op->lock(filp, F_CANCELLK, fl); 2700 return 0; 2701 } 2702 EXPORT_SYMBOL_GPL(vfs_cancel_lock); 2703 2704 #ifdef CONFIG_PROC_FS 2705 #include <linux/proc_fs.h> 2706 #include <linux/seq_file.h> 2707 2708 struct locks_iterator { 2709 int li_cpu; 2710 loff_t li_pos; 2711 }; 2712 2713 static void lock_get_status(struct seq_file *f, struct file_lock *fl, 2714 loff_t id, char *pfx) 2715 { 2716 struct inode *inode = NULL; 2717 unsigned int fl_pid; 2718 struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info; 2719 2720 fl_pid = locks_translate_pid(fl, proc_pidns); 2721 /* 2722 * If lock owner is dead (and pid is freed) or not visible in current 2723 * pidns, zero is shown as a pid value. Check lock info from 2724 * init_pid_ns to get saved lock pid value. 2725 */ 2726 2727 if (fl->fl_file != NULL) 2728 inode = locks_inode(fl->fl_file); 2729 2730 seq_printf(f, "%lld:%s ", id, pfx); 2731 if (IS_POSIX(fl)) { 2732 if (fl->fl_flags & FL_ACCESS) 2733 seq_puts(f, "ACCESS"); 2734 else if (IS_OFDLCK(fl)) 2735 seq_puts(f, "OFDLCK"); 2736 else 2737 seq_puts(f, "POSIX "); 2738 2739 seq_printf(f, " %s ", 2740 (inode == NULL) ? "*NOINODE*" : 2741 mandatory_lock(inode) ? "MANDATORY" : "ADVISORY "); 2742 } else if (IS_FLOCK(fl)) { 2743 if (fl->fl_type & LOCK_MAND) { 2744 seq_puts(f, "FLOCK MSNFS "); 2745 } else { 2746 seq_puts(f, "FLOCK ADVISORY "); 2747 } 2748 } else if (IS_LEASE(fl)) { 2749 if (fl->fl_flags & FL_DELEG) 2750 seq_puts(f, "DELEG "); 2751 else 2752 seq_puts(f, "LEASE "); 2753 2754 if (lease_breaking(fl)) 2755 seq_puts(f, "BREAKING "); 2756 else if (fl->fl_file) 2757 seq_puts(f, "ACTIVE "); 2758 else 2759 seq_puts(f, "BREAKER "); 2760 } else { 2761 seq_puts(f, "UNKNOWN UNKNOWN "); 2762 } 2763 if (fl->fl_type & LOCK_MAND) { 2764 seq_printf(f, "%s ", 2765 (fl->fl_type & LOCK_READ) 2766 ? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ " 2767 : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE "); 2768 } else { 2769 seq_printf(f, "%s ", 2770 (lease_breaking(fl)) 2771 ? (fl->fl_type == F_UNLCK) ? "UNLCK" : "READ " 2772 : (fl->fl_type == F_WRLCK) ? 
"WRITE" : "READ "); 2773 } 2774 if (inode) { 2775 /* userspace relies on this representation of dev_t */ 2776 seq_printf(f, "%d %02x:%02x:%ld ", fl_pid, 2777 MAJOR(inode->i_sb->s_dev), 2778 MINOR(inode->i_sb->s_dev), inode->i_ino); 2779 } else { 2780 seq_printf(f, "%d <none>:0 ", fl_pid); 2781 } 2782 if (IS_POSIX(fl)) { 2783 if (fl->fl_end == OFFSET_MAX) 2784 seq_printf(f, "%Ld EOF\n", fl->fl_start); 2785 else 2786 seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end); 2787 } else { 2788 seq_puts(f, "0 EOF\n"); 2789 } 2790 } 2791 2792 static int locks_show(struct seq_file *f, void *v) 2793 { 2794 struct locks_iterator *iter = f->private; 2795 struct file_lock *fl, *bfl; 2796 struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info; 2797 2798 fl = hlist_entry(v, struct file_lock, fl_link); 2799 2800 if (locks_translate_pid(fl, proc_pidns) == 0) 2801 return 0; 2802 2803 lock_get_status(f, fl, iter->li_pos, ""); 2804 2805 list_for_each_entry(bfl, &fl->fl_blocked_requests, fl_blocked_member) 2806 lock_get_status(f, bfl, iter->li_pos, " ->"); 2807 2808 return 0; 2809 } 2810 2811 static void __show_fd_locks(struct seq_file *f, 2812 struct list_head *head, int *id, 2813 struct file *filp, struct files_struct *files) 2814 { 2815 struct file_lock *fl; 2816 2817 list_for_each_entry(fl, head, fl_list) { 2818 2819 if (filp != fl->fl_file) 2820 continue; 2821 if (fl->fl_owner != files && 2822 fl->fl_owner != filp) 2823 continue; 2824 2825 (*id)++; 2826 seq_puts(f, "lock:\t"); 2827 lock_get_status(f, fl, *id, ""); 2828 } 2829 } 2830 2831 void show_fd_locks(struct seq_file *f, 2832 struct file *filp, struct files_struct *files) 2833 { 2834 struct inode *inode = locks_inode(filp); 2835 struct file_lock_context *ctx; 2836 int id = 0; 2837 2838 ctx = smp_load_acquire(&inode->i_flctx); 2839 if (!ctx) 2840 return; 2841 2842 spin_lock(&ctx->flc_lock); 2843 __show_fd_locks(f, &ctx->flc_flock, &id, filp, files); 2844 __show_fd_locks(f, &ctx->flc_posix, &id, filp, files); 2845 __show_fd_locks(f, &ctx->flc_lease, &id, filp, files); 2846 spin_unlock(&ctx->flc_lock); 2847 } 2848 2849 static void *locks_start(struct seq_file *f, loff_t *pos) 2850 __acquires(&blocked_lock_lock) 2851 { 2852 struct locks_iterator *iter = f->private; 2853 2854 iter->li_pos = *pos + 1; 2855 percpu_down_write(&file_rwsem); 2856 spin_lock(&blocked_lock_lock); 2857 return seq_hlist_start_percpu(&file_lock_list.hlist, &iter->li_cpu, *pos); 2858 } 2859 2860 static void *locks_next(struct seq_file *f, void *v, loff_t *pos) 2861 { 2862 struct locks_iterator *iter = f->private; 2863 2864 ++iter->li_pos; 2865 return seq_hlist_next_percpu(v, &file_lock_list.hlist, &iter->li_cpu, pos); 2866 } 2867 2868 static void locks_stop(struct seq_file *f, void *v) 2869 __releases(&blocked_lock_lock) 2870 { 2871 spin_unlock(&blocked_lock_lock); 2872 percpu_up_write(&file_rwsem); 2873 } 2874 2875 static const struct seq_operations locks_seq_operations = { 2876 .start = locks_start, 2877 .next = locks_next, 2878 .stop = locks_stop, 2879 .show = locks_show, 2880 }; 2881 2882 static int __init proc_locks_init(void) 2883 { 2884 proc_create_seq_private("locks", 0, NULL, &locks_seq_operations, 2885 sizeof(struct locks_iterator), NULL); 2886 return 0; 2887 } 2888 fs_initcall(proc_locks_init); 2889 #endif 2890 2891 static int __init filelock_init(void) 2892 { 2893 int i; 2894 2895 flctx_cache = kmem_cache_create("file_lock_ctx", 2896 sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL); 2897 2898 filelock_cache = kmem_cache_create("file_lock_cache", 2899 
sizeof(struct file_lock), 0, SLAB_PANIC, NULL); 2900 2901 for_each_possible_cpu(i) { 2902 struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i); 2903 2904 spin_lock_init(&fll->lock); 2905 INIT_HLIST_HEAD(&fll->hlist); 2906 } 2907 2908 return 0; 2909 } 2910 core_initcall(filelock_init); 2911
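/*
 * Example /proc/locks output as rendered by lock_get_status() above
 * (illustrative values only):
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:02:131090 0 EOF
 *	2: FLOCK  ADVISORY  WRITE 1235 08:02:131091 0 EOF
 *	2: -> FLOCK  ADVISORY  WRITE 1236 08:02:131091 0 EOF
 *
 * The "->" entries are requests blocked on the lock reported just above
 * them.
 */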