/* AFS file locking support
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/smp_lock.h>
#include "internal.h"

#define AFS_LOCK_GRANTED	0
#define AFS_LOCK_PENDING	1
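
/*
 * Note on lock state: a lock's fl_u.afs.state is AFS_LOCK_PENDING whilst the
 * lock sits on the vnode's pending_locks queue, becomes AFS_LOCK_GRANTED once
 * it is granted, or is set to a negative errno if the attempt fails; waiters
 * in afs_do_setlk() sleep until the state drops to AFS_LOCK_GRANTED or below.
 */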

static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl);
static void afs_fl_release_private(struct file_lock *fl);

static struct workqueue_struct *afs_lock_manager;
static DEFINE_MUTEX(afs_lock_manager_mutex);

static struct file_lock_operations afs_lock_ops = {
	.fl_copy_lock		= afs_fl_copy_lock,
	.fl_release_private	= afs_fl_release_private,
};

/*
 * initialise the lock manager thread if it isn't already running
 */
static int afs_init_lock_manager(void)
{
	int ret;

	ret = 0;
	if (!afs_lock_manager) {
		mutex_lock(&afs_lock_manager_mutex);
		if (!afs_lock_manager) {
			afs_lock_manager =
				create_singlethread_workqueue("kafs_lockd");
			if (!afs_lock_manager)
				ret = -ENOMEM;
		}
		mutex_unlock(&afs_lock_manager_mutex);
	}
	return ret;
}

/*
 * destroy the lock manager thread if it's running
 */
void __exit afs_kill_lock_manager(void)
{
	if (afs_lock_manager)
		destroy_workqueue(afs_lock_manager);
}

/*
 * if the callback is broken on this vnode, then the lock may now be available
 */
void afs_lock_may_be_available(struct afs_vnode *vnode)
{
	_enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode);

	queue_delayed_work(afs_lock_manager, &vnode->lock_work, 0);
}

/*
 * the lock will time out in 5 minutes unless we extend it, so schedule
 * extension in a bit less than that time
 */
static void afs_schedule_lock_extension(struct afs_vnode *vnode)
{
	queue_delayed_work(afs_lock_manager, &vnode->lock_work,
			   AFS_LOCKWAIT * HZ / 2);
}

/*
 * grant one or more locks (readlocks are allowed to jump the queue if the
 * first lock in the queue is itself a readlock)
 * - the caller must hold the vnode lock
 */
static void afs_grant_locks(struct afs_vnode *vnode, struct file_lock *fl)
{
	struct file_lock *p, *_p;

	list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
	if (fl->fl_type == F_RDLCK) {
		list_for_each_entry_safe(p, _p, &vnode->pending_locks,
					 fl_u.afs.link) {
			if (p->fl_type == F_RDLCK) {
				p->fl_u.afs.state = AFS_LOCK_GRANTED;
				list_move_tail(&p->fl_u.afs.link,
					       &vnode->granted_locks);
				wake_up(&p->fl_wait);
			}
		}
	}
}

/*
 * do work for a lock, including:
 * - probing for a lock we're waiting on but didn't get immediately
 * - extending a lock that's close to timing out
 * - releasing a server lock whose unlocking was deferred by afs_defer_unlock()
 */
void afs_lock_work(struct work_struct *work)
{
	struct afs_vnode *vnode =
		container_of(work, struct afs_vnode, lock_work.work);
	struct file_lock *fl;
	afs_lock_type_t type;
	struct key *key;
	int ret;

	_enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode);

	spin_lock(&vnode->lock);

	if (test_bit(AFS_VNODE_UNLOCKING, &vnode->flags)) {
		_debug("unlock");
		spin_unlock(&vnode->lock);

		/* attempt to release the server lock; if it fails, we just
		 * wait 5 minutes and it'll time out anyway */
		ret = afs_vnode_release_lock(vnode, vnode->unlock_key);
		if (ret < 0)
			printk(KERN_WARNING "AFS:"
			       " Failed to release lock on {%x:%x} error %d\n",
			       vnode->fid.vid, vnode->fid.vnode, ret);

		spin_lock(&vnode->lock);
		key_put(vnode->unlock_key);
		vnode->unlock_key = NULL;
		clear_bit(AFS_VNODE_UNLOCKING, &vnode->flags);
	}

	/* if we've got a lock, then it must be time to extend that lock as AFS
	 * locks time out after 5 minutes */
	if (!list_empty(&vnode->granted_locks)) {
		_debug("extend");

		if (test_and_set_bit(AFS_VNODE_LOCKING, &vnode->flags))
			BUG();
		fl = list_entry(vnode->granted_locks.next,
				struct file_lock, fl_u.afs.link);
		key = key_get(fl->fl_file->private_data);
		spin_unlock(&vnode->lock);

		ret = afs_vnode_extend_lock(vnode, key);
		clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
		key_put(key);
		switch (ret) {
		case 0:
			afs_schedule_lock_extension(vnode);
			break;
		default:
			/* ummm... we failed to extend the lock - retry
			 * extension shortly */
			printk(KERN_WARNING "AFS:"
			       " Failed to extend lock on {%x:%x} error %d\n",
			       vnode->fid.vid, vnode->fid.vnode, ret);
			queue_delayed_work(afs_lock_manager, &vnode->lock_work,
					   HZ * 10);
			break;
		}
		_leave(" [extend]");
		return;
	}

	/* if we don't have a granted lock, then we must've been called back by
	 * the server, and so it might be possible to get a lock we're
	 * currently waiting for */
	if (!list_empty(&vnode->pending_locks)) {
		_debug("get");

		if (test_and_set_bit(AFS_VNODE_LOCKING, &vnode->flags))
			BUG();
		fl = list_entry(vnode->pending_locks.next,
				struct file_lock, fl_u.afs.link);
		key = key_get(fl->fl_file->private_data);
		type = (fl->fl_type == F_RDLCK) ?
			AFS_LOCK_READ : AFS_LOCK_WRITE;
		spin_unlock(&vnode->lock);

		ret = afs_vnode_set_lock(vnode, key, type);
		clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
		switch (ret) {
		case -EWOULDBLOCK:
			_debug("blocked");
			break;
		case 0:
			_debug("acquired");
			if (type == AFS_LOCK_READ)
				set_bit(AFS_VNODE_READLOCKED, &vnode->flags);
			else
				set_bit(AFS_VNODE_WRITELOCKED, &vnode->flags);
			ret = AFS_LOCK_GRANTED;
			/* fall through to record the result */
		default:
			spin_lock(&vnode->lock);
			/* the pending lock may have been withdrawn due to a
			 * signal */
			if (list_entry(vnode->pending_locks.next,
				       struct file_lock, fl_u.afs.link) == fl) {
				fl->fl_u.afs.state = ret;
				if (ret == AFS_LOCK_GRANTED)
					afs_grant_locks(vnode, fl);
				else
					list_del_init(&fl->fl_u.afs.link);
				wake_up(&fl->fl_wait);
				spin_unlock(&vnode->lock);
			} else {
				_debug("withdrawn");
				clear_bit(AFS_VNODE_READLOCKED, &vnode->flags);
				clear_bit(AFS_VNODE_WRITELOCKED, &vnode->flags);
				spin_unlock(&vnode->lock);
				afs_vnode_release_lock(vnode, key);
				if (!list_empty(&vnode->pending_locks))
					afs_lock_may_be_available(vnode);
			}
			break;
		}
		key_put(key);
		_leave(" [pend]");
		return;
	}

	/* looks like the lock request was withdrawn on a signal */
	spin_unlock(&vnode->lock);
	_leave(" [no locks]");
}

/*
 * pass responsibility for the unlocking of a vnode on the server to the
 * manager thread, lest a pending signal in the calling thread interrupt
 * AF_RXRPC
 * - the caller must hold the vnode lock
 */
static void afs_defer_unlock(struct afs_vnode *vnode, struct key *key)
{
	cancel_delayed_work(&vnode->lock_work);
	if (!test_and_clear_bit(AFS_VNODE_READLOCKED, &vnode->flags) &&
	    !test_and_clear_bit(AFS_VNODE_WRITELOCKED, &vnode->flags))
		BUG();
	if (test_and_set_bit(AFS_VNODE_UNLOCKING, &vnode->flags))
		BUG();
	vnode->unlock_key = key_get(key);
	afs_lock_may_be_available(vnode);
}
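
/*
 * Overview of the setlk path below: a new readlock can share an existing
 * server readlock straight away; if the vnode has no local locks at all, we
 * ask the server directly; otherwise the request is queued on pending_locks
 * and the caller sleeps until the lock manager obtains the lock on its behalf
 * (or until the wait is interrupted by a signal).
 */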

/*
 * request a lock on a file on the server
 */
static int afs_do_setlk(struct file *file, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host);
	afs_lock_type_t type;
	struct key *key = file->private_data;
	int ret;

	_enter("{%x:%u},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);

	/* only whole-file locks are supported */
	if (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX)
		return -EINVAL;

	ret = afs_init_lock_manager();
	if (ret < 0)
		return ret;

	fl->fl_ops = &afs_lock_ops;
	INIT_LIST_HEAD(&fl->fl_u.afs.link);
	fl->fl_u.afs.state = AFS_LOCK_PENDING;

	type = (fl->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;

	lock_kernel();

	/* make sure we've got a callback on this file and that our view of the
	 * data version is up to date */
	ret = afs_vnode_fetch_status(vnode, NULL, key);
	if (ret < 0)
		goto error;

	if (vnode->status.lock_count != 0 && !(fl->fl_flags & FL_SLEEP)) {
		ret = -EAGAIN;
		goto error;
	}

	spin_lock(&vnode->lock);

	/* if we've already got a readlock on the server then we can instantly
	 * grant another readlock, irrespective of whether there are any
	 * pending writelocks */
	if (type == AFS_LOCK_READ &&
	    vnode->flags & (1 << AFS_VNODE_READLOCKED)) {
		_debug("instant readlock");
		ASSERTCMP(vnode->flags &
			  ((1 << AFS_VNODE_LOCKING) |
			   (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
		ASSERT(!list_empty(&vnode->granted_locks));
		goto sharing_existing_lock;
	}

	/* if there's no-one else with a lock on this vnode, then we need to
	 * ask the server for a lock */
	if (list_empty(&vnode->pending_locks) &&
	    list_empty(&vnode->granted_locks)) {
		_debug("not locked");
		ASSERTCMP(vnode->flags &
			  ((1 << AFS_VNODE_LOCKING) |
			   (1 << AFS_VNODE_READLOCKED) |
			   (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
		list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
		set_bit(AFS_VNODE_LOCKING, &vnode->flags);
		spin_unlock(&vnode->lock);

		ret = afs_vnode_set_lock(vnode, key, type);
		clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
		switch (ret) {
		case 0:
			_debug("acquired");
			goto acquired_server_lock;
		case -EWOULDBLOCK:
			_debug("would block");
			spin_lock(&vnode->lock);
			ASSERT(list_empty(&vnode->granted_locks));
			ASSERTCMP(vnode->pending_locks.next, ==,
				  &fl->fl_u.afs.link);
			goto wait;
		default:
			spin_lock(&vnode->lock);
			list_del_init(&fl->fl_u.afs.link);
			spin_unlock(&vnode->lock);
			goto error;
		}
	}

	/* otherwise, we need to wait for a local lock to become available */
	_debug("wait local");
	list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
wait:
	if (!(fl->fl_flags & FL_SLEEP)) {
		_debug("noblock");
		ret = -EAGAIN;
		goto abort_attempt;
	}
	spin_unlock(&vnode->lock);

	/* now we need to sleep and wait for the lock manager thread to get the
	 * lock from the server */
	_debug("sleep");
	ret = wait_event_interruptible(fl->fl_wait,
				       fl->fl_u.afs.state <= AFS_LOCK_GRANTED);
	if (fl->fl_u.afs.state <= AFS_LOCK_GRANTED) {
		ret = fl->fl_u.afs.state;
		if (ret < 0)
			goto error;
		spin_lock(&vnode->lock);
		goto given_lock;
	}

	/* we were interrupted, but someone may still be in the throes of
	 * giving us the lock */
	_debug("intr");
	ASSERTCMP(ret, ==, -ERESTARTSYS);

	spin_lock(&vnode->lock);
	if (fl->fl_u.afs.state <= AFS_LOCK_GRANTED) {
		ret = fl->fl_u.afs.state;
		if (ret < 0) {
			spin_unlock(&vnode->lock);
			goto error;
		}
		goto given_lock;
	}

abort_attempt:
	/* we aren't going to get the lock, either because we're unwilling to
	 * wait, or because some signal happened */
	_debug("abort");
	if (list_empty(&vnode->granted_locks) &&
	    vnode->pending_locks.next == &fl->fl_u.afs.link) {
		if (vnode->pending_locks.prev != &fl->fl_u.afs.link) {
			/* kick the next pending lock into having a go */
			list_del_init(&fl->fl_u.afs.link);
			afs_lock_may_be_available(vnode);
		}
	} else {
		list_del_init(&fl->fl_u.afs.link);
	}
	spin_unlock(&vnode->lock);
	goto error;

acquired_server_lock:
	/* we've acquired a server lock, but it needs to be renewed after 5
	 * mins */
	spin_lock(&vnode->lock);
	afs_schedule_lock_extension(vnode);
	if (type == AFS_LOCK_READ)
		set_bit(AFS_VNODE_READLOCKED, &vnode->flags);
	else
		set_bit(AFS_VNODE_WRITELOCKED, &vnode->flags);
sharing_existing_lock:
	/* the lock has been granted as far as we're concerned... */
	fl->fl_u.afs.state = AFS_LOCK_GRANTED;
	list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
given_lock:
	/* ... but we do still need to get the VFS's blessing */
	ASSERT(!(vnode->flags & (1 << AFS_VNODE_LOCKING)));
	ASSERT((vnode->flags & ((1 << AFS_VNODE_READLOCKED) |
				(1 << AFS_VNODE_WRITELOCKED))) != 0);
	ret = posix_lock_file(file, fl, NULL);
	if (ret < 0)
		goto vfs_rejected_lock;
	spin_unlock(&vnode->lock);

	/* again, make sure we've got a callback on this file and, again, make
	 * sure that our view of the data version is up to date (we ignore
	 * errors incurred here and deal with the consequences elsewhere) */
	afs_vnode_fetch_status(vnode, NULL, key);

error:
	unlock_kernel();
	_leave(" = %d", ret);
	return ret;

vfs_rejected_lock:
	/* the VFS rejected the lock we just obtained, so we have to discard
	 * what we just got */
	_debug("vfs refused %d", ret);
	list_del_init(&fl->fl_u.afs.link);
	if (list_empty(&vnode->granted_locks))
		afs_defer_unlock(vnode, key);
	spin_unlock(&vnode->lock);
	goto abort_attempt;
}

/*
 * unlock on a file on the server
 */
static int afs_do_unlk(struct file *file, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host);
	struct key *key = file->private_data;
	int ret;

	_enter("{%x:%u},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);

	/* only whole-file unlocks are supported */
	if (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX)
		return -EINVAL;

	fl->fl_ops = &afs_lock_ops;
	INIT_LIST_HEAD(&fl->fl_u.afs.link);
	fl->fl_u.afs.state = AFS_LOCK_PENDING;

	spin_lock(&vnode->lock);
	ret = posix_lock_file(file, fl, NULL);
	if (ret < 0) {
		spin_unlock(&vnode->lock);
		_leave(" = %d [vfs]", ret);
		return ret;
	}

	/* discard the server lock only if all granted locks are gone */
	if (list_empty(&vnode->granted_locks))
		afs_defer_unlock(vnode, key);
	spin_unlock(&vnode->lock);
	_leave(" = 0");
	return 0;
}

/*
 * return information about a lock we currently hold, if indeed we hold one
 */
static int afs_do_getlk(struct file *file, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host);
	struct key *key = file->private_data;
	int ret, lock_count;

	_enter("");

	fl->fl_type = F_UNLCK;

	mutex_lock(&vnode->vfs_inode.i_mutex);

	/* check local lock records first */
	ret = 0;
	posix_test_lock(file, fl);
	if (fl->fl_type == F_UNLCK) {
		/* no local locks; consult the server */
		ret = afs_vnode_fetch_status(vnode, NULL, key);
		if (ret < 0)
			goto error;
		lock_count = vnode->status.lock_count;
		if (lock_count) {
			if (lock_count > 0)
				fl->fl_type = F_RDLCK;
			else
				fl->fl_type = F_WRLCK;
			fl->fl_start = 0;
			fl->fl_end = OFFSET_MAX;
		}
	}

error:
	mutex_unlock(&vnode->vfs_inode.i_mutex);
	_leave(" = %d [%hd]", ret, fl->fl_type);
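
/*
 * afs_lock() and afs_flock() below are the handlers invoked by the VFS; they
 * are expected to be wired up in the AFS file_operations table (typically in
 * fs/afs/file.c), along the lines of the following sketch:
 *
 *	const struct file_operations afs_file_operations = {
 *		...
 *		.lock	= afs_lock,
 *		.flock	= afs_flock,
 *	};
 *
 * (illustrative only; the actual table lives elsewhere in the tree)
 */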
	return ret;
}

/*
 * manage POSIX locks on a file
 */
int afs_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode);

	_enter("{%x:%u},%d,{t=%x,fl=%x,r=%Ld:%Ld}",
	       vnode->fid.vid, vnode->fid.vnode, cmd,
	       fl->fl_type, fl->fl_flags,
	       (long long) fl->fl_start, (long long) fl->fl_end);

	/* AFS doesn't support mandatory locks */
	if (__mandatory_lock(&vnode->vfs_inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (IS_GETLK(cmd))
		return afs_do_getlk(file, fl);
	if (fl->fl_type == F_UNLCK)
		return afs_do_unlk(file, fl);
	return afs_do_setlk(file, fl);
}

/*
 * manage FLOCK locks on a file
 */
int afs_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode);

	_enter("{%x:%u},%d,{t=%x,fl=%x}",
	       vnode->fid.vid, vnode->fid.vnode, cmd,
	       fl->fl_type, fl->fl_flags);

	/*
	 * No BSD flocks over NFS allowed.
	 * Note: we could try to fake a POSIX lock request here by
	 * using ((u32) filp | 0x80000000) or some such as the pid.
	 * Not sure whether that would be unique, though, or whether
	 * that would break in other places.
	 */
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;

	/* we're simulating flock() locks using posix locks on the server */
	fl->fl_owner = (fl_owner_t) file;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;

	if (fl->fl_type == F_UNLCK)
		return afs_do_unlk(file, fl);
	return afs_do_setlk(file, fl);
}

/*
 * the POSIX lock management core VFS code copies the lock record and adds the
 * copy into its own list, so we need to add that copy to the vnode's lock
 * queue in the same place as the original (which will be deleted shortly
 * after)
 */
static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	_enter("");

	list_add(&new->fl_u.afs.link, &fl->fl_u.afs.link);
}

/*
 * need to remove this lock from the vnode queue when it's removed from the
 * VFS's list
 */
static void afs_fl_release_private(struct file_lock *fl)
{
	_enter("");

	list_del_init(&fl->fl_u.afs.link);
}
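
/*
 * For reference, a minimal userspace sketch of locking an AFS file
 * (illustrative only; not part of this translation unit).  Only whole-file
 * locks are supported, so the request must cover the entire file
 * (l_start = 0, l_len = 0); a byte-range request is rejected with -EINVAL:
 *
 *	struct flock lck = {
 *		.l_type		= F_WRLCK,
 *		.l_whence	= SEEK_SET,
 *		.l_start	= 0,
 *		.l_len		= 0,	(0 = to EOF, ie. the whole file)
 *	};
 *	if (fcntl(fd, F_SETLKW, &lck) == -1)
 *		perror("fcntl");
 */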