/* AFS file locking support
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include "internal.h"

#define AFS_LOCK_GRANTED	0
#define AFS_LOCK_PENDING	1

static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl);
static void afs_fl_release_private(struct file_lock *fl);

static struct workqueue_struct *afs_lock_manager;
static DEFINE_MUTEX(afs_lock_manager_mutex);

static const struct file_lock_operations afs_lock_ops = {
	.fl_copy_lock		= afs_fl_copy_lock,
	.fl_release_private	= afs_fl_release_private,
};

/*
 * initialise the lock manager thread if it isn't already running
 */
static int afs_init_lock_manager(void)
{
	int ret;

	ret = 0;
	if (!afs_lock_manager) {
		mutex_lock(&afs_lock_manager_mutex);
		if (!afs_lock_manager) {
			afs_lock_manager =
				create_singlethread_workqueue("kafs_lockd");
			if (!afs_lock_manager)
				ret = -ENOMEM;
		}
		mutex_unlock(&afs_lock_manager_mutex);
	}
	return ret;
}

/*
 * destroy the lock manager thread if it's running
 */
void __exit afs_kill_lock_manager(void)
{
	if (afs_lock_manager)
		destroy_workqueue(afs_lock_manager);
}

/*
 * if the callback is broken on this vnode, then the lock may now be available
 */
void afs_lock_may_be_available(struct afs_vnode *vnode)
{
	_enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode);

	queue_delayed_work(afs_lock_manager, &vnode->lock_work, 0);
}

/*
 * the lock will time out in 5 minutes unless we extend it, so schedule
 * extension in a bit less than that time
 */
static void afs_schedule_lock_extension(struct afs_vnode *vnode)
{
	queue_delayed_work(afs_lock_manager, &vnode->lock_work,
			   AFS_LOCKWAIT * HZ / 2);
}

/*
 * grant one or more locks (readlocks are allowed to jump the queue if the
 * first lock in the queue is itself a readlock)
 * - the caller must hold the vnode lock
 */
static void afs_grant_locks(struct afs_vnode *vnode, struct file_lock *fl)
{
	struct file_lock *p, *_p;

	list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
	if (fl->fl_type == F_RDLCK) {
		list_for_each_entry_safe(p, _p, &vnode->pending_locks,
					 fl_u.afs.link) {
			if (p->fl_type == F_RDLCK) {
				p->fl_u.afs.state = AFS_LOCK_GRANTED;
				list_move_tail(&p->fl_u.afs.link,
					       &vnode->granted_locks);
				wake_up(&p->fl_wait);
			}
		}
	}
}

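/*
 * Note on the vnode flag bits used by the lock state machine below:
 * AFS_VNODE_LOCKING is held whilst a lock-setting or extension call to the
 * server is in flight, AFS_VNODE_READLOCKED and AFS_VNODE_WRITELOCKED record
 * which type of server lock is currently held, and AFS_VNODE_UNLOCKING marks
 * a release that has been deferred to the lock manager by afs_defer_unlock().
 */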

/*
 * do work for a lock, including:
 * - probing for a lock we're waiting on but didn't get immediately
 * - extending a lock that's close to timing out
 */
void afs_lock_work(struct work_struct *work)
{
	struct afs_vnode *vnode =
		container_of(work, struct afs_vnode, lock_work.work);
	struct file_lock *fl;
	afs_lock_type_t type;
	struct key *key;
	int ret;

	_enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode);

	spin_lock(&vnode->lock);

	if (test_bit(AFS_VNODE_UNLOCKING, &vnode->flags)) {
		_debug("unlock");
		spin_unlock(&vnode->lock);

		/* attempt to release the server lock; if it fails, we just
		 * wait 5 minutes and it'll time out anyway */
		ret = afs_vnode_release_lock(vnode, vnode->unlock_key);
		if (ret < 0)
			printk(KERN_WARNING "AFS:"
			       " Failed to release lock on {%x:%x} error %d\n",
			       vnode->fid.vid, vnode->fid.vnode, ret);

		spin_lock(&vnode->lock);
		key_put(vnode->unlock_key);
		vnode->unlock_key = NULL;
		clear_bit(AFS_VNODE_UNLOCKING, &vnode->flags);
	}

	/* if we've got a lock, then it must be time to extend that lock as AFS
	 * locks time out after 5 minutes */
	if (!list_empty(&vnode->granted_locks)) {
		_debug("extend");

		if (test_and_set_bit(AFS_VNODE_LOCKING, &vnode->flags))
			BUG();
		fl = list_entry(vnode->granted_locks.next,
				struct file_lock, fl_u.afs.link);
		key = key_get(fl->fl_file->private_data);
		spin_unlock(&vnode->lock);

		ret = afs_vnode_extend_lock(vnode, key);
		clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
		key_put(key);
		switch (ret) {
		case 0:
			afs_schedule_lock_extension(vnode);
			break;
		default:
			/* ummm... we failed to extend the lock - retry
			 * extension shortly */
			printk(KERN_WARNING "AFS:"
			       " Failed to extend lock on {%x:%x} error %d\n",
			       vnode->fid.vid, vnode->fid.vnode, ret);
			queue_delayed_work(afs_lock_manager, &vnode->lock_work,
					   HZ * 10);
			break;
		}
		_leave(" [extend]");
		return;
	}

	/* if we don't have a granted lock, then we must've been called back by
	 * the server, and so it might be possible to get a lock we're
	 * currently waiting for */
	if (!list_empty(&vnode->pending_locks)) {
		_debug("get");

		if (test_and_set_bit(AFS_VNODE_LOCKING, &vnode->flags))
			BUG();
		fl = list_entry(vnode->pending_locks.next,
				struct file_lock, fl_u.afs.link);
		key = key_get(fl->fl_file->private_data);
		type = (fl->fl_type == F_RDLCK) ?
			AFS_LOCK_READ : AFS_LOCK_WRITE;
		spin_unlock(&vnode->lock);

		ret = afs_vnode_set_lock(vnode, key, type);
		clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
		switch (ret) {
		case -EWOULDBLOCK:
			_debug("blocked");
			break;
		case 0:
			_debug("acquired");
			if (type == AFS_LOCK_READ)
				set_bit(AFS_VNODE_READLOCKED, &vnode->flags);
			else
				set_bit(AFS_VNODE_WRITELOCKED, &vnode->flags);
			ret = AFS_LOCK_GRANTED;
			/* fall through */
		default:
			spin_lock(&vnode->lock);
			/* the pending lock may have been withdrawn due to a
			 * signal */
			if (list_entry(vnode->pending_locks.next,
				       struct file_lock, fl_u.afs.link) == fl) {
				fl->fl_u.afs.state = ret;
				if (ret == AFS_LOCK_GRANTED)
					afs_grant_locks(vnode, fl);
				else
					list_del_init(&fl->fl_u.afs.link);
				wake_up(&fl->fl_wait);
				spin_unlock(&vnode->lock);
			} else {
				_debug("withdrawn");
				clear_bit(AFS_VNODE_READLOCKED, &vnode->flags);
				clear_bit(AFS_VNODE_WRITELOCKED, &vnode->flags);
				spin_unlock(&vnode->lock);
				afs_vnode_release_lock(vnode, key);
				if (!list_empty(&vnode->pending_locks))
					afs_lock_may_be_available(vnode);
			}
			break;
		}
		key_put(key);
		_leave(" [pend]");
		return;
	}

	/* looks like the lock request was withdrawn on a signal */
	spin_unlock(&vnode->lock);
	_leave(" [no locks]");
}

/*
 * pass responsibility for the unlocking of a vnode on the server to the
 * manager thread, lest a pending signal in the calling thread interrupt
 * AF_RXRPC
 * - the caller must hold the vnode lock
 */
static void afs_defer_unlock(struct afs_vnode *vnode, struct key *key)
{
	cancel_delayed_work(&vnode->lock_work);
	if (!test_and_clear_bit(AFS_VNODE_READLOCKED, &vnode->flags) &&
	    !test_and_clear_bit(AFS_VNODE_WRITELOCKED, &vnode->flags))
		BUG();
	if (test_and_set_bit(AFS_VNODE_UNLOCKING, &vnode->flags))
		BUG();
	vnode->unlock_key = key_get(key);
	afs_lock_may_be_available(vnode);
}

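/*
 * Overview of the strategy used by afs_do_setlk() below: if no other local
 * lock is granted or pending, the server is asked for the lock directly, and
 * a read lock can be granted immediately if the vnode already holds a server
 * read lock.  Otherwise the request is queued on pending_locks and the caller
 * sleeps until the lock manager work item resolves it or a signal aborts the
 * attempt.
 */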

/*
 * request a lock on a file on the server
 */
static int afs_do_setlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	afs_lock_type_t type;
	struct key *key = file->private_data;
	int ret;

	_enter("{%x:%u},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);

	/* only whole-file locks are supported */
	if (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX)
		return -EINVAL;

	ret = afs_init_lock_manager();
	if (ret < 0)
		return ret;

	fl->fl_ops = &afs_lock_ops;
	INIT_LIST_HEAD(&fl->fl_u.afs.link);
	fl->fl_u.afs.state = AFS_LOCK_PENDING;

	type = (fl->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;

	spin_lock(&inode->i_lock);

	/* make sure we've got a callback on this file and that our view of the
	 * data version is up to date */
	ret = afs_vnode_fetch_status(vnode, NULL, key);
	if (ret < 0)
		goto error;

	if (vnode->status.lock_count != 0 && !(fl->fl_flags & FL_SLEEP)) {
		ret = -EAGAIN;
		goto error;
	}

	spin_lock(&vnode->lock);

	/* if we've already got a readlock on the server then we can instantly
	 * grant another readlock, irrespective of whether there are any
	 * pending writelocks */
	if (type == AFS_LOCK_READ &&
	    vnode->flags & (1 << AFS_VNODE_READLOCKED)) {
		_debug("instant readlock");
		ASSERTCMP(vnode->flags &
			  ((1 << AFS_VNODE_LOCKING) |
			   (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
		ASSERT(!list_empty(&vnode->granted_locks));
		goto sharing_existing_lock;
	}

	/* if there's no-one else with a lock on this vnode, then we need to
	 * ask the server for a lock */
	if (list_empty(&vnode->pending_locks) &&
	    list_empty(&vnode->granted_locks)) {
		_debug("not locked");
		ASSERTCMP(vnode->flags &
			  ((1 << AFS_VNODE_LOCKING) |
			   (1 << AFS_VNODE_READLOCKED) |
			   (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
		list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
		set_bit(AFS_VNODE_LOCKING, &vnode->flags);
		spin_unlock(&vnode->lock);

		ret = afs_vnode_set_lock(vnode, key, type);
		clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
		switch (ret) {
		case 0:
			_debug("acquired");
			goto acquired_server_lock;
		case -EWOULDBLOCK:
			_debug("would block");
			spin_lock(&vnode->lock);
			ASSERT(list_empty(&vnode->granted_locks));
			ASSERTCMP(vnode->pending_locks.next, ==,
				  &fl->fl_u.afs.link);
			goto wait;
		default:
			spin_lock(&vnode->lock);
			list_del_init(&fl->fl_u.afs.link);
			spin_unlock(&vnode->lock);
			goto error;
		}
	}

	/* otherwise, we need to wait for a local lock to become available */
	_debug("wait local");
	list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
wait:
	if (!(fl->fl_flags & FL_SLEEP)) {
		_debug("noblock");
		ret = -EAGAIN;
		goto abort_attempt;
	}
	spin_unlock(&vnode->lock);

	/* now we need to sleep and wait for the lock manager thread to get the
	 * lock from the server */
	_debug("sleep");
	ret = wait_event_interruptible(fl->fl_wait,
				       fl->fl_u.afs.state <= AFS_LOCK_GRANTED);
	if (fl->fl_u.afs.state <= AFS_LOCK_GRANTED) {
		ret = fl->fl_u.afs.state;
		if (ret < 0)
			goto error;
		spin_lock(&vnode->lock);
		goto given_lock;
	}

	/* we were interrupted, but someone may still be in the throes of
	 * giving us the lock */
	_debug("intr");
	ASSERTCMP(ret, ==, -ERESTARTSYS);

	spin_lock(&vnode->lock);
	if (fl->fl_u.afs.state <= AFS_LOCK_GRANTED) {
		ret = fl->fl_u.afs.state;
		if (ret < 0) {
			spin_unlock(&vnode->lock);
			goto error;
		}
		goto given_lock;
	}

abort_attempt:
	/* we aren't going to get the lock, either because we're unwilling to
	 * wait, or because some signal happened */
	_debug("abort");
	if (list_empty(&vnode->granted_locks) &&
	    vnode->pending_locks.next == &fl->fl_u.afs.link) {
		if (vnode->pending_locks.prev != &fl->fl_u.afs.link) {
			/* kick the next pending lock into having a go */
			list_del_init(&fl->fl_u.afs.link);
			afs_lock_may_be_available(vnode);
		}
	} else {
		list_del_init(&fl->fl_u.afs.link);
	}
	spin_unlock(&vnode->lock);
	goto error;

acquired_server_lock:
	/* we've acquired a server lock, but it needs to be renewed after 5
	 * mins */
	spin_lock(&vnode->lock);
	afs_schedule_lock_extension(vnode);
	if (type == AFS_LOCK_READ)
		set_bit(AFS_VNODE_READLOCKED, &vnode->flags);
	else
		set_bit(AFS_VNODE_WRITELOCKED, &vnode->flags);
sharing_existing_lock:
	/* the lock has been granted as far as we're concerned... */
	fl->fl_u.afs.state = AFS_LOCK_GRANTED;
	list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
given_lock:
	/* ... but we do still need to get the VFS's blessing */
	ASSERT(!(vnode->flags & (1 << AFS_VNODE_LOCKING)));
	ASSERT((vnode->flags & ((1 << AFS_VNODE_READLOCKED) |
				(1 << AFS_VNODE_WRITELOCKED))) != 0);
	ret = posix_lock_file(file, fl, NULL);
	if (ret < 0)
		goto vfs_rejected_lock;
	spin_unlock(&vnode->lock);

	/* again, make sure we've got a callback on this file and, again, make
	 * sure that our view of the data version is up to date (we ignore
	 * errors incurred here and deal with the consequences elsewhere) */
	afs_vnode_fetch_status(vnode, NULL, key);

error:
	spin_unlock(&inode->i_lock);
	_leave(" = %d", ret);
	return ret;

vfs_rejected_lock:
	/* the VFS rejected the lock we just obtained, so we have to discard
	 * what we just got */
	_debug("vfs refused %d", ret);
	list_del_init(&fl->fl_u.afs.link);
	if (list_empty(&vnode->granted_locks))
		afs_defer_unlock(vnode, key);
	goto abort_attempt;
}

/*
 * unlock on a file on the server
 */
static int afs_do_unlk(struct file *file, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host);
	struct key *key = file->private_data;
	int ret;

	_enter("{%x:%u},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);

	/* only whole-file unlocks are supported */
	if (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX)
		return -EINVAL;

	fl->fl_ops = &afs_lock_ops;
	INIT_LIST_HEAD(&fl->fl_u.afs.link);
	fl->fl_u.afs.state = AFS_LOCK_PENDING;

	spin_lock(&vnode->lock);
	ret = posix_lock_file(file, fl, NULL);
	if (ret < 0) {
		spin_unlock(&vnode->lock);
		_leave(" = %d [vfs]", ret);
		return ret;
	}

	/* discard the server lock only if all granted locks are gone */
	if (list_empty(&vnode->granted_locks))
		afs_defer_unlock(vnode, key);
	spin_unlock(&vnode->lock);
	_leave(" = 0");
	return 0;
}

/*
 * return information about a lock we currently hold, if indeed we hold one
 */
static int afs_do_getlk(struct file *file, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host);
	struct key *key = file->private_data;
	int ret, lock_count;

	_enter("");

	fl->fl_type = F_UNLCK;

	inode_lock(&vnode->vfs_inode);

	/* check local lock records first */
	ret = 0;
	posix_test_lock(file, fl);
	if (fl->fl_type == F_UNLCK) {
		/* no local locks; consult the server */
		ret = afs_vnode_fetch_status(vnode, NULL, key);
		if (ret < 0)
			goto error;
		lock_count = vnode->status.lock_count;
		if (lock_count) {
			/* the server reports a positive count for read locks
			 * and a negative count for a write lock */
			if (lock_count > 0)
				fl->fl_type = F_RDLCK;
			else
				fl->fl_type = F_WRLCK;
			fl->fl_start = 0;
			fl->fl_end = OFFSET_MAX;
		}
	}

error:
	inode_unlock(&vnode->vfs_inode);
	_leave(" = %d [%hd]", ret, fl->fl_type);
	return ret;
}

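/*
 * afs_lock() and afs_flock() below are the entry points hooked up as the
 * ->lock and ->flock file operations for AFS regular files; both funnel into
 * afs_do_setlk() and afs_do_unlk() above, and afs_lock() additionally handles
 * F_GETLK requests via afs_do_getlk().
 */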

/*
 * manage POSIX locks on a file
 */
int afs_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));

	_enter("{%x:%u},%d,{t=%x,fl=%x,r=%Ld:%Ld}",
	       vnode->fid.vid, vnode->fid.vnode, cmd,
	       fl->fl_type, fl->fl_flags,
	       (long long) fl->fl_start, (long long) fl->fl_end);

	/* AFS doesn't support mandatory locks */
	if (__mandatory_lock(&vnode->vfs_inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (IS_GETLK(cmd))
		return afs_do_getlk(file, fl);
	if (fl->fl_type == F_UNLCK)
		return afs_do_unlk(file, fl);
	return afs_do_setlk(file, fl);
}

/*
 * manage FLOCK locks on a file
 */
int afs_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));

	_enter("{%x:%u},%d,{t=%x,fl=%x}",
	       vnode->fid.vid, vnode->fid.vnode, cmd,
	       fl->fl_type, fl->fl_flags);

	/*
	 * No BSD flocks over NFS allowed.
	 * Note: we could try to fake a POSIX lock request here by
	 * using ((u32) filp | 0x80000000) or some such as the pid.
	 * Not sure whether that would be unique, though, or whether
	 * that would break in other places.
	 */
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;

	/* we're simulating flock() locks using posix locks on the server */
	if (fl->fl_type == F_UNLCK)
		return afs_do_unlk(file, fl);
	return afs_do_setlk(file, fl);
}

/*
 * the POSIX lock management core VFS code copies the lock record and adds the
 * copy into its own list, so we need to add that copy to the vnode's lock
 * queue in the same place as the original (which will be deleted shortly
 * after)
 */
static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	_enter("");

	list_add(&new->fl_u.afs.link, &fl->fl_u.afs.link);
}

/*
 * need to remove this lock from the vnode queue when it's removed from the
 * VFS's list
 */
static void afs_fl_release_private(struct file_lock *fl)
{
	_enter("");

	list_del_init(&fl->fl_u.afs.link);
}