Lines matching "op", "-" and "mode"
1 // SPDX-License-Identifier: GPL-2.0-or-later
27 _debug("STATE %u -> %u", vnode->lock_state, state); in afs_set_lock_state()
28 vnode->lock_state = state; in afs_set_lock_state()
38 _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode); in afs_lock_may_be_available()
40 spin_lock(&vnode->lock); in afs_lock_may_be_available()
41 if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB) in afs_lock_may_be_available()
44 spin_unlock(&vnode->lock); in afs_lock_may_be_available()
56 expires_at = ktime_add_ms(vnode->locked_at, AFS_LOCKWAIT * 1000 / 2); in afs_schedule_lock_extension()
64 queue_delayed_work(afs_lock_manager, &vnode->lock_work, duration_j); in afs_schedule_lock_extension()
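The two lines above decide when the lock manager should next run to extend the server lock: at half the lock's lifetime, measured from when the lock call was issued, and (in surrounding code not shown here) clamped so the work is never scheduled in the past. A minimal userspace sketch of that arithmetic, assuming a five-minute server lock lifetime; the constant and function names are illustrative, not the kernel's:

/* Sketch: pick the time of the next lock-extension attempt. */
#include <stdio.h>
#include <time.h>

#define LOCK_LIFETIME_SECS (5 * 60)     /* assumed server lock lifetime */

static time_t next_extension(time_t locked_at, time_t now)
{
        time_t expires_at = locked_at + LOCK_LIFETIME_SECS / 2;

        return expires_at > now ? expires_at : now;     /* never in the past */
}

int main(void)
{
        time_t now = time(NULL);

        printf("extend in %ld seconds\n",
               (long)(next_extension(now, now) - now)); /* prints 150 */
        return 0;
}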
73 struct afs_operation *op = call->op; in afs_lock_op_done() local
74 struct afs_vnode *vnode = op->file[0].vnode; in afs_lock_op_done()
76 if (call->error == 0) { in afs_lock_op_done()
77 spin_lock(&vnode->lock); in afs_lock_op_done()
79 vnode->locked_at = call->issue_time; in afs_lock_op_done()
81 spin_unlock(&vnode->lock); in afs_lock_op_done()
88 * - the caller must hold the vnode lock
93 bool exclusive = (vnode->lock_type == AFS_LOCK_WRITE); in afs_grant_locks()
95 list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) { in afs_grant_locks()
99 list_move_tail(&p->fl_u.afs.link, &vnode->granted_locks); in afs_grant_locks()
100 p->fl_u.afs.state = AFS_LOCK_GRANTED; in afs_grant_locks()
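afs_grant_locks() above moves waiters from the pending list to the granted list while the caller holds the vnode lock. A small userspace model of the compatibility rule it applies, under the assumption that a shared (read) server lock can only back local read locks while an exclusive (write) server lock can back either kind; the types and names here are illustrative stand-ins:

#include <stdbool.h>
#include <stdio.h>

enum lock_type { LOCK_READ, LOCK_WRITE };

/* Can a waiter of the given type be granted under the held server lock? */
static bool can_grant(enum lock_type held_server_lock, enum lock_type waiter)
{
        if (held_server_lock == LOCK_WRITE)
                return true;                    /* exclusive covers both kinds */
        return waiter == LOCK_READ;             /* shared only covers readers */
}

int main(void)
{
        printf("%d %d %d\n",
               can_grant(LOCK_READ, LOCK_READ),         /* 1 */
               can_grant(LOCK_READ, LOCK_WRITE),        /* 0 */
               can_grant(LOCK_WRITE, LOCK_READ));       /* 1 */
        return 0;
}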
114 struct key *key = vnode->lock_key; in afs_next_locker()
119 if (vnode->lock_type == AFS_LOCK_WRITE) in afs_next_locker()
122 list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) { in afs_next_locker()
124 p->c.flc_type == type && in afs_next_locker()
125 afs_file_key(p->c.flc_file) == key) { in afs_next_locker()
126 list_del_init(&p->fl_u.afs.link); in afs_next_locker()
127 p->fl_u.afs.state = error; in afs_next_locker()
137 vnode->lock_key = NULL; in afs_next_locker()
142 next->fl_u.afs.state = AFS_LOCK_YOUR_TRY; in afs_next_locker()
163 while (!list_empty(&vnode->pending_locks)) { in afs_kill_lockers_enoent()
164 p = list_entry(vnode->pending_locks.next, in afs_kill_lockers_enoent()
166 list_del_init(&p->fl_u.afs.link); in afs_kill_lockers_enoent()
167 p->fl_u.afs.state = -ENOENT; in afs_kill_lockers_enoent()
171 key_put(vnode->lock_key); in afs_kill_lockers_enoent()
172 vnode->lock_key = NULL; in afs_kill_lockers_enoent()
175 static void afs_lock_success(struct afs_operation *op) in afs_lock_success() argument
177 _enter("op=%08x", op->debug_id); in afs_lock_success()
178 afs_vnode_commit_status(op, &op->file[0]); in afs_lock_success()
194 struct afs_operation *op; in afs_set_lock() local
197 vnode->volume->name, in afs_set_lock()
198 vnode->fid.vid, in afs_set_lock()
199 vnode->fid.vnode, in afs_set_lock()
200 vnode->fid.unique, in afs_set_lock()
203 op = afs_alloc_operation(key, vnode->volume); in afs_set_lock()
204 if (IS_ERR(op)) in afs_set_lock()
205 return PTR_ERR(op); in afs_set_lock()
207 afs_op_set_vnode(op, 0, vnode); in afs_set_lock()
209 op->lock.type = type; in afs_set_lock()
210 op->ops = &afs_set_lock_operation; in afs_set_lock()
211 return afs_do_sync_operation(op); in afs_set_lock()
225 struct afs_operation *op; in afs_extend_lock() local
228 vnode->volume->name, in afs_extend_lock()
229 vnode->fid.vid, in afs_extend_lock()
230 vnode->fid.vnode, in afs_extend_lock()
231 vnode->fid.unique, in afs_extend_lock()
234 op = afs_alloc_operation(key, vnode->volume); in afs_extend_lock()
235 if (IS_ERR(op)) in afs_extend_lock()
236 return PTR_ERR(op); in afs_extend_lock()
238 afs_op_set_vnode(op, 0, vnode); in afs_extend_lock()
240 op->flags |= AFS_OPERATION_UNINTR; in afs_extend_lock()
241 op->ops = &afs_extend_lock_operation; in afs_extend_lock()
242 return afs_do_sync_operation(op); in afs_extend_lock()
256 struct afs_operation *op; in afs_release_lock() local
259 vnode->volume->name, in afs_release_lock()
260 vnode->fid.vid, in afs_release_lock()
261 vnode->fid.vnode, in afs_release_lock()
262 vnode->fid.unique, in afs_release_lock()
265 op = afs_alloc_operation(key, vnode->volume); in afs_release_lock()
266 if (IS_ERR(op)) in afs_release_lock()
267 return PTR_ERR(op); in afs_release_lock()
269 afs_op_set_vnode(op, 0, vnode); in afs_release_lock()
271 op->flags |= AFS_OPERATION_UNINTR; in afs_release_lock()
272 op->ops = &afs_release_lock_operation; in afs_release_lock()
273 return afs_do_sync_operation(op); in afs_release_lock()
278 * - probing for a lock we're waiting on but didn't get immediately
279 * - extending a lock that's close to timing out
288 _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode); in afs_lock_work()
290 spin_lock(&vnode->lock); in afs_lock_work()
293 _debug("wstate %u for %p", vnode->lock_state, vnode); in afs_lock_work()
294 switch (vnode->lock_state) { in afs_lock_work()
298 spin_unlock(&vnode->lock); in afs_lock_work()
302 ret = afs_release_lock(vnode, vnode->lock_key); in afs_lock_work()
303 if (ret < 0 && vnode->lock_state != AFS_VNODE_LOCK_DELETED) { in afs_lock_work()
308 vnode->fid.vid, vnode->fid.vnode, ret); in afs_lock_work()
311 spin_lock(&vnode->lock); in afs_lock_work()
312 if (ret == -ENOENT) in afs_lock_work()
316 spin_unlock(&vnode->lock); in afs_lock_work()
325 ASSERT(!list_empty(&vnode->granted_locks)); in afs_lock_work()
327 key = key_get(vnode->lock_key); in afs_lock_work()
330 spin_unlock(&vnode->lock); in afs_lock_work()
339 vnode->fid.vid, vnode->fid.vnode, ret); in afs_lock_work()
342 spin_lock(&vnode->lock); in afs_lock_work()
344 if (ret == -ENOENT) { in afs_lock_work()
346 spin_unlock(&vnode->lock); in afs_lock_work()
350 if (vnode->lock_state != AFS_VNODE_LOCK_EXTENDING) in afs_lock_work()
355 queue_delayed_work(afs_lock_manager, &vnode->lock_work, in afs_lock_work()
357 spin_unlock(&vnode->lock); in afs_lock_work()
370 spin_unlock(&vnode->lock); in afs_lock_work()
375 spin_unlock(&vnode->lock); in afs_lock_work()
380 spin_unlock(&vnode->lock); in afs_lock_work()
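afs_lock_work() is the lock manager's dispatcher for the cases named in its header comment: releasing a lock that is no longer needed, extending one that is close to timing out, and re-probing for one that was contended. A rough userspace sketch of that dispatch shape, with illustrative state names rather than the kernel's:

#include <stdio.h>

enum lock_state {
        LOCK_NEED_UNLOCK,       /* no local users left: release the server lock */
        LOCK_EXTENDING,         /* lifetime nearly up: ask the server to extend */
        LOCK_WAITING_FOR_CB,    /* was contended: try to set the lock again */
        LOCK_IDLE,
};

static void lock_work(enum lock_state state)
{
        switch (state) {
        case LOCK_NEED_UNLOCK:
                puts("release the server lock");
                break;
        case LOCK_EXTENDING:
                puts("extend the server lock before it times out");
                break;
        case LOCK_WAITING_FOR_CB:
                puts("probe the server again for the lock we're waiting on");
                break;
        default:
                puts("nothing to do");
                break;
        }
}

int main(void)
{
        lock_work(LOCK_EXTENDING);
        lock_work(LOCK_WAITING_FOR_CB);
        return 0;
}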
390 * - the caller must hold the vnode lock
394 _enter("%u", vnode->lock_state); in afs_defer_unlock()
396 if (list_empty(&vnode->granted_locks) && in afs_defer_unlock()
397 (vnode->lock_state == AFS_VNODE_LOCK_GRANTED || in afs_defer_unlock()
398 vnode->lock_state == AFS_VNODE_LOCK_EXTENDING)) { in afs_defer_unlock()
399 cancel_delayed_work(&vnode->lock_work); in afs_defer_unlock()
403 queue_delayed_work(afs_lock_manager, &vnode->lock_work, 0); in afs_defer_unlock()
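afs_defer_unlock() only hands a release off to the manager work item when nothing local still depends on the server lock; the cancellation of pending extension work seen above is elided from this model. A userspace sketch of the decision, assuming the caller already holds the lock protecting these fields; the enum and names are illustrative:

#include <stdio.h>

enum srv_state { SRV_NONE, SRV_GRANTED, SRV_EXTENDING, SRV_NEED_UNLOCK };

static enum srv_state defer_unlock(enum srv_state state, int granted_count)
{
        if (granted_count == 0 &&
            (state == SRV_GRANTED || state == SRV_EXTENDING))
                return SRV_NEED_UNLOCK;         /* manager should release it now */
        return state;                           /* still in use, nothing to do */
}

int main(void)
{
        printf("%d\n", defer_unlock(SRV_GRANTED, 0));   /* 3 (needs unlock) */
        printf("%d\n", defer_unlock(SRV_GRANTED, 2));   /* 1 (unchanged) */
        return 0;
}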
412 enum afs_flock_mode mode, afs_lock_type_t type) in afs_do_setlk_check() argument
432 * read-lock a file and WRITE or INSERT perm to write-lock a file. in afs_do_setlk_check()
439 return -EACCES; in afs_do_setlk_check()
442 return -EACCES; in afs_do_setlk_check()
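The check above weighs the caller's access rights against the kind of lock requested. A hedged userspace sketch of that rule: a write lock needs WRITE or INSERT permission, and a read lock is assumed here to also accept a LOCK permission bit (only part of the rule is visible in the fragment above); the bit values are invented for the sketch:

#include <stdbool.h>
#include <stdio.h>

#define PERM_INSERT     0x01
#define PERM_WRITE      0x02
#define PERM_LOCK       0x04    /* assumed extra latitude for read locks */

static bool may_lock(unsigned int access, bool write_lock)
{
        unsigned int need = PERM_INSERT | PERM_WRITE;

        if (!write_lock)
                need |= PERM_LOCK;
        return (access & need) != 0;    /* any one of the bits is enough */
}

int main(void)
{
        printf("%d %d\n",
               may_lock(PERM_LOCK, false),      /* read lock: 1 */
               may_lock(PERM_LOCK, true));      /* write lock: 0 */
        return 0;
}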
455 enum afs_flock_mode mode = AFS_FS_S(inode->i_sb)->flock_mode; in afs_do_setlk() local
461 if (mode == afs_flock_mode_unset) in afs_do_setlk()
462 mode = afs_flock_mode_openafs; in afs_do_setlk()
464 _enter("{%llx:%llu},%llu-%llu,%u,%u", in afs_do_setlk()
465 vnode->fid.vid, vnode->fid.vnode, in afs_do_setlk()
466 fl->fl_start, fl->fl_end, fl->c.flc_type, mode); in afs_do_setlk()
468 fl->fl_ops = &afs_lock_ops; in afs_do_setlk()
469 INIT_LIST_HEAD(&fl->fl_u.afs.link); in afs_do_setlk()
470 fl->fl_u.afs.state = AFS_LOCK_PENDING; in afs_do_setlk()
472 partial = (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX); in afs_do_setlk()
474 if (mode == afs_flock_mode_write && partial) in afs_do_setlk()
477 ret = afs_do_setlk_check(vnode, key, mode, type); in afs_do_setlk()
483 /* AFS3 protocol only supports full-file locks and doesn't provide any in afs_do_setlk()
484 * method of upgrade/downgrade, so we need to emulate for partial-file in afs_do_setlk()
487 * The OpenAFS client only gets a server lock for a full-file lock and in afs_do_setlk()
488 * keeps partial-file locks local. Allow this behaviour to be emulated in afs_do_setlk()
491 if (mode == afs_flock_mode_local || in afs_do_setlk()
492 (partial && mode == afs_flock_mode_openafs)) { in afs_do_setlk()
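The comment block above explains why some requests never reach the server: in "local" mode, and in "openafs" mode for partial-range locks, the lock is handled entirely client-side; otherwise a whole-file lock is requested from the server. A minimal userspace sketch of that decision; the enum names are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

enum flock_mode { MODE_LOCAL, MODE_OPENAFS, MODE_STRICT };

static bool need_server_lock(enum flock_mode mode, bool partial)
{
        if (mode == MODE_LOCAL)
                return false;                   /* never talk to the server */
        if (partial && mode == MODE_OPENAFS)
                return false;                   /* partial ranges stay local */
        return true;                            /* take a whole-file server lock */
}

int main(void)
{
        printf("%d %d %d\n",
               need_server_lock(MODE_OPENAFS, true),    /* 0 */
               need_server_lock(MODE_OPENAFS, false),   /* 1 */
               need_server_lock(MODE_LOCAL, false));    /* 0 */
        return 0;
}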
497 spin_lock(&vnode->lock); in afs_do_setlk()
498 list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks); in afs_do_setlk()
500 ret = -ENOENT; in afs_do_setlk()
501 if (vnode->lock_state == AFS_VNODE_LOCK_DELETED) in afs_do_setlk()
508 _debug("try %u", vnode->lock_state); in afs_do_setlk()
509 if (vnode->lock_state == AFS_VNODE_LOCK_GRANTED) { in afs_do_setlk()
512 list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks); in afs_do_setlk()
513 fl->fl_u.afs.state = AFS_LOCK_GRANTED; in afs_do_setlk()
517 if (vnode->lock_type == AFS_LOCK_WRITE) { in afs_do_setlk()
519 list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks); in afs_do_setlk()
520 fl->fl_u.afs.state = AFS_LOCK_GRANTED; in afs_do_setlk()
525 if (vnode->lock_state == AFS_VNODE_LOCK_NONE && in afs_do_setlk()
526 !(fl->c.flc_flags & FL_SLEEP)) { in afs_do_setlk()
527 ret = -EAGAIN; in afs_do_setlk()
529 if (vnode->status.lock_count == -1) in afs_do_setlk()
532 if (vnode->status.lock_count != 0) in afs_do_setlk()
537 if (vnode->lock_state != AFS_VNODE_LOCK_NONE) in afs_do_setlk()
546 * though we don't wait for the reply (it's not too bad a problem - the in afs_do_setlk()
550 vnode->lock_key = key_get(key); in afs_do_setlk()
551 vnode->lock_type = type; in afs_do_setlk()
553 spin_unlock(&vnode->lock); in afs_do_setlk()
557 spin_lock(&vnode->lock); in afs_do_setlk()
559 case -EKEYREJECTED: in afs_do_setlk()
560 case -EKEYEXPIRED: in afs_do_setlk()
561 case -EKEYREVOKED: in afs_do_setlk()
562 case -EPERM: in afs_do_setlk()
563 case -EACCES: in afs_do_setlk()
564 fl->fl_u.afs.state = ret; in afs_do_setlk()
566 list_del_init(&fl->fl_u.afs.link); in afs_do_setlk()
570 case -ENOENT: in afs_do_setlk()
571 fl->fl_u.afs.state = ret; in afs_do_setlk()
573 list_del_init(&fl->fl_u.afs.link); in afs_do_setlk()
578 fl->fl_u.afs.state = ret; in afs_do_setlk()
580 list_del_init(&fl->fl_u.afs.link); in afs_do_setlk()
584 case -EWOULDBLOCK: in afs_do_setlk()
585 /* The server doesn't have a lock-waiting queue, so the client in afs_do_setlk()
589 ASSERT(list_empty(&vnode->granted_locks)); in afs_do_setlk()
590 ASSERTCMP(vnode->pending_locks.next, ==, &fl->fl_u.afs.link); in afs_do_setlk()
601 spin_unlock(&vnode->lock); in afs_do_setlk()
604 ASSERTCMP(fl->fl_u.afs.state, ==, AFS_LOCK_GRANTED); in afs_do_setlk()
623 if (!(fl->c.flc_flags & FL_SLEEP)) { in afs_do_setlk()
624 list_del_init(&fl->fl_u.afs.link); in afs_do_setlk()
626 ret = -EAGAIN; in afs_do_setlk()
632 queue_delayed_work(afs_lock_manager, &vnode->lock_work, HZ * 5); in afs_do_setlk()
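Because the server keeps no queue of waiting lockers (per the -EWOULDBLOCK comment above), a sleeping request is parked and the manager work is re-queued to poll, here about five seconds later (HZ * 5). A toy userspace model of that polling loop; try_server_lock() is a stand-in, not a real AFS call:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool try_server_lock(int attempt)
{
        return attempt >= 3;            /* pretend the lock frees up eventually */
}

int main(void)
{
        for (int attempt = 1; ; attempt++) {
                if (try_server_lock(attempt)) {
                        printf("granted on attempt %d\n", attempt);
                        break;
                }
                sleep(5);               /* no server-side wait queue, so poll */
        }
        return 0;
}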
640 spin_unlock(&vnode->lock); in afs_do_setlk()
643 ret = wait_event_interruptible(fl->c.flc_wait, in afs_do_setlk()
644 fl->fl_u.afs.state != AFS_LOCK_PENDING); in afs_do_setlk()
647 if (fl->fl_u.afs.state >= 0 && fl->fl_u.afs.state != AFS_LOCK_GRANTED) { in afs_do_setlk()
648 spin_lock(&vnode->lock); in afs_do_setlk()
650 switch (fl->fl_u.afs.state) { in afs_do_setlk()
652 fl->fl_u.afs.state = AFS_LOCK_PENDING; in afs_do_setlk()
660 ASSERTCMP(vnode->lock_state, ==, AFS_VNODE_LOCK_WAITING_FOR_CB); in afs_do_setlk()
662 fl->fl_u.afs.state = AFS_LOCK_PENDING; in afs_do_setlk()
671 spin_unlock(&vnode->lock); in afs_do_setlk()
674 if (fl->fl_u.afs.state == AFS_LOCK_GRANTED) in afs_do_setlk()
676 ret = fl->fl_u.afs.state; in afs_do_setlk()
687 spin_lock(&vnode->lock); in afs_do_setlk()
688 list_del_init(&fl->fl_u.afs.link); in afs_do_setlk()
692 spin_unlock(&vnode->lock); in afs_do_setlk()
706 _enter("{%llx:%llu},%u", vnode->fid.vid, vnode->fid.vnode, in afs_do_unlk()
707 fl->c.flc_type); in afs_do_unlk()
715 _leave(" = %d [%u]", ret, vnode->lock_state); in afs_do_unlk()
730 if (vnode->lock_state == AFS_VNODE_LOCK_DELETED) in afs_do_getlk()
731 return -ENOENT; in afs_do_getlk()
733 fl->c.flc_type = F_UNLCK; in afs_do_getlk()
743 lock_count = READ_ONCE(vnode->status.lock_count); in afs_do_getlk()
746 fl->c.flc_type = F_RDLCK; in afs_do_getlk()
748 fl->c.flc_type = F_WRLCK; in afs_do_getlk()
749 fl->fl_start = 0; in afs_do_getlk()
750 fl->fl_end = OFFSET_MAX; in afs_do_getlk()
751 fl->c.flc_pid = 0; in afs_do_getlk()
757 _leave(" = %d [%hd]", ret, fl->c.flc_type); in afs_do_getlk()
767 enum afs_flock_operation op; in afs_lock() local
771 vnode->fid.vid, vnode->fid.vnode, cmd, in afs_lock()
772 fl->c.flc_type, fl->c.flc_flags, in afs_lock()
773 (long long) fl->fl_start, (long long) fl->fl_end); in afs_lock()
778 fl->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id); in afs_lock()
787 case 0: op = afs_flock_op_return_ok; break; in afs_lock()
788 case -EAGAIN: op = afs_flock_op_return_eagain; break; in afs_lock()
789 case -EDEADLK: op = afs_flock_op_return_edeadlk; break; in afs_lock()
790 default: op = afs_flock_op_return_error; break; in afs_lock()
792 trace_afs_flock_op(vnode, fl, op); in afs_lock()
802 enum afs_flock_operation op; in afs_flock() local
806 vnode->fid.vid, vnode->fid.vnode, cmd, in afs_flock()
807 fl->c.flc_type, fl->c.flc_flags); in afs_flock()
816 if (!(fl->c.flc_flags & FL_FLOCK)) in afs_flock()
817 return -ENOLCK; in afs_flock()
819 fl->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id); in afs_flock()
829 case 0: op = afs_flock_op_return_ok; break; in afs_flock()
830 case -EAGAIN: op = afs_flock_op_return_eagain; break; in afs_flock()
831 case -EDEADLK: op = afs_flock_op_return_edeadlk; break; in afs_flock()
832 default: op = afs_flock_op_return_error; break; in afs_flock()
834 trace_afs_flock_op(vnode, fl, op); in afs_flock()
846 struct afs_vnode *vnode = AFS_FS_I(file_inode(fl->c.flc_file)); in afs_fl_copy_lock()
850 new->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id); in afs_fl_copy_lock()
852 spin_lock(&vnode->lock); in afs_fl_copy_lock()
854 list_add(&new->fl_u.afs.link, &fl->fl_u.afs.link); in afs_fl_copy_lock()
855 spin_unlock(&vnode->lock); in afs_fl_copy_lock()
864 struct afs_vnode *vnode = AFS_FS_I(file_inode(fl->c.flc_file)); in afs_fl_release_private()
868 spin_lock(&vnode->lock); in afs_fl_release_private()
871 list_del_init(&fl->fl_u.afs.link); in afs_fl_release_private()
872 if (list_empty(&vnode->granted_locks)) in afs_fl_release_private()
875 _debug("state %u for %p", vnode->lock_state, vnode); in afs_fl_release_private()
876 spin_unlock(&vnode->lock); in afs_fl_release_private()