Lines Matching +full:we +full:-extra +full:-delay (excerpts from fs/ocfs2/dlm/dlmthread.c in the Linux kernel). Matched lines are grouped by function; /* ... */ marks lines the search did not match.

// SPDX-License-Identifier: GPL-2.0-or-later
/* ... */
#include <linux/delay.h>

In __dlm_wait_on_lockres_flags():

/* will exit holding res->spinlock, but may drop in function */
/* waits until flags are cleared on res->state */
	/* ... */
	assert_spin_locked(&res->spinlock);

	add_wait_queue(&res->wq, &wait);
	/* ... */
	if (res->state & flags) {
		spin_unlock(&res->spinlock);
		/* ... */
		spin_lock(&res->spinlock);
		/* ... */
	}
	remove_wait_queue(&res->wq, &wait);
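
The gaps above hide the standard open-coded wait-queue loop (mark the task sleeping, re-check the condition, schedule(), retry). A minimal self-contained sketch of that pattern, with illustrative names (my_res, my_wait_on_flags) rather than the dlmthread.c ones:

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct my_res {
	spinlock_t lock;
	unsigned int state;
	wait_queue_head_t wq;
};

/* Called with res->lock held; returns with it held. */
static void my_wait_on_flags(struct my_res *res, unsigned int flags)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&res->wq, &wait);
repeat:
	/* Mark ourselves sleeping *before* re-checking the condition,
	 * so a concurrent wake_up() cannot be lost between the test
	 * and schedule(). */
	set_current_state(TASK_UNINTERRUPTIBLE);
	if (res->state & flags) {
		spin_unlock(&res->lock);
		schedule();
		spin_lock(&res->lock);
		goto repeat;
	}
	remove_wait_queue(&res->wq, &wait);
	__set_current_state(TASK_RUNNING);
}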

In __dlm_lockres_has_locks():

	if (list_empty(&res->granted) &&
	    list_empty(&res->converting) &&
	    list_empty(&res->blocked))
		/* ... */

In __dlm_lockres_unused():

	assert_spin_locked(&res->spinlock);
	/* ... */
	if (res->inflight_locks)
		/* ... */
	if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY)
		/* ... */
	if (res->state & (DLM_LOCK_RES_RECOVERING|
	/* ... */
	bit = find_first_bit(res->refmap, O2NM_MAX_NODES);
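
find_first_bit() returns the bitmap size when no bit is set, so the refmap scan above can tell whether any node still holds a reference. A sketch of that test, with illustrative names and an assumed bitmap size:

#include <linux/bitops.h>
#include <linux/types.h>

#define MY_MAX_NODES 255	/* illustrative bitmap size */

static bool refmap_has_holder(const unsigned long *refmap)
{
	/* find_first_bit() == size means the bitmap is empty */
	return find_first_bit(refmap, MY_MAX_NODES) < MY_MAX_NODES;
}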

In __dlm_lockres_calc_usage():

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);
	/* ... */
		if (list_empty(&res->purge)) {
			/* ... */
			     dlm->name, res->lockname.len, res->lockname.name);
			/* ... */
			res->last_used = jiffies;
			/* ... */
			list_add_tail(&res->purge, &dlm->purge_list);
			dlm->purge_count++;
		/* ... */
	} else if (!list_empty(&res->purge)) {
		/* ... */
		     dlm->name, res->lockname.len, res->lockname.name);
		/* ... */
		list_del_init(&res->purge);
		/* ... */
		dlm->purge_count--;
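
The logic above is a small LRU: a resource that has become unused is stamped with jiffies and appended to the tail of a global purge list (with a counter), and unlinked again if it comes back into use. A condensed sketch, with illustrative names and the locking left to the caller:

#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/types.h>

struct my_domain {
	struct list_head purge_list;
	unsigned int purge_count;
};

struct my_res {
	struct list_head purge;		/* empty when not on the list */
	unsigned long last_used;
	bool unused;
};

/* Caller holds whatever locks protect both structures. */
static void my_calc_usage(struct my_domain *d, struct my_res *res)
{
	if (res->unused) {
		if (list_empty(&res->purge)) {
			res->last_used = jiffies;
			list_add_tail(&res->purge, &d->purge_list);
			d->purge_count++;
		}
	} else if (!list_empty(&res->purge)) {
		list_del_init(&res->purge);
		d->purge_count--;
	}
}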

In dlm_lockres_calc_usage():

	spin_lock(&dlm->spinlock);
	spin_lock(&res->spinlock);
	/* ... */
	spin_unlock(&res->spinlock);
	spin_unlock(&dlm->spinlock);

In __dlm_do_purge_lockres():

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);
	/* ... */
	if (!list_empty(&res->purge)) {
		/* ... */
		     dlm->name, res->lockname.len, res->lockname.name);
		list_del_init(&res->purge);
		/* ... */
		dlm->purge_count--;
	/* ... */
			dlm->name, res->lockname.len, res->lockname.name);
	/* ... */
	spin_lock(&dlm->track_lock);
	if (!list_empty(&res->tracking))
		list_del_init(&res->tracking);
	/* ... */
			dlm->name, res->lockname.len, res->lockname.name);
	/* ... */
	spin_unlock(&dlm->track_lock);
	/* ... */
	res->state &= ~DLM_LOCK_RES_DROPPING_REF;

In dlm_purge_lockres():

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);
	/* ... */
	master = (res->owner == dlm->node_num);
	/* ... */
	mlog(0, "%s: Purging res %.*s, master %d\n", dlm->name,
	     res->lockname.len, res->lockname.name, master);
	/* ... */
		if (res->state & DLM_LOCK_RES_DROPPING_REF) {
			/* ... */
				dlm->name, res->lockname.len, res->lockname.name);
			spin_unlock(&res->spinlock);
			/* ... */
		res->state |= DLM_LOCK_RES_DROPPING_REF;
		/* ... */
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);
		/* ... */
		spin_lock(&res->spinlock);
		/* ... */
		spin_unlock(&res->spinlock);
		/* ... */
		spin_lock(&dlm->spinlock);
		spin_lock(&res->spinlock);
	/* ... */
	if (!list_empty(&res->purge)) {
		/* ... */
		     dlm->name, res->lockname.len, res->lockname.name, master);
		list_del_init(&res->purge);
		/* ... */
		dlm->purge_count--;
	/* ... */
			dlm->name, res->lockname.len, res->lockname.name);
		spin_unlock(&res->spinlock);
	/* ... */
		     dlm->name, res->lockname.len, res->lockname.name);
	/* ... */
	spin_lock(&dlm->track_lock);
	if (!list_empty(&res->tracking))
		list_del_init(&res->tracking);
	/* ... */
			res->lockname.len, res->lockname.name);
	/* ... */
	spin_unlock(&dlm->track_lock);
	/* ... */
		res->state &= ~DLM_LOCK_RES_DROPPING_REF;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);
	/* ... */
		spin_unlock(&res->spinlock);
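
dlm_purge_lockres() also shows a common teardown pattern: set an in-progress state bit under the spinlock so concurrent threads back off, drop the locks for the blocking work (here a network deref message), then retake the lock, clear the bit, and wake any waiters. A sketch of just that skeleton, with illustrative names (my_send_deref stands in for the blocking step):

#include <linux/spinlock.h>
#include <linux/wait.h>

#define MY_DROPPING 0x01

struct my_res {
	spinlock_t lock;
	unsigned int state;
	wait_queue_head_t wq;
};

/* stand-in for the blocking work; may sleep, no spinlocks held */
static void my_send_deref(struct my_res *res)
{
}

static void my_purge(struct my_res *res)
{
	spin_lock(&res->lock);
	if (res->state & MY_DROPPING) {
		/* someone else is already tearing this down */
		spin_unlock(&res->lock);
		return;
	}
	res->state |= MY_DROPPING;
	spin_unlock(&res->lock);

	my_send_deref(res);

	spin_lock(&res->lock);
	res->state &= ~MY_DROPPING;
	spin_unlock(&res->lock);
	wake_up(&res->wq);	/* let waiters re-check res->state */
}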

In dlm_run_purge_list():

	spin_lock(&dlm->spinlock);
	run_max = dlm->purge_count;
	/* ... */
	while(run_max && !list_empty(&dlm->purge_list)) {
		run_max--;
		/* ... */
		lockres = list_entry(dlm->purge_list.next,
		/* ... */
		spin_lock(&lockres->spinlock);
		/* ... */
		purge_jiffies = lockres->last_used +
		/* ... */
		/* Make sure that we want to be processing this guy at
		 * ... */
		/* ... */
			/* ...
			 * in tail order, we can stop at the first
			 * unpurgable resource -- anyone added after
			 * ... */
			spin_unlock(&lockres->spinlock);
		/* ... */
		    (lockres->state & DLM_LOCK_RES_MIGRATING) ||
		    (lockres->inflight_assert_workers != 0)) {
			/* ... */
			     dlm->name, lockres->lockname.len,
			     lockres->lockname.name,
			     !unused, lockres->state,
			     lockres->inflight_assert_workers);
			list_move_tail(&lockres->purge, &dlm->purge_list);
			spin_unlock(&lockres->spinlock);
		/* ... */
		cond_resched_lock(&dlm->spinlock);
	}

	spin_unlock(&dlm->spinlock);
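
The age test elided above compares last_used plus a purge interval against jiffies; time_after() keeps the comparison correct across jiffies wraparound, and the tail ordering lets the scan stop at the first entry that is still too young. A sketch, with an illustrative interval:

#include <linux/jiffies.h>
#include <linux/types.h>

#define MY_PURGE_INTERVAL_MS	(8 * 1000)	/* illustrative value */

static bool old_enough_to_purge(unsigned long last_used)
{
	unsigned long purge_jiffies =
		last_used + msecs_to_jiffies(MY_PURGE_INTERVAL_MS);

	/* time_after() handles jiffies wraparound correctly */
	return !time_after(purge_jiffies, jiffies);
}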

In dlm_shuffle_lists():

	/* ...
	 * spinlock, and because we know that it is not migrating/
	 * recovering/in-progress, it is fine to reserve asts and
	 * ... */
	assert_spin_locked(&dlm->ast_lock);
	assert_spin_locked(&res->spinlock);
	BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING|
	/* ... */

	if (list_empty(&res->converting))
		/* ... */
	mlog(0, "%s: res %.*s has locks on the convert queue\n", dlm->name,
	     res->lockname.len, res->lockname.name);

	target = list_entry(res->converting.next, struct dlm_lock, list);
	if (target->ml.convert_type == LKM_IVMODE) {
		/* ... */
		     dlm->name, res->lockname.len, res->lockname.name);
	/* ... */
	list_for_each_entry(lock, &res->granted, list) {
		/* ... */
		if (!dlm_lock_compatible(lock->ml.type,
					 target->ml.convert_type)) {
			/* ... */
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				/* ... */
			if (lock->ml.highest_blocked < target->ml.convert_type)
				lock->ml.highest_blocked =
					target->ml.convert_type;
	/* ... */
	list_for_each_entry(lock, &res->converting, list) {
		/* ... */
		if (!dlm_lock_compatible(lock->ml.type,
					 target->ml.convert_type)) {
			/* ... */
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				/* ... */
			if (lock->ml.highest_blocked < target->ml.convert_type)
				lock->ml.highest_blocked =
					target->ml.convert_type;
	/* ... */

	/* we can convert the lock */
	/* ... */
		spin_lock(&target->spinlock);
		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);
		/* ... */
		     "%d => %d, node %u\n", dlm->name, res->lockname.len,
		     res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
		     target->ml.type,
		     target->ml.convert_type, target->ml.node);

		target->ml.type = target->ml.convert_type;
		target->ml.convert_type = LKM_IVMODE;
		list_move_tail(&target->list, &res->granted);

		BUG_ON(!target->lksb);
		target->lksb->status = DLM_NORMAL;

		spin_unlock(&target->spinlock);
	/* ... */

	if (list_empty(&res->blocked))
		/* ... */
	target = list_entry(res->blocked.next, struct dlm_lock, list);

	list_for_each_entry(lock, &res->granted, list) {
		/* ... */
		if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
			/* ... */
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				/* ... */
			if (lock->ml.highest_blocked < target->ml.type)
				lock->ml.highest_blocked = target->ml.type;
	/* ... */
	list_for_each_entry(lock, &res->converting, list) {
		/* ... */
		if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
			/* ... */
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				/* ... */
			if (lock->ml.highest_blocked < target->ml.type)
				lock->ml.highest_blocked = target->ml.type;
	/* ... */

	/* we can grant the blocked lock (only
	 * ... */
		spin_lock(&target->spinlock);
		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);
		/* ... */
		     "node %u\n", dlm->name, res->lockname.len,
		     res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
		     target->ml.type, target->ml.node);

		/* target->ml.type is already correct */
		list_move_tail(&target->list, &res->granted);

		BUG_ON(!target->lksb);
		target->lksb->status = DLM_NORMAL;

		spin_unlock(&target->spinlock);
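
Both passes hinge on dlm_lock_compatible(existing, requested). The helper itself is defined elsewhere (dlmcommon.h); the sketch below only models the usual three-mode lattice (null, protected read, exclusive) to show the shape of such a test:

/* Illustrative mode-compatibility test; the real o2dlm helper works
 * over the LKM_* constants but follows the same lattice. */
enum my_mode { MY_NL, MY_PR, MY_EX };

static int my_lock_compatible(enum my_mode existing, enum my_mode request)
{
	/* a null lock is compatible with everything */
	if (existing == MY_NL || request == MY_NL)
		return 1;
	/* exclusive is incompatible with any other real lock */
	if (existing == MY_EX || request == MY_EX)
		return 0;
	/* PR vs PR: shared readers coexist */
	return 1;
}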

In dlm_kick_thread():

		spin_lock(&dlm->spinlock);
		spin_lock(&res->spinlock);
		/* ... */
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);
	/* ... */
	wake_up(&dlm->dlm_thread_wq);

In __dlm_dirty_lockres():

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);
	/* ... */
	if (res->owner == dlm->node_num) {
		if (res->state & (DLM_LOCK_RES_MIGRATING |
		/* ... */
		if (list_empty(&res->dirty)) {
			/* ... */
			list_add_tail(&res->dirty, &dlm->dirty_list);
			res->state |= DLM_LOCK_RES_DIRTY;
	/* ... */
	mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len,
	     res->lockname.name);

In dlm_launch_thread():

	dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm-%s",
					   dlm->name);
	if (IS_ERR(dlm->dlm_thread_task)) {
		mlog_errno(PTR_ERR(dlm->dlm_thread_task));
		dlm->dlm_thread_task = NULL;
		return -EINVAL;
	}
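
dlm_launch_thread() and dlm_complete_thread() are the stock kthread lifecycle: kthread_run() returns an ERR_PTR on failure, and kthread_stop() later flags the thread to stop, wakes it, and waits for it to exit. A minimal sketch with illustrative names:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static struct task_struct *my_task;

static int my_thread_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* do periodic work, then sleep for up to a second */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}

static int my_launch(void *ctx, const char *name)
{
	my_task = kthread_run(my_thread_fn, ctx, "my-%s", name);
	if (IS_ERR(my_task)) {
		int err = PTR_ERR(my_task);

		my_task = NULL;
		return err;
	}
	return 0;
}

static void my_complete(void)
{
	if (my_task) {
		kthread_stop(my_task);	/* wakes the thread and waits */
		my_task = NULL;
	}
}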

In dlm_complete_thread():

	if (dlm->dlm_thread_task) {
		/* ... */
		kthread_stop(dlm->dlm_thread_task);
		dlm->dlm_thread_task = NULL;
	}

In dlm_dirty_list_empty():

	spin_lock(&dlm->spinlock);
	empty = list_empty(&dlm->dirty_list);
	spin_unlock(&dlm->spinlock);

In dlm_flush_asts():

	spin_lock(&dlm->ast_lock);
	while (!list_empty(&dlm->pending_asts)) {
		lock = list_entry(dlm->pending_asts.next,
		/* ... */
		/* get an extra ref on lock */
		/* ... */
		res = lock->lockres;
		/* ... */
		     "node %u\n", dlm->name, res->lockname.len,
		     res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
		     lock->ml.type, lock->ml.node);

		BUG_ON(!lock->ast_pending);
		/* ... */
		list_del_init(&lock->ast_list);
		/* ... */
		spin_unlock(&dlm->ast_lock);

		if (lock->ml.node != dlm->node_num) {
			/* ... */
		spin_lock(&dlm->ast_lock);

		/* ...
		 * we were delivering the last one */
		if (!list_empty(&lock->ast_list)) {
			/* ... */
			     "one\n", dlm->name, res->lockname.len,
			     res->lockname.name);
		/* ... */
			lock->ast_pending = 0;

		/* drop the extra ref.
		 * ... */
	/* ... */
	while (!list_empty(&dlm->pending_basts)) {
		lock = list_entry(dlm->pending_basts.next,
		/* ... */
		/* get an extra ref on lock */
		/* ... */
		res = lock->lockres;

		BUG_ON(!lock->bast_pending);
		/* ... */
		spin_lock(&lock->spinlock);
		BUG_ON(lock->ml.highest_blocked <= LKM_IVMODE);
		hi = lock->ml.highest_blocked;
		lock->ml.highest_blocked = LKM_IVMODE;
		spin_unlock(&lock->spinlock);
		/* ... */
		list_del_init(&lock->bast_list);
		/* ... */
		spin_unlock(&dlm->ast_lock);
		/* ... */
		     dlm->name, res->lockname.len, res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
		     hi, lock->ml.node);

		if (lock->ml.node != dlm->node_num) {
			/* ... */
		spin_lock(&dlm->ast_lock);

		/* ...
		 * we were delivering the last one */
		if (!list_empty(&lock->bast_list)) {
			/* ... */
			     "one\n", dlm->name, res->lockname.len,
			     res->lockname.name);
		/* ... */
			lock->bast_pending = 0;

		/* drop the extra ref.
		 * ... */
	/* ... */
	wake_up(&dlm->ast_wq);
	spin_unlock(&dlm->ast_lock);
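
Both flush loops follow the same protocol: take an extra reference, unlink the entry, drop the list lock to deliver a callback that may block, retake the lock, and clear the pending flag only if the entry was not re-queued in the meantime. A condensed sketch of that protocol, with illustrative names and a single kref standing in for the dlm_lock_get/put pairs:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct my_item {
	struct kref ref;
	struct list_head entry;	/* empty when not queued */
	int pending;
	void (*deliver)(struct my_item *item);	/* may sleep */
};

static void my_item_release(struct kref *ref)
{
	/* free the containing object here */
}

static void my_flush(spinlock_t *lock, struct list_head *queue)
{
	struct my_item *item;

	spin_lock(lock);
	while (!list_empty(queue)) {
		item = list_first_entry(queue, struct my_item, entry);
		kref_get(&item->ref);	/* keep it alive while unlocked */
		list_del_init(&item->entry);
		spin_unlock(lock);

		item->deliver(item);	/* no spinlock held here */

		spin_lock(lock);
		/* only clear pending if nobody re-queued it meanwhile */
		if (list_empty(&item->entry))
			item->pending = 0;
		kref_put(&item->ref, my_item_release);
	}
	spin_unlock(lock);
}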

In dlm_thread():

	mlog(0, "dlm thread running for %s...\n", dlm->name);
	/* ... */
		/* dlm_shutting_down is very point-in-time, but that
		 * doesn't matter as we'll just loop back around if we
		 * ... */
		/* ... */
		/* We really don't want to hold dlm->spinlock while
		 * ...
		 * and drop dlm->spinlock ASAP. Once off the list,
		 * res->spinlock needs to be taken again to protect
		 * ... */
		spin_lock(&dlm->spinlock);
		while (!list_empty(&dlm->dirty_list)) {
			int delay = 0;
			res = list_entry(dlm->dirty_list.next,
			/* ... */
			spin_lock(&res->spinlock);
			/* We clear the DLM_LOCK_RES_DIRTY state once we shuffle lists below */
			list_del_init(&res->dirty);
			spin_unlock(&res->spinlock);
			spin_unlock(&dlm->spinlock);
			/* ... */
			/* lockres can be re-dirtied/re-added to the
			 * ... */
			spin_lock(&dlm->ast_lock);
			spin_lock(&res->spinlock);
			if (res->owner != dlm->node_num) {
				/* ... */
				     " dirty %d\n", dlm->name,
				     !!(res->state & DLM_LOCK_RES_IN_PROGRESS),
				     !!(res->state & DLM_LOCK_RES_MIGRATING),
				     !!(res->state & DLM_LOCK_RES_RECOVERING),
				     !!(res->state & DLM_LOCK_RES_DIRTY));
			/* ... */
			BUG_ON(res->owner != dlm->node_num);
			/* ... */
			BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
			if (res->state & (DLM_LOCK_RES_IN_PROGRESS |
			/* ... */
				res->state &= ~DLM_LOCK_RES_DIRTY;
				spin_unlock(&res->spinlock);
				spin_unlock(&dlm->ast_lock);
				mlog(0, "%s: res %.*s, inprogress, delay list "
				     "shuffle, state %d\n", dlm->name,
				     res->lockname.len, res->lockname.name,
				     res->state);
				delay = 1;
			/* ... */
			/* ...
			 * recovering/in-progress. we have the lockres
			 * ... */
			/* ... */
			res->state &= ~DLM_LOCK_RES_DIRTY;
			spin_unlock(&res->spinlock);
			spin_unlock(&dlm->ast_lock);
			/* ... */
			spin_lock(&dlm->spinlock);
			/* if the lock was in-progress, stick
			 * ... */
			if (delay) {
				spin_lock(&res->spinlock);
				/* ... */
				spin_unlock(&res->spinlock);
			/* ... */
			/* unlikely, but we may need to give time to
			 * ... */
			if (!--n) {
				/* ... */
				     dlm->name);
		/* ... */
		spin_unlock(&dlm->spinlock);
		/* ... */
		wait_event_interruptible_timeout(dlm->dlm_thread_wq,
						 /* ... */
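
The thread parks in wait_event_interruptible_timeout(), waking early when the dirty list is non-empty or a stop is requested; the wake_up() in dlm_kick_thread() is what cuts the sleep short. A minimal sketch of the park/kick pairing, with illustrative names:

#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static bool my_have_work;	/* a real version needs proper locking */

static int my_worker(void *unused)
{
	while (!kthread_should_stop()) {
		/* ... drain pending work ... */
		my_have_work = false;

		/* sleep until kicked, stopped, or the timeout elapses */
		wait_event_interruptible_timeout(my_wq,
						 my_have_work ||
						 kthread_should_stop(),
						 msecs_to_jiffies(100));
	}
	return 0;
}

static void my_kick(void)
{
	my_have_work = true;	/* set the condition before waking */
	wake_up(&my_wq);
}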