Lines matching refs: res (ocfs2 DLM lock mastery code, fs/ocfs2/dlm/dlmmaster.c)

49 				struct dlm_lock_resource *res,
76 struct dlm_lock_resource *res,
85 static int dlm_do_master_request(struct dlm_lock_resource *res,
90 struct dlm_lock_resource *res,
94 struct dlm_lock_resource *res,
98 struct dlm_lock_resource *res,
105 struct dlm_lock_resource *res);
107 struct dlm_lock_resource *res);
109 struct dlm_lock_resource *res,
112 struct dlm_lock_resource *res);
251 struct dlm_lock_resource *res, in dlm_init_mle() argument
276 BUG_ON(!res); in dlm_init_mle()
277 mle->mleres = res; in dlm_init_mle()
278 memcpy(mle->mname, res->lockname.name, res->lockname.len); in dlm_init_mle()
279 mle->mnamelen = res->lockname.len; in dlm_init_mle()
280 mle->mnamehash = res->lockname.hash; in dlm_init_mle()
466 struct dlm_lock_resource *res; in dlm_lockres_release() local
469 res = container_of(kref, struct dlm_lock_resource, refs); in dlm_lockres_release()
470 dlm = res->dlm; in dlm_lockres_release()
474 BUG_ON(!res->lockname.name); in dlm_lockres_release()
476 mlog(0, "destroying lockres %.*s\n", res->lockname.len, in dlm_lockres_release()
477 res->lockname.name); in dlm_lockres_release()
481 if (!hlist_unhashed(&res->hash_node) || in dlm_lockres_release()
482 !list_empty(&res->granted) || in dlm_lockres_release()
483 !list_empty(&res->converting) || in dlm_lockres_release()
484 !list_empty(&res->blocked) || in dlm_lockres_release()
485 !list_empty(&res->dirty) || in dlm_lockres_release()
486 !list_empty(&res->recovering) || in dlm_lockres_release()
487 !list_empty(&res->purge)) { in dlm_lockres_release()
491 res->lockname.len, res->lockname.name, in dlm_lockres_release()
492 !hlist_unhashed(&res->hash_node) ? 'H' : ' ', in dlm_lockres_release()
493 !list_empty(&res->granted) ? 'G' : ' ', in dlm_lockres_release()
494 !list_empty(&res->converting) ? 'C' : ' ', in dlm_lockres_release()
495 !list_empty(&res->blocked) ? 'B' : ' ', in dlm_lockres_release()
496 !list_empty(&res->dirty) ? 'D' : ' ', in dlm_lockres_release()
497 !list_empty(&res->recovering) ? 'R' : ' ', in dlm_lockres_release()
498 !list_empty(&res->purge) ? 'P' : ' '); in dlm_lockres_release()
500 dlm_print_one_lock_resource(res); in dlm_lockres_release()
505 BUG_ON(!hlist_unhashed(&res->hash_node)); in dlm_lockres_release()
506 BUG_ON(!list_empty(&res->granted)); in dlm_lockres_release()
507 BUG_ON(!list_empty(&res->converting)); in dlm_lockres_release()
508 BUG_ON(!list_empty(&res->blocked)); in dlm_lockres_release()
509 BUG_ON(!list_empty(&res->dirty)); in dlm_lockres_release()
510 BUG_ON(!list_empty(&res->recovering)); in dlm_lockres_release()
511 BUG_ON(!list_empty(&res->purge)); in dlm_lockres_release()
513 kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name); in dlm_lockres_release()
515 kmem_cache_free(dlm_lockres_cache, res); in dlm_lockres_release()
518 void dlm_lockres_put(struct dlm_lock_resource *res) in dlm_lockres_put() argument
520 kref_put(&res->refs, dlm_lockres_release); in dlm_lockres_put()
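
The release path shown above runs only when the last reference is dropped: dlm_lockres_put() does a kref_put(), and dlm_lockres_release() insists via the BUG_ON checks that the resource is already unhashed and off every queue before the name and the resource go back to their caches. A minimal user-space sketch of that refcount-then-release pattern, using C11 atomics and simplified fields rather than the kernel's kref API:

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    /* Simplified stand-in for a refcounted lock resource. */
    struct lockres {
            atomic_int refs;
            int hashed;       /* stands in for !hlist_unhashed(&res->hash_node) */
            int queued;       /* stands in for the granted/converting/... lists */
            char *name;
    };

    static void lockres_release(struct lockres *res)
    {
            /* Mirror of the BUG_ON() checks: a resource that is still
             * hashed or queued must never reach the free path. */
            assert(!res->hashed && !res->queued);
            free(res->name);
            free(res);
    }

    static void lockres_put(struct lockres *res)
    {
            /* kref_put() analogue: free only on the final reference. */
            if (atomic_fetch_sub(&res->refs, 1) == 1)
                    lockres_release(res);
    }
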
524 struct dlm_lock_resource *res, in dlm_init_lockres() argument
533 qname = (char *) res->lockname.name; in dlm_init_lockres()
536 res->lockname.len = namelen; in dlm_init_lockres()
537 res->lockname.hash = dlm_lockid_hash(name, namelen); in dlm_init_lockres()
539 init_waitqueue_head(&res->wq); in dlm_init_lockres()
540 spin_lock_init(&res->spinlock); in dlm_init_lockres()
541 INIT_HLIST_NODE(&res->hash_node); in dlm_init_lockres()
542 INIT_LIST_HEAD(&res->granted); in dlm_init_lockres()
543 INIT_LIST_HEAD(&res->converting); in dlm_init_lockres()
544 INIT_LIST_HEAD(&res->blocked); in dlm_init_lockres()
545 INIT_LIST_HEAD(&res->dirty); in dlm_init_lockres()
546 INIT_LIST_HEAD(&res->recovering); in dlm_init_lockres()
547 INIT_LIST_HEAD(&res->purge); in dlm_init_lockres()
548 INIT_LIST_HEAD(&res->tracking); in dlm_init_lockres()
549 atomic_set(&res->asts_reserved, 0); in dlm_init_lockres()
550 res->migration_pending = 0; in dlm_init_lockres()
551 res->inflight_locks = 0; in dlm_init_lockres()
552 res->inflight_assert_workers = 0; in dlm_init_lockres()
554 res->dlm = dlm; in dlm_init_lockres()
556 kref_init(&res->refs); in dlm_init_lockres()
562 spin_lock(&res->spinlock); in dlm_init_lockres()
563 dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN); in dlm_init_lockres()
564 spin_unlock(&res->spinlock); in dlm_init_lockres()
566 res->state = DLM_LOCK_RES_IN_PROGRESS; in dlm_init_lockres()
568 res->last_used = 0; in dlm_init_lockres()
571 list_add_tail(&res->tracking, &dlm->tracking_list); in dlm_init_lockres()
574 memset(res->lvb, 0, DLM_LVB_LEN); in dlm_init_lockres()
575 bitmap_zero(res->refmap, O2NM_MAX_NODES); in dlm_init_lockres()
582 struct dlm_lock_resource *res = NULL; in dlm_new_lockres() local
584 res = kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS); in dlm_new_lockres()
585 if (!res) in dlm_new_lockres()
588 res->lockname.name = kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS); in dlm_new_lockres()
589 if (!res->lockname.name) in dlm_new_lockres()
592 dlm_init_lockres(dlm, res, name, namelen); in dlm_new_lockres()
593 return res; in dlm_new_lockres()
596 if (res) in dlm_new_lockres()
597 kmem_cache_free(dlm_lockres_cache, res); in dlm_new_lockres()
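
dlm_new_lockres() allocates from two separate slab caches (the resource from dlm_lockres_cache, the name from dlm_lockname_cache) and, as the error path above shows, frees the first allocation when the second one fails. A hedged user-space sketch of the same allocate-two-pieces-or-unwind shape, with plain calloc/malloc standing in for the slab caches:

    #include <stdlib.h>
    #include <string.h>

    struct lockres {
            char *name;
            size_t namelen;
    };

    static struct lockres *new_lockres(const char *name, size_t namelen)
    {
            struct lockres *res = calloc(1, sizeof(*res));
            if (!res)
                    return NULL;

            res->name = malloc(namelen);
            if (!res->name) {
                    free(res);      /* unwind the first allocation */
                    return NULL;
            }
            memcpy(res->name, name, namelen);
            res->namelen = namelen;
            return res;
    }
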
602 struct dlm_lock_resource *res, int bit) in dlm_lockres_set_refmap_bit() argument
604 assert_spin_locked(&res->spinlock); in dlm_lockres_set_refmap_bit()
606 mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len, in dlm_lockres_set_refmap_bit()
607 res->lockname.name, bit, __builtin_return_address(0)); in dlm_lockres_set_refmap_bit()
609 set_bit(bit, res->refmap); in dlm_lockres_set_refmap_bit()
613 struct dlm_lock_resource *res, int bit) in dlm_lockres_clear_refmap_bit() argument
615 assert_spin_locked(&res->spinlock); in dlm_lockres_clear_refmap_bit()
617 mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len, in dlm_lockres_clear_refmap_bit()
618 res->lockname.name, bit, __builtin_return_address(0)); in dlm_lockres_clear_refmap_bit()
620 clear_bit(bit, res->refmap); in dlm_lockres_clear_refmap_bit()
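
Both refmap helpers assert that res->spinlock is already held and then set or clear one bit per cluster node; the refmap records which nodes still hold a reference to this resource and therefore must drop it before the resource can be purged. A simplified sketch of the same idea with a plain bitmap and a pthread mutex standing in for the spinlock (assumed names, not the kernel interface):

    #include <limits.h>
    #include <pthread.h>

    #define MAX_NODES 255   /* stands in for O2NM_MAX_NODES */
    #define BITS_PER_WORD (sizeof(unsigned long) * CHAR_BIT)

    struct lockres {
            pthread_mutex_t lock;   /* stands in for res->spinlock */
            unsigned long refmap[(MAX_NODES + BITS_PER_WORD - 1) / BITS_PER_WORD];
    };

    /* Callers must already hold res->lock, as assert_spin_locked() demands. */
    static void lockres_set_refmap_bit(struct lockres *res, unsigned node)
    {
            res->refmap[node / BITS_PER_WORD] |= 1UL << (node % BITS_PER_WORD);
    }

    static void lockres_clear_refmap_bit(struct lockres *res, unsigned node)
    {
            res->refmap[node / BITS_PER_WORD] &= ~(1UL << (node % BITS_PER_WORD));
    }
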
624 struct dlm_lock_resource *res) in __dlm_lockres_grab_inflight_ref() argument
626 res->inflight_locks++; in __dlm_lockres_grab_inflight_ref()
629 res->lockname.len, res->lockname.name, res->inflight_locks, in __dlm_lockres_grab_inflight_ref()
634 struct dlm_lock_resource *res) in dlm_lockres_grab_inflight_ref() argument
636 assert_spin_locked(&res->spinlock); in dlm_lockres_grab_inflight_ref()
637 __dlm_lockres_grab_inflight_ref(dlm, res); in dlm_lockres_grab_inflight_ref()
641 struct dlm_lock_resource *res) in dlm_lockres_drop_inflight_ref() argument
643 assert_spin_locked(&res->spinlock); in dlm_lockres_drop_inflight_ref()
645 BUG_ON(res->inflight_locks == 0); in dlm_lockres_drop_inflight_ref()
647 res->inflight_locks--; in dlm_lockres_drop_inflight_ref()
650 res->lockname.len, res->lockname.name, res->inflight_locks, in dlm_lockres_drop_inflight_ref()
653 wake_up(&res->wq); in dlm_lockres_drop_inflight_ref()
657 struct dlm_lock_resource *res) in __dlm_lockres_grab_inflight_worker() argument
659 assert_spin_locked(&res->spinlock); in __dlm_lockres_grab_inflight_worker()
660 res->inflight_assert_workers++; in __dlm_lockres_grab_inflight_worker()
662 dlm->name, res->lockname.len, res->lockname.name, in __dlm_lockres_grab_inflight_worker()
663 res->inflight_assert_workers); in __dlm_lockres_grab_inflight_worker()
667 struct dlm_lock_resource *res) in __dlm_lockres_drop_inflight_worker() argument
669 assert_spin_locked(&res->spinlock); in __dlm_lockres_drop_inflight_worker()
670 BUG_ON(res->inflight_assert_workers == 0); in __dlm_lockres_drop_inflight_worker()
671 res->inflight_assert_workers--; in __dlm_lockres_drop_inflight_worker()
673 dlm->name, res->lockname.len, res->lockname.name, in __dlm_lockres_drop_inflight_worker()
674 res->inflight_assert_workers); in __dlm_lockres_drop_inflight_worker()
678 struct dlm_lock_resource *res) in dlm_lockres_drop_inflight_worker() argument
680 spin_lock(&res->spinlock); in dlm_lockres_drop_inflight_worker()
681 __dlm_lockres_drop_inflight_worker(dlm, res); in dlm_lockres_drop_inflight_worker()
682 spin_unlock(&res->spinlock); in dlm_lockres_drop_inflight_worker()
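
The inflight_locks and inflight_assert_workers counters follow the same grab/drop discipline: increment under res->spinlock, decrement under it with a BUG_ON against underflow, and wake res->wq on the drop so anything waiting for the count to drain can recheck. A condition-variable sketch of that pairing (illustrative names, single counter):

    #include <assert.h>
    #include <pthread.h>

    struct lockres {
            pthread_mutex_t lock;   /* res->spinlock */
            pthread_cond_t wq;      /* res->wq */
            unsigned inflight;
    };

    static void lockres_grab_inflight(struct lockres *res)
    {
            pthread_mutex_lock(&res->lock);
            res->inflight++;
            pthread_mutex_unlock(&res->lock);
    }

    static void lockres_drop_inflight(struct lockres *res)
    {
            pthread_mutex_lock(&res->lock);
            assert(res->inflight > 0);      /* mirrors the BUG_ON() */
            res->inflight--;
            pthread_mutex_unlock(&res->lock);
            pthread_cond_broadcast(&res->wq);       /* wake_up(&res->wq) analogue */
    }
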
706 struct dlm_lock_resource *tmpres=NULL, *res=NULL; in dlm_get_lock_resource() local
766 if (res) { in dlm_get_lock_resource()
768 if (!list_empty(&res->tracking)) in dlm_get_lock_resource()
769 list_del_init(&res->tracking); in dlm_get_lock_resource()
773 res->lockname.len, in dlm_get_lock_resource()
774 res->lockname.name); in dlm_get_lock_resource()
776 dlm_lockres_put(res); in dlm_get_lock_resource()
778 res = tmpres; in dlm_get_lock_resource()
782 if (!res) { in dlm_get_lock_resource()
789 res = dlm_new_lockres(dlm, lockid, namelen); in dlm_get_lock_resource()
790 if (!res) in dlm_get_lock_resource()
795 mlog(0, "no lockres found, allocated our own: %p\n", res); in dlm_get_lock_resource()
800 spin_lock(&res->spinlock); in dlm_get_lock_resource()
801 dlm_change_lockres_owner(dlm, res, dlm->node_num); in dlm_get_lock_resource()
802 __dlm_insert_lockres(dlm, res); in dlm_get_lock_resource()
803 dlm_lockres_grab_inflight_ref(dlm, res); in dlm_get_lock_resource()
804 spin_unlock(&res->spinlock); in dlm_get_lock_resource()
856 dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0); in dlm_get_lock_resource()
879 __dlm_insert_lockres(dlm, res); in dlm_get_lock_resource()
882 __dlm_lockres_grab_inflight_ref(dlm, res); in dlm_get_lock_resource()
900 if (!dlm_pre_master_reco_lockres(dlm, res)) in dlm_get_lock_resource()
936 ret = dlm_do_master_request(res, mle, nodenum); in dlm_get_lock_resource()
955 ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked); in dlm_get_lock_resource()
959 "request now, blocked=%d\n", dlm->name, res->lockname.len, in dlm_get_lock_resource()
960 res->lockname.name, blocked); in dlm_get_lock_resource()
964 dlm->name, res->lockname.len, in dlm_get_lock_resource()
965 res->lockname.name, blocked); in dlm_get_lock_resource()
966 dlm_print_one_lock_resource(res); in dlm_get_lock_resource()
973 mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len, in dlm_get_lock_resource()
974 res->lockname.name, res->owner); in dlm_get_lock_resource()
976 BUG_ON(res->owner == O2NM_MAX_NODES); in dlm_get_lock_resource()
985 spin_lock(&res->spinlock); in dlm_get_lock_resource()
986 res->state &= ~DLM_LOCK_RES_IN_PROGRESS; in dlm_get_lock_resource()
987 spin_unlock(&res->spinlock); in dlm_get_lock_resource()
988 wake_up(&res->wq); in dlm_get_lock_resource()
995 return res; in dlm_get_lock_resource()
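
dlm_get_lock_resource() is a lookup-or-create path: it looks the lock name up in the hash, and if another caller raced in first it drops the resource it speculatively allocated and uses the existing one (tmpres); otherwise it inserts the new resource, takes an inflight reference, and either masters it locally or starts the mastery protocol. Leaving the cluster negotiation aside, the get-or-create shape reduces to something like this user-space sketch (toy single-list "hash", hypothetical helper names):

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    struct lockres { char *name; struct lockres *next; };

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct lockres *table;   /* toy stand-in for the lockres hash */

    static struct lockres *lookup(const char *name)
    {
            for (struct lockres *r = table; r; r = r->next)
                    if (!strcmp(r->name, name))
                            return r;
            return NULL;
    }

    static struct lockres *get_lockres(const char *name)
    {
            pthread_mutex_lock(&table_lock);
            struct lockres *res = lookup(name);
            pthread_mutex_unlock(&table_lock);
            if (res)
                    return res;

            struct lockres *fresh = calloc(1, sizeof(*fresh));
            if (!fresh || !(fresh->name = strdup(name))) {
                    free(fresh);
                    return NULL;
            }

            pthread_mutex_lock(&table_lock);
            res = lookup(name);             /* recheck under the lock */
            if (!res) {
                    fresh->next = table;    /* insert: we own ("master") it */
                    table = fresh;
                    res = fresh;
            } else {
                    free(fresh->name);      /* lost the race: drop our copy */
                    free(fresh);
            }
            pthread_mutex_unlock(&table_lock);
            return res;
    }
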
1002 struct dlm_lock_resource *res, in dlm_wait_for_lock_mastery() argument
1016 spin_lock(&res->spinlock); in dlm_wait_for_lock_mastery()
1017 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { in dlm_wait_for_lock_mastery()
1019 res->lockname.len, res->lockname.name, res->owner); in dlm_wait_for_lock_mastery()
1020 spin_unlock(&res->spinlock); in dlm_wait_for_lock_mastery()
1023 if (res->owner != dlm->node_num) { in dlm_wait_for_lock_mastery()
1024 ret = dlm_do_master_request(res, mle, res->owner); in dlm_wait_for_lock_mastery()
1027 mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret); in dlm_wait_for_lock_mastery()
1035 spin_unlock(&res->spinlock); in dlm_wait_for_lock_mastery()
1048 dlm->name, res->lockname.len, res->lockname.name); in dlm_wait_for_lock_mastery()
1049 ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked); in dlm_wait_for_lock_mastery()
1053 dlm->name, res->lockname.len, res->lockname.name, in dlm_wait_for_lock_mastery()
1063 "rechecking now\n", dlm->name, res->lockname.len, in dlm_wait_for_lock_mastery()
1064 res->lockname.name); in dlm_wait_for_lock_mastery()
1069 "for %s:%.*s\n", dlm->name, res->lockname.len, in dlm_wait_for_lock_mastery()
1070 res->lockname.name); in dlm_wait_for_lock_mastery()
1107 if (res->owner == O2NM_MAX_NODES) { in dlm_wait_for_lock_mastery()
1109 res->lockname.len, res->lockname.name); in dlm_wait_for_lock_mastery()
1112 mlog(0, "done waiting, master is %u\n", res->owner); in dlm_wait_for_lock_mastery()
1121 res->lockname.len, res->lockname.name, m); in dlm_wait_for_lock_mastery()
1122 ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0); in dlm_wait_for_lock_mastery()
1137 spin_lock(&res->spinlock); in dlm_wait_for_lock_mastery()
1140 dlm_change_lockres_owner(dlm, res, m); in dlm_wait_for_lock_mastery()
1141 spin_unlock(&res->spinlock); in dlm_wait_for_lock_mastery()
1207 struct dlm_lock_resource *res, in dlm_restart_lock_mastery() argument
1254 res->lockname.len, in dlm_restart_lock_mastery()
1255 res->lockname.name, in dlm_restart_lock_mastery()
1270 res->lockname.len, in dlm_restart_lock_mastery()
1271 res->lockname.name); in dlm_restart_lock_mastery()
1273 mle->mleres = res; in dlm_restart_lock_mastery()
1306 static int dlm_do_master_request(struct dlm_lock_resource *res, in dlm_do_master_request() argument
1358 "reference\n", dlm->name, res->lockname.len, in dlm_do_master_request()
1359 res->lockname.name, to); in dlm_do_master_request()
1405 struct dlm_lock_resource *res = NULL; in dlm_master_request_handler() local
1434 res = __dlm_lookup_lockres(dlm, name, namelen, hash); in dlm_master_request_handler()
1435 if (res) { in dlm_master_request_handler()
1439 spin_lock(&res->spinlock); in dlm_master_request_handler()
1446 if (hlist_unhashed(&res->hash_node)) { in dlm_master_request_handler()
1447 spin_unlock(&res->spinlock); in dlm_master_request_handler()
1448 dlm_lockres_put(res); in dlm_master_request_handler()
1452 if (res->state & (DLM_LOCK_RES_RECOVERING| in dlm_master_request_handler()
1454 spin_unlock(&res->spinlock); in dlm_master_request_handler()
1463 if (res->owner == dlm->node_num) { in dlm_master_request_handler()
1464 dlm_lockres_set_refmap_bit(dlm, res, request->node_idx); in dlm_master_request_handler()
1465 spin_unlock(&res->spinlock); in dlm_master_request_handler()
1478 } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { in dlm_master_request_handler()
1479 spin_unlock(&res->spinlock); in dlm_master_request_handler()
1490 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) { in dlm_master_request_handler()
1529 dlm_lockres_set_refmap_bit(dlm, res, in dlm_master_request_handler()
1543 spin_unlock(&res->spinlock); in dlm_master_request_handler()
1616 dlm->node_num, res->lockname.len, res->lockname.name); in dlm_master_request_handler()
1617 spin_lock(&res->spinlock); in dlm_master_request_handler()
1618 ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx, in dlm_master_request_handler()
1623 spin_unlock(&res->spinlock); in dlm_master_request_handler()
1624 dlm_lockres_put(res); in dlm_master_request_handler()
1627 __dlm_lockres_grab_inflight_worker(dlm, res); in dlm_master_request_handler()
1628 spin_unlock(&res->spinlock); in dlm_master_request_handler()
1631 if (res) in dlm_master_request_handler()
1632 dlm_lockres_put(res); in dlm_master_request_handler()
1651 struct dlm_lock_resource *res, in dlm_do_assert_master() argument
1659 const char *lockname = res->lockname.name; in dlm_do_assert_master()
1660 unsigned int namelen = res->lockname.len; in dlm_do_assert_master()
1664 spin_lock(&res->spinlock); in dlm_do_assert_master()
1665 res->state |= DLM_LOCK_RES_SETREF_INPROG; in dlm_do_assert_master()
1666 spin_unlock(&res->spinlock); in dlm_do_assert_master()
1733 spin_lock(&res->spinlock); in dlm_do_assert_master()
1734 dlm_lockres_set_refmap_bit(dlm, res, to); in dlm_do_assert_master()
1735 spin_unlock(&res->spinlock); in dlm_do_assert_master()
1742 spin_lock(&res->spinlock); in dlm_do_assert_master()
1743 res->state &= ~DLM_LOCK_RES_SETREF_INPROG; in dlm_do_assert_master()
1744 spin_unlock(&res->spinlock); in dlm_do_assert_master()
1745 wake_up(&res->wq); in dlm_do_assert_master()
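
dlm_do_assert_master() brackets the assert traffic with the DLM_LOCK_RES_SETREF_INPROG flag: set it under the spinlock before sending, clear it and wake res->wq afterwards, so a deref arriving from another node waits until the refmap updates from the assert have landed. A hedged sketch of that set-flag / do-work / clear-and-wake bracket, plus the matching wait, in user-space C (illustrative flag value and names):

    #include <pthread.h>

    #define RES_SETREF_INPROG 0x1   /* stands in for DLM_LOCK_RES_SETREF_INPROG */

    struct lockres {
            pthread_mutex_t lock;
            pthread_cond_t wq;
            unsigned state;
    };

    static void do_assert_master(struct lockres *res)
    {
            pthread_mutex_lock(&res->lock);
            res->state |= RES_SETREF_INPROG;
            pthread_mutex_unlock(&res->lock);

            /* ... send the assert_master messages and update refmap ... */

            pthread_mutex_lock(&res->lock);
            res->state &= ~RES_SETREF_INPROG;
            pthread_mutex_unlock(&res->lock);
            pthread_cond_broadcast(&res->wq);       /* wake_up(&res->wq) */
    }

    /* A deref path would park here before touching the refmap. */
    static void wait_setref_done(struct lockres *res)
    {
            pthread_mutex_lock(&res->lock);
            while (res->state & RES_SETREF_INPROG)
                    pthread_cond_wait(&res->wq, &res->lock);
            pthread_mutex_unlock(&res->lock);
    }
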
1765 struct dlm_lock_resource *res = NULL; in dlm_assert_master_handler() local
1843 res = __dlm_lookup_lockres(dlm, name, namelen, hash); in dlm_assert_master_handler()
1844 if (res) { in dlm_assert_master_handler()
1845 spin_lock(&res->spinlock); in dlm_assert_master_handler()
1846 if (res->state & DLM_LOCK_RES_RECOVERING) { in dlm_assert_master_handler()
1852 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN && in dlm_assert_master_handler()
1853 res->owner != assert->node_idx) { in dlm_assert_master_handler()
1856 assert->node_idx, res->owner, namelen, in dlm_assert_master_handler()
1858 __dlm_print_one_lock_resource(res); in dlm_assert_master_handler()
1862 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { in dlm_assert_master_handler()
1864 if (res->owner == assert->node_idx) { in dlm_assert_master_handler()
1873 res->owner, namelen, name); in dlm_assert_master_handler()
1876 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) { in dlm_assert_master_handler()
1897 spin_unlock(&res->spinlock); in dlm_assert_master_handler()
1927 if (res) { in dlm_assert_master_handler()
1929 spin_lock(&res->spinlock); in dlm_assert_master_handler()
1933 res->lockname.len, res->lockname.name, in dlm_assert_master_handler()
1935 res->state &= ~DLM_LOCK_RES_MIGRATING; in dlm_assert_master_handler()
1937 dlm_change_lockres_owner(dlm, res, mle->new_master); in dlm_assert_master_handler()
1938 BUG_ON(res->state & DLM_LOCK_RES_DIRTY); in dlm_assert_master_handler()
1940 dlm_change_lockres_owner(dlm, res, mle->master); in dlm_assert_master_handler()
1942 spin_unlock(&res->spinlock); in dlm_assert_master_handler()
1945 wake_up(&res->wq); in dlm_assert_master_handler()
1983 } else if (res) { in dlm_assert_master_handler()
1984 if (res->owner != assert->node_idx) { in dlm_assert_master_handler()
1987 res->owner, namelen, name); in dlm_assert_master_handler()
1994 if (res) { in dlm_assert_master_handler()
1995 spin_lock(&res->spinlock); in dlm_assert_master_handler()
1996 res->state |= DLM_LOCK_RES_SETREF_INPROG; in dlm_assert_master_handler()
1997 spin_unlock(&res->spinlock); in dlm_assert_master_handler()
1998 *ret_data = (void *)res; in dlm_assert_master_handler()
2023 __dlm_print_one_lock_resource(res); in dlm_assert_master_handler()
2024 spin_unlock(&res->spinlock); in dlm_assert_master_handler()
2030 *ret_data = (void *)res; in dlm_assert_master_handler()
2037 struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data; in dlm_assert_master_post_handler() local
2040 spin_lock(&res->spinlock); in dlm_assert_master_post_handler()
2041 res->state &= ~DLM_LOCK_RES_SETREF_INPROG; in dlm_assert_master_post_handler()
2042 spin_unlock(&res->spinlock); in dlm_assert_master_post_handler()
2043 wake_up(&res->wq); in dlm_assert_master_post_handler()
2044 dlm_lockres_put(res); in dlm_assert_master_post_handler()
2050 struct dlm_lock_resource *res, in dlm_dispatch_assert_master() argument
2061 item->u.am.lockres = res; /* already have a ref */ in dlm_dispatch_assert_master()
2068 mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len, in dlm_dispatch_assert_master()
2069 res->lockname.name); in dlm_dispatch_assert_master()
2083 struct dlm_lock_resource *res; in dlm_assert_master_worker() local
2091 res = item->u.am.lockres; in dlm_assert_master_worker()
2122 spin_lock(&res->spinlock); in dlm_assert_master_worker()
2123 if (res->state & DLM_LOCK_RES_MIGRATING) { in dlm_assert_master_worker()
2127 spin_unlock(&res->spinlock); in dlm_assert_master_worker()
2130 __dlm_lockres_reserve_ast(res); in dlm_assert_master_worker()
2131 spin_unlock(&res->spinlock); in dlm_assert_master_worker()
2136 res->lockname.len, res->lockname.name, dlm->node_num); in dlm_assert_master_worker()
2137 ret = dlm_do_assert_master(dlm, res, nodemap, flags); in dlm_assert_master_worker()
2145 dlm_lockres_release_ast(dlm, res); in dlm_assert_master_worker()
2148 dlm_lockres_drop_inflight_worker(dlm, res); in dlm_assert_master_worker()
2150 dlm_lockres_put(res); in dlm_assert_master_worker()
2166 struct dlm_lock_resource *res) in dlm_pre_master_reco_lockres() argument
2181 ret = dlm_do_master_requery(dlm, res, nodenum, &master); in dlm_pre_master_reco_lockres()
2215 int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) in dlm_drop_lockres_ref() argument
2222 lockname = res->lockname.name; in dlm_drop_lockres_ref()
2223 namelen = res->lockname.len; in dlm_drop_lockres_ref()
2232 &deref, sizeof(deref), res->owner, &r); in dlm_drop_lockres_ref()
2235 dlm->name, namelen, lockname, ret, res->owner); in dlm_drop_lockres_ref()
2239 dlm->name, namelen, lockname, res->owner, r); in dlm_drop_lockres_ref()
2240 dlm_print_one_lock_resource(res); in dlm_drop_lockres_ref()
2254 struct dlm_lock_resource *res = NULL; in dlm_deref_lockres_handler() local
2283 res = __dlm_lookup_lockres_full(dlm, name, namelen, hash); in dlm_deref_lockres_handler()
2284 if (!res) { in dlm_deref_lockres_handler()
2292 spin_lock(&res->spinlock); in dlm_deref_lockres_handler()
2293 if (res->state & DLM_LOCK_RES_SETREF_INPROG) in dlm_deref_lockres_handler()
2296 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); in dlm_deref_lockres_handler()
2297 if (test_bit(node, res->refmap)) { in dlm_deref_lockres_handler()
2298 dlm_lockres_clear_refmap_bit(dlm, res, node); in dlm_deref_lockres_handler()
2302 spin_unlock(&res->spinlock); in dlm_deref_lockres_handler()
2306 dlm_lockres_calc_usage(dlm, res); in dlm_deref_lockres_handler()
2310 res->lockname.len, res->lockname.name, node); in dlm_deref_lockres_handler()
2311 dlm_print_one_lock_resource(res); in dlm_deref_lockres_handler()
2325 item->u.dl.deref_res = res; in dlm_deref_lockres_handler()
2336 if (res) in dlm_deref_lockres_handler()
2337 dlm_lockres_put(res); in dlm_deref_lockres_handler()
2349 struct dlm_lock_resource *res = NULL; in dlm_deref_lockres_done_handler() local
2375 res = __dlm_lookup_lockres_full(dlm, name, namelen, hash); in dlm_deref_lockres_done_handler()
2376 if (!res) { in dlm_deref_lockres_done_handler()
2383 spin_lock(&res->spinlock); in dlm_deref_lockres_done_handler()
2384 if (!(res->state & DLM_LOCK_RES_DROPPING_REF)) { in dlm_deref_lockres_done_handler()
2385 spin_unlock(&res->spinlock); in dlm_deref_lockres_done_handler()
2389 res->lockname.len, res->lockname.name, node); in dlm_deref_lockres_done_handler()
2394 __dlm_do_purge_lockres(dlm, res); in dlm_deref_lockres_done_handler()
2395 spin_unlock(&res->spinlock); in dlm_deref_lockres_done_handler()
2396 wake_up(&res->wq); in dlm_deref_lockres_done_handler()
2402 if (res) in dlm_deref_lockres_done_handler()
2403 dlm_lockres_put(res); in dlm_deref_lockres_done_handler()
2409 struct dlm_lock_resource *res, u8 node) in dlm_drop_lockres_ref_done() argument
2416 lockname = res->lockname.name; in dlm_drop_lockres_ref_done()
2417 namelen = res->lockname.len; in dlm_drop_lockres_ref_done()
2435 dlm_print_one_lock_resource(res); in dlm_drop_lockres_ref_done()
2442 struct dlm_lock_resource *res; in dlm_deref_lockres_worker() local
2447 res = item->u.dl.deref_res; in dlm_deref_lockres_worker()
2450 spin_lock(&res->spinlock); in dlm_deref_lockres_worker()
2451 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); in dlm_deref_lockres_worker()
2452 __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG); in dlm_deref_lockres_worker()
2453 if (test_bit(node, res->refmap)) { in dlm_deref_lockres_worker()
2454 dlm_lockres_clear_refmap_bit(dlm, res, node); in dlm_deref_lockres_worker()
2457 spin_unlock(&res->spinlock); in dlm_deref_lockres_worker()
2459 dlm_drop_lockres_ref_done(dlm, res, node); in dlm_deref_lockres_worker()
2463 dlm->name, res->lockname.len, res->lockname.name, node); in dlm_deref_lockres_worker()
2464 dlm_lockres_calc_usage(dlm, res); in dlm_deref_lockres_worker()
2468 res->lockname.len, res->lockname.name, node); in dlm_deref_lockres_worker()
2469 dlm_print_one_lock_resource(res); in dlm_deref_lockres_worker()
2472 dlm_lockres_put(res); in dlm_deref_lockres_worker()
2483 struct dlm_lock_resource *res) in dlm_is_lockres_migratable() argument
2491 assert_spin_locked(&res->spinlock); in dlm_is_lockres_migratable()
2494 if (res->state & DLM_LOCK_RES_MIGRATING) in dlm_is_lockres_migratable()
2498 if (res->state & (DLM_LOCK_RES_RECOVERING| in dlm_is_lockres_migratable()
2502 if (res->owner != dlm->node_num) in dlm_is_lockres_migratable()
2506 queue = dlm_list_idx_to_ptr(res, idx); in dlm_is_lockres_migratable()
2514 "%s list\n", dlm->name, res->lockname.len, in dlm_is_lockres_migratable()
2515 res->lockname.name, in dlm_is_lockres_migratable()
2524 node_ref = find_first_bit(res->refmap, O2NM_MAX_NODES); in dlm_is_lockres_migratable()
2529 mlog(0, "%s: res %.*s, Migratable\n", dlm->name, res->lockname.len, in dlm_is_lockres_migratable()
2530 res->lockname.name); in dlm_is_lockres_migratable()
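
Read together, the checks in dlm_is_lockres_migratable() say: the resource must not already be migrating or recovering, must be mastered by this node, must carry no locks from the local node, and some other node must hold a lock or a refmap reference that makes a handoff worthwhile. A compressed reading of those checks as a predicate (simplified fields, not the kernel structures; the queue walk is collapsed into two counters):

    #include <stdbool.h>

    #define RES_MIGRATING  0x1      /* illustrative flag values */
    #define RES_RECOVERING 0x2

    struct lockres {
            unsigned state;
            int owner;
            int local_locks;        /* locks held by this node */
            int remote_locks;       /* locks held by other nodes */
            unsigned long refmap;   /* toy bitmap of referencing nodes */
    };

    static bool lockres_migratable(const struct lockres *res, int this_node)
    {
            if (res->state & (RES_MIGRATING | RES_RECOVERING))
                    return false;           /* wait for the dust to settle */
            if (res->owner != this_node)
                    return false;           /* only the master migrates */
            if (res->local_locks)
                    return false;           /* we still use it ourselves */
            if (!res->remote_locks && !res->refmap)
                    return false;           /* nobody else cares about it */
            return true;
    }
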
2541 struct dlm_lock_resource *res, u8 target) in dlm_migrate_lockres() argument
2555 name = res->lockname.name; in dlm_migrate_lockres()
2556 namelen = res->lockname.len; in dlm_migrate_lockres()
2582 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name, in dlm_migrate_lockres()
2604 if (dlm_mark_lockres_migrating(dlm, res, target) < 0) { in dlm_migrate_lockres()
2606 "the target went down.\n", res->lockname.len, in dlm_migrate_lockres()
2607 res->lockname.name, target); in dlm_migrate_lockres()
2608 spin_lock(&res->spinlock); in dlm_migrate_lockres()
2609 res->state &= ~DLM_LOCK_RES_MIGRATING; in dlm_migrate_lockres()
2611 spin_unlock(&res->spinlock); in dlm_migrate_lockres()
2648 ret = dlm_send_one_lockres(dlm, res, mres, target, in dlm_migrate_lockres()
2658 spin_lock(&res->spinlock); in dlm_migrate_lockres()
2659 res->state &= ~DLM_LOCK_RES_MIGRATING; in dlm_migrate_lockres()
2661 spin_unlock(&res->spinlock); in dlm_migrate_lockres()
2687 res->owner == target) in dlm_migrate_lockres()
2691 dlm->name, res->lockname.len, res->lockname.name); in dlm_migrate_lockres()
2697 dlm->name, res->lockname.len, in dlm_migrate_lockres()
2698 res->lockname.name, target); in dlm_migrate_lockres()
2704 spin_lock(&res->spinlock); in dlm_migrate_lockres()
2705 res->state &= ~DLM_LOCK_RES_MIGRATING; in dlm_migrate_lockres()
2707 spin_unlock(&res->spinlock); in dlm_migrate_lockres()
2712 dlm->name, res->lockname.len, res->lockname.name); in dlm_migrate_lockres()
2716 spin_lock(&res->spinlock); in dlm_migrate_lockres()
2717 dlm_set_lockres_owner(dlm, res, target); in dlm_migrate_lockres()
2718 res->state &= ~DLM_LOCK_RES_MIGRATING; in dlm_migrate_lockres()
2719 dlm_remove_nonlocal_locks(dlm, res); in dlm_migrate_lockres()
2720 spin_unlock(&res->spinlock); in dlm_migrate_lockres()
2721 wake_up(&res->wq); in dlm_migrate_lockres()
2728 dlm_lockres_calc_usage(dlm, res); in dlm_migrate_lockres()
2733 dlm_kick_thread(dlm, res); in dlm_migrate_lockres()
2738 wake_up(&res->wq); in dlm_migrate_lockres()
2760 int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) in dlm_empty_lockres() argument
2769 spin_lock(&res->spinlock); in dlm_empty_lockres()
2770 if (dlm_is_lockres_migratable(dlm, res)) in dlm_empty_lockres()
2771 target = dlm_pick_migration_target(dlm, res); in dlm_empty_lockres()
2772 spin_unlock(&res->spinlock); in dlm_empty_lockres()
2780 ret = dlm_migrate_lockres(dlm, res, target); in dlm_empty_lockres()
2783 dlm->name, res->lockname.len, res->lockname.name, in dlm_empty_lockres()
2802 struct dlm_lock_resource *res, in dlm_migration_can_proceed() argument
2806 spin_lock(&res->spinlock); in dlm_migration_can_proceed()
2807 can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING); in dlm_migration_can_proceed()
2808 spin_unlock(&res->spinlock); in dlm_migration_can_proceed()
2820 struct dlm_lock_resource *res) in dlm_lockres_is_dirty() argument
2823 spin_lock(&res->spinlock); in dlm_lockres_is_dirty()
2824 ret = !!(res->state & DLM_LOCK_RES_DIRTY); in dlm_lockres_is_dirty()
2825 spin_unlock(&res->spinlock); in dlm_lockres_is_dirty()
2831 struct dlm_lock_resource *res, in dlm_mark_lockres_migrating() argument
2837 res->lockname.len, res->lockname.name, dlm->node_num, in dlm_mark_lockres_migrating()
2841 spin_lock(&res->spinlock); in dlm_mark_lockres_migrating()
2842 BUG_ON(res->migration_pending); in dlm_mark_lockres_migrating()
2843 res->migration_pending = 1; in dlm_mark_lockres_migrating()
2846 __dlm_lockres_reserve_ast(res); in dlm_mark_lockres_migrating()
2847 spin_unlock(&res->spinlock); in dlm_mark_lockres_migrating()
2850 dlm_kick_thread(dlm, res); in dlm_mark_lockres_migrating()
2853 spin_lock(&res->spinlock); in dlm_mark_lockres_migrating()
2854 BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY); in dlm_mark_lockres_migrating()
2855 res->state |= DLM_LOCK_RES_BLOCK_DIRTY; in dlm_mark_lockres_migrating()
2856 spin_unlock(&res->spinlock); in dlm_mark_lockres_migrating()
2858 wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res)); in dlm_mark_lockres_migrating()
2859 dlm_lockres_release_ast(dlm, res); in dlm_mark_lockres_migrating()
2862 res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no"); in dlm_mark_lockres_migrating()
2868 dlm_migration_can_proceed(dlm, res, target), in dlm_mark_lockres_migrating()
2872 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no", in dlm_mark_lockres_migrating()
2876 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no", in dlm_mark_lockres_migrating()
2879 if (!dlm_migration_can_proceed(dlm, res, target)) { in dlm_mark_lockres_migrating()
2899 spin_lock(&res->spinlock); in dlm_mark_lockres_migrating()
2900 BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY)); in dlm_mark_lockres_migrating()
2901 res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY; in dlm_mark_lockres_migrating()
2903 BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING)); in dlm_mark_lockres_migrating()
2905 res->migration_pending = 0; in dlm_mark_lockres_migrating()
2906 spin_unlock(&res->spinlock); in dlm_mark_lockres_migrating()
2923 struct dlm_lock_resource *res) in dlm_remove_nonlocal_locks() argument
2925 struct list_head *queue = &res->granted; in dlm_remove_nonlocal_locks()
2929 assert_spin_locked(&res->spinlock); in dlm_remove_nonlocal_locks()
2931 BUG_ON(res->owner == dlm->node_num); in dlm_remove_nonlocal_locks()
2943 dlm_lockres_clear_refmap_bit(dlm, res, in dlm_remove_nonlocal_locks()
2956 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit); in dlm_remove_nonlocal_locks()
2964 res->lockname.len, res->lockname.name, bit); in dlm_remove_nonlocal_locks()
2965 dlm_lockres_clear_refmap_bit(dlm, res, bit); in dlm_remove_nonlocal_locks()
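
After ownership has been handed to the target, dlm_remove_nonlocal_locks() walks the granted/converting/blocked queues, drops every lock that belongs to a remote node, and clears that node's refmap bit; the trailing loop then clears stale refmap bits as well. A toy list-walking sketch of that cleanup, assuming a single queue and a small word-sized refmap:

    #include <stdlib.h>

    struct dlock {
            int node;               /* node that owns this lock */
            struct dlock *next;
    };

    struct lockres {
            struct dlock *granted;  /* one toy queue instead of three */
            unsigned long refmap;
    };

    static void remove_nonlocal_locks(struct lockres *res, int this_node)
    {
            struct dlock **pp = &res->granted;

            while (*pp) {
                    struct dlock *lock = *pp;
                    if (lock->node != this_node) {
                            *pp = lock->next;       /* unlink the remote lock */
                            res->refmap &= ~(1UL << lock->node);
                            free(lock);
                    } else {
                            pp = &lock->next;
                    }
            }
    }
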
2977 struct dlm_lock_resource *res) in dlm_pick_migration_target() argument
2986 assert_spin_locked(&res->spinlock); in dlm_pick_migration_target()
2990 queue = dlm_list_idx_to_ptr(res, idx); in dlm_pick_migration_target()
3004 noderef = find_next_bit(res->refmap, O2NM_MAX_NODES, in dlm_pick_migration_target()
3023 struct dlm_lock_resource *res, in dlm_do_migrate_request() argument
3032 migrate.namelen = res->lockname.len; in dlm_do_migrate_request()
3033 memcpy(migrate.name, res->lockname.name, migrate.namelen); in dlm_do_migrate_request()
3076 dlm->name, res->lockname.len, res->lockname.name, in dlm_do_migrate_request()
3078 spin_lock(&res->spinlock); in dlm_do_migrate_request()
3079 dlm_lockres_set_refmap_bit(dlm, res, nodenum); in dlm_do_migrate_request()
3080 spin_unlock(&res->spinlock); in dlm_do_migrate_request()
3103 struct dlm_lock_resource *res = NULL; in dlm_migrate_request_handler() local
3127 res = __dlm_lookup_lockres(dlm, name, namelen, hash); in dlm_migrate_request_handler()
3128 if (res) { in dlm_migrate_request_handler()
3129 spin_lock(&res->spinlock); in dlm_migrate_request_handler()
3130 if (res->state & DLM_LOCK_RES_RECOVERING) { in dlm_migrate_request_handler()
3134 spin_unlock(&res->spinlock); in dlm_migrate_request_handler()
3141 res->state |= DLM_LOCK_RES_MIGRATING; in dlm_migrate_request_handler()
3142 spin_unlock(&res->spinlock); in dlm_migrate_request_handler()
3147 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, in dlm_migrate_request_handler()
3165 if (res) in dlm_migrate_request_handler()
3166 dlm_lockres_put(res); in dlm_migrate_request_handler()
3180 struct dlm_lock_resource *res, in dlm_add_migration_mle() argument
3240 dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen); in dlm_add_migration_mle()
3258 struct dlm_lock_resource *res; in dlm_reset_mleres_owner() local
3261 res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen, in dlm_reset_mleres_owner()
3263 if (res) { in dlm_reset_mleres_owner()
3267 spin_lock(&res->spinlock); in dlm_reset_mleres_owner()
3268 dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN); in dlm_reset_mleres_owner()
3269 dlm_move_lockres_to_recovery_list(dlm, res); in dlm_reset_mleres_owner()
3270 spin_unlock(&res->spinlock); in dlm_reset_mleres_owner()
3271 dlm_lockres_put(res); in dlm_reset_mleres_owner()
3282 return res; in dlm_reset_mleres_owner()
3330 struct dlm_lock_resource *res; in dlm_clean_master_list() local
3399 res = dlm_reset_mleres_owner(dlm, mle); in dlm_clean_master_list()
3400 if (res) in dlm_clean_master_list()
3411 int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, in dlm_finish_migration() argument
3426 spin_lock(&res->spinlock); in dlm_finish_migration()
3427 dlm_lockres_set_refmap_bit(dlm, res, old_master); in dlm_finish_migration()
3428 spin_unlock(&res->spinlock); in dlm_finish_migration()
3431 ret = dlm_do_migrate_request(dlm, res, old_master, in dlm_finish_migration()
3439 res->lockname.len, res->lockname.name); in dlm_finish_migration()
3442 ret = dlm_do_assert_master(dlm, res, iter.node_map, in dlm_finish_migration()
3453 res->lockname.len, res->lockname.name, old_master); in dlm_finish_migration()
3454 ret = dlm_do_assert_master(dlm, res, iter.node_map, in dlm_finish_migration()
3465 spin_lock(&res->spinlock); in dlm_finish_migration()
3466 dlm_set_lockres_owner(dlm, res, dlm->node_num); in dlm_finish_migration()
3467 res->state &= ~DLM_LOCK_RES_MIGRATING; in dlm_finish_migration()
3468 spin_unlock(&res->spinlock); in dlm_finish_migration()
3470 dlm_kick_thread(dlm, res); in dlm_finish_migration()
3471 wake_up(&res->wq); in dlm_finish_migration()
3485 void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res) in __dlm_lockres_reserve_ast() argument
3487 assert_spin_locked(&res->spinlock); in __dlm_lockres_reserve_ast()
3488 if (res->state & DLM_LOCK_RES_MIGRATING) { in __dlm_lockres_reserve_ast()
3489 __dlm_print_one_lock_resource(res); in __dlm_lockres_reserve_ast()
3491 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); in __dlm_lockres_reserve_ast()
3493 atomic_inc(&res->asts_reserved); in __dlm_lockres_reserve_ast()
3510 struct dlm_lock_resource *res) in dlm_lockres_release_ast() argument
3512 if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock)) in dlm_lockres_release_ast()
3515 if (!res->migration_pending) { in dlm_lockres_release_ast()
3516 spin_unlock(&res->spinlock); in dlm_lockres_release_ast()
3520 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); in dlm_lockres_release_ast()
3521 res->migration_pending = 0; in dlm_lockres_release_ast()
3522 res->state |= DLM_LOCK_RES_MIGRATING; in dlm_lockres_release_ast()
3523 spin_unlock(&res->spinlock); in dlm_lockres_release_ast()
3524 wake_up(&res->wq); in dlm_lockres_release_ast()
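
dlm_lockres_release_ast() pairs with __dlm_lockres_reserve_ast() through atomic_dec_and_lock(): only the caller that drops asts_reserved to zero takes the spinlock, and only then, if a migration is pending, does the resource finally flip to DLM_LOCK_RES_MIGRATING and wake its waiters. A user-space approximation of that dec-and-lock gate, with the caveat that the real primitive takes the lock atomically with the final decrement, which this sketch does not attempt to reproduce:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    #define RES_MIGRATING 0x1       /* illustrative flag value */

    struct lockres {
            pthread_mutex_t lock;   /* res->spinlock */
            pthread_cond_t wq;      /* res->wq */
            atomic_int asts_reserved;
            bool migration_pending;
            unsigned state;
    };

    static void lockres_reserve_ast(struct lockres *res)
    {
            /* caller holds res->lock, mirroring assert_spin_locked() */
            atomic_fetch_add(&res->asts_reserved, 1);
    }

    static void lockres_release_ast(struct lockres *res)
    {
            /* Only the final release proceeds past this point. */
            if (atomic_fetch_sub(&res->asts_reserved, 1) != 1)
                    return;

            pthread_mutex_lock(&res->lock);
            if (res->migration_pending) {
                    res->migration_pending = false;
                    res->state |= RES_MIGRATING;    /* handoff can proceed */
            }
            pthread_mutex_unlock(&res->lock);
            pthread_cond_broadcast(&res->wq);       /* wake_up(&res->wq) */
    }
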