Lines Matching +full:cluster +full:- +full:mode

1 // SPDX-License-Identifier: GPL-2.0-only
5 ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
6 ** Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
43 return -EINVAL; in dlm_control_store()
53 ret = -EINVAL; in dlm_control_store()
61 int rc = kstrtoint(buf, 0, &ls->ls_uevent_result); in dlm_event_store()
65 set_bit(LSFL_UEVENT_WAIT, &ls->ls_flags); in dlm_event_store()
66 wake_up(&ls->ls_uevent_wait); in dlm_event_store()
72 return snprintf(buf, PAGE_SIZE, "%u\n", ls->ls_global_id); in dlm_id_show()
77 int rc = kstrtouint(buf, 0, &ls->ls_global_id); in dlm_id_store()
97 set_bit(LSFL_NODIR, &ls->ls_flags); in dlm_nodir_store()
109 return snprintf(buf, PAGE_SIZE, "%d\n", ls->ls_recover_nodeid); in dlm_recover_nodeid_show()
119 .attr = {.name = "control", .mode = S_IWUSR},
124 .attr = {.name = "event_done", .mode = S_IWUSR},
129 .attr = {.name = "id", .mode = S_IRUGO | S_IWUSR},
135 .attr = {.name = "nodir", .mode = S_IRUGO | S_IWUSR},
141 .attr = {.name = "recover_status", .mode = S_IRUGO},
146 .attr = {.name = "recover_nodeid", .mode = S_IRUGO},
166 return a->show ? a->show(ls, buf) : 0; in dlm_attr_show()
174 return a->store ? a->store(ls, buf, len) : len; in dlm_attr_store()
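These excerpts appear to come from the DLM lockspace code (fs/dlm/lockspace.c) in the Linux kernel. The two lines above are the bodies of the sysfs dispatch helpers; the listing omits the container_of() steps that recover the lockspace and the attribute entry from the pointers sysfs hands in. A minimal sketch of that dispatch, assuming the struct dlm_attr layout implied by the ->show/->store calls (an illustration, not the verbatim file):

#include <linux/kobject.h>
#include <linux/sysfs.h>
/* struct dlm_ls and its ls_kobj member come from dlm's internal headers. */

struct dlm_attr {
	struct attribute attr;
	ssize_t (*show)(struct dlm_ls *ls, char *buf);
	ssize_t (*store)(struct dlm_ls *ls, const char *buf, size_t len);
};

static ssize_t dlm_attr_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	/* Map the kobject back to its lockspace and the generic attribute
	 * back to the dlm_attr entry, then call its show callback. */
	struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
	struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);

	return a->show ? a->show(ls, buf) : 0;
}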
192 kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE); in do_uevent()
194 kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE); in do_uevent()
201 wait_event(ls->ls_uevent_wait, in do_uevent()
202 test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags)); in do_uevent()
204 log_rinfo(ls, "group event done %d", ls->ls_uevent_result); in do_uevent()
206 return ls->ls_uevent_result; in do_uevent()
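do_uevent() emits KOBJ_ONLINE or KOBJ_OFFLINE and then sleeps until something writes a result to the "event_done" attribute: dlm_event_store() above stores the result, sets LSFL_UEVENT_WAIT and wakes ls_uevent_wait. A hedged userspace sketch of the responding side; the /sys/kernel/dlm/<name>/event_done path and the helper name are assumptions based on the "dlm" kset and attribute names in the listing (the real responder is normally the cluster manager, dlm_controld):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical responder: after the node has been added to (or removed from)
 * the lockspace group named by the uevent, report the outcome by writing an
 * integer result to the lockspace's event_done attribute. */
static int report_event_done(const char *lockspace, int result)
{
	char path[128], val[16];
	int fd, len, rv = 0;

	snprintf(path, sizeof(path), "/sys/kernel/dlm/%s/event_done", lockspace);
	len = snprintf(val, sizeof(val), "%d", result);

	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	if (write(fd, val, len) != len)
		rv = -1;
	close(fd);
	return rv;
}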
213 add_uevent_var(env, "LOCKSPACE=%s", ls->ls_name); in dlm_uevent()
231 return -ENOMEM; in dlm_lockspace_init()
248 if (ls->ls_global_id == id) { in dlm_find_lockspace_global()
249 atomic_inc(&ls->ls_count); in dlm_find_lockspace_global()
263 atomic_inc(&ls->ls_count); in dlm_find_lockspace_local()
273 if (ls->ls_device.minor == minor) { in dlm_find_lockspace_device()
274 atomic_inc(&ls->ls_count); in dlm_find_lockspace_device()
286 if (atomic_dec_and_test(&ls->ls_count)) in dlm_put_lockspace()
287 wake_up(&ls->ls_count_wait); in dlm_put_lockspace()
293 wait_event(ls->ls_count_wait, atomic_read(&ls->ls_count) == 0); in remove_lockspace()
296 if (atomic_read(&ls->ls_count) != 0) { in remove_lockspace()
301 WARN_ON(ls->ls_create_count != 0); in remove_lockspace()
302 list_del(&ls->ls_list); in remove_lockspace()
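Lines 293, 296, 301 and 302 show remove_lockspace() waiting for the reference count to reach zero and then re-checking it before unlinking the lockspace; the listing omits the list lock and the retry that make this safe against a concurrent dlm_find_lockspace_*() taking a new reference. A sketch of the complete wait/recheck pattern, assuming a spinlock guarding the global lslist (the lock name here is illustrative):

static void remove_lockspace(struct dlm_ls *ls)
{
retry:
	wait_event(ls->ls_count_wait, atomic_read(&ls->ls_count) == 0);

	/* A new reference may have been taken between the wakeup and this
	 * point, so re-check under the list lock and retry if needed. */
	spin_lock_bh(&lslist_lock);		/* lock name assumed */
	if (atomic_read(&ls->ls_count) != 0) {
		spin_unlock_bh(&lslist_lock);
		goto retry;
	}

	WARN_ON(ls->ls_create_count != 0);
	list_del(&ls->ls_list);
	spin_unlock_bh(&lslist_lock);
}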
320 if (lkb->lkb_lvbptr && test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags)) in lkb_idr_free()
321 dlm_free_lvb(lkb->lkb_lvbptr); in lkb_idr_free()
343 xa_for_each(&ls->ls_lkbxa, id, lkb) { in free_lockspace()
346 xa_destroy(&ls->ls_lkbxa); in free_lockspace()
351 rhashtable_free_and_destroy(&ls->ls_rsbtbl, rhash_free_rsb, NULL); in free_lockspace()
356 static int new_lockspace(const char *name, const char *cluster, in new_lockspace() argument
366 return -EINVAL; in new_lockspace()
369 return -EINVAL; in new_lockspace()
372 return -EINVAL; in new_lockspace()
376 error = -EUNATCH; in new_lockspace()
382 *ops_result = -EOPNOTSUPP; in new_lockspace()
387 if (!cluster) in new_lockspace()
388 log_print("dlm cluster name '%s' is being used without an application provided cluster name", in new_lockspace()
391 if (dlm_config.ci_recover_callbacks && cluster && in new_lockspace()
392 strncmp(cluster, dlm_config.ci_cluster_name, DLM_LOCKSPACE_LEN)) { in new_lockspace()
393 log_print("dlm cluster name '%s' does not match " in new_lockspace()
394 "the application cluster name '%s'", in new_lockspace()
395 dlm_config.ci_cluster_name, cluster); in new_lockspace()
396 error = -EBADR; in new_lockspace()
404 WARN_ON(ls->ls_create_count <= 0); in new_lockspace()
405 if (ls->ls_namelen != namelen) in new_lockspace()
407 if (memcmp(ls->ls_name, name, namelen)) in new_lockspace()
410 error = -EEXIST; in new_lockspace()
413 ls->ls_create_count++; in new_lockspace()
423 error = -ENOMEM; in new_lockspace()
428 memcpy(ls->ls_name, name, namelen); in new_lockspace()
429 ls->ls_namelen = namelen; in new_lockspace()
430 ls->ls_lvblen = lvblen; in new_lockspace()
431 atomic_set(&ls->ls_count, 0); in new_lockspace()
432 init_waitqueue_head(&ls->ls_count_wait); in new_lockspace()
433 ls->ls_flags = 0; in new_lockspace()
436 ls->ls_ops = ops; in new_lockspace()
437 ls->ls_ops_arg = ops_arg; in new_lockspace()
441 set_bit(LSFL_SOFTIRQ, &ls->ls_flags); in new_lockspace()
446 ls->ls_exflags = (flags & ~(DLM_LSFL_FS | DLM_LSFL_NEWEXCL | in new_lockspace()
449 INIT_LIST_HEAD(&ls->ls_slow_inactive); in new_lockspace()
450 INIT_LIST_HEAD(&ls->ls_slow_active); in new_lockspace()
451 rwlock_init(&ls->ls_rsbtbl_lock); in new_lockspace()
453 error = rhashtable_init(&ls->ls_rsbtbl, &dlm_rhash_rsb_params); in new_lockspace()
457 xa_init_flags(&ls->ls_lkbxa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_BH); in new_lockspace()
458 rwlock_init(&ls->ls_lkbxa_lock); in new_lockspace()
460 INIT_LIST_HEAD(&ls->ls_waiters); in new_lockspace()
461 spin_lock_init(&ls->ls_waiters_lock); in new_lockspace()
462 INIT_LIST_HEAD(&ls->ls_orphans); in new_lockspace()
463 spin_lock_init(&ls->ls_orphans_lock); in new_lockspace()
465 INIT_LIST_HEAD(&ls->ls_nodes); in new_lockspace()
466 INIT_LIST_HEAD(&ls->ls_nodes_gone); in new_lockspace()
467 ls->ls_num_nodes = 0; in new_lockspace()
468 ls->ls_low_nodeid = 0; in new_lockspace()
469 ls->ls_total_weight = 0; in new_lockspace()
470 ls->ls_node_array = NULL; in new_lockspace()
472 memset(&ls->ls_local_rsb, 0, sizeof(struct dlm_rsb)); in new_lockspace()
473 ls->ls_local_rsb.res_ls = ls; in new_lockspace()
475 ls->ls_debug_rsb_dentry = NULL; in new_lockspace()
476 ls->ls_debug_waiters_dentry = NULL; in new_lockspace()
478 init_waitqueue_head(&ls->ls_uevent_wait); in new_lockspace()
479 ls->ls_uevent_result = 0; in new_lockspace()
480 init_completion(&ls->ls_recovery_done); in new_lockspace()
481 ls->ls_recovery_result = -1; in new_lockspace()
483 spin_lock_init(&ls->ls_cb_lock); in new_lockspace()
484 INIT_LIST_HEAD(&ls->ls_cb_delay); in new_lockspace()
486 INIT_WORK(&ls->ls_free_work, free_lockspace); in new_lockspace()
488 ls->ls_recoverd_task = NULL; in new_lockspace()
489 mutex_init(&ls->ls_recoverd_active); in new_lockspace()
490 spin_lock_init(&ls->ls_recover_lock); in new_lockspace()
491 spin_lock_init(&ls->ls_rcom_spin); in new_lockspace()
492 get_random_bytes(&ls->ls_rcom_seq, sizeof(uint64_t)); in new_lockspace()
493 ls->ls_recover_status = 0; in new_lockspace()
494 ls->ls_recover_seq = get_random_u64(); in new_lockspace()
495 ls->ls_recover_args = NULL; in new_lockspace()
496 init_rwsem(&ls->ls_in_recovery); in new_lockspace()
497 rwlock_init(&ls->ls_recv_active); in new_lockspace()
498 INIT_LIST_HEAD(&ls->ls_requestqueue); in new_lockspace()
499 rwlock_init(&ls->ls_requestqueue_lock); in new_lockspace()
500 spin_lock_init(&ls->ls_clear_proc_locks); in new_lockspace()
507 ls->ls_recover_buf = kmalloc(DLM_MAX_SOCKET_BUFSIZE, GFP_NOFS); in new_lockspace()
508 if (!ls->ls_recover_buf) { in new_lockspace()
509 error = -ENOMEM; in new_lockspace()
513 ls->ls_slot = 0; in new_lockspace()
514 ls->ls_num_slots = 0; in new_lockspace()
515 ls->ls_slots_size = 0; in new_lockspace()
516 ls->ls_slots = NULL; in new_lockspace()
518 INIT_LIST_HEAD(&ls->ls_recover_list); in new_lockspace()
519 spin_lock_init(&ls->ls_recover_list_lock); in new_lockspace()
520 xa_init_flags(&ls->ls_recover_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_BH); in new_lockspace()
521 spin_lock_init(&ls->ls_recover_xa_lock); in new_lockspace()
522 ls->ls_recover_list_count = 0; in new_lockspace()
523 init_waitqueue_head(&ls->ls_wait_general); in new_lockspace()
524 INIT_LIST_HEAD(&ls->ls_masters_list); in new_lockspace()
525 rwlock_init(&ls->ls_masters_lock); in new_lockspace()
526 INIT_LIST_HEAD(&ls->ls_dir_dump_list); in new_lockspace()
527 rwlock_init(&ls->ls_dir_dump_lock); in new_lockspace()
529 INIT_LIST_HEAD(&ls->ls_scan_list); in new_lockspace()
530 spin_lock_init(&ls->ls_scan_lock); in new_lockspace()
531 timer_setup(&ls->ls_scan_timer, dlm_rsb_scan, TIMER_DEFERRABLE); in new_lockspace()
534 ls->ls_create_count = 1; in new_lockspace()
535 list_add(&ls->ls_list, &lslist); in new_lockspace()
539 set_bit(LSFL_FS, &ls->ls_flags); in new_lockspace()
547 init_waitqueue_head(&ls->ls_recover_lock_wait); in new_lockspace()
551 * initializes ls_in_recovery as locked in "down" mode. We need in new_lockspace()
553 * has to start out in down mode. in new_lockspace()
562 wait_event(ls->ls_recover_lock_wait, in new_lockspace()
563 test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags)); in new_lockspace()
565 ls->ls_kobj.kset = dlm_kset; in new_lockspace()
566 error = kobject_init_and_add(&ls->ls_kobj, &dlm_ktype, NULL, in new_lockspace()
567 "%s", ls->ls_name); in new_lockspace()
570 kobject_uevent(&ls->ls_kobj, KOBJ_ADD); in new_lockspace()
574 cluster infrastructure.) Once it's done that, it tells us who the in new_lockspace()
583 wait_for_completion(&ls->ls_recovery_done); in new_lockspace()
584 error = ls->ls_recovery_result; in new_lockspace()
597 kfree(ls->ls_node_array); in new_lockspace()
604 list_del(&ls->ls_list); in new_lockspace()
606 xa_destroy(&ls->ls_recover_xa); in new_lockspace()
607 kfree(ls->ls_recover_buf); in new_lockspace()
609 xa_destroy(&ls->ls_lkbxa); in new_lockspace()
610 rhashtable_destroy(&ls->ls_rsbtbl); in new_lockspace()
612 kobject_put(&ls->ls_kobj); in new_lockspace()
619 static int __dlm_new_lockspace(const char *name, const char *cluster, in __dlm_new_lockspace() argument
633 error = new_lockspace(name, cluster, flags, lvblen, ops, ops_arg, in __dlm_new_lockspace()
648 int dlm_new_lockspace(const char *name, const char *cluster, uint32_t flags, in dlm_new_lockspace() argument
653 return __dlm_new_lockspace(name, cluster, flags | DLM_LSFL_FS, lvblen, in dlm_new_lockspace()
657 int dlm_new_user_lockspace(const char *name, const char *cluster, in dlm_new_user_lockspace() argument
664 return -EINVAL; in dlm_new_user_lockspace()
666 return __dlm_new_lockspace(name, cluster, flags, lvblen, ops, in dlm_new_user_lockspace()
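dlm_new_lockspace() at line 648 is the exported kernel entry point (it forces DLM_LSFL_FS through __dlm_new_lockspace()), while dlm_new_user_lockspace() serves the userspace device interface. A hedged example of how an in-kernel user might call the exported API; the lockspace name, cluster name, flags and lvblen are illustrative:

#include <linux/dlm.h>
#include <linux/printk.h>

/* Hypothetical in-kernel caller: create a lockspace named "myfs", requiring
 * that the node's configured cluster name matches "mycluster".  No
 * dlm_lockspace_ops are registered, so ops, ops_arg and ops_result are NULL;
 * the lvblen of 32 bytes is illustrative (it must be a multiple of 8). */
static int example_join_lockspace(dlm_lockspace_t **lockspace)
{
	int error;

	error = dlm_new_lockspace("myfs", "mycluster", DLM_LSFL_NEWEXCL,
				  32, NULL, NULL, NULL, lockspace);
	if (error)
		pr_err("dlm_new_lockspace failed: %d\n", error);

	return error;
}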
680 read_lock_bh(&ls->ls_lkbxa_lock); in lockspace_busy()
682 xa_for_each(&ls->ls_lkbxa, id, lkb) { in lockspace_busy()
687 xa_for_each(&ls->ls_lkbxa, id, lkb) { in lockspace_busy()
688 if (lkb->lkb_nodeid == 0 && in lockspace_busy()
689 lkb->lkb_grmode != DLM_LOCK_IV) { in lockspace_busy()
697 read_unlock_bh(&ls->ls_lkbxa_lock); in lockspace_busy()
708 if (ls->ls_create_count == 1) { in release_lockspace()
710 rv = -EBUSY; in release_lockspace()
713 ls->ls_create_count = 0; in release_lockspace()
716 } else if (ls->ls_create_count > 1) { in release_lockspace()
717 rv = --ls->ls_create_count; in release_lockspace()
719 rv = -EINVAL; in release_lockspace()
741 clear_bit(LSFL_RUNNING, &ls->ls_flags); in release_lockspace()
742 timer_shutdown_sync(&ls->ls_scan_timer); in release_lockspace()
755 kobject_put(&ls->ls_kobj); in release_lockspace()
757 xa_destroy(&ls->ls_recover_xa); in release_lockspace()
758 kfree(ls->ls_recover_buf); in release_lockspace()
765 kfree(ls->ls_recover_args); in release_lockspace()
768 kfree(ls->ls_node_array); in release_lockspace()
773 queue_work(dlm_wq, &ls->ls_free_work); in release_lockspace()
786 * 0 - don't destroy lockspace if it has any LKBs
787 * 1 - destroy lockspace if it has remote LKBs but not if it has local LKBs
788 * 2 - destroy lockspace regardless of LKBs
789 * 3 - destroy lockspace as part of a forced shutdown
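The comment above enumerates the force levels accepted by dlm_release_lockspace(void *lockspace, int force). A hedged teardown example to pair with the creation sketch earlier; force level 2 drops the lockspace even if LKBs remain, per the list above:

#include <linux/dlm.h>
#include <linux/printk.h>

/* Hypothetical teardown matching the join sketch above. */
static void example_leave_lockspace(dlm_lockspace_t *lockspace)
{
	int error;

	error = dlm_release_lockspace(lockspace, 2);
	if (error)
		pr_err("dlm_release_lockspace failed: %d\n", error);
}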
799 return -EINVAL; in dlm_release_lockspace()
805 ls_count--; in dlm_release_lockspace()
822 if (!test_bit(LSFL_RUNNING, &ls->ls_flags)) { in dlm_stop_lockspaces()