Lines matching full:ls — whole-word occurrences of the identifier ls; each entry gives the source line number, the matching code, and the enclosing function ("local"/"argument" marks where ls is declared).
284 struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; in gdlm_lock() local
315 down_read(&ls->ls_sem); in gdlm_lock()
317 if (likely(ls->ls_dlm != NULL)) { in gdlm_lock()
318 error = dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname, in gdlm_lock()
321 up_read(&ls->ls_sem); in gdlm_lock()
332 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_put_lock() local
364 down_read(&ls->ls_sem); in gdlm_put_lock()
366 if (likely(ls->ls_dlm != NULL)) { in gdlm_put_lock()
367 error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, flags, in gdlm_put_lock()
370 up_read(&ls->ls_sem); in gdlm_put_lock()
390 struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; in gdlm_cancel() local
392 down_read(&ls->ls_sem); in gdlm_cancel()
393 if (likely(ls->ls_dlm != NULL)) { in gdlm_cancel()
394 dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl); in gdlm_cancel()
396 up_read(&ls->ls_sem); in gdlm_cancel()
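
The matches above in gdlm_lock(), gdlm_put_lock() and gdlm_cancel() all follow one pattern: take ls->ls_sem for reading and check ls->ls_dlm for NULL before issuing the DLM request, so a concurrent release of the lockspace cannot pull it out from under an in-flight dlm_lock()/dlm_unlock() call. A minimal sketch of that guard, using a demo stand-in for the two lm_lockstruct fields involved (the struct, function name and -ENODEV fallback are illustrative, not taken from the GFS2 source):

#include <linux/dlm.h>
#include <linux/errno.h>
#include <linux/rwsem.h>
#include <linux/string.h>

/* Demo stand-in for the two lm_lockstruct fields used by these matches;
 * the real structure is defined in GFS2's headers. */
struct demo_lockstruct {
	struct rw_semaphore ls_sem;	/* guards ls_dlm against concurrent release */
	dlm_lockspace_t *ls_dlm;	/* NULL once the lockspace has been released */
};

static int demo_dlm_request(struct demo_lockstruct *ls, int mode,
			    struct dlm_lksb *lksb, uint32_t flags,
			    const char *name,
			    void (*ast)(void *), void *astarg)
{
	int error = -ENODEV;	/* assumed fallback when the lockspace is gone */

	down_read(&ls->ls_sem);
	if (likely(ls->ls_dlm != NULL))
		error = dlm_lock(ls->ls_dlm, mode, lksb, flags, name,
				 strlen(name), 0, ast, astarg, NULL);
	up_read(&ls->ls_sem);

	return error;
}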
542 static void control_lvb_read(struct lm_lockstruct *ls, uint32_t *lvb_gen, in control_lvb_read() argument
546 memcpy(lvb_bits, ls->ls_control_lvb, GDLM_LVB_SIZE); in control_lvb_read()
551 static void control_lvb_write(struct lm_lockstruct *ls, uint32_t lvb_gen, in control_lvb_write() argument
555 memcpy(ls->ls_control_lvb, lvb_bits, GDLM_LVB_SIZE); in control_lvb_write()
557 memcpy(ls->ls_control_lvb, &gen, sizeof(__le32)); in control_lvb_write()
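
control_lvb_read() and control_lvb_write() above copy the control lock's LVB as a single GDLM_LVB_SIZE buffer whose leading __le32 carries a generation counter; later matches manipulate a journal-id bitmap in the same buffer. A hedged sketch of the read/write pair on a raw buffer (the demo_* names and the 32-byte size are assumptions):

#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>

#define DEMO_LVB_SIZE 32	/* stands in for GDLM_LVB_SIZE; the size is an assumption */

/* Assumed LVB layout: a __le32 generation number at offset 0, with the
 * journal-id bitmap following later in the same buffer. */
static void demo_lvb_read(const char *lvb, uint32_t *gen_out, char *bits)
{
	__le32 gen;

	memcpy(bits, lvb, DEMO_LVB_SIZE);
	memcpy(&gen, bits, sizeof(__le32));
	*gen_out = le32_to_cpu(gen);
}

static void demo_lvb_write(char *lvb, uint32_t gen_in, const char *bits)
{
	__le32 gen = cpu_to_le32(gen_in);

	memcpy(lvb, bits, DEMO_LVB_SIZE);
	memcpy(lvb, &gen, sizeof(__le32));	/* generation overwrites the first 4 bytes */
}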
568 struct lm_lockstruct *ls = arg; in sync_wait_cb() local
569 complete(&ls->ls_sync_wait); in sync_wait_cb()
574 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in sync_unlock() local
577 down_read(&ls->ls_sem); in sync_unlock()
579 if (likely(ls->ls_dlm != NULL)) in sync_unlock()
580 error = dlm_unlock(ls->ls_dlm, lksb->sb_lkid, 0, lksb, ls); in sync_unlock()
581 up_read(&ls->ls_sem); in sync_unlock()
588 wait_for_completion(&ls->ls_sync_wait); in sync_unlock()
601 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in sync_lock() local
608 down_read(&ls->ls_sem); in sync_lock()
610 if (likely(ls->ls_dlm != NULL)) { in sync_lock()
611 error = dlm_lock(ls->ls_dlm, mode, lksb, flags, in sync_lock()
613 0, sync_wait_cb, ls, NULL); in sync_lock()
615 up_read(&ls->ls_sem); in sync_lock()
622 wait_for_completion(&ls->ls_sync_wait); in sync_lock()
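
sync_wait_cb(), sync_unlock() and sync_lock() show how the asynchronous DLM API is used synchronously: the AST callback completes ls->ls_sync_wait and the caller sleeps on that completion after submitting the request, again under the ls_sem read lock. A sketch of the pattern with demo names (the -ENODEV fallback and the sb_status return are assumptions about error handling, not values from the listing):

#include <linux/completion.h>
#include <linux/dlm.h>
#include <linux/errno.h>
#include <linux/rwsem.h>
#include <linux/string.h>

struct demo_sync {			/* demo stand-in for the fields used here */
	struct rw_semaphore ls_sem;
	dlm_lockspace_t *ls_dlm;
	struct completion ls_sync_wait;
};

static void demo_sync_wait_cb(void *arg)
{
	struct demo_sync *ls = arg;

	complete(&ls->ls_sync_wait);		/* AST fired: wake the waiting caller */
}

static int demo_sync_lock(struct demo_sync *ls, int mode, uint32_t flags,
			  struct dlm_lksb *lksb, const char *name)
{
	int error = -ENODEV;			/* assumed fallback, see lead-in */

	down_read(&ls->ls_sem);
	if (likely(ls->ls_dlm != NULL))
		error = dlm_lock(ls->ls_dlm, mode, lksb, flags, name,
				 strlen(name), 0, demo_sync_wait_cb, ls, NULL);
	up_read(&ls->ls_sem);
	if (error)
		return error;

	wait_for_completion(&ls->ls_sync_wait);	/* block until the AST has run */
	return lksb->sb_status;			/* DLM's verdict on the request */
}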
636 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in mounted_unlock() local
637 return sync_unlock(sdp, &ls->ls_mounted_lksb, "mounted_lock"); in mounted_unlock()
642 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in mounted_lock() local
644 &ls->ls_mounted_lksb, "mounted_lock"); in mounted_lock()
649 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in control_unlock() local
650 return sync_unlock(sdp, &ls->ls_control_lksb, "control_lock"); in control_unlock()
655 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in control_lock() local
657 &ls->ls_control_lksb, "control_lock"); in control_lock()
685 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gfs2_control_func() local
699 spin_lock(&ls->ls_recover_spin); in gfs2_control_func()
709 if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) || in gfs2_control_func()
710 test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) { in gfs2_control_func()
711 spin_unlock(&ls->ls_recover_spin); in gfs2_control_func()
714 block_gen = ls->ls_recover_block; in gfs2_control_func()
715 start_gen = ls->ls_recover_start; in gfs2_control_func()
716 spin_unlock(&ls->ls_recover_spin); in gfs2_control_func()
746 control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits); in gfs2_control_func()
748 spin_lock(&ls->ls_recover_spin); in gfs2_control_func()
749 if (block_gen != ls->ls_recover_block || in gfs2_control_func()
750 start_gen != ls->ls_recover_start) { in gfs2_control_func()
752 start_gen, block_gen, ls->ls_recover_block); in gfs2_control_func()
753 spin_unlock(&ls->ls_recover_spin); in gfs2_control_func()
758 recover_size = ls->ls_recover_size; in gfs2_control_func()
771 if (ls->ls_recover_result[i] != LM_RD_SUCCESS) in gfs2_control_func()
774 ls->ls_recover_result[i] = 0; in gfs2_control_func()
776 if (!test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) in gfs2_control_func()
779 __clear_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET); in gfs2_control_func()
789 if (!ls->ls_recover_submit[i]) in gfs2_control_func()
791 if (ls->ls_recover_submit[i] < lvb_gen) in gfs2_control_func()
792 ls->ls_recover_submit[i] = 0; in gfs2_control_func()
799 if (!ls->ls_recover_submit[i]) in gfs2_control_func()
801 if (ls->ls_recover_submit[i] < start_gen) { in gfs2_control_func()
802 ls->ls_recover_submit[i] = 0; in gfs2_control_func()
803 __set_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET); in gfs2_control_func()
814 spin_unlock(&ls->ls_recover_spin); in gfs2_control_func()
817 control_lvb_write(ls, start_gen, ls->ls_lvb_bits); in gfs2_control_func()
837 if (test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) { in gfs2_control_func()
853 spin_lock(&ls->ls_recover_spin); in gfs2_control_func()
854 if (ls->ls_recover_block == block_gen && in gfs2_control_func()
855 ls->ls_recover_start == start_gen) { in gfs2_control_func()
856 clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); in gfs2_control_func()
857 spin_unlock(&ls->ls_recover_spin); in gfs2_control_func()
862 start_gen, block_gen, ls->ls_recover_block); in gfs2_control_func()
863 spin_unlock(&ls->ls_recover_spin); in gfs2_control_func()
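
Inside gfs2_control_func() the journal-id bitmap stored in the LVB is driven with the little-endian bit helpers at JID_BITMAP_OFFSET: a set bit asks the cluster to recover that journal, and it is cleared once recovery succeeded. A minimal sketch of those helpers on a raw LVB buffer (the offset value here is an assumption; the real constant is defined in the source):

#include <linux/bitops.h>
#include <linux/types.h>

#define DEMO_JID_BITMAP_OFFSET 8	/* assumed: the generation word(s) precede the bitmap */

/* Ask the cluster to recover journal "jid". */
static void demo_request_jid_recovery(char *lvb_bits, int jid)
{
	__set_bit_le(jid, lvb_bits + DEMO_JID_BITMAP_OFFSET);
}

/* Journal "jid" has been recovered successfully. */
static void demo_jid_recovered(char *lvb_bits, int jid)
{
	__clear_bit_le(jid, lvb_bits + DEMO_JID_BITMAP_OFFSET);
}

static bool demo_jid_needs_recovery(const char *lvb_bits, int jid)
{
	return test_bit_le(jid, lvb_bits + DEMO_JID_BITMAP_OFFSET);
}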
869 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in control_mount() local
875 memset(&ls->ls_mounted_lksb, 0, sizeof(struct dlm_lksb)); in control_mount()
876 memset(&ls->ls_control_lksb, 0, sizeof(struct dlm_lksb)); in control_mount()
877 memset(&ls->ls_control_lvb, 0, GDLM_LVB_SIZE); in control_mount()
878 ls->ls_control_lksb.sb_lvbptr = ls->ls_control_lvb; in control_mount()
879 init_completion(&ls->ls_sync_wait); in control_mount()
881 set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); in control_mount()
975 control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits); in control_mount()
986 spin_lock(&ls->ls_recover_spin); in control_mount()
987 clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); in control_mount()
988 set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags); in control_mount()
989 set_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags); in control_mount()
990 spin_unlock(&ls->ls_recover_spin); in control_mount()
1005 if (!all_jid_bits_clear(ls->ls_lvb_bits)) { in control_mount()
1011 spin_lock(&ls->ls_recover_spin); in control_mount()
1012 block_gen = ls->ls_recover_block; in control_mount()
1013 start_gen = ls->ls_recover_start; in control_mount()
1014 mount_gen = ls->ls_recover_mount; in control_mount()
1022 spin_unlock(&ls->ls_recover_spin); in control_mount()
1028 ls->ls_recover_flags); in control_mount()
1029 spin_unlock(&ls->ls_recover_spin); in control_mount()
1039 lvb_gen, ls->ls_recover_flags); in control_mount()
1040 spin_unlock(&ls->ls_recover_spin); in control_mount()
1048 lvb_gen, ls->ls_recover_flags); in control_mount()
1049 spin_unlock(&ls->ls_recover_spin); in control_mount()
1053 clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); in control_mount()
1054 set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags); in control_mount()
1055 memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t)); in control_mount()
1056 memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t)); in control_mount()
1057 spin_unlock(&ls->ls_recover_spin); in control_mount()
1068 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in control_first_done() local
1073 spin_lock(&ls->ls_recover_spin); in control_first_done()
1074 start_gen = ls->ls_recover_start; in control_first_done()
1075 block_gen = ls->ls_recover_block; in control_first_done()
1077 if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags) || in control_first_done()
1078 !test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) || in control_first_done()
1079 !test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) { in control_first_done()
1082 start_gen, block_gen, ls->ls_recover_flags); in control_first_done()
1083 spin_unlock(&ls->ls_recover_spin); in control_first_done()
1096 spin_unlock(&ls->ls_recover_spin); in control_first_done()
1099 wait_on_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY, in control_first_done()
1104 clear_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags); in control_first_done()
1105 set_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags); in control_first_done()
1106 memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t)); in control_first_done()
1107 memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t)); in control_first_done()
1108 spin_unlock(&ls->ls_recover_spin); in control_first_done()
1110 memset(ls->ls_lvb_bits, 0, GDLM_LVB_SIZE); in control_first_done()
1111 control_lvb_write(ls, start_gen, ls->ls_lvb_bits); in control_first_done()
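
control_first_done() waits for any DLM recovery still in flight before it clears the bitmap and writes the LVB back: the wait_on_bit() match at line 1099 pairs with the clear_bit()/wake_up_bit() matches at lines 1271 and 1273 below. A sketch of that pairing on a demo flags word (names are illustrative):

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/wait_bit.h>

#define DEMO_RECOVERY_BIT 0	/* stands in for DFL_DLM_RECOVERY */

static void demo_wait_for_recovery(unsigned long *flags)
{
	wait_on_bit(flags, DEMO_RECOVERY_BIT, TASK_UNINTERRUPTIBLE);
}

static void demo_recovery_finished(unsigned long *flags)
{
	clear_bit(DEMO_RECOVERY_BIT, flags);
	smp_mb__after_atomic();		/* barrier between the clear and the wakeup */
	wake_up_bit(flags, DEMO_RECOVERY_BIT);
}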
1135 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in set_recover_size() local
1141 if (!ls->ls_lvb_bits) { in set_recover_size()
1142 ls->ls_lvb_bits = kzalloc(GDLM_LVB_SIZE, GFP_NOFS); in set_recover_size()
1143 if (!ls->ls_lvb_bits) in set_recover_size()
1153 old_size = ls->ls_recover_size; in set_recover_size()
1168 spin_lock(&ls->ls_recover_spin); in set_recover_size()
1169 memcpy(submit, ls->ls_recover_submit, old_size * sizeof(uint32_t)); in set_recover_size()
1170 memcpy(result, ls->ls_recover_result, old_size * sizeof(uint32_t)); in set_recover_size()
1171 kfree(ls->ls_recover_submit); in set_recover_size()
1172 kfree(ls->ls_recover_result); in set_recover_size()
1173 ls->ls_recover_submit = submit; in set_recover_size()
1174 ls->ls_recover_result = result; in set_recover_size()
1175 ls->ls_recover_size = new_size; in set_recover_size()
1176 spin_unlock(&ls->ls_recover_spin); in set_recover_size()
1180 static void free_recover_size(struct lm_lockstruct *ls) in free_recover_size() argument
1182 kfree(ls->ls_lvb_bits); in free_recover_size()
1183 kfree(ls->ls_recover_submit); in free_recover_size()
1184 kfree(ls->ls_recover_result); in free_recover_size()
1185 ls->ls_recover_submit = NULL; in free_recover_size()
1186 ls->ls_recover_result = NULL; in free_recover_size()
1187 ls->ls_recover_size = 0; in free_recover_size()
1188 ls->ls_lvb_bits = NULL; in free_recover_size()
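
set_recover_size() grows the per-journal submit/result arrays without holding ls_recover_spin across the allocation: new buffers are allocated first, then the old contents are copied, the old buffers freed and the new ones published while the spinlock is held. A sketch of that resize pattern with a demo structure (GFP_NOFS mirrors the kzalloc() match at line 1142; kcalloc() and everything else here are illustrative assumptions):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_recover {		/* demo stand-in for the recovery bookkeeping fields */
	spinlock_t lock;
	uint32_t *submit;
	uint32_t *result;
	int size;
};

/* Grow the per-journal arrays: allocate outside the spinlock, then copy the
 * old contents, free the old buffers and publish the new ones under it. */
static int demo_resize(struct demo_recover *rs, int new_size)
{
	uint32_t *submit, *result;
	int old_size = rs->size;

	if (new_size <= old_size)
		return 0;

	submit = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
	result = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
	if (!submit || !result) {
		kfree(submit);
		kfree(result);
		return -ENOMEM;
	}

	spin_lock(&rs->lock);
	memcpy(submit, rs->submit, old_size * sizeof(uint32_t));
	memcpy(result, rs->result, old_size * sizeof(uint32_t));
	kfree(rs->submit);
	kfree(rs->result);
	rs->submit = submit;
	rs->result = result;
	rs->size = new_size;
	spin_unlock(&rs->lock);

	return 0;
}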
1196 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_recover_prep() local
1202 spin_lock(&ls->ls_recover_spin); in gdlm_recover_prep()
1203 ls->ls_recover_block = ls->ls_recover_start; in gdlm_recover_prep()
1204 set_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags); in gdlm_recover_prep()
1206 if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) || in gdlm_recover_prep()
1207 test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) { in gdlm_recover_prep()
1208 spin_unlock(&ls->ls_recover_spin); in gdlm_recover_prep()
1211 set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); in gdlm_recover_prep()
1212 spin_unlock(&ls->ls_recover_spin); in gdlm_recover_prep()
1221 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_recover_slot() local
1229 spin_lock(&ls->ls_recover_spin); in gdlm_recover_slot()
1230 if (ls->ls_recover_size < jid + 1) { in gdlm_recover_slot()
1232 jid, ls->ls_recover_block, ls->ls_recover_size); in gdlm_recover_slot()
1233 spin_unlock(&ls->ls_recover_spin); in gdlm_recover_slot()
1237 if (ls->ls_recover_submit[jid]) { in gdlm_recover_slot()
1239 jid, ls->ls_recover_block, ls->ls_recover_submit[jid]); in gdlm_recover_slot()
1241 ls->ls_recover_submit[jid] = ls->ls_recover_block; in gdlm_recover_slot()
1242 spin_unlock(&ls->ls_recover_spin); in gdlm_recover_slot()
1251 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_recover_done() local
1257 /* ensure the ls jid arrays are large enough */ in gdlm_recover_done()
1260 spin_lock(&ls->ls_recover_spin); in gdlm_recover_done()
1261 ls->ls_recover_start = generation; in gdlm_recover_done()
1263 if (!ls->ls_recover_mount) { in gdlm_recover_done()
1264 ls->ls_recover_mount = generation; in gdlm_recover_done()
1265 ls->ls_jid = our_slot - 1; in gdlm_recover_done()
1268 if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) in gdlm_recover_done()
1271 clear_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags); in gdlm_recover_done()
1273 wake_up_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY); in gdlm_recover_done()
1274 spin_unlock(&ls->ls_recover_spin); in gdlm_recover_done()
1282 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_recovery_result() local
1289 if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags)) in gdlm_recovery_result()
1293 if (jid == ls->ls_jid) in gdlm_recovery_result()
1296 spin_lock(&ls->ls_recover_spin); in gdlm_recovery_result()
1297 if (test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) { in gdlm_recovery_result()
1298 spin_unlock(&ls->ls_recover_spin); in gdlm_recovery_result()
1301 if (ls->ls_recover_size < jid + 1) { in gdlm_recovery_result()
1303 jid, ls->ls_recover_size); in gdlm_recovery_result()
1304 spin_unlock(&ls->ls_recover_spin); in gdlm_recovery_result()
1311 ls->ls_recover_result[jid] = result; in gdlm_recovery_result()
1317 if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) in gdlm_recovery_result()
1320 spin_unlock(&ls->ls_recover_spin); in gdlm_recovery_result()
1331 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_mount() local
1342 ls->ls_dlm = NULL; in gdlm_mount()
1343 spin_lock_init(&ls->ls_recover_spin); in gdlm_mount()
1344 ls->ls_recover_flags = 0; in gdlm_mount()
1345 ls->ls_recover_mount = 0; in gdlm_mount()
1346 ls->ls_recover_start = 0; in gdlm_mount()
1347 ls->ls_recover_block = 0; in gdlm_mount()
1348 ls->ls_recover_size = 0; in gdlm_mount()
1349 ls->ls_recover_submit = NULL; in gdlm_mount()
1350 ls->ls_recover_result = NULL; in gdlm_mount()
1351 ls->ls_lvb_bits = NULL; in gdlm_mount()
1377 init_rwsem(&ls->ls_sem); in gdlm_mount()
1380 &ls->ls_dlm); in gdlm_mount()
1392 free_recover_size(ls); in gdlm_mount()
1393 set_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags); in gdlm_mount()
1414 ls->ls_first = !!test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags); in gdlm_mount()
1421 dlm_release_lockspace(ls->ls_dlm, DLM_RELEASE_NORMAL); in gdlm_mount()
1423 free_recover_size(ls); in gdlm_mount()
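
The gdlm_recover_prep()/gdlm_recover_slot()/gdlm_recover_done() matches are DLM lockspace-ops callbacks, and the gdlm_mount() match at line 1380 passes &ls->ls_dlm as the final argument of the lockspace-creation call. A sketch of how callbacks like these are wired up through dlm_new_lockspace(); the demo_* names, the DLM_LSFL_NEWEXCL flag and the 32-byte LVB length are assumptions rather than values shown in the listing:

#include <linux/dlm.h>

static void demo_recover_prep(void *arg)
{
	/* DLM recovery is starting: block new locking activity */
}

static void demo_recover_slot(void *arg, struct dlm_slot *slot)
{
	/* a node was lost: note its slot so its journal gets recovered */
}

static void demo_recover_done(void *arg, struct dlm_slot *slots, int num_slots,
			      int our_slot, uint32_t generation)
{
	/* membership has settled; "generation" identifies this recovery pass */
}

static const struct dlm_lockspace_ops demo_lockspace_ops = {
	.recover_prep	= demo_recover_prep,
	.recover_slot	= demo_recover_slot,
	.recover_done	= demo_recover_done,
};

static int demo_join(const char *fsname, const char *cluster, void *arg,
		     dlm_lockspace_t **lockspace)
{
	int ops_result;	/* reports whether the lockspace ops are supported */

	return dlm_new_lockspace(fsname, cluster, DLM_LSFL_NEWEXCL, 32,
				 &demo_lockspace_ops, arg, &ops_result,
				 lockspace);
}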
1430 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_first_done() local
1433 if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags)) in gdlm_first_done()
1443 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_unmount() local
1445 if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags)) in gdlm_unmount()
1450 spin_lock(&ls->ls_recover_spin); in gdlm_unmount()
1451 set_bit(DFL_UNMOUNT, &ls->ls_recover_flags); in gdlm_unmount()
1452 spin_unlock(&ls->ls_recover_spin); in gdlm_unmount()
1457 down_write(&ls->ls_sem); in gdlm_unmount()
1458 if (ls->ls_dlm) { in gdlm_unmount()
1459 dlm_release_lockspace(ls->ls_dlm, DLM_RELEASE_NORMAL); in gdlm_unmount()
1460 ls->ls_dlm = NULL; in gdlm_unmount()
1462 up_write(&ls->ls_sem); in gdlm_unmount()
1464 free_recover_size(ls); in gdlm_unmount()
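
The gdlm_unmount() matches close the loop on the ls_sem pattern from the top of the listing: the lockspace is released under the write side of the semaphore and ls_dlm is set to NULL, so the readers in gdlm_lock()/gdlm_put_lock()/gdlm_cancel() see either a valid lockspace or NULL, never a freed one. A sketch of that teardown, reusing the demo_lockstruct stand-in from the first example:

static void demo_release(struct demo_lockstruct *ls)
{
	down_write(&ls->ls_sem);	/* exclude the read-side users above */
	if (ls->ls_dlm) {
		/* DLM_RELEASE_NORMAL as in the listing; 0 ("do not force")
		 * is the traditional value for this argument. */
		dlm_release_lockspace(ls->ls_dlm, DLM_RELEASE_NORMAL);
		ls->ls_dlm = NULL;
	}
	up_write(&ls->ls_sem);
}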