Lines Matching full:ls
(Identifier cross-reference for the lm_lockstruct pointer "ls" in the GFS2 DLM lock module, fs/gfs2/lock_dlm.c. Each entry lists the source line number, the matching code, the enclosing function, and whether the match is a local variable or a function argument.)

293 struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; in gdlm_lock() local
318 error = dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname, in gdlm_lock()
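
The two fragments above are from gdlm_lock(), where GFS2 maps a glock request onto the in-kernel DLM. A minimal sketch of the asynchronous dlm_lock() call pattern they rely on, assuming the <linux/dlm.h> API; the resource-name formatting, buffer size, and flag choice are illustrative, and gdlm_ast/gdlm_bast stand for the completion and blocking callbacks:

    /* Request mode "req" on the resource named by strname.  DLM_LKF_VALBLK
     * keeps the lock value block attached; the call returns immediately and
     * the result is delivered asynchronously to gdlm_ast()/gdlm_bast(). */
    char strname[32];                       /* buffer size illustrative */
    uint32_t lkf = DLM_LKF_VALBLK;          /* plus convert/queue flags as needed */
    int error;

    /* encode the glock type and number into the resource name (format illustrative) */
    snprintf(strname, sizeof(strname), "%8x%16llx",
             gl->gl_name.ln_type, (unsigned long long)gl->gl_name.ln_number);

    error = dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf,
                     strname, strlen(strname), 0,
                     gdlm_ast, gl, gdlm_bast);
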
330 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_put_lock() local
346 if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) { in gdlm_put_lock()
365 error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK, in gdlm_put_lock()
381 struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; in gdlm_cancel() local
382 dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl); in gdlm_cancel()
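
gdlm_put_lock() and gdlm_cancel() above both end in dlm_unlock(); the flag decides whether this is a normal release that writes the lock value block back (DLM_LKF_VALBLK) or the cancellation of an in-flight request (DLM_LKF_CANCEL). A condensed sketch of the two calls:

    int error;

    /* Normal release (gdlm_put_lock): sb_lkid identifies the lock and
     * DLM_LKF_VALBLK writes the lock value block back to the DLM. */
    error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
                       NULL, gl);

    /* Cancellation (gdlm_cancel): abort a queued or converting request;
     * the outcome is still reported through the completion callback. */
    dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl);
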
528 static void control_lvb_read(struct lm_lockstruct *ls, uint32_t *lvb_gen, in control_lvb_read() argument
532 memcpy(lvb_bits, ls->ls_control_lvb, GDLM_LVB_SIZE); in control_lvb_read()
537 static void control_lvb_write(struct lm_lockstruct *ls, uint32_t lvb_gen, in control_lvb_write() argument
541 memcpy(ls->ls_control_lvb, lvb_bits, GDLM_LVB_SIZE); in control_lvb_write()
543 memcpy(ls->ls_control_lvb, &gen, sizeof(__le32)); in control_lvb_write()
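
control_lvb_read()/control_lvb_write() above pack a 32-bit generation number into the first four bytes of the control lock's LVB, ahead of the per-journal recovery bitmap. A filled-in sketch of the write side; the endian conversion is inferred, since the listing only shows the lines that mention ls:

    static void control_lvb_write(struct lm_lockstruct *ls, uint32_t lvb_gen,
                                  char *lvb_bits)
    {
            __le32 gen;

            /* copy the caller's bitmap into the control LVB ... */
            memcpy(ls->ls_control_lvb, lvb_bits, GDLM_LVB_SIZE);
            /* ... then overwrite the first four bytes with the generation,
             * stored little-endian so every node agrees on the layout */
            gen = cpu_to_le32(lvb_gen);
            memcpy(ls->ls_control_lvb, &gen, sizeof(__le32));
    }
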
554 struct lm_lockstruct *ls = arg; in sync_wait_cb() local
555 complete(&ls->ls_sync_wait); in sync_wait_cb()
560 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in sync_unlock() local
563 error = dlm_unlock(ls->ls_dlm, lksb->sb_lkid, 0, lksb, ls); in sync_unlock()
570 wait_for_completion(&ls->ls_sync_wait); in sync_unlock()
583 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in sync_lock() local
590 error = dlm_lock(ls->ls_dlm, mode, lksb, flags, in sync_lock()
592 0, sync_wait_cb, ls, NULL); in sync_lock()
599 wait_for_completion(&ls->ls_sync_wait); in sync_lock()
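
sync_lock()/sync_unlock() above wrap the asynchronous DLM calls in a completion so the mount and recovery paths can use them synchronously: the ast callback (sync_wait_cb) fires ls_sync_wait and the caller blocks on it. A condensed sketch; the resource-name arguments do not appear in the listing and are shown generically here:

    static void sync_wait_cb(void *arg)
    {
            struct lm_lockstruct *ls = arg;

            complete(&ls->ls_sync_wait);
    }

    /* caller side, error handling trimmed */
    error = dlm_lock(ls->ls_dlm, mode, lksb, flags,
                     name, namelen, 0, sync_wait_cb, ls, NULL);
    if (!error)
            wait_for_completion(&ls->ls_sync_wait);
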
613 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in mounted_unlock() local
614 return sync_unlock(sdp, &ls->ls_mounted_lksb, "mounted_lock"); in mounted_unlock()
619 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in mounted_lock() local
621 &ls->ls_mounted_lksb, "mounted_lock"); in mounted_lock()
626 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in control_unlock() local
627 return sync_unlock(sdp, &ls->ls_control_lksb, "control_lock"); in control_unlock()
632 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in control_lock() local
634 &ls->ls_control_lksb, "control_lock"); in control_lock()
662 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gfs2_control_func() local
676 spin_lock(&ls->ls_recover_spin); in gfs2_control_func()
686 if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) || in gfs2_control_func()
687 test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) { in gfs2_control_func()
688 spin_unlock(&ls->ls_recover_spin); in gfs2_control_func()
691 block_gen = ls->ls_recover_block; in gfs2_control_func()
692 start_gen = ls->ls_recover_start; in gfs2_control_func()
693 spin_unlock(&ls->ls_recover_spin); in gfs2_control_func()
723 control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits); in gfs2_control_func()
725 spin_lock(&ls->ls_recover_spin); in gfs2_control_func()
726 if (block_gen != ls->ls_recover_block || in gfs2_control_func()
727 start_gen != ls->ls_recover_start) { in gfs2_control_func()
729 start_gen, block_gen, ls->ls_recover_block); in gfs2_control_func()
730 spin_unlock(&ls->ls_recover_spin); in gfs2_control_func()
735 recover_size = ls->ls_recover_size; in gfs2_control_func()
748 if (ls->ls_recover_result[i] != LM_RD_SUCCESS) in gfs2_control_func()
751 ls->ls_recover_result[i] = 0; in gfs2_control_func()
753 if (!test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) in gfs2_control_func()
756 __clear_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET); in gfs2_control_func()
766 if (!ls->ls_recover_submit[i]) in gfs2_control_func()
768 if (ls->ls_recover_submit[i] < lvb_gen) in gfs2_control_func()
769 ls->ls_recover_submit[i] = 0; in gfs2_control_func()
776 if (!ls->ls_recover_submit[i]) in gfs2_control_func()
778 if (ls->ls_recover_submit[i] < start_gen) { in gfs2_control_func()
779 ls->ls_recover_submit[i] = 0; in gfs2_control_func()
780 __set_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET); in gfs2_control_func()
791 spin_unlock(&ls->ls_recover_spin); in gfs2_control_func()
794 control_lvb_write(ls, start_gen, ls->ls_lvb_bits); in gfs2_control_func()
814 if (test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) { in gfs2_control_func()
830 spin_lock(&ls->ls_recover_spin); in gfs2_control_func()
831 if (ls->ls_recover_block == block_gen && in gfs2_control_func()
832 ls->ls_recover_start == start_gen) { in gfs2_control_func()
833 clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); in gfs2_control_func()
834 spin_unlock(&ls->ls_recover_spin); in gfs2_control_func()
839 start_gen, block_gen, ls->ls_recover_block); in gfs2_control_func()
840 spin_unlock(&ls->ls_recover_spin); in gfs2_control_func()
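
gfs2_control_func() above keeps snapshotting the recovery generations under ls_recover_spin, drops the spinlock to do work that may sleep (rereading and rewriting the control LVB, kicking journal recovery), and only clears DFL_BLOCK_LOCKS if nothing changed while the lock was dropped. Reduced to the bare pattern:

    /* snapshot the generations under the spinlock */
    spin_lock(&ls->ls_recover_spin);
    block_gen = ls->ls_recover_block;
    start_gen = ls->ls_recover_start;
    spin_unlock(&ls->ls_recover_spin);

    /* ... take the control lock, read/update the LVB and journal bits ... */

    /* recheck before declaring the recovery window closed */
    spin_lock(&ls->ls_recover_spin);
    if (ls->ls_recover_block == block_gen &&
        ls->ls_recover_start == start_gen)
            clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
    spin_unlock(&ls->ls_recover_spin);
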
846 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in control_mount() local
852 memset(&ls->ls_mounted_lksb, 0, sizeof(struct dlm_lksb)); in control_mount()
853 memset(&ls->ls_control_lksb, 0, sizeof(struct dlm_lksb)); in control_mount()
854 memset(&ls->ls_control_lvb, 0, GDLM_LVB_SIZE); in control_mount()
855 ls->ls_control_lksb.sb_lvbptr = ls->ls_control_lvb; in control_mount()
856 init_completion(&ls->ls_sync_wait); in control_mount()
858 set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); in control_mount()
952 control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits); in control_mount()
963 spin_lock(&ls->ls_recover_spin); in control_mount()
964 clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); in control_mount()
965 set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags); in control_mount()
966 set_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags); in control_mount()
967 spin_unlock(&ls->ls_recover_spin); in control_mount()
982 if (!all_jid_bits_clear(ls->ls_lvb_bits)) { in control_mount()
988 spin_lock(&ls->ls_recover_spin); in control_mount()
989 block_gen = ls->ls_recover_block; in control_mount()
990 start_gen = ls->ls_recover_start; in control_mount()
991 mount_gen = ls->ls_recover_mount; in control_mount()
1004 ls->ls_recover_flags); in control_mount()
1006 spin_unlock(&ls->ls_recover_spin); in control_mount()
1015 lvb_gen, ls->ls_recover_flags); in control_mount()
1016 spin_unlock(&ls->ls_recover_spin); in control_mount()
1024 lvb_gen, ls->ls_recover_flags); in control_mount()
1025 spin_unlock(&ls->ls_recover_spin); in control_mount()
1029 clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); in control_mount()
1030 set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags); in control_mount()
1031 memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t)); in control_mount()
1032 memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t)); in control_mount()
1033 spin_unlock(&ls->ls_recover_spin); in control_mount()
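
control_mount() finishes in one of the two states visible above: the first mounter marks itself with DFL_FIRST_MOUNT and leaves its recovery bookkeeping alone, while a later mounter only clears DFL_BLOCK_LOCKS and zeroes its submit/result arrays. A condensed sketch; first_mounter is an illustrative flag standing in for the lock-mode and generation checks the listing omits:

    spin_lock(&ls->ls_recover_spin);
    clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
    set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags);
    if (first_mounter) {
            set_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
    } else {
            memset(ls->ls_recover_submit, 0,
                   ls->ls_recover_size * sizeof(uint32_t));
            memset(ls->ls_recover_result, 0,
                   ls->ls_recover_size * sizeof(uint32_t));
    }
    spin_unlock(&ls->ls_recover_spin);
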
1044 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in control_first_done() local
1049 spin_lock(&ls->ls_recover_spin); in control_first_done()
1050 start_gen = ls->ls_recover_start; in control_first_done()
1051 block_gen = ls->ls_recover_block; in control_first_done()
1053 if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags) || in control_first_done()
1054 !test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) || in control_first_done()
1055 !test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) { in control_first_done()
1058 start_gen, block_gen, ls->ls_recover_flags); in control_first_done()
1059 spin_unlock(&ls->ls_recover_spin); in control_first_done()
1072 spin_unlock(&ls->ls_recover_spin); in control_first_done()
1075 wait_on_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY, in control_first_done()
1080 clear_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags); in control_first_done()
1081 set_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags); in control_first_done()
1082 memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t)); in control_first_done()
1083 memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t)); in control_first_done()
1084 spin_unlock(&ls->ls_recover_spin); in control_first_done()
1086 memset(ls->ls_lvb_bits, 0, GDLM_LVB_SIZE); in control_first_done()
1087 control_lvb_write(ls, start_gen, ls->ls_lvb_bits); in control_first_done()
1111 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in set_recover_size() local
1117 if (!ls->ls_lvb_bits) { in set_recover_size()
1118 ls->ls_lvb_bits = kzalloc(GDLM_LVB_SIZE, GFP_NOFS); in set_recover_size()
1119 if (!ls->ls_lvb_bits) in set_recover_size()
1129 old_size = ls->ls_recover_size; in set_recover_size()
1144 spin_lock(&ls->ls_recover_spin); in set_recover_size()
1145 memcpy(submit, ls->ls_recover_submit, old_size * sizeof(uint32_t)); in set_recover_size()
1146 memcpy(result, ls->ls_recover_result, old_size * sizeof(uint32_t)); in set_recover_size()
1147 kfree(ls->ls_recover_submit); in set_recover_size()
1148 kfree(ls->ls_recover_result); in set_recover_size()
1149 ls->ls_recover_submit = submit; in set_recover_size()
1150 ls->ls_recover_result = result; in set_recover_size()
1151 ls->ls_recover_size = new_size; in set_recover_size()
1152 spin_unlock(&ls->ls_recover_spin); in set_recover_size()
1156 static void free_recover_size(struct lm_lockstruct *ls) in free_recover_size() argument
1158 kfree(ls->ls_lvb_bits); in free_recover_size()
1159 kfree(ls->ls_recover_submit); in free_recover_size()
1160 kfree(ls->ls_recover_result); in free_recover_size()
1161 ls->ls_recover_submit = NULL; in free_recover_size()
1162 ls->ls_recover_result = NULL; in free_recover_size()
1163 ls->ls_recover_size = 0; in free_recover_size()
1164 ls->ls_lvb_bits = NULL; in free_recover_size()
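
set_recover_size() above grows the per-journal submit/result arrays, and free_recover_size() tears everything down at unmount. The important part of the resize is that the larger arrays are allocated outside ls_recover_spin and only swapped in under it (as the memcpy/kfree lines show), so a concurrent gdlm_recover_slot() or gdlm_recovery_result() never sees a half-built array. A sketch of the allocation side; kcalloc is assumed, and GFP_NOFS matches the ls_lvb_bits allocation above:

    uint32_t *submit, *result;

    submit = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
    result = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
    if (!submit || !result) {
            kfree(submit);
            kfree(result);
            return -ENOMEM;
    }
    /* ... copy the old contents and swap the pointers under ls_recover_spin,
     * exactly as the lines above show ... */
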
1172 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_recover_prep() local
1178 spin_lock(&ls->ls_recover_spin); in gdlm_recover_prep()
1179 ls->ls_recover_block = ls->ls_recover_start; in gdlm_recover_prep()
1180 set_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags); in gdlm_recover_prep()
1182 if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) || in gdlm_recover_prep()
1183 test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) { in gdlm_recover_prep()
1184 spin_unlock(&ls->ls_recover_spin); in gdlm_recover_prep()
1187 set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); in gdlm_recover_prep()
1188 spin_unlock(&ls->ls_recover_spin); in gdlm_recover_prep()
1197 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_recover_slot() local
1205 spin_lock(&ls->ls_recover_spin); in gdlm_recover_slot()
1206 if (ls->ls_recover_size < jid + 1) { in gdlm_recover_slot()
1208 jid, ls->ls_recover_block, ls->ls_recover_size); in gdlm_recover_slot()
1209 spin_unlock(&ls->ls_recover_spin); in gdlm_recover_slot()
1213 if (ls->ls_recover_submit[jid]) { in gdlm_recover_slot()
1215 jid, ls->ls_recover_block, ls->ls_recover_submit[jid]); in gdlm_recover_slot()
1217 ls->ls_recover_submit[jid] = ls->ls_recover_block; in gdlm_recover_slot()
1218 spin_unlock(&ls->ls_recover_spin); in gdlm_recover_slot()
1227 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_recover_done() local
1233 /* ensure the ls jid arrays are large enough */ in gdlm_recover_done()
1236 spin_lock(&ls->ls_recover_spin); in gdlm_recover_done()
1237 ls->ls_recover_start = generation; in gdlm_recover_done()
1239 if (!ls->ls_recover_mount) { in gdlm_recover_done()
1240 ls->ls_recover_mount = generation; in gdlm_recover_done()
1241 ls->ls_jid = our_slot - 1; in gdlm_recover_done()
1244 if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) in gdlm_recover_done()
1247 clear_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags); in gdlm_recover_done()
1249 wake_up_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY); in gdlm_recover_done()
1250 spin_unlock(&ls->ls_recover_spin); in gdlm_recover_done()
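
gdlm_recover_prep(), gdlm_recover_slot() and gdlm_recover_done() above are the hooks the DLM invokes around its own lockspace recovery: block GFS2 locking, note each failed slot (journal), and record the new generation. A sketch of how such callbacks are wired up through the ops structure handed to dlm_new_lockspace(); the wiring itself does not appear in the listing and is shown here as an assumption:

    static const struct dlm_lockspace_ops gdlm_lockspace_ops = {
            .recover_prep = gdlm_recover_prep,   /* recovery starting: block locks */
            .recover_slot = gdlm_recover_slot,   /* a slot (journal) needs recovery */
            .recover_done = gdlm_recover_done,   /* recovery finished: new generation */
    };
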
1258 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_recovery_result() local
1265 if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags)) in gdlm_recovery_result()
1269 if (jid == ls->ls_jid) in gdlm_recovery_result()
1272 spin_lock(&ls->ls_recover_spin); in gdlm_recovery_result()
1273 if (test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) { in gdlm_recovery_result()
1274 spin_unlock(&ls->ls_recover_spin); in gdlm_recovery_result()
1277 if (ls->ls_recover_size < jid + 1) { in gdlm_recovery_result()
1279 jid, ls->ls_recover_size); in gdlm_recovery_result()
1280 spin_unlock(&ls->ls_recover_spin); in gdlm_recovery_result()
1287 ls->ls_recover_result[jid] = result; in gdlm_recovery_result()
1293 if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) in gdlm_recovery_result()
1296 spin_unlock(&ls->ls_recover_spin); in gdlm_recovery_result()
1307 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_mount() local
1318 spin_lock_init(&ls->ls_recover_spin); in gdlm_mount()
1319 ls->ls_recover_flags = 0; in gdlm_mount()
1320 ls->ls_recover_mount = 0; in gdlm_mount()
1321 ls->ls_recover_start = 0; in gdlm_mount()
1322 ls->ls_recover_block = 0; in gdlm_mount()
1323 ls->ls_recover_size = 0; in gdlm_mount()
1324 ls->ls_recover_submit = NULL; in gdlm_mount()
1325 ls->ls_recover_result = NULL; in gdlm_mount()
1326 ls->ls_lvb_bits = NULL; in gdlm_mount()
1354 &ls->ls_dlm); in gdlm_mount()
1366 free_recover_size(ls); in gdlm_mount()
1367 set_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags); in gdlm_mount()
1388 ls->ls_first = !!test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags); in gdlm_mount()
1395 dlm_release_lockspace(ls->ls_dlm, 2); in gdlm_mount()
1397 free_recover_size(ls); in gdlm_mount()
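
gdlm_mount() creates the per-filesystem lockspace and either the error path above or gdlm_unmount() releases it. A sketch of the create/release pair, assuming the dlm_new_lockspace() API from <linux/dlm.h>; the fsname/cluster strings and the flag choice are illustrative:

    int ops_result, error;

    error = dlm_new_lockspace(fsname, cluster, DLM_LSFL_NEWEXCL,
                              GDLM_LVB_SIZE, &gdlm_lockspace_ops,
                              sdp, &ops_result, &ls->ls_dlm);
    if (error)
            return error;

    /* on a later failure, or at unmount, force-release the lockspace */
    dlm_release_lockspace(ls->ls_dlm, 2);
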
1404 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_first_done() local
1407 if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags)) in gdlm_first_done()
1417 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_unmount() local
1419 if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags)) in gdlm_unmount()
1424 spin_lock(&ls->ls_recover_spin); in gdlm_unmount()
1425 set_bit(DFL_UNMOUNT, &ls->ls_recover_flags); in gdlm_unmount()
1426 spin_unlock(&ls->ls_recover_spin); in gdlm_unmount()
1431 if (ls->ls_dlm) { in gdlm_unmount()
1432 dlm_release_lockspace(ls->ls_dlm, 2); in gdlm_unmount()
1433 ls->ls_dlm = NULL; in gdlm_unmount()
1436 free_recover_size(ls); in gdlm_unmount()