Lines Matching +full:first +full:- +full:generation

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
4 * Copyright 2004-2011 Red Hat, Inc.
26 * gfs2_update_stats - Update time based stats
52 s64 delta = sample - s->stats[index]; in gfs2_update_stats()
53 s->stats[index] += (delta >> 3); in gfs2_update_stats()
55 s->stats[index] += (s64)(abs(delta) - s->stats[index]) >> 2; in gfs2_update_stats()
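
The update shown above (lines 52-55) keeps an exponentially smoothed mean with gain 1/8 and a smoothed mean absolute deviation with gain 1/4, the same scheme TCP uses for SRTT/RTTVAR. A minimal stand-alone C sketch of the arithmetic follows; the structure and field names are illustrative, not the kernel's.

#include <stdio.h>
#include <stdlib.h>

/*
 * Stand-alone model of gfs2_update_stats(): each metric keeps a smoothed
 * mean followed by a smoothed absolute deviation.
 */
struct lat_stats {
	long long srtt;		/* smoothed sample mean */
	long long svar;		/* smoothed mean absolute deviation */
};

static void update_stats(struct lat_stats *s, long long sample)
{
	long long delta = sample - s->srtt;

	s->srtt += delta >> 3;				/* mean: gain 1/8, as in the kernel's shift */
	s->svar += (llabs(delta) - s->svar) >> 2;	/* deviation: gain 1/4 */
}

int main(void)
{
	struct lat_stats s = { 0, 0 };
	long long samples[] = { 100, 120, 80, 400, 110 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		update_stats(&s, samples[i]);
		printf("sample %lld -> mean %lld dev %lld\n",
		       samples[i], s.srtt, s.svar);
	}
	return 0;
}
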
59 * gfs2_update_reply_times - Update locking statistics
62 * This assumes that gl->gl_dstamp has been set earlier.
72 * TRY_1CB flags are set are classified as non-blocking. All
78 const unsigned gltype = gl->gl_name.ln_type; in gfs2_update_reply_times()
79 unsigned index = test_bit(GLF_BLOCKING, &gl->gl_flags) ? in gfs2_update_reply_times()
84 rtt = ktime_to_ns(ktime_sub(ktime_get_real(), gl->gl_dstamp)); in gfs2_update_reply_times()
85 lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats); in gfs2_update_reply_times()
86 gfs2_update_stats(&gl->gl_stats, index, rtt); /* Local */ in gfs2_update_reply_times()
87 gfs2_update_stats(&lks->lkstats[gltype], index, rtt); /* Global */ in gfs2_update_reply_times()
94 * gfs2_update_request_times - Update locking statistics
97 * The irt (lock inter-request times) measures the average time
105 const unsigned gltype = gl->gl_name.ln_type; in gfs2_update_request_times()
110 dstamp = gl->gl_dstamp; in gfs2_update_request_times()
111 gl->gl_dstamp = ktime_get_real(); in gfs2_update_request_times()
112 irt = ktime_to_ns(ktime_sub(gl->gl_dstamp, dstamp)); in gfs2_update_request_times()
113 lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats); in gfs2_update_request_times()
114 gfs2_update_stats(&gl->gl_stats, GFS2_LKS_SIRT, irt); /* Local */ in gfs2_update_request_times()
115 gfs2_update_stats(&lks->lkstats[gltype], GFS2_LKS_SIRT, irt); /* Global */ in gfs2_update_request_times()
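
Taken together, gfs2_update_reply_times() measures the reply round-trip time from the gl_dstamp set when the request was issued, while gfs2_update_request_times() measures the gap between successive requests by reading and then refreshing that same stamp; both feed the per-glock and per-CPU global stats. A rough userspace sketch of the two measurements, with clock_gettime() standing in for ktime_get_real() and the sleeps as placeholders:

#define _POSIX_C_SOURCE 199309L
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static void sleep_ms(long ms)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = ms * 1000000L };

	nanosleep(&ts, NULL);
}

int main(void)
{
	int64_t dstamp = now_ns();	/* stamp taken when a request is issued */
	int64_t irt, rtt;

	sleep_ms(2);			/* placeholder: time between two requests */

	/* Next request: take the inter-request time, then refresh the stamp,
	 * as gfs2_update_request_times() does. */
	irt = now_ns() - dstamp;
	dstamp = now_ns();

	sleep_ms(1);			/* placeholder: DLM reply latency */

	/* Reply arrives: round-trip time, as gfs2_update_reply_times() does. */
	rtt = now_ns() - dstamp;

	printf("irt %lld ns, rtt %lld ns\n", (long long)irt, (long long)rtt);
	return 0;
}
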
122 unsigned ret = gl->gl_state; in gdlm_ast()
125 if (__lockref_is_dead(&gl->gl_lockref) && in gdlm_ast()
126 gl->gl_lksb.sb_status != -DLM_EUNLOCK) in gdlm_ast()
130 BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED); in gdlm_ast()
132 if ((gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID) && gl->gl_lksb.sb_lvbptr) in gdlm_ast()
133 memset(gl->gl_lksb.sb_lvbptr, 0, GDLM_LVB_SIZE); in gdlm_ast()
135 switch (gl->gl_lksb.sb_status) { in gdlm_ast()
136 case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */ in gdlm_ast()
137 if (gl->gl_ops->go_unlocked) in gdlm_ast()
138 gl->gl_ops->go_unlocked(gl); in gdlm_ast()
141 case -DLM_ECANCEL: /* Cancel while getting lock */ in gdlm_ast()
144 case -EAGAIN: /* Try lock fails */ in gdlm_ast()
145 case -EDEADLK: /* Deadlock detected */ in gdlm_ast()
147 case -ETIMEDOUT: /* Canceled due to timeout */ in gdlm_ast()
156 ret = gl->gl_req; in gdlm_ast()
157 if (gl->gl_lksb.sb_flags & DLM_SBF_ALTMODE) { in gdlm_ast()
158 if (gl->gl_req == LM_ST_SHARED) in gdlm_ast()
160 else if (gl->gl_req == LM_ST_DEFERRED) in gdlm_ast()
168 * first successful new (non-conversion) request, we clear this flag to in gdlm_ast()
169 * indicate that a DLM lock exists and that gl->gl_lksb.sb_lkid is the in gdlm_ast()
173 * the gl->gl_lksb.sb_lkid values that come with such requests. in gdlm_ast()
176 clear_bit(GLF_INITIAL, &gl->gl_flags); in gdlm_ast()
180 if (test_bit(GLF_INITIAL, &gl->gl_flags)) in gdlm_ast()
181 gl->gl_lksb.sb_lkid = 0; in gdlm_ast()
189 if (__lockref_is_dead(&gl->gl_lockref)) in gdlm_bast()
203 fs_err(gl->gl_name.ln_sbd, "unknown bast mode %d\n", mode); in gdlm_bast()
208 /* convert gfs lock-state to dlm lock-mode */
224 return -1; in make_mode()
245 if (gl->gl_lksb.sb_lvbptr) in make_flags()
265 if (!test_bit(GLF_INITIAL, &gl->gl_flags)) { in make_flags()
269 * The DLM_LKF_QUECVT flag needs to be set for "first come, in make_flags()
270 * first served" semantics, but it must only be set for in make_flags()
285 *c-- = hex_asc[value & 0x0f]; in gfs2_reverse_hex()
293 struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; in gdlm_lock()
299 cur = make_mode(gl->gl_name.ln_sbd, gl->gl_state); in gdlm_lock()
300 req = make_mode(gl->gl_name.ln_sbd, req_state); in gdlm_lock()
304 if (test_bit(GLF_INITIAL, &gl->gl_flags)) { in gdlm_lock()
305 memset(strname, ' ', GDLM_STRNAME_BYTES - 1); in gdlm_lock()
306 strname[GDLM_STRNAME_BYTES - 1] = '\0'; in gdlm_lock()
307 gfs2_reverse_hex(strname + 7, gl->gl_name.ln_type); in gdlm_lock()
308 gfs2_reverse_hex(strname + 23, gl->gl_name.ln_number); in gdlm_lock()
309 gl->gl_dstamp = ktime_get_real(); in gdlm_lock()
318 error = dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname, in gdlm_lock()
319 GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast); in gdlm_lock()
320 if (error == -EBUSY) { in gdlm_lock()
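
On the initial request (GLF_INITIAL set), gdlm_lock() builds the DLM resource name as a fixed-width, space-padded string: the glock type ends at offset 7 and the block number at offset 23, each written backwards by gfs2_reverse_hex(). A stand-alone sketch of that formatting, assuming GDLM_STRNAME_BYTES is 25 as in lock_dlm.h and using made-up type/number values:

#include <stdio.h>
#include <string.h>

#define GDLM_STRNAME_BYTES 25	/* matches fs/gfs2/lock_dlm.h */

static const char hex_asc[] = "0123456789abcdef";

/* Write 'value' in hex ending at *c, least significant digit first; digits
 * grow to the left, leaving the space padding intact (as gfs2_reverse_hex
 * does). */
static void reverse_hex(char *c, unsigned long long value)
{
	*c = '0';
	while (value) {
		*c-- = hex_asc[value & 0x0f];
		value >>= 4;
	}
}

int main(void)
{
	char strname[GDLM_STRNAME_BYTES];
	unsigned int ln_type = 2;		/* hypothetical glock type */
	unsigned long long ln_number = 0x1234;	/* hypothetical block number */

	memset(strname, ' ', GDLM_STRNAME_BYTES - 1);
	strname[GDLM_STRNAME_BYTES - 1] = '\0';
	reverse_hex(strname + 7, ln_type);	/* 8-character type field */
	reverse_hex(strname + 23, ln_number);	/* 16-character number field */

	printf("resource name: \"%s\"\n", strname);
	return 0;
}
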
329 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gdlm_put_lock()
330 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_put_lock()
333 BUG_ON(!__lockref_is_dead(&gl->gl_lockref)); in gdlm_put_lock()
335 if (test_bit(GLF_INITIAL, &gl->gl_flags)) { in gdlm_put_lock()
340 clear_bit(GLF_BLOCKING, &gl->gl_flags); in gdlm_put_lock()
346 if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) { in gdlm_put_lock()
358 if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) && in gdlm_put_lock()
359 (!gl->gl_lksb.sb_lvbptr || gl->gl_state != LM_ST_EXCLUSIVE)) { in gdlm_put_lock()
365 error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK, in gdlm_put_lock()
367 if (error == -EBUSY) { in gdlm_put_lock()
374 gl->gl_name.ln_type, in gdlm_put_lock()
375 (unsigned long long)gl->gl_name.ln_number, error); in gdlm_put_lock()
381 struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; in gdlm_cancel()
382 dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl); in gdlm_cancel()
390 * 2. dlm_controld blocks dlm-kernel locking activity
391 * 3. dlm_controld within dlm-kernel notifies gfs2 (recover_prep)
393 * 5. dlm_controld starts dlm-kernel dlm_recoverd to do kernel recovery
396 * 8. dlm_recoverd unblocks dlm-kernel locking activity
397 * 9. dlm_recoverd notifies gfs2 when done (recover_done with new generation)
398 * 10. gfs2_control updates control_lock lvb with new generation and jid bits
405 * - failures during recovery
413 * recover_done() provides a new lockspace generation number each time it
414 * is called (step 9). This generation number is saved as recover_start.
420 * - more specific gfs2 steps in sequence above
424 * 9. recover_done sets recover_start = new generation number
432 * - parallel recovery steps across all nodes
434 * All nodes attempt to update the control_lock lvb with the new generation
435 * number and jid bits, but only the first to get the control_lock EX will
437 * generation number.)
441 * . One node gets control_lock first and writes the lvb, others see it's done
446 * - is there a problem with clearing an lvb bit that should be set
461 * for the latest lockspace generation before ever unblocking locks
465 * - special case of first mounter: first node to mount the fs
467 * The first node to mount a gfs2 fs needs to check all the journals
470 * for the first mounter to be done before taking locks on the fs
473 * 1. The mounted_lock tells a node it's the first to mount the fs.
477 * other mounted nodes (no PR locks exist), and it is the first mounter.
478 * The mounted_lock is demoted to PR when first recovery is done, so
481 * 2. The control_lock blocks others in control_mount() while the first
482 * mounter is doing first mount recovery of all journals.
484 * it can proceed. The first mounter holds control_lock in EX while doing
485 * the first mount recovery, blocking mounts from other nodes, then demotes
489 * first mounter:
492 * set first=1
493 * do first mounter recovery
494 * mounted_lock EX->PR
495 * control_lock EX->NL, write lvb generation
498 * control_lock EX/NOQUEUE success (if fail -EAGAIN, retry)
499 * mounted_lock EX/NOQUEUE fail -EAGAIN (expected due to other mounters PR)
501 * read lvb generation
502 * control_lock EX->NL
503 * set first=0
505 * - mount during recovery
507 * If a node mounts while others are doing recovery (not first mounter),
513 * steps above until the lvb generation number is >= its mount generation
516 * - control_lock lvb format
518 * 4 bytes generation number: the latest dlm lockspace generation number
520 * to reflect all slot failures through that generation.
522 * GDLM_LVB_SIZE-8 bytes of jid bit map. If bit N is set, it indicates
526 #define JID_BITMAP_OFFSET 8 /* 4 byte generation number + 4 byte unused */
532 memcpy(lvb_bits, ls->ls_control_lvb, GDLM_LVB_SIZE); in control_lvb_read()
541 memcpy(ls->ls_control_lvb, lvb_bits, GDLM_LVB_SIZE); in control_lvb_write()
543 memcpy(ls->ls_control_lvb, &gen, sizeof(__le32)); in control_lvb_write()
549 GDLM_LVB_SIZE - JID_BITMAP_OFFSET); in all_jid_bits_clear()
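
Per the comment above, the control_lock LVB holds a 4-byte little-endian generation number, 4 unused bytes, and then a jid bitmap starting at JID_BITMAP_OFFSET. A small userspace sketch of that layout; the helper names are illustrative, and the kernel uses cpu_to_le32()/le32_to_cpu() plus the *_bit_le() helpers instead:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GDLM_LVB_SIZE		32	/* matches fs/gfs2/lock_dlm.h */
#define JID_BITMAP_OFFSET	8	/* 4-byte generation + 4 unused bytes */

/* Store the generation little-endian in the first 4 bytes of the lvb
 * (models control_lvb_write()). */
static void lvb_write_gen(unsigned char *lvb, uint32_t gen)
{
	lvb[0] = gen & 0xff;
	lvb[1] = (gen >> 8) & 0xff;
	lvb[2] = (gen >> 16) & 0xff;
	lvb[3] = (gen >> 24) & 0xff;
}

/* Read it back (models control_lvb_read()). */
static uint32_t lvb_read_gen(const unsigned char *lvb)
{
	return (uint32_t)lvb[0] | ((uint32_t)lvb[1] << 8) |
	       ((uint32_t)lvb[2] << 16) | ((uint32_t)lvb[3] << 24);
}

/* Little-endian bitmap: bit N of the jid map lives in byte N/8, bit N%8. */
static void lvb_set_jid(unsigned char *lvb, unsigned int jid)
{
	lvb[JID_BITMAP_OFFSET + jid / 8] |= 1u << (jid % 8);
}

static int lvb_test_jid(const unsigned char *lvb, unsigned int jid)
{
	return !!(lvb[JID_BITMAP_OFFSET + jid / 8] & (1u << (jid % 8)));
}

int main(void)
{
	unsigned char lvb[GDLM_LVB_SIZE];

	memset(lvb, 0, sizeof(lvb));
	lvb_write_gen(lvb, 7);		/* lockspace generation 7 */
	lvb_set_jid(lvb, 3);		/* journal 3 still needs recovery */

	printf("gen %u, jid 3 dirty %d, jid 0 dirty %d\n",
	       lvb_read_gen(lvb), lvb_test_jid(lvb, 3), lvb_test_jid(lvb, 0));
	return 0;
}
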
555 complete(&ls->ls_sync_wait); in sync_wait_cb()
560 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in sync_unlock()
563 error = dlm_unlock(ls->ls_dlm, lksb->sb_lkid, 0, lksb, ls); in sync_unlock()
566 name, lksb->sb_lkid, error); in sync_unlock()
570 wait_for_completion(&ls->ls_sync_wait); in sync_unlock()
572 if (lksb->sb_status != -DLM_EUNLOCK) { in sync_unlock()
574 name, lksb->sb_lkid, lksb->sb_status); in sync_unlock()
575 return -1; in sync_unlock()
583 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in sync_lock()
590 error = dlm_lock(ls->ls_dlm, mode, lksb, flags, in sync_lock()
591 strname, GDLM_STRNAME_BYTES - 1, in sync_lock()
595 name, lksb->sb_lkid, flags, mode, error); in sync_lock()
599 wait_for_completion(&ls->ls_sync_wait); in sync_lock()
601 status = lksb->sb_status; in sync_lock()
603 if (status && status != -EAGAIN) { in sync_lock()
605 name, lksb->sb_lkid, flags, mode, status); in sync_lock()
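
sync_lock() and sync_unlock() turn the asynchronous dlm_lock()/dlm_unlock() calls into synchronous ones: the AST callback (sync_wait_cb) fires a completion, and the caller blocks on ls_sync_wait before inspecting sb_status. A userspace model of the same pattern, using a pthread condition variable in place of a kernel completion and a worker thread in place of the DLM callback; all names here are illustrative:

#include <pthread.h>
#include <stdio.h>

struct sync_req {
	pthread_mutex_t	lock;
	pthread_cond_t	done_cv;
	int		done;
	int		status;		/* plays the role of lksb->sb_status */
};

/* "AST" callback: record the status and complete the request. */
static void sync_wait_cb(struct sync_req *r, int status)
{
	pthread_mutex_lock(&r->lock);
	r->status = status;
	r->done = 1;
	pthread_cond_signal(&r->done_cv);
	pthread_mutex_unlock(&r->lock);
}

static void *async_lock_worker(void *arg)
{
	sync_wait_cb(arg, 0);		/* pretend the lock was granted */
	return NULL;
}

/* Caller side: issue the request, then wait for the completion. */
static int sync_lock(struct sync_req *r)
{
	pthread_t t;

	pthread_mutex_init(&r->lock, NULL);
	pthread_cond_init(&r->done_cv, NULL);
	r->done = 0;
	r->status = 0;

	pthread_create(&t, NULL, async_lock_worker, r);

	pthread_mutex_lock(&r->lock);
	while (!r->done)
		pthread_cond_wait(&r->done_cv, &r->lock);
	pthread_mutex_unlock(&r->lock);

	pthread_join(t, NULL);
	return r->status;
}

int main(void)
{
	struct sync_req r;

	printf("lock status %d\n", sync_lock(&r));
	return 0;
}
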
613 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in mounted_unlock()
614 return sync_unlock(sdp, &ls->ls_mounted_lksb, "mounted_lock"); in mounted_unlock()
619 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in mounted_lock()
621 &ls->ls_mounted_lksb, "mounted_lock"); in mounted_lock()
626 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in control_unlock()
627 return sync_unlock(sdp, &ls->ls_control_lksb, "control_lock"); in control_unlock()
632 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in control_lock()
634 &ls->ls_control_lksb, "control_lock"); in control_lock()
638 * remote_withdraw - react to a node withdrawing from the file system
646 list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) { in remote_withdraw()
647 if (jd->jd_jid == sdp->sd_lockstruct.ls_jid) in remote_withdraw()
662 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gfs2_control_func()
669 /* First check for other nodes that may have done a withdraw. */ in gfs2_control_func()
670 if (test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags)) { in gfs2_control_func()
672 clear_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags); in gfs2_control_func()
676 spin_lock(&ls->ls_recover_spin); in gfs2_control_func()
682 * FIRST_MOUNT means this node is doing first mounter recovery, in gfs2_control_func()
686 if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) || in gfs2_control_func()
687 test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) { in gfs2_control_func()
688 spin_unlock(&ls->ls_recover_spin); in gfs2_control_func()
691 block_gen = ls->ls_recover_block; in gfs2_control_func()
692 start_gen = ls->ls_recover_start; in gfs2_control_func()
693 spin_unlock(&ls->ls_recover_spin); in gfs2_control_func()
711 * yet been updated for the generation of the failure in gfs2_control_func()
723 control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits); in gfs2_control_func()
725 spin_lock(&ls->ls_recover_spin); in gfs2_control_func()
726 if (block_gen != ls->ls_recover_block || in gfs2_control_func()
727 start_gen != ls->ls_recover_start) { in gfs2_control_func()
728 fs_info(sdp, "recover generation %u block1 %u %u\n", in gfs2_control_func()
729 start_gen, block_gen, ls->ls_recover_block); in gfs2_control_func()
730 spin_unlock(&ls->ls_recover_spin); in gfs2_control_func()
735 recover_size = ls->ls_recover_size; in gfs2_control_func()
742 * in succession. Only the first will really do recovery, in gfs2_control_func()
748 if (ls->ls_recover_result[i] != LM_RD_SUCCESS) in gfs2_control_func()
751 ls->ls_recover_result[i] = 0; in gfs2_control_func()
753 if (!test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) in gfs2_control_func()
756 __clear_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET); in gfs2_control_func()
766 if (!ls->ls_recover_submit[i]) in gfs2_control_func()
768 if (ls->ls_recover_submit[i] < lvb_gen) in gfs2_control_func()
769 ls->ls_recover_submit[i] = 0; in gfs2_control_func()
776 if (!ls->ls_recover_submit[i]) in gfs2_control_func()
778 if (ls->ls_recover_submit[i] < start_gen) { in gfs2_control_func()
779 ls->ls_recover_submit[i] = 0; in gfs2_control_func()
780 __set_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET); in gfs2_control_func()
784 latest generation to the lvb */ in gfs2_control_func()
791 spin_unlock(&ls->ls_recover_spin); in gfs2_control_func()
794 control_lvb_write(ls, start_gen, ls->ls_lvb_bits); in gfs2_control_func()
814 if (test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) { in gfs2_control_func()
815 fs_info(sdp, "recover generation %u jid %d\n", in gfs2_control_func()
830 spin_lock(&ls->ls_recover_spin); in gfs2_control_func()
831 if (ls->ls_recover_block == block_gen && in gfs2_control_func()
832 ls->ls_recover_start == start_gen) { in gfs2_control_func()
833 clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); in gfs2_control_func()
834 spin_unlock(&ls->ls_recover_spin); in gfs2_control_func()
835 fs_info(sdp, "recover generation %u done\n", start_gen); in gfs2_control_func()
838 fs_info(sdp, "recover generation %u block2 %u %u\n", in gfs2_control_func()
839 start_gen, block_gen, ls->ls_recover_block); in gfs2_control_func()
840 spin_unlock(&ls->ls_recover_spin); in gfs2_control_func()
846 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in control_mount()
852 memset(&ls->ls_mounted_lksb, 0, sizeof(struct dlm_lksb)); in control_mount()
853 memset(&ls->ls_control_lksb, 0, sizeof(struct dlm_lksb)); in control_mount()
854 memset(&ls->ls_control_lvb, 0, GDLM_LVB_SIZE); in control_mount()
855 ls->ls_control_lksb.sb_lvbptr = ls->ls_control_lvb; in control_mount()
856 init_completion(&ls->ls_sync_wait); in control_mount()
858 set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); in control_mount()
876 error = -EINTR; in control_mount()
907 if (error == -EAGAIN) { in control_mount()
916 * we cannot do the first-mount responsibility it implies: recovery. in control_mount()
918 if (sdp->sd_args.ar_spectator) in control_mount()
925 } else if (error != -EAGAIN) { in control_mount()
935 /* not even -EAGAIN should happen here */ in control_mount()
942 * If we got both locks above in EX, then we're the first mounter. in control_mount()
944 * updated by other mounted nodes to reflect our mount generation. in control_mount()
946 * In simple first mounter cases, first mounter will see zero lvb_gen, in control_mount()
949 * lvb_gen will be non-zero. in control_mount()
952 control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits); in control_mount()
957 error = -EINVAL; in control_mount()
962 /* first mounter, keep both EX while doing first recovery */ in control_mount()
963 spin_lock(&ls->ls_recover_spin); in control_mount()
964 clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); in control_mount()
965 set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags); in control_mount()
966 set_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags); in control_mount()
967 spin_unlock(&ls->ls_recover_spin); in control_mount()
968 fs_info(sdp, "first mounter control generation %u\n", lvb_gen); in control_mount()
977 * We are not first mounter, now we need to wait for the control_lock in control_mount()
978 * lvb generation to be >= the generation from our first recover_done in control_mount()
982 if (!all_jid_bits_clear(ls->ls_lvb_bits)) { in control_mount()
988 spin_lock(&ls->ls_recover_spin); in control_mount()
989 block_gen = ls->ls_recover_block; in control_mount()
990 start_gen = ls->ls_recover_start; in control_mount()
991 mount_gen = ls->ls_recover_mount; in control_mount()
995 generation, which might include new recovery bits set */ in control_mount()
996 if (sdp->sd_args.ar_spectator) { in control_mount()
998 "non-spectator to mount.\n"); in control_mount()
1004 ls->ls_recover_flags); in control_mount()
1006 spin_unlock(&ls->ls_recover_spin); in control_mount()
1012 latest recovery generation */ in control_mount()
1015 lvb_gen, ls->ls_recover_flags); in control_mount()
1016 spin_unlock(&ls->ls_recover_spin); in control_mount()
1024 lvb_gen, ls->ls_recover_flags); in control_mount()
1025 spin_unlock(&ls->ls_recover_spin); in control_mount()
1029 clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); in control_mount()
1030 set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags); in control_mount()
1031 memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t)); in control_mount()
1032 memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t)); in control_mount()
1033 spin_unlock(&ls->ls_recover_spin); in control_mount()
1044 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in control_first_done()
1049 spin_lock(&ls->ls_recover_spin); in control_first_done()
1050 start_gen = ls->ls_recover_start; in control_first_done()
1051 block_gen = ls->ls_recover_block; in control_first_done()
1053 if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags) || in control_first_done()
1054 !test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) || in control_first_done()
1055 !test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) { in control_first_done()
1058 start_gen, block_gen, ls->ls_recover_flags); in control_first_done()
1059 spin_unlock(&ls->ls_recover_spin); in control_first_done()
1061 return -1; in control_first_done()
1067 * first mounter recovery. We can ignore any recover_slot in control_first_done()
1069 * because we are still the first mounter and any failed nodes in control_first_done()
1072 spin_unlock(&ls->ls_recover_spin); in control_first_done()
1075 wait_on_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY, in control_first_done()
1080 clear_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags); in control_first_done()
1081 set_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags); in control_first_done()
1082 memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t)); in control_first_done()
1083 memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t)); in control_first_done()
1084 spin_unlock(&ls->ls_recover_spin); in control_first_done()
1086 memset(ls->ls_lvb_bits, 0, GDLM_LVB_SIZE); in control_first_done()
1087 control_lvb_write(ls, start_gen, ls->ls_lvb_bits); in control_first_done()
1103 * gfs2 jids start at 0, so jid = slot - 1)
1111 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in set_recover_size()
1117 if (!ls->ls_lvb_bits) { in set_recover_size()
1118 ls->ls_lvb_bits = kzalloc(GDLM_LVB_SIZE, GFP_NOFS); in set_recover_size()
1119 if (!ls->ls_lvb_bits) in set_recover_size()
1120 return -ENOMEM; in set_recover_size()
1125 if (max_jid < slots[i].slot - 1) in set_recover_size()
1126 max_jid = slots[i].slot - 1; in set_recover_size()
1129 old_size = ls->ls_recover_size; in set_recover_size()
1141 return -ENOMEM; in set_recover_size()
1144 spin_lock(&ls->ls_recover_spin); in set_recover_size()
1145 memcpy(submit, ls->ls_recover_submit, old_size * sizeof(uint32_t)); in set_recover_size()
1146 memcpy(result, ls->ls_recover_result, old_size * sizeof(uint32_t)); in set_recover_size()
1147 kfree(ls->ls_recover_submit); in set_recover_size()
1148 kfree(ls->ls_recover_result); in set_recover_size()
1149 ls->ls_recover_submit = submit; in set_recover_size()
1150 ls->ls_recover_result = result; in set_recover_size()
1151 ls->ls_recover_size = new_size; in set_recover_size()
1152 spin_unlock(&ls->ls_recover_spin); in set_recover_size()
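
set_recover_size() sizes the recover_submit/recover_result arrays to cover the highest jid seen (dlm slots start at 1, so jid = slot - 1), allocating the larger arrays outside the spinlock and swapping them in under it. A simplified userspace model of that resize pattern, with a mutex standing in for ls_recover_spin and only one array shown; names are illustrative:

#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct recover_state {
	pthread_mutex_t lock;
	uint32_t *submit;
	uint32_t size;
};

static int grow_recover_size(struct recover_state *st, uint32_t new_size)
{
	uint32_t *submit, *old;

	if (st->size >= new_size)
		return 0;

	/* Allocate outside the lock, where blocking is harmless. */
	submit = calloc(new_size, sizeof(*submit));
	if (!submit)
		return -ENOMEM;

	/* Copy and publish the new array under the lock. */
	pthread_mutex_lock(&st->lock);
	memcpy(submit, st->submit, st->size * sizeof(*submit));
	old = st->submit;
	st->submit = submit;
	st->size = new_size;
	pthread_mutex_unlock(&st->lock);

	free(old);
	return 0;
}

int main(void)
{
	struct recover_state st = {
		.lock	= PTHREAD_MUTEX_INITIALIZER,
		.submit	= calloc(4, sizeof(uint32_t)),
		.size	= 4,
	};

	/* A node in dlm slot 9 shows up: jid = slot - 1 = 8, so the arrays
	 * must cover jid + 1 = 9 entries. */
	if (grow_recover_size(&st, 9) == 0)
		printf("recover_size now %u\n", (unsigned int)st.size);
	return 0;
}
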
1158 kfree(ls->ls_lvb_bits); in free_recover_size()
1159 kfree(ls->ls_recover_submit); in free_recover_size()
1160 kfree(ls->ls_recover_result); in free_recover_size()
1161 ls->ls_recover_submit = NULL; in free_recover_size()
1162 ls->ls_recover_result = NULL; in free_recover_size()
1163 ls->ls_recover_size = 0; in free_recover_size()
1164 ls->ls_lvb_bits = NULL; in free_recover_size()
1172 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_recover_prep()
1178 spin_lock(&ls->ls_recover_spin); in gdlm_recover_prep()
1179 ls->ls_recover_block = ls->ls_recover_start; in gdlm_recover_prep()
1180 set_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags); in gdlm_recover_prep()
1182 if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) || in gdlm_recover_prep()
1183 test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) { in gdlm_recover_prep()
1184 spin_unlock(&ls->ls_recover_spin); in gdlm_recover_prep()
1187 set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); in gdlm_recover_prep()
1188 spin_unlock(&ls->ls_recover_spin); in gdlm_recover_prep()
1197 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_recover_slot()
1198 int jid = slot->slot - 1; in gdlm_recover_slot()
1205 spin_lock(&ls->ls_recover_spin); in gdlm_recover_slot()
1206 if (ls->ls_recover_size < jid + 1) { in gdlm_recover_slot()
1208 jid, ls->ls_recover_block, ls->ls_recover_size); in gdlm_recover_slot()
1209 spin_unlock(&ls->ls_recover_spin); in gdlm_recover_slot()
1213 if (ls->ls_recover_submit[jid]) { in gdlm_recover_slot()
1215 jid, ls->ls_recover_block, ls->ls_recover_submit[jid]); in gdlm_recover_slot()
1217 ls->ls_recover_submit[jid] = ls->ls_recover_block; in gdlm_recover_slot()
1218 spin_unlock(&ls->ls_recover_spin); in gdlm_recover_slot()
1224 int our_slot, uint32_t generation) in gdlm_recover_done() argument
1227 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_recover_done()
1236 spin_lock(&ls->ls_recover_spin); in gdlm_recover_done()
1237 ls->ls_recover_start = generation; in gdlm_recover_done()
1239 if (!ls->ls_recover_mount) { in gdlm_recover_done()
1240 ls->ls_recover_mount = generation; in gdlm_recover_done()
1241 ls->ls_jid = our_slot - 1; in gdlm_recover_done()
1244 if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) in gdlm_recover_done()
1245 queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0); in gdlm_recover_done()
1247 clear_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags); in gdlm_recover_done()
1249 wake_up_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY); in gdlm_recover_done()
1250 spin_unlock(&ls->ls_recover_spin); in gdlm_recover_done()
1258 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_recovery_result()
1265 if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags)) in gdlm_recovery_result()
1269 if (jid == ls->ls_jid) in gdlm_recovery_result()
1272 spin_lock(&ls->ls_recover_spin); in gdlm_recovery_result()
1273 if (test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) { in gdlm_recovery_result()
1274 spin_unlock(&ls->ls_recover_spin); in gdlm_recovery_result()
1277 if (ls->ls_recover_size < jid + 1) { in gdlm_recovery_result()
1279 jid, ls->ls_recover_size); in gdlm_recovery_result()
1280 spin_unlock(&ls->ls_recover_spin); in gdlm_recovery_result()
1287 ls->ls_recover_result[jid] = result; in gdlm_recovery_result()
1293 if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) in gdlm_recovery_result()
1294 queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, in gdlm_recovery_result()
1296 spin_unlock(&ls->ls_recover_spin); in gdlm_recovery_result()
1307 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_mount()
1317 INIT_DELAYED_WORK(&sdp->sd_control_work, gfs2_control_func); in gdlm_mount()
1318 spin_lock_init(&ls->ls_recover_spin); in gdlm_mount()
1319 ls->ls_recover_flags = 0; in gdlm_mount()
1320 ls->ls_recover_mount = 0; in gdlm_mount()
1321 ls->ls_recover_start = 0; in gdlm_mount()
1322 ls->ls_recover_block = 0; in gdlm_mount()
1323 ls->ls_recover_size = 0; in gdlm_mount()
1324 ls->ls_recover_submit = NULL; in gdlm_mount()
1325 ls->ls_recover_result = NULL; in gdlm_mount()
1326 ls->ls_lvb_bits = NULL; in gdlm_mount()
1339 error = -EINVAL; in gdlm_mount()
1343 memcpy(cluster, table, strlen(table) - strlen(fsname)); in gdlm_mount()
1354 &ls->ls_dlm); in gdlm_mount()
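
The lock table name passed at mount time has the form "cluster:fsname"; gdlm_mount() copies everything before the ':' into the cluster buffer (the memcpy at line 1343) and uses the remainder as the lockspace name handed to dlm_new_lockspace(). A stand-alone sketch of that split, with a made-up locktable value:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *table = "mycluster:myfs";	/* hypothetical locktable= value */
	char cluster[32] = "";
	const char *fsname;

	fsname = strchr(table, ':');
	if (!fsname) {
		fprintf(stderr, "no fsname found in \"%s\"\n", table);
		return 1;
	}
	memcpy(cluster, table, strlen(table) - strlen(fsname));
	fsname++;				/* skip the ':' */

	printf("cluster \"%s\" fsname \"%s\"\n", cluster, fsname);
	return 0;
}
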
1367 set_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags); in gdlm_mount()
1371 if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags)) { in gdlm_mount()
1373 error = -EINVAL; in gdlm_mount()
1378 * control_mount() uses control_lock to determine first mounter, in gdlm_mount()
1388 ls->ls_first = !!test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags); in gdlm_mount()
1389 clear_bit(SDF_NOJOURNALID, &sdp->sd_flags); in gdlm_mount()
1391 wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID); in gdlm_mount()
1395 dlm_release_lockspace(ls->ls_dlm, 2); in gdlm_mount()
1404 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_first_done()
1407 if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags)) in gdlm_first_done()
1417 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gdlm_unmount()
1419 if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags)) in gdlm_unmount()
1424 spin_lock(&ls->ls_recover_spin); in gdlm_unmount()
1425 set_bit(DFL_UNMOUNT, &ls->ls_recover_flags); in gdlm_unmount()
1426 spin_unlock(&ls->ls_recover_spin); in gdlm_unmount()
1427 flush_delayed_work(&sdp->sd_control_work); in gdlm_unmount()
1431 if (ls->ls_dlm) { in gdlm_unmount()
1432 dlm_release_lockspace(ls->ls_dlm, 2); in gdlm_unmount()
1433 ls->ls_dlm = NULL; in gdlm_unmount()
1442 { Opt_first, "first=%d"},