Lines Matching: fs/dlm/lock.c
1 // SPDX-License-Identifier: GPL-2.0-only
5 ** Copyright (C) 2005-2010 Red Hat, Inc. All rights reserved.
52 L: send_xxxx() -> R: receive_xxxx()
54 L: receive_xxxx_reply() <- R: send_xxxx_reply()
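Each remote operation in the file instantiates that schematic; for the request case (functions excerpted further below) the concrete pairing is:

  L: send_request()           ->  R: receive_request()
  L: receive_request_reply()  <-  R: send_request_reply()

and likewise for convert, unlock, cancel, and lookup.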
95 * Lock compatibility matrix - thanks Steve
120 * -1 = nothing happens to the LVB
125 { -1, 1, 1, 1, 1, 1, 1, -1 }, /* UN */
126 { -1, 1, 1, 1, 1, 1, 1, 0 }, /* NL */
127 { -1, -1, 1, 1, 1, 1, 1, 0 }, /* CR */
128 { -1, -1, -1, 1, 1, 1, 1, 0 }, /* CW */
129 { -1, -1, -1, -1, 1, 1, 1, 0 }, /* PR */
130 { -1, 0, 0, 0, 0, 0, 1, 0 }, /* PW */
131 { -1, 0, 0, 0, 0, 0, 0, 0 }, /* EX */
132 { -1, 0, 0, 0, 0, 0, 0, 0 } /* PD */
136 __dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]
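A note on indexing, plus a hedged standalone sketch: lock modes start at DLM_LOCK_IV == -1 in <linux/dlmconstants.h>, so every 8x8 table here is indexed with mode + 1, rows and columns ordered UN NL CR CW PR PW EX PD. The rows excerpted above at lines 125-132 (the ones containing -1) belong to dlm_lvb_operations, per the "-1 = nothing happens to the LVB" comment; modes_compat() indexes __dlm_compat_matrix, which holds only 0/1. The values below are believed to match the kernel's compatibility table but are reproduced purely for illustration:

#include <stdio.h>

/* Modes as in <linux/dlmconstants.h>: IV is -1, hence the "+1" offset
 * when indexing 8x8 matrices ordered UN NL CR CW PR PW EX PD. */
enum { LOCK_IV = -1, LOCK_NL, LOCK_CR, LOCK_CW, LOCK_PR, LOCK_PW, LOCK_EX };

/* Believed to match __dlm_compat_matrix; treat as illustrative. */
static const int compat[8][8] = {
        /* UN NL CR CW PR PW EX PD */
        { 1, 1, 1, 1, 1, 1, 1, 0 }, /* UN */
        { 1, 1, 1, 1, 1, 1, 1, 0 }, /* NL */
        { 1, 1, 1, 1, 1, 1, 0, 0 }, /* CR */
        { 1, 1, 1, 1, 0, 0, 0, 0 }, /* CW */
        { 1, 1, 1, 0, 1, 0, 0, 0 }, /* PR */
        { 1, 1, 0, 0, 0, 0, 0, 0 }, /* PW */
        { 1, 1, 0, 0, 0, 0, 0, 0 }, /* EX */
        { 0, 0, 0, 0, 0, 0, 0, 0 }  /* PD */
};

int main(void)
{
        printf("PR vs PR: %d\n", compat[LOCK_PR + 1][LOCK_PR + 1]); /* 1: readers coexist */
        printf("PR vs EX: %d\n", compat[LOCK_PR + 1][LOCK_EX + 1]); /* 0: writer excludes readers */
        return 0;
}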
165 lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags, in dlm_print_lkb()
166 dlm_iflags_val(lkb), lkb->lkb_status, lkb->lkb_rqmode, in dlm_print_lkb()
167 lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_wait_nodeid, in dlm_print_lkb()
168 (unsigned long long)lkb->lkb_recover_seq); in dlm_print_lkb()
175 r->res_nodeid, r->res_master_nodeid, r->res_dir_nodeid, in dlm_print_rsb()
176 r->res_flags, r->res_first_lkid, r->res_recover_locks_count, in dlm_print_rsb()
177 r->res_name); in dlm_print_rsb()
187 list_empty(&r->res_root_list), list_empty(&r->res_recover_list)); in dlm_dump_rsb()
189 list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup) in dlm_dump_rsb()
192 list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) in dlm_dump_rsb()
195 list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) in dlm_dump_rsb()
198 list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue) in dlm_dump_rsb()
206 down_read(&ls->ls_in_recovery); in dlm_lock_recovery()
211 up_read(&ls->ls_in_recovery); in dlm_unlock_recovery()
216 return down_read_trylock(&ls->ls_in_recovery); in dlm_lock_recovery_try()
221 return !(lkb->lkb_exflags & DLM_LKF_NOQUEUE); in can_be_queued()
226 return (lkb->lkb_exflags & DLM_LKF_NOQUEUEBAST); in force_blocking_asts()
231 return test_bit(DLM_SBF_DEMOTED_BIT, &lkb->lkb_sbflags); in is_demoted()
236 return test_bit(DLM_SBF_ALTMODE_BIT, &lkb->lkb_sbflags); in is_altmode()
241 return (lkb->lkb_status == DLM_LKSTS_GRANTED); in is_granted()
246 DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r);); in is_remote()
247 return !!r->res_nodeid; in is_remote()
252 return lkb->lkb_nodeid && in is_process_copy()
253 !test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags); in is_process_copy()
258 return test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags); in is_master_copy()
263 if ((lkb->lkb_grmode==DLM_LOCK_PR && lkb->lkb_rqmode==DLM_LOCK_CW) || in middle_conversion()
264 (lkb->lkb_rqmode==DLM_LOCK_PR && lkb->lkb_grmode==DLM_LOCK_CW)) in middle_conversion()
271 return (!middle_conversion(lkb) && lkb->lkb_rqmode < lkb->lkb_grmode); in down_conversion()
276 return test_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags); in is_overlap_unlock()
281 return test_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags); in is_overlap_cancel()
286 return test_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags) || in is_overlap()
287 test_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags); in is_overlap()
295 DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb);); in queue_cast()
297 if (rv == -DLM_ECANCEL && in queue_cast()
298 test_and_clear_bit(DLM_IFL_DEADLOCK_CANCEL_BIT, &lkb->lkb_iflags)) in queue_cast()
299 rv = -EDEADLK; in queue_cast()
301 dlm_add_cb(lkb, DLM_CB_CAST, lkb->lkb_grmode, rv, dlm_sbflags_val(lkb)); in queue_cast()
307 is_overlap_unlock(lkb) ? -DLM_EUNLOCK : -DLM_ECANCEL); in queue_cast_overlap()
335 kref_get(&r->res_ref); in hold_rsb()
365 if (dlm_refcount_dec_and_write_lock_bh(&kref->refcount, lock)) { in dlm_kref_put_write_lock_bh()
375 struct dlm_ls *ls = r->res_ls; in put_rsb()
378 rv = dlm_kref_put_write_lock_bh(&r->res_ref, deactivate_rsb, in put_rsb()
379 &ls->ls_rsbtbl_lock); in put_rsb()
381 write_unlock_bh(&ls->ls_rsbtbl_lock); in put_rsb()
396 mod_timer(&ls->ls_scan_timer, jiffies); in enable_scan_timer()
409 spin_lock_bh(&ls->ls_scan_lock); in resume_scan_timer()
410 r = list_first_entry_or_null(&ls->ls_scan_list, struct dlm_rsb, in resume_scan_timer()
412 if (r && !timer_pending(&ls->ls_scan_timer)) in resume_scan_timer()
413 enable_scan_timer(ls, r->res_toss_time); in resume_scan_timer()
414 spin_unlock_bh(&ls->ls_scan_lock); in resume_scan_timer()
426 spin_lock_bh(&ls->ls_scan_lock); in del_scan()
427 r->res_toss_time = 0; in del_scan()
430 if (list_empty(&r->res_scan_list)) in del_scan()
434 first = list_first_entry(&ls->ls_scan_list, struct dlm_rsb, in del_scan()
436 list_del_init(&r->res_scan_list); in del_scan()
446 first = list_first_entry_or_null(&ls->ls_scan_list, struct dlm_rsb, in del_scan()
449 timer_delete(&ls->ls_scan_timer); in del_scan()
451 enable_scan_timer(ls, first->res_toss_time); in del_scan()
455 spin_unlock_bh(&ls->ls_scan_lock); in del_scan()
465 (r->res_master_nodeid != our_nodeid) && in add_scan()
472 WARN_ON(!list_empty(&r->res_scan_list)); in add_scan()
474 spin_lock_bh(&ls->ls_scan_lock); in add_scan()
476 r->res_toss_time = rsb_toss_jiffies(); in add_scan()
477 if (list_empty(&ls->ls_scan_list)) { in add_scan()
481 list_add_tail(&r->res_scan_list, &ls->ls_scan_list); in add_scan()
482 enable_scan_timer(ls, r->res_toss_time); in add_scan()
490 first = list_first_entry_or_null(&ls->ls_scan_list, struct dlm_rsb, in add_scan()
492 list_add_tail(&r->res_scan_list, &ls->ls_scan_list); in add_scan()
494 enable_scan_timer(ls, r->res_toss_time); in add_scan()
496 enable_scan_timer(ls, first->res_toss_time); in add_scan()
498 spin_unlock_bh(&ls->ls_scan_lock); in add_scan()
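add_scan() and del_scan() above keep ls_scan_list ordered by res_toss_time and keep a single timer armed for the earliest entry, re-arming only when the head of the list changes. A hedged user-space sketch of that pattern, all names invented for illustration:

#include <stddef.h>

struct entry {
        struct entry *next;
        unsigned long expires;          /* plays the role of res_toss_time */
};

static struct entry *head;
static unsigned long timer_deadline;    /* stand-in for ls_scan_timer */

static void arm_timer(unsigned long expires)
{
        timer_deadline = expires;       /* a real timer would be (re)armed here */
}

static void scan_add(struct entry *e, unsigned long expires)
{
        struct entry *it;

        e->expires = expires;
        e->next = NULL;
        if (!head) {
                head = e;
                arm_timer(e->expires);  /* empty list: the timer was idle */
                return;
        }
        /* deadlines are handed out in increasing order, so appending keeps
           the list sorted and the armed timer is already the earliest */
        for (it = head; it->next; it = it->next)
                ;
        it->next = e;
}

static void scan_del(struct entry *e)
{
        struct entry **pp;

        for (pp = &head; *pp && *pp != e; pp = &(*pp)->next)
                ;
        if (*pp)
                *pp = e->next;
        if (head)
                arm_timer(head->expires);       /* the head may have changed */
}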
525 rv = spin_trylock(&ls->ls_scan_lock); in dlm_rsb_scan()
532 r = list_first_entry_or_null(&ls->ls_scan_list, struct dlm_rsb, in dlm_rsb_scan()
536 spin_unlock(&ls->ls_scan_lock); in dlm_rsb_scan()
544 if (time_before(jiffies, r->res_toss_time)) { in dlm_rsb_scan()
546 enable_scan_timer(ls, r->res_toss_time); in dlm_rsb_scan()
547 spin_unlock(&ls->ls_scan_lock); in dlm_rsb_scan()
555 rv = write_trylock(&ls->ls_rsbtbl_lock); in dlm_rsb_scan()
557 spin_unlock(&ls->ls_scan_lock); in dlm_rsb_scan()
563 list_del(&r->res_slow_list); in dlm_rsb_scan()
564 rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node, in dlm_rsb_scan()
569 write_unlock(&ls->ls_rsbtbl_lock); in dlm_rsb_scan()
571 list_del_init(&r->res_scan_list); in dlm_rsb_scan()
572 spin_unlock(&ls->ls_scan_lock); in dlm_rsb_scan()
578 (r->res_master_nodeid != our_nodeid) && in dlm_rsb_scan()
586 (r->res_master_nodeid == our_nodeid) && in dlm_rsb_scan()
594 /* If ls->ls_new_rsb is empty, return -EAGAIN, so the caller can
605 return -ENOMEM; in get_rsb_struct()
607 r->res_ls = ls; in get_rsb_struct()
608 r->res_length = len; in get_rsb_struct()
609 memcpy(r->res_name, name, len); in get_rsb_struct()
610 spin_lock_init(&r->res_lock); in get_rsb_struct()
612 INIT_LIST_HEAD(&r->res_lookup); in get_rsb_struct()
613 INIT_LIST_HEAD(&r->res_grantqueue); in get_rsb_struct()
614 INIT_LIST_HEAD(&r->res_convertqueue); in get_rsb_struct()
615 INIT_LIST_HEAD(&r->res_waitqueue); in get_rsb_struct()
616 INIT_LIST_HEAD(&r->res_root_list); in get_rsb_struct()
617 INIT_LIST_HEAD(&r->res_scan_list); in get_rsb_struct()
618 INIT_LIST_HEAD(&r->res_recover_list); in get_rsb_struct()
619 INIT_LIST_HEAD(&r->res_masters_list); in get_rsb_struct()
635 return -EBADR; in dlm_search_rsb_tree()
642 rv = rhashtable_insert_fast(rhash, &rsb->res_node, in rsb_insert()
657 * to excessive master lookups and removals if we don't delay the release.
667 * - previously used locally but not any more (were on keep list, then
669 * - created and put on toss list as a directory record for a lookup
717 * from_nodeid has sent us a lock in dlm_recover_locks, believing in find_rsb_dir()
723 * If someone sends us a request, we are the dir node, and we do in find_rsb_dir()
725 * someone sends us a request after we have removed/freed an rsb. in find_rsb_dir()
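Traced through one rsb, the behavior described above: when the last local lock is dropped, the rsb is moved to the inactive state instead of being freed, and its directory/master record stays in ls_rsbtbl; a new request for the same name within the toss interval simply reactivates it (the kref_init() "ref is now used in active state" path below) with no fresh master lookup; only if res_toss_time expires first does dlm_rsb_scan() unhash and free it.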
736 error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); in find_rsb_dir()
740 /* check if the rsb is active under read lock - likely path */ in find_rsb_dir()
741 read_lock_bh(&ls->ls_rsbtbl_lock); in find_rsb_dir()
743 read_unlock_bh(&ls->ls_rsbtbl_lock); in find_rsb_dir()
744 error = -EBADR; in find_rsb_dir()
753 read_unlock_bh(&ls->ls_rsbtbl_lock); in find_rsb_dir()
757 kref_get(&r->res_ref); in find_rsb_dir()
758 read_unlock_bh(&ls->ls_rsbtbl_lock); in find_rsb_dir()
763 write_lock_bh(&ls->ls_rsbtbl_lock); in find_rsb_dir()
773 * but rcu allows us to simply check the HASHED flag, because in find_rsb_dir()
783 write_unlock_bh(&ls->ls_rsbtbl_lock); in find_rsb_dir()
787 write_unlock_bh(&ls->ls_rsbtbl_lock); in find_rsb_dir()
788 error = -EBADR; in find_rsb_dir()
799 if ((r->res_master_nodeid != our_nodeid) && from_other) { in find_rsb_dir()
801 has sent us a request */ in find_rsb_dir()
803 from_nodeid, r->res_master_nodeid, dir_nodeid, in find_rsb_dir()
804 r->res_name); in find_rsb_dir()
805 write_unlock_bh(&ls->ls_rsbtbl_lock); in find_rsb_dir()
806 error = -ENOTBLK; in find_rsb_dir()
810 if ((r->res_master_nodeid != our_nodeid) && from_dir) { in find_rsb_dir()
813 from_nodeid, r->res_master_nodeid); in find_rsb_dir()
816 r->res_master_nodeid = our_nodeid; in find_rsb_dir()
817 r->res_nodeid = 0; in find_rsb_dir()
819 r->res_first_lkid = 0; in find_rsb_dir()
822 if (from_local && (r->res_master_nodeid != our_nodeid)) { in find_rsb_dir()
826 r->res_first_lkid = 0; in find_rsb_dir()
835 list_move(&r->res_slow_list, &ls->ls_slow_active); in find_rsb_dir()
837 kref_init(&r->res_ref); /* ref is now used in active state */ in find_rsb_dir()
838 write_unlock_bh(&ls->ls_rsbtbl_lock); in find_rsb_dir()
848 if (error == -EBADR && !create) in find_rsb_dir()
855 r->res_hash = hash; in find_rsb_dir()
856 r->res_dir_nodeid = dir_nodeid; in find_rsb_dir()
857 kref_init(&r->res_ref); in find_rsb_dir()
862 from_nodeid, r->res_name); in find_rsb_dir()
863 r->res_master_nodeid = our_nodeid; in find_rsb_dir()
864 r->res_nodeid = 0; in find_rsb_dir()
871 from_nodeid, dir_nodeid, our_nodeid, r->res_name); in find_rsb_dir()
874 error = -ENOTBLK; in find_rsb_dir()
880 from_nodeid, dir_nodeid, r->res_name); in find_rsb_dir()
886 r->res_master_nodeid = our_nodeid; in find_rsb_dir()
887 r->res_nodeid = 0; in find_rsb_dir()
890 r->res_master_nodeid = 0; in find_rsb_dir()
891 r->res_nodeid = -1; in find_rsb_dir()
896 write_lock_bh(&ls->ls_rsbtbl_lock); in find_rsb_dir()
897 error = rsb_insert(r, &ls->ls_rsbtbl); in find_rsb_dir()
898 if (error == -EEXIST) { in find_rsb_dir()
902 write_unlock_bh(&ls->ls_rsbtbl_lock); in find_rsb_dir()
906 list_add(&r->res_slow_list, &ls->ls_slow_active); in find_rsb_dir()
908 write_unlock_bh(&ls->ls_rsbtbl_lock); in find_rsb_dir()
914 /* During recovery, other nodes can send us new MSTCPY locks (from
928 error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); in find_rsb_nodir()
932 /* check if the rsb is in active state under read lock - likely path */ in find_rsb_nodir()
933 read_lock_bh(&ls->ls_rsbtbl_lock); in find_rsb_nodir()
935 read_unlock_bh(&ls->ls_rsbtbl_lock); in find_rsb_nodir()
940 read_unlock_bh(&ls->ls_rsbtbl_lock); in find_rsb_nodir()
948 kref_get(&r->res_ref); in find_rsb_nodir()
949 read_unlock_bh(&ls->ls_rsbtbl_lock); in find_rsb_nodir()
955 write_lock_bh(&ls->ls_rsbtbl_lock); in find_rsb_nodir()
960 write_unlock_bh(&ls->ls_rsbtbl_lock); in find_rsb_nodir()
964 write_unlock_bh(&ls->ls_rsbtbl_lock); in find_rsb_nodir()
975 if (!recover && (r->res_master_nodeid != our_nodeid) && from_nodeid) { in find_rsb_nodir()
976 /* our rsb is not master, and another node has sent us a in find_rsb_nodir()
979 from_nodeid, r->res_master_nodeid, dir_nodeid); in find_rsb_nodir()
981 write_unlock_bh(&ls->ls_rsbtbl_lock); in find_rsb_nodir()
982 error = -ENOTBLK; in find_rsb_nodir()
986 if (!recover && (r->res_master_nodeid != our_nodeid) && in find_rsb_nodir()
991 our_nodeid, r->res_master_nodeid, dir_nodeid); in find_rsb_nodir()
993 r->res_master_nodeid = our_nodeid; in find_rsb_nodir()
994 r->res_nodeid = 0; in find_rsb_nodir()
998 list_move(&r->res_slow_list, &ls->ls_slow_active); in find_rsb_nodir()
1000 kref_init(&r->res_ref); in find_rsb_nodir()
1001 write_unlock_bh(&ls->ls_rsbtbl_lock); in find_rsb_nodir()
1015 r->res_hash = hash; in find_rsb_nodir()
1016 r->res_dir_nodeid = dir_nodeid; in find_rsb_nodir()
1017 r->res_master_nodeid = dir_nodeid; in find_rsb_nodir()
1018 r->res_nodeid = (dir_nodeid == our_nodeid) ? 0 : dir_nodeid; in find_rsb_nodir()
1019 kref_init(&r->res_ref); in find_rsb_nodir()
1021 write_lock_bh(&ls->ls_rsbtbl_lock); in find_rsb_nodir()
1022 error = rsb_insert(r, &ls->ls_rsbtbl); in find_rsb_nodir()
1023 if (error == -EEXIST) { in find_rsb_nodir()
1027 write_unlock_bh(&ls->ls_rsbtbl_lock); in find_rsb_nodir()
1031 list_add(&r->res_slow_list, &ls->ls_slow_active); in find_rsb_nodir()
1033 write_unlock_bh(&ls->ls_rsbtbl_lock); in find_rsb_nodir()
1073 * Without the rcu optimization, steps A5-8 would need to do
1090 return -EINVAL; in find_rsb()
1114 from_nodeid, r->res_master_nodeid, in validate_master_nodeid()
1115 r->res_dir_nodeid); in validate_master_nodeid()
1117 return -ENOTBLK; in validate_master_nodeid()
1120 if (from_nodeid != r->res_dir_nodeid) { in validate_master_nodeid()
1122 has sent us a request. this is much more common when our in validate_master_nodeid()
1123 master_nodeid is zero, so limit debug to non-zero. */ in validate_master_nodeid()
1125 if (r->res_master_nodeid) { in validate_master_nodeid()
1128 r->res_master_nodeid, r->res_dir_nodeid, in validate_master_nodeid()
1129 r->res_first_lkid, r->res_name); in validate_master_nodeid()
1131 return -ENOTBLK; in validate_master_nodeid()
1133 /* our rsb is not master, but the dir nodeid has sent us a in validate_master_nodeid()
1134 request; this could happen with master 0 / res_nodeid -1 */ in validate_master_nodeid()
1136 if (r->res_master_nodeid) { in validate_master_nodeid()
1139 from_nodeid, r->res_master_nodeid, in validate_master_nodeid()
1140 r->res_first_lkid, r->res_name); in validate_master_nodeid()
1143 r->res_master_nodeid = dlm_our_nodeid(); in validate_master_nodeid()
1144 r->res_nodeid = 0; in validate_master_nodeid()
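A summary of the nodeid conventions these checks rely on, inferred from the assignments visible throughout this listing:

        /*
         * res_master_nodeid == our_nodeid, res_nodeid == 0    we are master
         * res_master_nodeid == N (remote), res_nodeid == N    node N is master
         * res_master_nodeid == 0,          res_nodeid == -1   master unknown;
         *                                                     a lookup is needed
         */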
1156 if (r->res_dir_nodeid != our_nodeid) { in __dlm_master_lookup()
1159 r->res_dir_nodeid, our_nodeid, r->res_name); in __dlm_master_lookup()
1160 r->res_dir_nodeid = our_nodeid; in __dlm_master_lookup()
1163 if (fix_master && r->res_master_nodeid && dlm_is_removed(ls, r->res_master_nodeid)) { in __dlm_master_lookup()
1170 r->res_master_nodeid = from_nodeid; in __dlm_master_lookup()
1171 r->res_nodeid = from_nodeid; in __dlm_master_lookup()
1181 if (from_master && (r->res_master_nodeid != from_nodeid)) { in __dlm_master_lookup()
1188 __func__, from_nodeid, r->res_master_nodeid, in __dlm_master_lookup()
1189 r->res_nodeid, r->res_first_lkid, r->res_name); in __dlm_master_lookup()
1191 if (r->res_master_nodeid == our_nodeid) { in __dlm_master_lookup()
1197 r->res_master_nodeid = from_nodeid; in __dlm_master_lookup()
1198 r->res_nodeid = from_nodeid; in __dlm_master_lookup()
1202 if (!r->res_master_nodeid) { in __dlm_master_lookup()
1208 from_nodeid, r->res_first_lkid, r->res_name); in __dlm_master_lookup()
1209 r->res_master_nodeid = from_nodeid; in __dlm_master_lookup()
1210 r->res_nodeid = from_nodeid; in __dlm_master_lookup()
1214 (r->res_master_nodeid == from_nodeid)) { in __dlm_master_lookup()
1221 __func__, from_nodeid, flags, r->res_first_lkid, in __dlm_master_lookup()
1222 r->res_name); in __dlm_master_lookup()
1226 *r_nodeid = r->res_master_nodeid; in __dlm_master_lookup()
1269 return -EINVAL; in _dlm_master_lookup()
1274 return -EINVAL; in _dlm_master_lookup()
1282 ls->ls_num_nodes); in _dlm_master_lookup()
1283 *r_nodeid = -1; in _dlm_master_lookup()
1284 return -EINVAL; in _dlm_master_lookup()
1288 error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); in _dlm_master_lookup()
1292 /* check if the rsb is active under read lock - likely path */ in _dlm_master_lookup()
1293 read_lock_bh(&ls->ls_rsbtbl_lock); in _dlm_master_lookup()
1295 read_unlock_bh(&ls->ls_rsbtbl_lock); in _dlm_master_lookup()
1300 read_unlock_bh(&ls->ls_rsbtbl_lock); in _dlm_master_lookup()
1309 read_unlock_bh(&ls->ls_rsbtbl_lock); in _dlm_master_lookup()
1322 /* unlikely path - check if still part of ls_rsbtbl */ in _dlm_master_lookup()
1323 write_lock_bh(&ls->ls_rsbtbl_lock); in _dlm_master_lookup()
1328 write_unlock_bh(&ls->ls_rsbtbl_lock); in _dlm_master_lookup()
1335 write_unlock_bh(&ls->ls_rsbtbl_lock); in _dlm_master_lookup()
1350 WARN_ON(!list_empty(&r->res_scan_list) && in _dlm_master_lookup()
1351 r->res_master_nodeid != our_nodeid); in _dlm_master_lookup()
1353 write_unlock_bh(&ls->ls_rsbtbl_lock); in _dlm_master_lookup()
1362 r->res_hash = hash; in _dlm_master_lookup()
1363 r->res_dir_nodeid = our_nodeid; in _dlm_master_lookup()
1364 r->res_master_nodeid = from_nodeid; in _dlm_master_lookup()
1365 r->res_nodeid = from_nodeid; in _dlm_master_lookup()
1368 write_lock_bh(&ls->ls_rsbtbl_lock); in _dlm_master_lookup()
1369 error = rsb_insert(r, &ls->ls_rsbtbl); in _dlm_master_lookup()
1370 if (error == -EEXIST) { in _dlm_master_lookup()
1374 write_unlock_bh(&ls->ls_rsbtbl_lock); in _dlm_master_lookup()
1378 write_unlock_bh(&ls->ls_rsbtbl_lock); in _dlm_master_lookup()
1384 list_add(&r->res_slow_list, &ls->ls_slow_inactive); in _dlm_master_lookup()
1385 write_unlock_bh(&ls->ls_rsbtbl_lock); in _dlm_master_lookup()
1408 read_lock_bh(&ls->ls_rsbtbl_lock); in dlm_dump_rsb_hash()
1409 list_for_each_entry(r, &ls->ls_slow_active, res_slow_list) { in dlm_dump_rsb_hash()
1410 if (r->res_hash == hash) in dlm_dump_rsb_hash()
1413 read_unlock_bh(&ls->ls_rsbtbl_lock); in dlm_dump_rsb_hash()
1422 error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); in dlm_dump_rsb_name()
1434 struct dlm_ls *ls = r->res_ls; in deactivate_rsb()
1437 DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r);); in deactivate_rsb()
1439 list_move(&r->res_slow_list, &ls->ls_slow_inactive); in deactivate_rsb()
1457 (r->res_master_nodeid == our_nodeid || in deactivate_rsb()
1461 if (r->res_lvbptr) { in deactivate_rsb()
1462 dlm_free_lvb(r->res_lvbptr); in deactivate_rsb()
1463 r->res_lvbptr = NULL; in deactivate_rsb()
1471 DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r);); in free_inactive_rsb()
1472 DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r);); in free_inactive_rsb()
1473 DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r);); in free_inactive_rsb()
1474 DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r);); in free_inactive_rsb()
1475 DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r);); in free_inactive_rsb()
1476 DLM_ASSERT(list_empty(&r->res_scan_list), dlm_dump_rsb(r);); in free_inactive_rsb()
1477 DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r);); in free_inactive_rsb()
1478 DLM_ASSERT(list_empty(&r->res_masters_list), dlm_dump_rsb(r);); in free_inactive_rsb()
1489 lkb->lkb_resource = r; in attach_lkb()
1494 if (lkb->lkb_resource) { in detach_lkb()
1495 put_rsb(lkb->lkb_resource); in detach_lkb()
1496 lkb->lkb_resource = NULL; in detach_lkb()
1512 return -ENOMEM; in _create_lkb()
1514 lkb->lkb_last_bast_cb_mode = DLM_LOCK_IV; in _create_lkb()
1515 lkb->lkb_last_cast_cb_mode = DLM_LOCK_IV; in _create_lkb()
1516 lkb->lkb_last_cb_mode = DLM_LOCK_IV; in _create_lkb()
1517 lkb->lkb_nodeid = -1; in _create_lkb()
1518 lkb->lkb_grmode = DLM_LOCK_IV; in _create_lkb()
1519 kref_init(&lkb->lkb_ref); in _create_lkb()
1520 INIT_LIST_HEAD(&lkb->lkb_ownqueue); in _create_lkb()
1521 INIT_LIST_HEAD(&lkb->lkb_rsb_lookup); in _create_lkb()
1523 write_lock_bh(&ls->ls_lkbxa_lock); in _create_lkb()
1524 rv = xa_alloc(&ls->ls_lkbxa, &lkb->lkb_id, lkb, limit, GFP_ATOMIC); in _create_lkb()
1525 write_unlock_bh(&ls->ls_lkbxa_lock); in _create_lkb()
1547 lkb = xa_load(&ls->ls_lkbxa, lkid); in find_lkb()
1553 read_lock_bh(&ls->ls_lkbxa_lock); in find_lkb()
1554 if (kref_read(&lkb->lkb_ref)) in find_lkb()
1555 kref_get(&lkb->lkb_ref); in find_lkb()
1558 read_unlock_bh(&ls->ls_lkbxa_lock); in find_lkb()
1563 return lkb ? 0 : -ENOENT; in find_lkb()
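find_lkb() above only takes a reference while kref_read() is still non-zero, under ls_lkbxa_lock, the same lock __put_lkb() holds while erasing the id on the final put, so a lookup can never resurrect a dying lkb. A hedged user-space sketch of the pattern, with a toy array index and a plain integer standing in for the xarray and the kref:

#include <pthread.h>
#include <stddef.h>
#include <stdlib.h>

struct obj {
        int refcount;                   /* stands in for lkb_ref */
};

static struct obj *table[16];           /* toy index standing in for ls_lkbxa */
static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;

static struct obj *get_obj(unsigned int id)
{
        struct obj *o = NULL;

        pthread_mutex_lock(&idx_lock);
        if (id < 16)
                o = table[id];
        if (o && o->refcount)           /* like kref_read(): live objects only */
                o->refcount++;          /* like kref_get() */
        else
                o = NULL;
        pthread_mutex_unlock(&idx_lock);
        return o;
}

static void put_obj(unsigned int id)
{
        struct obj *o;

        pthread_mutex_lock(&idx_lock);
        o = table[id];
        if (o && --o->refcount == 0) {
                table[id] = NULL;       /* final put erases under the same lock */
                free(o);
        }
        pthread_mutex_unlock(&idx_lock);
}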
1573 DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb);); in kill_lkb()
1581 uint32_t lkid = lkb->lkb_id; in __put_lkb()
1584 rv = dlm_kref_put_write_lock_bh(&lkb->lkb_ref, kill_lkb, in __put_lkb()
1585 &ls->ls_lkbxa_lock); in __put_lkb()
1587 xa_erase(&ls->ls_lkbxa, lkid); in __put_lkb()
1588 write_unlock_bh(&ls->ls_lkbxa_lock); in __put_lkb()
1593 if (lkb->lkb_lvbptr && is_master_copy(lkb)) in __put_lkb()
1594 dlm_free_lvb(lkb->lkb_lvbptr); in __put_lkb()
1605 DLM_ASSERT(lkb->lkb_resource, dlm_print_lkb(lkb);); in dlm_put_lkb()
1606 DLM_ASSERT(lkb->lkb_resource->res_ls, dlm_print_lkb(lkb);); in dlm_put_lkb()
1608 ls = lkb->lkb_resource->res_ls; in dlm_put_lkb()
1617 kref_get(&lkb->lkb_ref); in hold_lkb()
1634 kref_put(&lkb->lkb_ref, unhold_lkb_assert); in unhold_lkb()
1643 if (iter->lkb_rqmode < mode) { in lkb_add_ordered()
1645 list_add_tail(new, &iter->lkb_statequeue); in lkb_add_ordered()
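lkb_add_ordered() above keeps a status queue sorted by descending mode: it inserts the new entry in front of the first existing entry with a lower mode (list_add_tail() on an entry's list head inserts before that entry), and entries of equal mode stay in arrival order. The same rule on a plain singly linked list:

struct node {
        struct node *next;
        int mode;                       /* stands in for lkb_grmode/lkb_rqmode */
};

/* insert before the first entry with a lower mode, else at the tail;
   equal modes keep FIFO order */
static void add_ordered(struct node **head, struct node *new)
{
        struct node **pp = head;

        while (*pp && (*pp)->mode >= new->mode)
                pp = &(*pp)->next;
        new->next = *pp;
        *pp = new;
}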
1657 kref_get(&lkb->lkb_ref); in add_lkb()
1659 DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb);); in add_lkb()
1661 lkb->lkb_timestamp = ktime_get(); in add_lkb()
1663 lkb->lkb_status = status; in add_lkb()
1667 if (lkb->lkb_exflags & DLM_LKF_HEADQUE) in add_lkb()
1668 list_add(&lkb->lkb_statequeue, &r->res_waitqueue); in add_lkb()
1670 list_add_tail(&lkb->lkb_statequeue, &r->res_waitqueue); in add_lkb()
1674 lkb_add_ordered(&lkb->lkb_statequeue, &r->res_grantqueue, in add_lkb()
1675 lkb->lkb_grmode); in add_lkb()
1678 if (lkb->lkb_exflags & DLM_LKF_HEADQUE) in add_lkb()
1679 list_add(&lkb->lkb_statequeue, &r->res_convertqueue); in add_lkb()
1681 list_add_tail(&lkb->lkb_statequeue, in add_lkb()
1682 &r->res_convertqueue); in add_lkb()
1691 lkb->lkb_status = 0; in del_lkb()
1692 list_del(&lkb->lkb_statequeue); in del_lkb()
1716 return -1; in msg_reply_type()
1724 struct dlm_ls *ls = lkb->lkb_resource->res_ls; in add_to_waiters()
1726 spin_lock_bh(&ls->ls_waiters_lock); in add_to_waiters()
1727 if (lkb->lkb_wait_type || is_overlap_cancel(lkb)) { in add_to_waiters()
1730 set_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags); in add_to_waiters()
1733 set_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags); in add_to_waiters()
1743 lkb->lkb_wait_count++; in add_to_waiters()
1747 lkb->lkb_id, lkb->lkb_wait_type, mstype, in add_to_waiters()
1748 lkb->lkb_wait_count, dlm_iflags_val(lkb)); in add_to_waiters()
1752 DLM_ASSERT(!lkb->lkb_wait_count, in add_to_waiters()
1754 printk("wait_count %d\n", lkb->lkb_wait_count);); in add_to_waiters()
1756 lkb->lkb_wait_count++; in add_to_waiters()
1757 lkb->lkb_wait_type = mstype; in add_to_waiters()
1758 lkb->lkb_wait_nodeid = to_nodeid; /* for debugging */ in add_to_waiters()
1760 list_add(&lkb->lkb_wait_reply, &ls->ls_waiters); in add_to_waiters()
1762 spin_unlock_bh(&ls->ls_waiters_lock); in add_to_waiters()
1773 struct dlm_ls *ls = lkb->lkb_resource->res_ls; in _remove_from_waiters()
1777 test_and_clear_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags)) { in _remove_from_waiters()
1778 log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id); in _remove_from_waiters()
1784 test_and_clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags)) { in _remove_from_waiters()
1785 log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id); in _remove_from_waiters()
1794 (lkb->lkb_wait_type != DLM_MSG_CANCEL)) { in _remove_from_waiters()
1796 lkb->lkb_id, lkb->lkb_wait_type); in _remove_from_waiters()
1797 return -1; in _remove_from_waiters()
1806 lingering state of the cancel and fail with -EBUSY. */ in _remove_from_waiters()
1809 (lkb->lkb_wait_type == DLM_MSG_CONVERT) && ms && !ms->m_result && in _remove_from_waiters()
1810 test_and_clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags)) { in _remove_from_waiters()
1812 lkb->lkb_id); in _remove_from_waiters()
1813 lkb->lkb_wait_type = 0; in _remove_from_waiters()
1814 lkb->lkb_wait_count--; in _remove_from_waiters()
1820 msg due to lookup->request optimization, verify others? */ in _remove_from_waiters()
1822 if (lkb->lkb_wait_type) { in _remove_from_waiters()
1823 lkb->lkb_wait_type = 0; in _remove_from_waiters()
1828 lkb->lkb_id, ms ? le32_to_cpu(ms->m_header.h_nodeid) : 0, in _remove_from_waiters()
1829 lkb->lkb_remid, mstype, dlm_iflags_val(lkb)); in _remove_from_waiters()
1830 return -1; in _remove_from_waiters()
1833 /* the force-unlock/cancel has completed and we haven't recvd a reply in _remove_from_waiters()
1838 if (overlap_done && lkb->lkb_wait_type) { in _remove_from_waiters()
1840 lkb->lkb_id, mstype, lkb->lkb_wait_type); in _remove_from_waiters()
1841 lkb->lkb_wait_count--; in _remove_from_waiters()
1843 lkb->lkb_wait_type = 0; in _remove_from_waiters()
1846 DLM_ASSERT(lkb->lkb_wait_count, dlm_print_lkb(lkb);); in _remove_from_waiters()
1848 clear_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags); in _remove_from_waiters()
1849 lkb->lkb_wait_count--; in _remove_from_waiters()
1850 if (!lkb->lkb_wait_count) in _remove_from_waiters()
1851 list_del_init(&lkb->lkb_wait_reply); in _remove_from_waiters()
1858 struct dlm_ls *ls = lkb->lkb_resource->res_ls; in remove_from_waiters()
1861 spin_lock_bh(&ls->ls_waiters_lock); in remove_from_waiters()
1863 spin_unlock_bh(&ls->ls_waiters_lock); in remove_from_waiters()
1877 struct dlm_ls *ls = lkb->lkb_resource->res_ls; in remove_from_waiters_ms()
1881 spin_lock_bh(&ls->ls_waiters_lock); in remove_from_waiters_ms()
1883 WARN_ON_ONCE(!rwsem_is_locked(&ls->ls_in_recovery) || in remove_from_waiters_ms()
1885 error = _remove_from_waiters(lkb, le32_to_cpu(ms->m_type), ms); in remove_from_waiters_ms()
1887 spin_unlock_bh(&ls->ls_waiters_lock); in remove_from_waiters_ms()
1895 int b, len = r->res_ls->ls_lvblen; in set_lvb_lock()
1899 b=-1 do nothing */ in set_lvb_lock()
1901 b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1]; in set_lvb_lock()
1904 if (!lkb->lkb_lvbptr) in set_lvb_lock()
1907 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK)) in set_lvb_lock()
1910 if (!r->res_lvbptr) in set_lvb_lock()
1913 memcpy(lkb->lkb_lvbptr, r->res_lvbptr, len); in set_lvb_lock()
1914 lkb->lkb_lvbseq = r->res_lvbseq; in set_lvb_lock()
1917 if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) { in set_lvb_lock()
1922 if (!lkb->lkb_lvbptr) in set_lvb_lock()
1925 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK)) in set_lvb_lock()
1928 if (!r->res_lvbptr) in set_lvb_lock()
1929 r->res_lvbptr = dlm_allocate_lvb(r->res_ls); in set_lvb_lock()
1931 if (!r->res_lvbptr) in set_lvb_lock()
1934 memcpy(r->res_lvbptr, lkb->lkb_lvbptr, len); in set_lvb_lock()
1935 r->res_lvbseq++; in set_lvb_lock()
1936 lkb->lkb_lvbseq = r->res_lvbseq; in set_lvb_lock()
1941 set_bit(DLM_SBF_VALNOTVALID_BIT, &lkb->lkb_sbflags); in set_lvb_lock()
1946 if (lkb->lkb_grmode < DLM_LOCK_PW) in set_lvb_unlock()
1949 if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) { in set_lvb_unlock()
1954 if (!lkb->lkb_lvbptr) in set_lvb_unlock()
1957 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK)) in set_lvb_unlock()
1960 if (!r->res_lvbptr) in set_lvb_unlock()
1961 r->res_lvbptr = dlm_allocate_lvb(r->res_ls); in set_lvb_unlock()
1963 if (!r->res_lvbptr) in set_lvb_unlock()
1966 memcpy(r->res_lvbptr, lkb->lkb_lvbptr, r->res_ls->ls_lvblen); in set_lvb_unlock()
1967 r->res_lvbseq++; in set_lvb_unlock()
1978 if (!lkb->lkb_lvbptr) in set_lvb_lock_pc()
1981 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK)) in set_lvb_lock_pc()
1984 b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1]; in set_lvb_lock_pc()
1987 if (len > r->res_ls->ls_lvblen) in set_lvb_lock_pc()
1988 len = r->res_ls->ls_lvblen; in set_lvb_lock_pc()
1989 memcpy(lkb->lkb_lvbptr, ms->m_extra, len); in set_lvb_lock_pc()
1990 lkb->lkb_lvbseq = le32_to_cpu(ms->m_lvbseq); in set_lvb_lock_pc()
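Worked examples against the dlm_lvb_operations rows excerpted at lines 125-132 (indexed [grmode + 1][rqmode + 1]; columns assumed to follow the same UN NL CR CW PR PW EX PD order as the row labels): converting CR to EX gives b = 1, so the resource's LVB is copied out to the caller; converting EX down to NL gives b = 0, so the caller's LVB is written into the resource and res_lvbseq is bumped; converting CR down to NL gives b = -1, so the LVB is left untouched.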
1995 remove_lock -- used for unlock, removes lkb from granted
1996 revert_lock -- used for cancel, moves lkb from convert to granted
1997 grant_lock -- used for request and convert, adds lkb to granted or moves lkb from convert or waiting to granted
2007 lkb->lkb_grmode = DLM_LOCK_IV; in _remove_lock()
2026 returns: 0 did nothing, 1 moved lock to granted, -1 removed lock */
2032 lkb->lkb_rqmode = DLM_LOCK_IV; in revert_lock()
2034 switch (lkb->lkb_status) { in revert_lock()
2043 lkb->lkb_grmode = DLM_LOCK_IV; in revert_lock()
2047 rv = -1; in revert_lock()
2050 log_print("invalid status for revert %d", lkb->lkb_status); in revert_lock()
2062 if (lkb->lkb_grmode != lkb->lkb_rqmode) { in _grant_lock()
2063 lkb->lkb_grmode = lkb->lkb_rqmode; in _grant_lock()
2064 if (lkb->lkb_status) in _grant_lock()
2070 lkb->lkb_rqmode = DLM_LOCK_IV; in _grant_lock()
2071 lkb->lkb_highbast = 0; in _grant_lock()
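The two outcomes above, traced for a conversion: an lkb granted PR converting to EX waits on res_convertqueue; if the convert succeeds, _grant_lock() sets grmode to the requested EX, moves the lkb to the grant queue, and resets rqmode to DLM_LOCK_IV; if it is cancelled, revert_lock() clears rqmode and moves the lkb back to the grant queue still holding PR (returning 1), whereas cancelling a plain request that never had a granted mode deletes the lkb outright (returning -1, the "removed lock" case).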
2110 if (lkb->lkb_rqmode == DLM_LOCK_IV || lkb->lkb_grmode == DLM_LOCK_IV) { in munge_demoted()
2112 lkb->lkb_id, lkb->lkb_grmode, lkb->lkb_rqmode); in munge_demoted()
2116 lkb->lkb_grmode = DLM_LOCK_NL; in munge_demoted()
2121 if (ms->m_type != cpu_to_le32(DLM_MSG_REQUEST_REPLY) && in munge_altmode()
2122 ms->m_type != cpu_to_le32(DLM_MSG_GRANT)) { in munge_altmode()
2124 lkb->lkb_id, le32_to_cpu(ms->m_type)); in munge_altmode()
2128 if (lkb->lkb_exflags & DLM_LKF_ALTPR) in munge_altmode()
2129 lkb->lkb_rqmode = DLM_LOCK_PR; in munge_altmode()
2130 else if (lkb->lkb_exflags & DLM_LKF_ALTCW) in munge_altmode()
2131 lkb->lkb_rqmode = DLM_LOCK_CW; in munge_altmode()
2133 log_print("munge_altmode invalid exflags %x", lkb->lkb_exflags); in munge_altmode()
2140 struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb, in first_in_list()
2142 if (lkb->lkb_id == first->lkb_id) in first_in_list()
2173 * Convert Queue: NL->EX (first lock)
2174 * PR->EX (second lock)
2178 * list. We either cancel lkb's conversion (PR->EX) and return EDEADLK, or we
2182 * Originally, this function detected conv-deadlk in a more limited scope:
2183 * - if !modes_compat(lkb1, lkb2) && !modes_compat(lkb2, lkb1), or
2184 * - if lkb1 was the first entry in the queue (not just earlier), and was
2189 * That second condition meant we'd only say there was conv-deadlk if
2200 * be zero, i.e. there will never be conv-deadlk between two locks that are
2209 list_for_each_entry(lkb1, &r->res_convertqueue, lkb_statequeue) { in conversion_deadlock_detect()
2246 int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV); in _can_be_granted()
2249 * 6-10: Version 5.4 introduced an option to address the phenomenon of in _can_be_granted()
2252 * 6-11: If the optional EXPEDITE flag is used with the new NL mode in _can_be_granted()
2260 * conversion or used with a non-NL requested mode. We also know an in _can_be_granted()
2263 * !conv && lkb->rqmode == DLM_LOCK_NL && (flags & EXPEDITE)) can in _can_be_granted()
2267 if (lkb->lkb_exflags & DLM_LKF_EXPEDITE) in _can_be_granted()
2275 if (queue_conflict(&r->res_grantqueue, lkb)) in _can_be_granted()
2279 * 6-3: By default, a conversion request is immediately granted if the in _can_be_granted()
2284 if (queue_conflict(&r->res_convertqueue, lkb)) in _can_be_granted()
2295 * (e.g. old, failed master held granted EX, with PR->EX, NL->EX. in _can_be_granted()
2297 * NL->EX, PR->EX, an in-place conversion deadlock.) So, after in _can_be_granted()
2305 * 6-5: But the default algorithm for deciding whether to grant or in _can_be_granted()
2310 * 6-7: This issue is dealt with by using the optional QUECVT flag with in _can_be_granted()
2327 if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT)) in _can_be_granted()
2335 if (now && conv && (lkb->lkb_exflags & DLM_LKF_QUECVT)) { in _can_be_granted()
2336 if (list_empty(&r->res_convertqueue)) in _can_be_granted()
2347 if (lkb->lkb_exflags & DLM_LKF_NOORDER) in _can_be_granted()
2351 * 6-3: Once in that queue [CONVERTING], a conversion request cannot be in _can_be_granted()
2356 if (!now && conv && first_in_list(lkb, &r->res_convertqueue)) in _can_be_granted()
2360 * 6-4: By default, a new request is immediately granted only if all in _can_be_granted()
2363 * - The queue of ungranted conversion requests for the resource is in _can_be_granted()
2365 * - The queue of ungranted new requests for the resource is empty. in _can_be_granted()
2366 * - The mode of the new request is compatible with the most in _can_be_granted()
2370 if (now && !conv && list_empty(&r->res_convertqueue) && in _can_be_granted()
2371 list_empty(&r->res_waitqueue)) in _can_be_granted()
2375 * 6-4: Once a lock request is in the queue of ungranted new requests, in _can_be_granted()
2382 if (!now && !conv && list_empty(&r->res_convertqueue) && in _can_be_granted()
2383 first_in_list(lkb, &r->res_waitqueue)) in _can_be_granted()
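A condensed, hedged restatement of the decision points excerpted from _can_be_granted() (the NOORDER shortcut and the deadlock/demotion paths are omitted); all inputs are precomputed booleans, so this is a reading aid rather than the kernel logic:

#include <stdbool.h>

static bool can_grant_sketch(bool now, bool conv, bool quecvt, bool expedite,
                             bool conflict_grantq, bool conflict_convertq,
                             bool convertq_empty, bool waitq_empty,
                             bool first_in_convertq, bool first_in_waitq)
{
        if (expedite)                   /* DLM_LKF_EXPEDITE on a new NL request */
                return true;
        if (conflict_grantq || conflict_convertq)
                return false;
        if (now && conv && !quecvt)     /* immediate conversion, default rules */
                return true;
        if (now && conv && quecvt && convertq_empty)
                return true;
        if (!now && conv && first_in_convertq)
                return true;            /* CONVERTING queue is strictly FIFO */
        if (now && !conv && convertq_empty && waitq_empty)
                return true;            /* new request, nothing ahead of it */
        if (!now && !conv && convertq_empty && first_in_waitq)
                return true;
        return false;
}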
2393 int8_t alt = 0, rqmode = lkb->lkb_rqmode; in can_be_granted()
2394 int8_t is_convert = (lkb->lkb_grmode != DLM_LOCK_IV); in can_be_granted()
2404 * The CONVDEADLK flag is non-standard and tells the dlm to resolve in can_be_granted()
2411 if (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) { in can_be_granted()
2412 lkb->lkb_grmode = DLM_LOCK_NL; in can_be_granted()
2413 set_bit(DLM_SBF_DEMOTED_BIT, &lkb->lkb_sbflags); in can_be_granted()
2415 *err = -EDEADLK; in can_be_granted()
2418 lkb->lkb_id, now); in can_be_granted()
2425 * The ALTPR and ALTCW flags are non-standard and tell the dlm to try in can_be_granted()
2431 if (rqmode != DLM_LOCK_PR && (lkb->lkb_exflags & DLM_LKF_ALTPR)) in can_be_granted()
2433 else if (rqmode != DLM_LOCK_CW && (lkb->lkb_exflags & DLM_LKF_ALTCW)) in can_be_granted()
2437 lkb->lkb_rqmode = alt; in can_be_granted()
2440 set_bit(DLM_SBF_ALTMODE_BIT, &lkb->lkb_sbflags); in can_be_granted()
2442 lkb->lkb_rqmode = rqmode; in can_be_granted()
2465 list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) { in grant_pending_convert()
2479 lkb->lkb_id, lkb->lkb_nodeid, r->res_name); in grant_pending_convert()
2490 if (lkb->lkb_exflags & DLM_LKF_NODLCKWT) { in grant_pending_convert()
2491 if (lkb->lkb_highbast < lkb->lkb_rqmode) { in grant_pending_convert()
2492 queue_bast(r, lkb, lkb->lkb_rqmode); in grant_pending_convert()
2493 lkb->lkb_highbast = lkb->lkb_rqmode; in grant_pending_convert()
2497 lkb->lkb_id, lkb->lkb_nodeid, in grant_pending_convert()
2498 r->res_name); in grant_pending_convert()
2504 hi = max_t(int, lkb->lkb_rqmode, hi); in grant_pending_convert()
2506 if (cw && lkb->lkb_rqmode == DLM_LOCK_CW) in grant_pending_convert()
2525 list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) { in grant_pending_wait()
2531 high = max_t(int, lkb->lkb_rqmode, high); in grant_pending_wait()
2532 if (lkb->lkb_rqmode == DLM_LOCK_CW) in grant_pending_wait()
2547 if (gr->lkb_grmode == DLM_LOCK_PR && cw) { in lock_requires_bast()
2548 if (gr->lkb_highbast < DLM_LOCK_EX) in lock_requires_bast()
2553 if (gr->lkb_highbast < high && in lock_requires_bast()
2554 !__dlm_compat_matrix[gr->lkb_grmode+1][high+1]) in lock_requires_bast()
2566 log_print("grant_pending_locks r nodeid %d", r->res_nodeid); in grant_pending_locks()
2583 list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) { in grant_pending_locks()
2584 if (lkb->lkb_bastfn && lock_requires_bast(lkb, high, cw)) { in grant_pending_locks()
2586 lkb->lkb_grmode == DLM_LOCK_PR) in grant_pending_locks()
2590 lkb->lkb_highbast = high; in grant_pending_locks()
2597 if ((gr->lkb_grmode == DLM_LOCK_PR && rq->lkb_rqmode == DLM_LOCK_CW) || in modes_require_bast()
2598 (gr->lkb_grmode == DLM_LOCK_CW && rq->lkb_rqmode == DLM_LOCK_PR)) { in modes_require_bast()
2599 if (gr->lkb_highbast < DLM_LOCK_EX) in modes_require_bast()
2604 if (gr->lkb_highbast < rq->lkb_rqmode && !modes_compat(gr, rq)) in modes_require_bast()
2618 if (gr->lkb_bastfn && modes_require_bast(gr, lkb)) { in send_bast_queue()
2619 queue_bast(r, gr, lkb->lkb_rqmode); in send_bast_queue()
2620 gr->lkb_highbast = lkb->lkb_rqmode; in send_bast_queue()
2627 send_bast_queue(r, &r->res_grantqueue, lkb); in send_blocking_asts()
2632 send_bast_queue(r, &r->res_grantqueue, lkb); in send_blocking_asts_all()
2633 send_bast_queue(r, &r->res_convertqueue, lkb); in send_blocking_asts_all()
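Example of the queueing just above: when a new EX request cannot be granted, each grant-queue holder of, say, PR receives a blocking callback with mode EX, because EX is incompatible with PR and the holder's lkb_highbast is still below EX; lkb_highbast is then raised to EX so the same holder is not re-notified for that mode.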
2636 /* set_master(r, lkb) -- set the master nodeid of a resource
2661 r->res_first_lkid = lkb->lkb_id; in set_master()
2662 lkb->lkb_nodeid = r->res_nodeid; in set_master()
2666 if (r->res_first_lkid && r->res_first_lkid != lkb->lkb_id) { in set_master()
2667 list_add_tail(&lkb->lkb_rsb_lookup, &r->res_lookup); in set_master()
2671 if (r->res_master_nodeid == our_nodeid) { in set_master()
2672 lkb->lkb_nodeid = 0; in set_master()
2676 if (r->res_master_nodeid) { in set_master()
2677 lkb->lkb_nodeid = r->res_master_nodeid; in set_master()
2688 log_debug(r->res_ls, "set_master %x self master %d dir %d %s", in set_master()
2689 lkb->lkb_id, r->res_master_nodeid, r->res_dir_nodeid, in set_master()
2690 r->res_name); in set_master()
2691 r->res_master_nodeid = our_nodeid; in set_master()
2692 r->res_nodeid = 0; in set_master()
2693 lkb->lkb_nodeid = 0; in set_master()
2697 r->res_first_lkid = lkb->lkb_id; in set_master()
2706 list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) { in process_lookup_list()
2707 list_del_init(&lkb->lkb_rsb_lookup); in process_lookup_list()
2712 /* confirm_master -- confirm (or deny) an rsb's master nodeid */
2718 if (!r->res_first_lkid) in confirm_master()
2723 case -EINPROGRESS: in confirm_master()
2724 r->res_first_lkid = 0; in confirm_master()
2728 case -EAGAIN: in confirm_master()
2729 case -EBADR: in confirm_master()
2730 case -ENOTBLK: in confirm_master()
2735 r->res_first_lkid = 0; in confirm_master()
2737 if (!list_empty(&r->res_lookup)) { in confirm_master()
2738 lkb = list_entry(r->res_lookup.next, struct dlm_lkb, in confirm_master()
2740 list_del_init(&lkb->lkb_rsb_lookup); in confirm_master()
2741 r->res_first_lkid = lkb->lkb_id; in confirm_master()
2747 log_error(r->res_ls, "confirm_master unknown error %d", error); in confirm_master()
2757 int rv = -EINVAL; in set_lock_args()
2794 if (flags & DLM_LKF_VALBLK && !lksb->sb_lvbptr) in set_lock_args()
2797 if (flags & DLM_LKF_CONVERT && !lksb->sb_lkid) in set_lock_args()
2804 args->flags = flags; in set_lock_args()
2805 args->astfn = ast; in set_lock_args()
2806 args->astparam = astparam; in set_lock_args()
2807 args->bastfn = bast; in set_lock_args()
2808 args->mode = mode; in set_lock_args()
2809 args->lksb = lksb; in set_lock_args()
2819 return -EINVAL; in set_unlock_args()
2822 return -EINVAL; in set_unlock_args()
2824 args->flags = flags; in set_unlock_args()
2825 args->astparam = astarg; in set_unlock_args()
2832 int rv = -EBUSY; in validate_lock_args()
2834 if (args->flags & DLM_LKF_CONVERT) { in validate_lock_args()
2835 if (lkb->lkb_status != DLM_LKSTS_GRANTED) in validate_lock_args()
2839 if (lkb->lkb_wait_type || lkb->lkb_wait_count) in validate_lock_args()
2845 rv = -EINVAL; in validate_lock_args()
2846 if (test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags)) in validate_lock_args()
2849 if (args->flags & DLM_LKF_QUECVT && in validate_lock_args()
2850 !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1]) in validate_lock_args()
2854 lkb->lkb_exflags = args->flags; in validate_lock_args()
2856 lkb->lkb_astfn = args->astfn; in validate_lock_args()
2857 lkb->lkb_astparam = args->astparam; in validate_lock_args()
2858 lkb->lkb_bastfn = args->bastfn; in validate_lock_args()
2859 lkb->lkb_rqmode = args->mode; in validate_lock_args()
2860 lkb->lkb_lksb = args->lksb; in validate_lock_args()
2861 lkb->lkb_lvbptr = args->lksb->sb_lvbptr; in validate_lock_args()
2862 lkb->lkb_ownpid = (int) current->pid; in validate_lock_args()
2868 case -EINVAL: in validate_lock_args()
2872 rv, lkb->lkb_id, dlm_iflags_val(lkb), args->flags, in validate_lock_args()
2873 lkb->lkb_status, lkb->lkb_wait_type); in validate_lock_args()
2877 rv, lkb->lkb_id, dlm_iflags_val(lkb), args->flags, in validate_lock_args()
2878 lkb->lkb_status, lkb->lkb_wait_type); in validate_lock_args()
2885 /* when dlm_unlock() sees -EBUSY with CANCEL/FORCEUNLOCK it returns 0
2888 /* note: it's valid for lkb_nodeid/res_nodeid to be -1 when we get here
2894 struct dlm_ls *ls = lkb->lkb_resource->res_ls; in validate_unlock_args()
2895 int rv = -EBUSY; in validate_unlock_args()
2898 if (!(args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) && in validate_unlock_args()
2899 (lkb->lkb_wait_type || lkb->lkb_wait_count)) in validate_unlock_args()
2905 if (!list_empty(&lkb->lkb_rsb_lookup)) { in validate_unlock_args()
2906 if (args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) { in validate_unlock_args()
2907 log_debug(ls, "unlock on rsb_lookup %x", lkb->lkb_id); in validate_unlock_args()
2908 list_del_init(&lkb->lkb_rsb_lookup); in validate_unlock_args()
2909 queue_cast(lkb->lkb_resource, lkb, in validate_unlock_args()
2910 args->flags & DLM_LKF_CANCEL ? in validate_unlock_args()
2911 -DLM_ECANCEL : -DLM_EUNLOCK); in validate_unlock_args()
2914 /* caller changes -EBUSY to 0 for CANCEL and FORCEUNLOCK */ in validate_unlock_args()
2918 rv = -EINVAL; in validate_unlock_args()
2919 if (test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags)) { in validate_unlock_args()
2920 log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id); in validate_unlock_args()
2930 if (test_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags)) { in validate_unlock_args()
2931 log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id); in validate_unlock_args()
2932 rv = -ENOENT; in validate_unlock_args()
2941 if (args->flags & DLM_LKF_CANCEL) { in validate_unlock_args()
2942 if (lkb->lkb_exflags & DLM_LKF_CANCEL) in validate_unlock_args()
2948 if (test_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags)) { in validate_unlock_args()
2949 set_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags); in validate_unlock_args()
2950 rv = -EBUSY; in validate_unlock_args()
2955 if (lkb->lkb_status == DLM_LKSTS_GRANTED && in validate_unlock_args()
2956 !lkb->lkb_wait_type) { in validate_unlock_args()
2957 rv = -EBUSY; in validate_unlock_args()
2961 switch (lkb->lkb_wait_type) { in validate_unlock_args()
2964 set_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags); in validate_unlock_args()
2965 rv = -EBUSY; in validate_unlock_args()
2975 /* do we need to allow a force-unlock if there's a normal unlock in validate_unlock_args()
2977 fail such that we'd want to send a force-unlock to be sure? */ in validate_unlock_args()
2979 if (args->flags & DLM_LKF_FORCEUNLOCK) { in validate_unlock_args()
2980 if (lkb->lkb_exflags & DLM_LKF_FORCEUNLOCK) in validate_unlock_args()
2983 if (test_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags)) { in validate_unlock_args()
2984 set_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags); in validate_unlock_args()
2985 rv = -EBUSY; in validate_unlock_args()
2989 switch (lkb->lkb_wait_type) { in validate_unlock_args()
2992 set_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags); in validate_unlock_args()
2993 rv = -EBUSY; in validate_unlock_args()
3003 lkb->lkb_exflags |= args->flags; in validate_unlock_args()
3005 lkb->lkb_astparam = args->astparam; in validate_unlock_args()
3011 case -EINVAL: in validate_unlock_args()
3015 lkb->lkb_id, dlm_iflags_val(lkb), lkb->lkb_exflags, in validate_unlock_args()
3016 args->flags, lkb->lkb_wait_type, in validate_unlock_args()
3017 lkb->lkb_resource->res_name); in validate_unlock_args()
3021 lkb->lkb_id, dlm_iflags_val(lkb), lkb->lkb_exflags, in validate_unlock_args()
3022 args->flags, lkb->lkb_wait_type, in validate_unlock_args()
3023 lkb->lkb_resource->res_name); in validate_unlock_args()
3048 error = -EINPROGRESS; in do_request()
3053 error = -EAGAIN; in do_request()
3054 queue_cast(r, lkb, -EAGAIN); in do_request()
3063 case -EAGAIN: in do_request_effects()
3067 case -EINPROGRESS: in do_request_effects()
3090 if (deadlk && !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) { in do_convert()
3093 queue_cast(r, lkb, -EDEADLK); in do_convert()
3094 error = -EDEADLK; in do_convert()
3099 to NL, and left us on the granted queue. This auto-demotion in do_convert()
3115 error = -EINPROGRESS; in do_convert()
3121 error = -EAGAIN; in do_convert()
3122 queue_cast(r, lkb, -EAGAIN); in do_convert()
3135 case -EAGAIN: in do_convert_effects()
3139 case -EINPROGRESS: in do_convert_effects()
3148 queue_cast(r, lkb, -DLM_EUNLOCK); in do_unlock()
3149 return -DLM_EUNLOCK; in do_unlock()
3158 /* returns: 0 did nothing, -DLM_ECANCEL canceled lock */
3166 queue_cast(r, lkb, -DLM_ECANCEL); in do_cancel()
3167 return -DLM_ECANCEL; in do_cancel()
3293 lkb->lkb_lksb->sb_lkid = lkb->lkb_id; in request_lock()
3308 r = lkb->lkb_resource; in convert_lock()
3330 r = lkb->lkb_resource; in unlock_lock()
3352 r = lkb->lkb_resource; in cancel_lock()
3390 return -EINVAL; in dlm_lock()
3395 error = find_lkb(ls, lksb->sb_lkid, &lkb); in dlm_lock()
3414 if (error == -EINPROGRESS) in dlm_lock()
3421 if (error == -EAGAIN || error == -EDEADLK) in dlm_lock()
3442 return -EINVAL; in dlm_unlock()
3461 if (error == -DLM_EUNLOCK || error == -DLM_ECANCEL) in dlm_unlock()
3463 if (error == -EBUSY && (flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK))) in dlm_unlock()
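For context, a hedged usage sketch of the two exported entry points excerpted above; it assumes a kernel module holding a lockspace handle from dlm_new_lockspace(), and the lksb must stay valid until the completion ast delivers the result in sb_status:

#include <linux/dlm.h>
#include <linux/printk.h>
#include <linux/string.h>

static struct dlm_lksb lksb;            /* assumption: one lock, module-lifetime */

static void lock_ast(void *arg)
{
        struct dlm_lksb *l = arg;

        /* 0 on grant; -EAGAIN for a failed NOQUEUE attempt; -EDEADLK, etc. */
        pr_info("lkid %x status %d\n", l->sb_lkid, l->sb_status);
}

static int try_exclusive(dlm_lockspace_t *ls, const char *name)
{
        return dlm_lock(ls, DLM_LOCK_EX, &lksb, DLM_LKF_NOQUEUE,
                        name, strlen(name), 0, lock_ast, &lksb, NULL);
}

static int release(dlm_lockspace_t *ls)
{
        /* completion is reported through the same ast/lksb */
        return dlm_unlock(ls, lksb.sb_lkid, 0, &lksb, &lksb);
}

As the dlm_unlock() excerpt above shows, a CANCEL or FORCEUNLOCK that races an operation already in progress has its -EBUSY converted to 0 for the caller.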
3506 /* get_buffer gives us a message handle (mh) that we need to in _create_message()
3512 return -ENOBUFS; in _create_message()
3516 ms->m_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR); in _create_message()
3517 ms->m_header.u.h_lockspace = cpu_to_le32(ls->ls_global_id); in _create_message()
3518 ms->m_header.h_nodeid = cpu_to_le32(dlm_our_nodeid()); in _create_message()
3519 ms->m_header.h_length = cpu_to_le16(mb_len); in _create_message()
3520 ms->m_header.h_cmd = DLM_MSG; in _create_message()
3522 ms->m_type = cpu_to_le32(mstype); in _create_message()
3540 mb_len += r->res_length; in create_message()
3547 if (lkb && lkb->lkb_lvbptr && (lkb->lkb_exflags & DLM_LKF_VALBLK)) in create_message()
3548 mb_len += r->res_ls->ls_lvblen; in create_message()
3552 return _create_message(r->res_ls, mb_len, to_nodeid, mstype, in create_message()
3569 ms->m_nodeid = cpu_to_le32(lkb->lkb_nodeid); in send_args()
3570 ms->m_pid = cpu_to_le32(lkb->lkb_ownpid); in send_args()
3571 ms->m_lkid = cpu_to_le32(lkb->lkb_id); in send_args()
3572 ms->m_remid = cpu_to_le32(lkb->lkb_remid); in send_args()
3573 ms->m_exflags = cpu_to_le32(lkb->lkb_exflags); in send_args()
3574 ms->m_sbflags = cpu_to_le32(dlm_sbflags_val(lkb)); in send_args()
3575 ms->m_flags = cpu_to_le32(dlm_dflags_val(lkb)); in send_args()
3576 ms->m_lvbseq = cpu_to_le32(lkb->lkb_lvbseq); in send_args()
3577 ms->m_status = cpu_to_le32(lkb->lkb_status); in send_args()
3578 ms->m_grmode = cpu_to_le32(lkb->lkb_grmode); in send_args()
3579 ms->m_rqmode = cpu_to_le32(lkb->lkb_rqmode); in send_args()
3580 ms->m_hash = cpu_to_le32(r->res_hash); in send_args()
3585 if (lkb->lkb_bastfn) in send_args()
3586 ms->m_asts |= cpu_to_le32(DLM_CB_BAST); in send_args()
3587 if (lkb->lkb_astfn) in send_args()
3588 ms->m_asts |= cpu_to_le32(DLM_CB_CAST); in send_args()
3593 switch (ms->m_type) { in send_args()
3596 memcpy(ms->m_extra, r->res_name, r->res_length); in send_args()
3603 if (!lkb->lkb_lvbptr || !(lkb->lkb_exflags & DLM_LKF_VALBLK)) in send_args()
3605 memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen); in send_args()
3616 to_nodeid = r->res_nodeid; in send_common()
3625 error = send_message(mh, ms, r->res_name, r->res_length); in send_common()
3649 r->res_ls->ls_local_ms.m_type = cpu_to_le32(DLM_MSG_CONVERT_REPLY); in send_convert()
3650 r->res_ls->ls_local_ms.m_result = 0; in send_convert()
3651 __receive_convert_reply(r, lkb, &r->res_ls->ls_local_ms, true); in send_convert()
3677 to_nodeid = lkb->lkb_nodeid; in send_grant()
3685 ms->m_result = 0; in send_grant()
3687 error = send_message(mh, ms, r->res_name, r->res_length); in send_grant()
3698 to_nodeid = lkb->lkb_nodeid; in send_bast()
3706 ms->m_bastmode = cpu_to_le32(mode); in send_bast()
3708 error = send_message(mh, ms, r->res_name, r->res_length); in send_bast()
3728 error = send_message(mh, ms, r->res_name, r->res_length); in send_lookup()
3750 memcpy(ms->m_extra, r->res_name, r->res_length); in send_remove()
3751 ms->m_hash = cpu_to_le32(r->res_hash); in send_remove()
3753 error = send_message(mh, ms, r->res_name, r->res_length); in send_remove()
3765 to_nodeid = lkb->lkb_nodeid; in send_common_reply()
3773 ms->m_result = cpu_to_le32(to_dlm_errno(rv)); in send_common_reply()
3775 error = send_message(mh, ms, r->res_name, r->res_length); in send_common_reply()
3804 struct dlm_rsb *r = &ls->ls_local_rsb; in send_lookup_reply()
3807 int error, nodeid = le32_to_cpu(ms_in->m_header.h_nodeid); in send_lookup_reply()
3813 ms->m_lkid = ms_in->m_lkid; in send_lookup_reply()
3814 ms->m_result = cpu_to_le32(to_dlm_errno(rv)); in send_lookup_reply()
3815 ms->m_nodeid = cpu_to_le32(ret_nodeid); in send_lookup_reply()
3817 error = send_message(mh, ms, ms_in->m_extra, receive_extralen(ms_in)); in send_lookup_reply()
3828 lkb->lkb_exflags = le32_to_cpu(ms->m_exflags); in receive_flags()
3829 dlm_set_sbflags_val(lkb, le32_to_cpu(ms->m_sbflags)); in receive_flags()
3830 dlm_set_dflags_val(lkb, le32_to_cpu(ms->m_flags)); in receive_flags()
3840 dlm_set_sbflags_val(lkb, le32_to_cpu(ms->m_sbflags)); in receive_flags_reply()
3841 dlm_set_dflags_val(lkb, le32_to_cpu(ms->m_flags)); in receive_flags_reply()
3846 return (le16_to_cpu(ms->m_header.h_length) - in receive_extralen()
3855 if (lkb->lkb_exflags & DLM_LKF_VALBLK) { in receive_lvb()
3856 if (!lkb->lkb_lvbptr) in receive_lvb()
3857 lkb->lkb_lvbptr = dlm_allocate_lvb(ls); in receive_lvb()
3858 if (!lkb->lkb_lvbptr) in receive_lvb()
3859 return -ENOMEM; in receive_lvb()
3861 if (len > ls->ls_lvblen) in receive_lvb()
3862 len = ls->ls_lvblen; in receive_lvb()
3863 memcpy(lkb->lkb_lvbptr, ms->m_extra, len); in receive_lvb()
3881 lkb->lkb_nodeid = le32_to_cpu(ms->m_header.h_nodeid); in receive_request_args()
3882 lkb->lkb_ownpid = le32_to_cpu(ms->m_pid); in receive_request_args()
3883 lkb->lkb_remid = le32_to_cpu(ms->m_lkid); in receive_request_args()
3884 lkb->lkb_grmode = DLM_LOCK_IV; in receive_request_args()
3885 lkb->lkb_rqmode = le32_to_cpu(ms->m_rqmode); in receive_request_args()
3887 lkb->lkb_bastfn = (ms->m_asts & cpu_to_le32(DLM_CB_BAST)) ? &fake_bastfn : NULL; in receive_request_args()
3888 lkb->lkb_astfn = (ms->m_asts & cpu_to_le32(DLM_CB_CAST)) ? &fake_astfn : NULL; in receive_request_args()
3890 if (lkb->lkb_exflags & DLM_LKF_VALBLK) { in receive_request_args()
3892 lkb->lkb_lvbptr = dlm_allocate_lvb(ls); in receive_request_args()
3893 if (!lkb->lkb_lvbptr) in receive_request_args()
3894 return -ENOMEM; in receive_request_args()
3903 if (lkb->lkb_status != DLM_LKSTS_GRANTED) in receive_convert_args()
3904 return -EBUSY; in receive_convert_args()
3907 return -ENOMEM; in receive_convert_args()
3909 lkb->lkb_rqmode = le32_to_cpu(ms->m_rqmode); in receive_convert_args()
3910 lkb->lkb_lvbseq = le32_to_cpu(ms->m_lvbseq); in receive_convert_args()
3919 return -ENOMEM; in receive_unlock_args()
3923 /* We fill in the local-lkb fields with the info that send_xxxx_reply()
3928 struct dlm_lkb *lkb = &ls->ls_local_lkb; in setup_local_lkb()
3929 lkb->lkb_nodeid = le32_to_cpu(ms->m_header.h_nodeid); in setup_local_lkb()
3930 lkb->lkb_remid = le32_to_cpu(ms->m_lkid); in setup_local_lkb()
3938 int from = le32_to_cpu(ms->m_header.h_nodeid); in validate_message()
3942 if (ms->m_flags & cpu_to_le32(BIT(DLM_DFL_USER_BIT)) && in validate_message()
3943 !test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) { in validate_message()
3944 log_error(lkb->lkb_resource->res_ls, in validate_message()
3946 error = -EINVAL; in validate_message()
3950 switch (ms->m_type) { in validate_message()
3954 if (!is_master_copy(lkb) || lkb->lkb_nodeid != from) in validate_message()
3955 error = -EINVAL; in validate_message()
3963 if (!is_process_copy(lkb) || lkb->lkb_nodeid != from) in validate_message()
3964 error = -EINVAL; in validate_message()
3969 error = -EINVAL; in validate_message()
3970 else if (lkb->lkb_nodeid != -1 && lkb->lkb_nodeid != from) in validate_message()
3971 error = -EINVAL; in validate_message()
3975 error = -EINVAL; in validate_message()
3980 log_error(lkb->lkb_resource->res_ls, in validate_message()
3982 le32_to_cpu(ms->m_type), from, lkb->lkb_id, in validate_message()
3983 lkb->lkb_remid, dlm_iflags_val(lkb), in validate_message()
3984 lkb->lkb_nodeid); in validate_message()
3995 from_nodeid = le32_to_cpu(ms->m_header.h_nodeid); in receive_request()
4002 set_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags); in receive_request()
4010 for this rsb or not, so if the master sends us a request, we should in receive_request()
4013 node sends us a request for the rsb. */ in receive_request()
4017 error = find_rsb(ls, ms->m_extra, namelen, from_nodeid, in receive_request()
4026 if (r->res_master_nodeid != dlm_our_nodeid()) { in receive_request()
4044 if (error == -EINPROGRESS) in receive_request()
4054 ENOTBLK request failures when the lookup reply designating us in receive_request()
4057 if (error != -ENOTBLK) { in receive_request()
4059 le32_to_cpu(ms->m_lkid), from_nodeid, error); in receive_request()
4063 send_request_reply(&ls->ls_local_rsb, &ls->ls_local_lkb, error); in receive_request()
4073 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); in receive_convert()
4077 if (lkb->lkb_remid != le32_to_cpu(ms->m_lkid)) { in receive_convert()
4079 "remote %d %x", lkb->lkb_id, lkb->lkb_remid, in receive_convert()
4080 (unsigned long long)lkb->lkb_recover_seq, in receive_convert()
4081 le32_to_cpu(ms->m_header.h_nodeid), in receive_convert()
4082 le32_to_cpu(ms->m_lkid)); in receive_convert()
4083 error = -ENOENT; in receive_convert()
4088 r = lkb->lkb_resource; in receive_convert()
4119 send_convert_reply(&ls->ls_local_rsb, &ls->ls_local_lkb, error); in receive_convert()
4129 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); in receive_unlock()
4133 if (lkb->lkb_remid != le32_to_cpu(ms->m_lkid)) { in receive_unlock()
4135 lkb->lkb_id, lkb->lkb_remid, in receive_unlock()
4136 le32_to_cpu(ms->m_header.h_nodeid), in receive_unlock()
4137 le32_to_cpu(ms->m_lkid)); in receive_unlock()
4138 error = -ENOENT; in receive_unlock()
4143 r = lkb->lkb_resource; in receive_unlock()
4171 send_unlock_reply(&ls->ls_local_rsb, &ls->ls_local_lkb, error); in receive_unlock()
4181 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); in receive_cancel()
4187 r = lkb->lkb_resource; in receive_cancel()
4207 send_cancel_reply(&ls->ls_local_rsb, &ls->ls_local_lkb, error); in receive_cancel()
4217 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); in receive_grant()
4221 r = lkb->lkb_resource; in receive_grant()
4248 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); in receive_bast()
4252 r = lkb->lkb_resource; in receive_bast()
4261 queue_bast(r, lkb, le32_to_cpu(ms->m_bastmode)); in receive_bast()
4262 lkb->lkb_highbast = le32_to_cpu(ms->m_bastmode); in receive_bast()
4274 from_nodeid = le32_to_cpu(ms->m_header.h_nodeid); in receive_lookup()
4279 error = dlm_master_lookup(ls, from_nodeid, ms->m_extra, len, 0, in receive_lookup()
4296 from_nodeid = le32_to_cpu(ms->m_header.h_nodeid); in receive_remove()
4306 dir_nodeid = dlm_hash2nodeid(ls, le32_to_cpu(ms->m_hash)); in receive_remove()
4325 memcpy(name, ms->m_extra, len); in receive_remove()
4328 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); in receive_remove()
4337 write_lock_bh(&ls->ls_rsbtbl_lock); in receive_remove()
4340 write_unlock_bh(&ls->ls_rsbtbl_lock); in receive_remove()
4350 if (r->res_master_nodeid != from_nodeid) { in receive_remove()
4353 from_nodeid, r->res_master_nodeid); in receive_remove()
4355 write_unlock_bh(&ls->ls_rsbtbl_lock); in receive_remove()
4362 from_nodeid, r->res_master_nodeid, r->res_first_lkid, in receive_remove()
4364 write_unlock_bh(&ls->ls_rsbtbl_lock); in receive_remove()
4368 if (r->res_master_nodeid != from_nodeid) { in receive_remove()
4370 from_nodeid, r->res_master_nodeid); in receive_remove()
4372 write_unlock_bh(&ls->ls_rsbtbl_lock); in receive_remove()
4376 list_del(&r->res_slow_list); in receive_remove()
4377 rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node, in receive_remove()
4380 write_unlock_bh(&ls->ls_rsbtbl_lock); in receive_remove()
4387 do_purge(ls, le32_to_cpu(ms->m_nodeid), le32_to_cpu(ms->m_pid)); in receive_purge()
4396 int from_nodeid = le32_to_cpu(ms->m_header.h_nodeid); in receive_request_reply()
4398 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); in receive_request_reply()
4402 r = lkb->lkb_resource; in receive_request_reply()
4410 mstype = lkb->lkb_wait_type; in receive_request_reply()
4414 lkb->lkb_id, from_nodeid, le32_to_cpu(ms->m_lkid), in receive_request_reply()
4415 from_dlm_errno(le32_to_cpu(ms->m_result))); in receive_request_reply()
4423 r->res_master_nodeid = from_nodeid; in receive_request_reply()
4424 r->res_nodeid = from_nodeid; in receive_request_reply()
4425 lkb->lkb_nodeid = from_nodeid; in receive_request_reply()
4429 result = from_dlm_errno(le32_to_cpu(ms->m_result)); in receive_request_reply()
4432 case -EAGAIN: in receive_request_reply()
4434 queue_cast(r, lkb, -EAGAIN); in receive_request_reply()
4435 confirm_master(r, -EAGAIN); in receive_request_reply()
4439 case -EINPROGRESS: in receive_request_reply()
4443 lkb->lkb_remid = le32_to_cpu(ms->m_lkid); in receive_request_reply()
4455 case -EBADR: in receive_request_reply()
4456 case -ENOTBLK: in receive_request_reply()
4459 "master %d dir %d first %x %s", lkb->lkb_id, in receive_request_reply()
4460 from_nodeid, result, r->res_master_nodeid, in receive_request_reply()
4461 r->res_dir_nodeid, r->res_first_lkid, r->res_name); in receive_request_reply()
4463 if (r->res_dir_nodeid != dlm_our_nodeid() && in receive_request_reply()
4464 r->res_master_nodeid != dlm_our_nodeid()) { in receive_request_reply()
4465 /* cause _request_lock->set_master->send_lookup */ in receive_request_reply()
4466 r->res_master_nodeid = 0; in receive_request_reply()
4467 r->res_nodeid = -1; in receive_request_reply()
4468 lkb->lkb_nodeid = -1; in receive_request_reply()
4479 if (r->res_master_nodeid == dlm_our_nodeid()) in receive_request_reply()
4486 lkb->lkb_id, result); in receive_request_reply()
4489 if ((result == 0 || result == -EINPROGRESS) && in receive_request_reply()
4490 test_and_clear_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags)) { in receive_request_reply()
4492 lkb->lkb_id, result); in receive_request_reply()
4493 clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags); in receive_request_reply()
4495 } else if ((result == -EINPROGRESS) && in receive_request_reply()
4497 &lkb->lkb_iflags)) { in receive_request_reply()
4498 log_debug(ls, "receive_request_reply %x cancel", lkb->lkb_id); in receive_request_reply()
4499 clear_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags); in receive_request_reply()
4502 clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags); in receive_request_reply()
4503 clear_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags); in receive_request_reply()
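The switch above sorts a request reply into four outcomes: -EAGAIN means a NOQUEUE request was refused and completes with that status; 0 and -EINPROGRESS mean granted or queued on the master, so the master's lock id is recorded in lkb_remid; -EBADR and -ENOTBLK mean the node we asked was not (or could not act as) the master, so the cached master is forgotten and the lookup is redone. A compilable outline of that classification with trimmed-down stand-in types; EBADR and ENOTBLK get their usual Linux values only if the host errno.h lacks them.

#include <errno.h>
#include <stdint.h>

#ifndef EBADR
#define EBADR	53	/* Linux value, absent from some libcs */
#endif
#ifndef ENOTBLK
#define ENOTBLK	15
#endif

struct lkb { uint32_t lkb_remid; int lkb_nodeid; };
struct rsb { int res_master_nodeid; int res_nodeid; };

enum action { COMPLETE_EAGAIN, RECORD_REMID, RETRY_LOOKUP, COMPLETE_OTHER };

/* Classify a request reply the way the listing's switch does. */
static enum action classify_request_reply(int result, struct rsb *r,
					  struct lkb *lkb, uint32_t msg_lkid)
{
	switch (result) {
	case -EAGAIN:		/* NOQUEUE request refused */
		return COMPLETE_EAGAIN;
	case -EINPROGRESS:	/* queued on the master */
	case 0:			/* granted immediately */
		lkb->lkb_remid = msg_lkid;
		return RECORD_REMID;
	case -EBADR:
	case -ENOTBLK:		/* wrong or unusable master: forget it and
				   redo the lookup (when the directory is on
				   another node, as in the listing) */
		r->res_master_nodeid = 0;
		r->res_nodeid = -1;
		lkb->lkb_nodeid = -1;
		return RETRY_LOOKUP;
	default:
		return COMPLETE_OTHER;
	}
}

int main(void)
{
	struct rsb r = { .res_master_nodeid = 2, .res_nodeid = 2 };
	struct lkb lkb = { .lkb_remid = 0, .lkb_nodeid = 2 };
	return classify_request_reply(-EBADR, &r, &lkb, 0) == RETRY_LOOKUP ? 0 : 1;
}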
4516 switch (from_dlm_errno(le32_to_cpu(ms->m_result))) { in __receive_convert_reply()
4517 case -EAGAIN: in __receive_convert_reply()
4519 queue_cast(r, lkb, -EAGAIN); in __receive_convert_reply()
4522 case -EDEADLK: in __receive_convert_reply()
4525 queue_cast(r, lkb, -EDEADLK); in __receive_convert_reply()
4528 case -EINPROGRESS: in __receive_convert_reply()
4547 log_error(r->res_ls, "receive_convert_reply %x remote %d %x %d", in __receive_convert_reply()
4548 lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid), in __receive_convert_reply()
4549 le32_to_cpu(ms->m_lkid), in __receive_convert_reply()
4550 from_dlm_errno(le32_to_cpu(ms->m_result))); in __receive_convert_reply()
4559 struct dlm_rsb *r = lkb->lkb_resource; in _receive_convert_reply()
4585 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); in receive_convert_reply()
4597 struct dlm_rsb *r = lkb->lkb_resource; in _receive_unlock_reply()
4613 switch (from_dlm_errno(le32_to_cpu(ms->m_result))) { in _receive_unlock_reply()
4614 case -DLM_EUNLOCK: in _receive_unlock_reply()
4617 queue_cast(r, lkb, -DLM_EUNLOCK); in _receive_unlock_reply()
4619 case -ENOENT: in _receive_unlock_reply()
4622 log_error(r->res_ls, "receive_unlock_reply %x error %d", in _receive_unlock_reply()
4623 lkb->lkb_id, from_dlm_errno(le32_to_cpu(ms->m_result))); in _receive_unlock_reply()
4636 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); in receive_unlock_reply()
4648 struct dlm_rsb *r = lkb->lkb_resource; in _receive_cancel_reply()
4664 switch (from_dlm_errno(le32_to_cpu(ms->m_result))) { in _receive_cancel_reply()
4665 case -DLM_ECANCEL: in _receive_cancel_reply()
4668 queue_cast(r, lkb, -DLM_ECANCEL); in _receive_cancel_reply()
4673 log_error(r->res_ls, "receive_cancel_reply %x error %d", in _receive_cancel_reply()
4674 lkb->lkb_id, in _receive_cancel_reply()
4675 from_dlm_errno(le32_to_cpu(ms->m_result))); in _receive_cancel_reply()
4688 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); in receive_cancel_reply()
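Each reply handler above decodes m_result with from_dlm_errno(le32_to_cpu(...)) before switching on it. Two conversions are stacked there: the little-endian wire field becomes host byte order, and the errno value is translated from DLM's fixed on-wire numbering, which exists because some errno values differ between architectures in a mixed cluster. The sketch below shows both steps with an invented three-entry table; the real mapping lives in the to_dlm_errno()/from_dlm_errno() helpers and is larger.

#include <stdint.h>
#include <stdio.h>

/* Toy wire-to-host errno table; entries are illustrative only. */
static const int wire_to_errno[] = { 0, -11 /* EAGAIN */, -2 /* ENOENT */ };

/* Decode a little-endian 32-bit wire field into host order. */
static uint32_t sketch_le32_to_cpu(const uint8_t b[4])
{
	return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
	       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

static int sketch_from_dlm_errno(uint32_t wire)
{
	if (wire < sizeof(wire_to_errno) / sizeof(wire_to_errno[0]))
		return wire_to_errno[wire];
	return -(int)wire;	/* unknown code: pass through negated */
}

int main(void)
{
	const uint8_t m_result[4] = { 2, 0, 0, 0 };	/* wire code 2 */
	printf("%d\n", sketch_from_dlm_errno(sketch_le32_to_cpu(m_result)));
	return 0;
}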
4705 error = find_lkb(ls, le32_to_cpu(ms->m_lkid), &lkb); in receive_lookup_reply()
4708 le32_to_cpu(ms->m_lkid)); in receive_lookup_reply()
4712 /* ms->m_result is the value returned by dlm_master_lookup on dir node in receive_lookup_reply()
4713 FIXME: will a non-zero error ever be returned? */ in receive_lookup_reply()
4715 r = lkb->lkb_resource; in receive_lookup_reply()
4723 ret_nodeid = le32_to_cpu(ms->m_nodeid); in receive_lookup_reply()
4731 if (r->res_master_nodeid && (r->res_master_nodeid != ret_nodeid)) { in receive_lookup_reply()
4735 lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid), in receive_lookup_reply()
4736 ret_nodeid, r->res_master_nodeid, r->res_dir_nodeid, in receive_lookup_reply()
4737 dlm_our_nodeid(), r->res_first_lkid, r->res_name); in receive_lookup_reply()
4741 r->res_master_nodeid = ret_nodeid; in receive_lookup_reply()
4742 r->res_nodeid = 0; in receive_lookup_reply()
4744 r->res_first_lkid = 0; in receive_lookup_reply()
4745 } else if (ret_nodeid == -1) { in receive_lookup_reply()
4748 lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid)); in receive_lookup_reply()
4749 r->res_master_nodeid = 0; in receive_lookup_reply()
4750 r->res_nodeid = -1; in receive_lookup_reply()
4751 lkb->lkb_nodeid = -1; in receive_lookup_reply()
4754 r->res_master_nodeid = ret_nodeid; in receive_lookup_reply()
4755 r->res_nodeid = ret_nodeid; in receive_lookup_reply()
4760 lkb->lkb_id, dlm_iflags_val(lkb)); in receive_lookup_reply()
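receive_lookup_reply folds the directory's answer into the rsb in three ways: our own node id means the resource is mastered here (res_nodeid 0), -1 means the directory had no entry (typically a recovery race) and the lookup must be retried, and any other id is recorded as the remote master. The listing also logs a warning when a different master was already recorded. A condensed rendering of that mapping, with stand-in types:

#include <stdio.h>

struct rsb { int res_master_nodeid; int res_nodeid; int res_first_lkid; };

/* Apply a directory lookup answer, following receive_lookup_reply(). */
static void apply_lookup_answer(struct rsb *r, int ret_nodeid, int our_nodeid)
{
	if (ret_nodeid == our_nodeid) {
		r->res_master_nodeid = ret_nodeid;
		r->res_nodeid = 0;		/* 0 means mastered locally */
		r->res_first_lkid = 0;
	} else if (ret_nodeid == -1) {
		/* directory has no entry yet: clear and retry the lookup */
		r->res_master_nodeid = 0;
		r->res_nodeid = -1;
	} else {
		r->res_master_nodeid = ret_nodeid;
		r->res_nodeid = ret_nodeid;	/* remote master */
	}
}

int main(void)
{
	struct rsb r = { 0, -1, 7 };
	apply_lookup_answer(&r, 4, 1);
	printf("master %d nodeid %d\n", r.res_master_nodeid, r.res_nodeid);
	return 0;
}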
4782 if (WARN_ON_ONCE(!dlm_is_member(ls, le32_to_cpu(ms->m_header.h_nodeid)))) { in _receive_message()
4783 log_limit(ls, "receive %d from non-member %d %x %x %d", in _receive_message()
4784 le32_to_cpu(ms->m_type), in _receive_message()
4785 le32_to_cpu(ms->m_header.h_nodeid), in _receive_message()
4786 le32_to_cpu(ms->m_lkid), le32_to_cpu(ms->m_remid), in _receive_message()
4787 from_dlm_errno(le32_to_cpu(ms->m_result))); in _receive_message()
4791 switch (ms->m_type) { in _receive_message()
4866 le32_to_cpu(ms->m_type)); in _receive_message()
4880 if (error == -ENOENT && noent) { in _receive_message()
4882 le32_to_cpu(ms->m_type), le32_to_cpu(ms->m_remid), in _receive_message()
4883 le32_to_cpu(ms->m_header.h_nodeid), in _receive_message()
4884 le32_to_cpu(ms->m_lkid), saved_seq); in _receive_message()
4885 } else if (error == -ENOENT) { in _receive_message()
4887 le32_to_cpu(ms->m_type), le32_to_cpu(ms->m_remid), in _receive_message()
4888 le32_to_cpu(ms->m_header.h_nodeid), in _receive_message()
4889 le32_to_cpu(ms->m_lkid), saved_seq); in _receive_message()
4891 if (ms->m_type == cpu_to_le32(DLM_MSG_CONVERT)) in _receive_message()
4892 dlm_dump_rsb_hash(ls, le32_to_cpu(ms->m_hash)); in _receive_message()
4895 if (error == -EINVAL) { in _receive_message()
4898 le32_to_cpu(ms->m_type), in _receive_message()
4899 le32_to_cpu(ms->m_header.h_nodeid), in _receive_message()
4900 le32_to_cpu(ms->m_lkid), le32_to_cpu(ms->m_remid), in _receive_message()
4917 read_lock_bh(&ls->ls_requestqueue_lock); in dlm_receive_message()
4918 if (test_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags)) { in dlm_receive_message()
4920 other nodes may still be sending us messages from the in dlm_receive_message()
4922 if (WARN_ON_ONCE(!ls->ls_generation)) { in dlm_receive_message()
4923 read_unlock_bh(&ls->ls_requestqueue_lock); in dlm_receive_message()
4925 le32_to_cpu(ms->m_type), nodeid); in dlm_receive_message()
4929 read_unlock_bh(&ls->ls_requestqueue_lock); in dlm_receive_message()
4930 write_lock_bh(&ls->ls_requestqueue_lock); in dlm_receive_message()
4932 if (!test_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags)) { in dlm_receive_message()
4933 write_unlock_bh(&ls->ls_requestqueue_lock); in dlm_receive_message()
4938 write_unlock_bh(&ls->ls_requestqueue_lock); in dlm_receive_message()
4941 read_unlock_bh(&ls->ls_requestqueue_lock); in dlm_receive_message()
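dlm_receive_message is a compact example of the drop-and-upgrade locking idiom: the common path holds ls_requestqueue_lock only for read; when the blocked flag is seen, the read lock is released, the write lock is taken, and the flag is tested again, because recovery may have cleared it in the unlocked window, in which case the reader simply retries. A self-contained pthread version of that shape (names are placeholders):

#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t rq_lock = PTHREAD_RWLOCK_INITIALIZER;
static bool recv_blocked;	/* stands in for LSFL_RECV_MSG_BLOCKED */

static void queue_for_later(int msg) { (void)msg; /* requestqueue add */ }
static void process_now(int msg)     { (void)msg; /* normal delivery */ }

static void receive_message_sketch(int msg)
{
try_again:
	pthread_rwlock_rdlock(&rq_lock);
	if (recv_blocked) {
		/* upgrade: drop the read lock, take the write lock, and
		 * re-test, since the flag may have changed in between */
		pthread_rwlock_unlock(&rq_lock);
		pthread_rwlock_wrlock(&rq_lock);
		if (!recv_blocked) {
			pthread_rwlock_unlock(&rq_lock);
			goto try_again;	/* raced with unblock: retry */
		}
		queue_for_later(msg);
		pthread_rwlock_unlock(&rq_lock);
		return;
	}
	process_now(msg);	/* delivered under the read lock */
	pthread_rwlock_unlock(&rq_lock);
}

int main(void)
{
	recv_blocked = true;
	receive_message_sketch(1);	/* queued */
	recv_blocked = false;
	receive_message_sketch(2);	/* processed inline */
	return 0;
}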
4961 const struct dlm_header *hd = &p->header; in dlm_receive_buffer()
4965 switch (hd->h_cmd) { in dlm_receive_buffer()
4967 type = le32_to_cpu(p->message.m_type); in dlm_receive_buffer()
4970 type = le32_to_cpu(p->rcom.rc_type); in dlm_receive_buffer()
4973 log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid); in dlm_receive_buffer()
4977 if (le32_to_cpu(hd->h_nodeid) != nodeid) { in dlm_receive_buffer()
4979 le32_to_cpu(hd->h_nodeid), nodeid, in dlm_receive_buffer()
4980 le32_to_cpu(hd->u.h_lockspace)); in dlm_receive_buffer()
4984 ls = dlm_find_lockspace_global(le32_to_cpu(hd->u.h_lockspace)); in dlm_receive_buffer()
4989 le32_to_cpu(hd->u.h_lockspace), nodeid, in dlm_receive_buffer()
4990 hd->h_cmd, type); in dlm_receive_buffer()
4993 if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS) in dlm_receive_buffer()
4994 dlm_send_ls_not_ready(nodeid, &p->rcom); in dlm_receive_buffer()
5001 read_lock_bh(&ls->ls_recv_active); in dlm_receive_buffer()
5002 if (hd->h_cmd == DLM_MSG) in dlm_receive_buffer()
5003 dlm_receive_message(ls, &p->message, nodeid); in dlm_receive_buffer()
5004 else if (hd->h_cmd == DLM_RCOM) in dlm_receive_buffer()
5005 dlm_receive_rcom(ls, &p->rcom, nodeid); in dlm_receive_buffer()
5008 hd->h_cmd, nodeid, le32_to_cpu(hd->u.h_lockspace)); in dlm_receive_buffer()
5009 read_unlock_bh(&ls->ls_recv_active); in dlm_receive_buffer()
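Before anything is dispatched, dlm_receive_buffer performs three checks visible above: h_cmd must be DLM_MSG or DLM_RCOM, the h_nodeid embedded in the header must match the connection the packet arrived on, and the lockspace id must resolve; the one forgiving case is an RCOM status probe for an unknown lockspace, which gets a "not ready" reply so the sender's recovery can poll. A sketch of the header validation alone, with placeholder command values:

#include <stdint.h>
#include <stdio.h>

enum { DLM_MSG = 1, DLM_RCOM = 2 };	/* placeholder command codes */

struct dlm_header { uint8_t h_cmd; uint32_t h_nodeid; uint32_t h_lockspace; };

/* Return 0 if the header passes the sanity checks done before dispatch. */
static int validate_header(const struct dlm_header *hd, int conn_nodeid)
{
	if (hd->h_cmd != DLM_MSG && hd->h_cmd != DLM_RCOM) {
		fprintf(stderr, "invalid h_cmd %d from %d\n",
			hd->h_cmd, conn_nodeid);
		return -1;
	}
	if (hd->h_nodeid != (uint32_t)conn_nodeid) {
		/* the sender id inside the packet must match the
		 * connection it arrived on */
		fprintf(stderr, "bad h_nodeid %u from %d\n",
			(unsigned)hd->h_nodeid, conn_nodeid);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct dlm_header hd = { .h_cmd = DLM_MSG, .h_nodeid = 4 };
	return validate_header(&hd, 4);
}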
5019 lkb->lkb_id); in recover_convert_waiter()
5022 * tell us when it's granted. We no longer need a reply, so in recover_convert_waiter()
5027 ms_local->m_type = cpu_to_le32(DLM_MSG_CONVERT_REPLY); in recover_convert_waiter()
5028 ms_local->m_result = cpu_to_le32(to_dlm_errno(-EINPROGRESS)); in recover_convert_waiter()
5029 ms_local->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid); in recover_convert_waiter()
5033 } else if (lkb->lkb_rqmode >= lkb->lkb_grmode) { in recover_convert_waiter()
5034 set_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags); in recover_convert_waiter()
5037 /* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down in recover_convert_waiter()
5050 if (dlm_is_removed(ls, lkb->lkb_wait_nodeid)) in waiter_needs_recovery()
5058 dead node. Requests and up-conversions we flag to be resent after
5059 recovery. Down-conversions can just be completed with a fake reply like
5073 list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) { in dlm_recover_waiters_pre()
5075 dir_nodeid = dlm_dir_nodeid(lkb->lkb_resource); in dlm_recover_waiters_pre()
5080 if (lkb->lkb_wait_type != DLM_MSG_UNLOCK) { in dlm_recover_waiters_pre()
5083 lkb->lkb_id, in dlm_recover_waiters_pre()
5084 lkb->lkb_remid, in dlm_recover_waiters_pre()
5085 lkb->lkb_wait_type, in dlm_recover_waiters_pre()
5086 lkb->lkb_resource->res_nodeid, in dlm_recover_waiters_pre()
5087 lkb->lkb_nodeid, in dlm_recover_waiters_pre()
5088 lkb->lkb_wait_nodeid, in dlm_recover_waiters_pre()
5095 if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) { in dlm_recover_waiters_pre()
5096 set_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags); in dlm_recover_waiters_pre()
5103 wait_type = lkb->lkb_wait_type; in dlm_recover_waiters_pre()
5104 local_unlock_result = -DLM_EUNLOCK; in dlm_recover_waiters_pre()
5105 local_cancel_result = -DLM_ECANCEL; in dlm_recover_waiters_pre()
5115 if (lkb->lkb_grmode == DLM_LOCK_IV) in dlm_recover_waiters_pre()
5120 if (lkb->lkb_grmode == DLM_LOCK_IV) in dlm_recover_waiters_pre()
5121 local_unlock_result = -ENOENT; in dlm_recover_waiters_pre()
5125 lkb->lkb_id, dlm_iflags_val(lkb), wait_type, in dlm_recover_waiters_pre()
5132 set_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags); in dlm_recover_waiters_pre()
5142 ms_local->m_type = cpu_to_le32(DLM_MSG_UNLOCK_REPLY); in dlm_recover_waiters_pre()
5143 ms_local->m_result = cpu_to_le32(to_dlm_errno(local_unlock_result)); in dlm_recover_waiters_pre()
5144 ms_local->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid); in dlm_recover_waiters_pre()
5152 ms_local->m_type = cpu_to_le32(DLM_MSG_CANCEL_REPLY); in dlm_recover_waiters_pre()
5153 ms_local->m_result = cpu_to_le32(to_dlm_errno(local_cancel_result)); in dlm_recover_waiters_pre()
5154 ms_local->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid); in dlm_recover_waiters_pre()
5161 lkb->lkb_wait_type, wait_type); in dlm_recover_waiters_pre()
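The m_type/m_result/m_header.h_nodeid assignments above are the fake-reply trick: when recovery determines that a waiting unlock or cancel can be resolved locally, it builds the reply the dead master would have sent, on the stack, and pushes it through the ordinary reply path, so no special-case completion code is needed. The sketch below builds such a reply; the real code additionally stores the fields in little-endian wire order (cpu_to_le32) because the same handlers also parse network messages.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

enum { MSG_UNLOCK_REPLY = 1 };	/* placeholder for DLM_MSG_UNLOCK_REPLY */

struct msg { uint32_t m_type; int32_t m_result; uint32_t m_nodeid; };

static void unlock_reply_path(const struct msg *ms)
{
	/* the normal completion path would run here */
	printf("unlock reply from %u: %d\n", (unsigned)ms->m_nodeid,
	       (int)ms->m_result);
}

/* Synthesize the reply a failed master can no longer send and feed it
 * to the usual handler (the shape of dlm_recover_waiters_pre). */
static void fake_unlock_reply(int master_nodeid, int result)
{
	struct msg ms_local = {
		.m_type   = MSG_UNLOCK_REPLY,
		.m_result = result,
		.m_nodeid = (uint32_t)master_nodeid,
	};
	unlock_reply_path(&ms_local);
}

int main(void)
{
	fake_unlock_reply(3, -ENOENT);	/* never-granted lock: already gone */
	return 0;
}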
5172 spin_lock_bh(&ls->ls_waiters_lock); in find_resend_waiter()
5173 list_for_each_entry(iter, &ls->ls_waiters, lkb_wait_reply) { in find_resend_waiter()
5174 if (test_bit(DLM_IFL_RESEND_BIT, &iter->lkb_iflags)) { in find_resend_waiter()
5180 spin_unlock_bh(&ls->ls_waiters_lock); in find_resend_waiter()
5207 * force-unlock or cancel, either from before recovery began, or after recovery
5216 int error = 0, mstype, err, oc, ou; in dlm_recover_waiters_post() local
5221 error = -EINTR; in dlm_recover_waiters_post()
5234 r = lkb->lkb_resource; in dlm_recover_waiters_post()
5243 mstype = lkb->lkb_wait_type; in dlm_recover_waiters_post()
5244 oc = test_and_clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT, in dlm_recover_waiters_post()
5245 &lkb->lkb_iflags); in dlm_recover_waiters_post()
5247 &lkb->lkb_iflags); in dlm_recover_waiters_post()
5252 "overlap %d %d", lkb->lkb_id, lkb->lkb_remid, mstype, in dlm_recover_waiters_post()
5253 r->res_nodeid, lkb->lkb_nodeid, lkb->lkb_wait_nodeid, in dlm_recover_waiters_post()
5254 dlm_dir_nodeid(r), oc, ou); in dlm_recover_waiters_post()
5257 * No reply to the pre-recovery operation will now be received, in dlm_recover_waiters_post()
5262 clear_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags); in dlm_recover_waiters_post()
5265 lkb->lkb_wait_type = 0; in dlm_recover_waiters_post()
5274 while (lkb->lkb_wait_count) { in dlm_recover_waiters_post()
5275 lkb->lkb_wait_count--; in dlm_recover_waiters_post()
5280 spin_lock_bh(&ls->ls_waiters_lock); in dlm_recover_waiters_post()
5281 list_del_init(&lkb->lkb_wait_reply); in dlm_recover_waiters_post()
5282 spin_unlock_bh(&ls->ls_waiters_lock); in dlm_recover_waiters_post()
5290 if (oc || ou) { in dlm_recover_waiters_post()
5295 queue_cast(r, lkb, ou ? -DLM_EUNLOCK : in dlm_recover_waiters_post()
5296 -DLM_ECANCEL); in dlm_recover_waiters_post()
5300 if (oc) { in dlm_recover_waiters_post()
5301 queue_cast(r, lkb, -DLM_ECANCEL); in dlm_recover_waiters_post()
5303 lkb->lkb_exflags |= DLM_LKF_FORCEUNLOCK; in dlm_recover_waiters_post()
5315 if (r->res_nodeid != -1 && is_master(r)) in dlm_recover_waiters_post()
5329 lkb->lkb_id, mstype, r->res_nodeid, in dlm_recover_waiters_post()
5330 dlm_dir_nodeid(r), oc, ou); in dlm_recover_waiters_post()
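After stripping the waiter state (clearing the resend bit, zeroing wait_type, dropping each wait_count reference, and unlinking from ls_waiters), dlm_recover_waiters_post acts on the overlap flags it sampled into oc and ou: an overlapped request or lookup completes as an unlock or cancel (unlock winning when both are set), an overlapped convert is either cancelled or force-unlocked, and with no overlap the original operation is simply resent to the new master. That decision table, reduced to a function over assumed enum stand-ins:

#include <stdbool.h>

enum msg { MSG_LOOKUP, MSG_REQUEST, MSG_CONVERT };
enum act { CAST_EUNLOCK, CAST_ECANCEL, FORCE_UNLOCK, RESEND, UNEXPECTED };

/* Decide what recovery does with a stalled waiter, given the sampled
 * overlap-cancel (oc) and overlap-unlock (ou) flags. */
static enum act resolve_waiter(enum msg mstype, bool oc, bool ou)
{
	if (oc || ou) {
		switch (mstype) {
		case MSG_LOOKUP:
		case MSG_REQUEST:
			/* never granted: unlock beats cancel */
			return ou ? CAST_EUNLOCK : CAST_ECANCEL;
		case MSG_CONVERT:
			/* cancel the convert, or force-unlock the lock */
			return oc ? CAST_ECANCEL : FORCE_UNLOCK;
		}
		return UNEXPECTED;
	}
	return RESEND;	/* no overlap: replay the original operation */
}

int main(void)
{
	return resolve_waiter(MSG_CONVERT, false, true) == FORCE_UNLOCK ? 0 : 1;
}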
5352 if (lkb->lkb_recover_seq == ls->ls_recover_seq) in purge_mstcpy_list()
5365 struct dlm_ls *ls = r->res_ls; in dlm_purge_mstcpy_locks()
5367 purge_mstcpy_list(ls, r, &r->res_grantqueue); in dlm_purge_mstcpy_locks()
5368 purge_mstcpy_list(ls, r, &r->res_convertqueue); in dlm_purge_mstcpy_locks()
5369 purge_mstcpy_list(ls, r, &r->res_waitqueue); in dlm_purge_mstcpy_locks()
5382 if ((lkb->lkb_nodeid == nodeid_gone) || in purge_dead_list()
5383 dlm_is_removed(ls, lkb->lkb_nodeid)) { in purge_dead_list()
5387 if ((lkb->lkb_exflags & DLM_LKF_VALBLK) && in purge_dead_list()
5388 (lkb->lkb_grmode >= DLM_LOCK_PW)) { in purge_dead_list()
5418 list_for_each_entry(memb, &ls->ls_nodes_gone, list) { in dlm_recover_purge()
5420 nodeid_gone = memb->nodeid; in dlm_recover_purge()
5428 if (r->res_nodeid != -1 && is_master(r)) { in dlm_recover_purge()
5429 purge_dead_list(ls, r, &r->res_grantqueue, in dlm_recover_purge()
5431 purge_dead_list(ls, r, &r->res_convertqueue, in dlm_recover_purge()
5433 purge_dead_list(ls, r, &r->res_waitqueue, in dlm_recover_purge()
5450 read_lock_bh(&ls->ls_rsbtbl_lock); in find_grant_rsb()
5451 list_for_each_entry(r, &ls->ls_slow_active, res_slow_list) { in find_grant_rsb()
5459 read_unlock_bh(&ls->ls_rsbtbl_lock); in find_grant_rsb()
5462 read_unlock_bh(&ls->ls_rsbtbl_lock); in find_grant_rsb()
5474 * Simplest would be to go through each master rsb and check for non-empty
5519 if (lkb->lkb_nodeid == nodeid && lkb->lkb_remid == remid) in search_remid_list()
5530 lkb = search_remid_list(&r->res_grantqueue, nodeid, remid); in search_remid()
5533 lkb = search_remid_list(&r->res_convertqueue, nodeid, remid); in search_remid()
5536 lkb = search_remid_list(&r->res_waitqueue, nodeid, remid); in search_remid()
5546 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; in receive_rcom_lock_args()
5548 lkb->lkb_nodeid = le32_to_cpu(rc->rc_header.h_nodeid); in receive_rcom_lock_args()
5549 lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid); in receive_rcom_lock_args()
5550 lkb->lkb_remid = le32_to_cpu(rl->rl_lkid); in receive_rcom_lock_args()
5551 lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags); in receive_rcom_lock_args()
5552 dlm_set_dflags_val(lkb, le32_to_cpu(rl->rl_flags)); in receive_rcom_lock_args()
5553 set_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags); in receive_rcom_lock_args()
5554 lkb->lkb_lvbseq = le32_to_cpu(rl->rl_lvbseq); in receive_rcom_lock_args()
5555 lkb->lkb_rqmode = rl->rl_rqmode; in receive_rcom_lock_args()
5556 lkb->lkb_grmode = rl->rl_grmode; in receive_rcom_lock_args()
5559 lkb->lkb_bastfn = (rl->rl_asts & DLM_CB_BAST) ? &fake_bastfn : NULL; in receive_rcom_lock_args()
5560 lkb->lkb_astfn = (rl->rl_asts & DLM_CB_CAST) ? &fake_astfn : NULL; in receive_rcom_lock_args()
5562 if (lkb->lkb_exflags & DLM_LKF_VALBLK) { in receive_rcom_lock_args()
5563 int lvblen = le16_to_cpu(rc->rc_header.h_length) - in receive_rcom_lock_args()
5564 sizeof(struct dlm_rcom) - sizeof(struct rcom_lock); in receive_rcom_lock_args()
5565 if (lvblen > ls->ls_lvblen) in receive_rcom_lock_args()
5566 return -EINVAL; in receive_rcom_lock_args()
5567 lkb->lkb_lvbptr = dlm_allocate_lvb(ls); in receive_rcom_lock_args()
5568 if (!lkb->lkb_lvbptr) in receive_rcom_lock_args()
5569 return -ENOMEM; in receive_rcom_lock_args()
5570 memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen); in receive_rcom_lock_args()
5577 if (rl->rl_status == DLM_LKSTS_CONVERT && middle_conversion(lkb)) { in receive_rcom_lock_args()
5580 __func__, lkb->lkb_id, lkb->lkb_grmode, in receive_rcom_lock_args()
5581 lkb->lkb_rqmode, lkb->lkb_nodeid, lkb->lkb_remid); in receive_rcom_lock_args()
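The VALBLK branch above shows how a variable-length payload is sized: the LVB length is whatever remains of h_length after the fixed dlm_rcom and rcom_lock structs, and it is checked against the lockspace's configured lvblen before any allocation, so a length derived from the wire is never trusted directly. A trimmed-down sketch of that pattern (the runt-packet check is an addition for the standalone demo; in the kernel the framing is validated earlier):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Size the trailing LVB from the packet length minus the fixed structs,
 * refusing anything larger than this lockspace allows. */
static int copy_lvb(const char *payload, size_t h_length, size_t fixed_len,
		    size_t ls_lvblen, char **lvb_out)
{
	if (h_length < fixed_len)
		return -EINVAL;			/* runt packet */
	size_t lvblen = h_length - fixed_len;
	if (lvblen > ls_lvblen)
		return -EINVAL;			/* exceeds configured lvblen */
	char *lvb = calloc(1, ls_lvblen);	/* like dlm_allocate_lvb() */
	if (!lvb)
		return -ENOMEM;
	memcpy(lvb, payload, lvblen);
	*lvb_out = lvb;
	return 0;
}

int main(void)
{
	char payload[8] = "lvb data";
	char *lvb = NULL;
	int rv = copy_lvb(payload, 100 + sizeof(payload), 100, 32, &lvb);
	if (!rv)
		printf("copied %zu-byte lvb\n", sizeof(payload));
	free(lvb);
	return rv ? 1 : 0;
}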
5598 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; in dlm_recover_master_copy()
5602 int from_nodeid = le32_to_cpu(rc->rc_header.h_nodeid); in dlm_recover_master_copy()
5606 *rl_remid = rl->rl_remid; in dlm_recover_master_copy()
5608 if (rl->rl_parent_lkid) { in dlm_recover_master_copy()
5609 error = -EOPNOTSUPP; in dlm_recover_master_copy()
5613 remid = le32_to_cpu(rl->rl_lkid); in dlm_recover_master_copy()
5617 recovery of locks on another node, so one node can send us MSTCPY in dlm_recover_master_copy()
5623 error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen), in dlm_recover_master_copy()
5633 error = -EBADR; in dlm_recover_master_copy()
5639 error = -EEXIST; in dlm_recover_master_copy()
5654 add_lkb(r, lkb, rl->rl_status); in dlm_recover_master_copy()
5655 ls->ls_recover_locks_in++; in dlm_recover_master_copy()
5657 if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue)) in dlm_recover_master_copy()
5662 saving in its process-copy lkb */ in dlm_recover_master_copy()
5663 *rl_remid = cpu_to_le32(lkb->lkb_id); in dlm_recover_master_copy()
5665 lkb->lkb_recover_seq = ls->ls_recover_seq; in dlm_recover_master_copy()
5671 if (error && error != -EEXIST) in dlm_recover_master_copy()
5682 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; in dlm_recover_process_copy()
5688 lkid = le32_to_cpu(rl->rl_lkid); in dlm_recover_process_copy()
5689 remid = le32_to_cpu(rl->rl_remid); in dlm_recover_process_copy()
5690 result = le32_to_cpu(rl->rl_result); in dlm_recover_process_copy()
5695 lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid, in dlm_recover_process_copy()
5700 r = lkb->lkb_resource; in dlm_recover_process_copy()
5706 lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid, in dlm_recover_process_copy()
5712 return -EINVAL; in dlm_recover_process_copy()
5716 case -EBADR: in dlm_recover_process_copy()
5722 lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid, in dlm_recover_process_copy()
5727 case -EEXIST: in dlm_recover_process_copy()
5729 lkb->lkb_remid = remid; in dlm_recover_process_copy()
5733 lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid, in dlm_recover_process_copy()
5767 ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS); in dlm_user_request()
5768 if (!ua->lksb.sb_lvbptr) { in dlm_user_request()
5770 error = -ENOMEM; in dlm_user_request()
5774 error = set_lock_args(mode, &ua->lksb, flags, namelen, fake_astfn, ua, in dlm_user_request()
5777 kfree(ua->lksb.sb_lvbptr); in dlm_user_request()
5778 ua->lksb.sb_lvbptr = NULL; in dlm_user_request()
5786 set_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags); in dlm_user_request()
5792 case -EINPROGRESS: in dlm_user_request()
5795 case -EAGAIN: in dlm_user_request()
5802 /* add this new lkb to the per-process list of locks */ in dlm_user_request()
5803 spin_lock_bh(&ua->proc->locks_spin); in dlm_user_request()
5805 list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks); in dlm_user_request()
5806 spin_unlock_bh(&ua->proc->locks_spin); in dlm_user_request()
5836 ua = lkb->lkb_ua; in dlm_user_convert()
5838 if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) { in dlm_user_convert()
5839 ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS); in dlm_user_convert()
5840 if (!ua->lksb.sb_lvbptr) { in dlm_user_convert()
5841 error = -ENOMEM; in dlm_user_convert()
5845 if (lvb_in && ua->lksb.sb_lvbptr) in dlm_user_convert()
5846 memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN); in dlm_user_convert()
5848 ua->xid = ua_tmp->xid; in dlm_user_convert()
5849 ua->castparam = ua_tmp->castparam; in dlm_user_convert()
5850 ua->castaddr = ua_tmp->castaddr; in dlm_user_convert()
5851 ua->bastparam = ua_tmp->bastparam; in dlm_user_convert()
5852 ua->bastaddr = ua_tmp->bastaddr; in dlm_user_convert()
5853 ua->user_lksb = ua_tmp->user_lksb; in dlm_user_convert()
5855 error = set_lock_args(mode, &ua->lksb, flags, 0, fake_astfn, ua, in dlm_user_convert()
5862 if (error == -EINPROGRESS || error == -EAGAIN || error == -EDEADLK) in dlm_user_convert()
5888 spin_lock_bh(&ls->ls_orphans_lock); in dlm_user_adopt_orphan()
5889 list_for_each_entry(iter, &ls->ls_orphans, lkb_ownqueue) { in dlm_user_adopt_orphan()
5890 if (iter->lkb_resource->res_length != namelen) in dlm_user_adopt_orphan()
5892 if (memcmp(iter->lkb_resource->res_name, name, namelen)) in dlm_user_adopt_orphan()
5894 if (iter->lkb_grmode != mode) { in dlm_user_adopt_orphan()
5900 list_del_init(&iter->lkb_ownqueue); in dlm_user_adopt_orphan()
5901 clear_bit(DLM_DFL_ORPHAN_BIT, &iter->lkb_dflags); in dlm_user_adopt_orphan()
5902 *lkid = iter->lkb_id; in dlm_user_adopt_orphan()
5905 spin_unlock_bh(&ls->ls_orphans_lock); in dlm_user_adopt_orphan()
5908 rv = -EAGAIN; in dlm_user_adopt_orphan()
5913 rv = -ENOENT; in dlm_user_adopt_orphan()
5917 lkb->lkb_exflags = flags; in dlm_user_adopt_orphan()
5918 lkb->lkb_ownpid = (int) current->pid; in dlm_user_adopt_orphan()
5920 ua = lkb->lkb_ua; in dlm_user_adopt_orphan()
5922 ua->proc = ua_tmp->proc; in dlm_user_adopt_orphan()
5923 ua->xid = ua_tmp->xid; in dlm_user_adopt_orphan()
5924 ua->castparam = ua_tmp->castparam; in dlm_user_adopt_orphan()
5925 ua->castaddr = ua_tmp->castaddr; in dlm_user_adopt_orphan()
5926 ua->bastparam = ua_tmp->bastparam; in dlm_user_adopt_orphan()
5927 ua->bastaddr = ua_tmp->bastaddr; in dlm_user_adopt_orphan()
5928 ua->user_lksb = ua_tmp->user_lksb; in dlm_user_adopt_orphan()
5936 spin_lock_bh(&ua->proc->locks_spin); in dlm_user_adopt_orphan()
5937 list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks); in dlm_user_adopt_orphan()
5938 spin_unlock_bh(&ua->proc->locks_spin); in dlm_user_adopt_orphan()
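The orphan scan above has a deliberate three-way result: a lock whose resource name matches but whose granted mode differs makes the call fail with -EAGAIN (an orphan exists but cannot be adopted as requested), no match at all gives -ENOENT, and a full match is unlinked from ls_orphans and rebound to the calling process. The same scan over a toy singly linked list, assuming simplified node types:

#include <errno.h>
#include <stddef.h>
#include <string.h>

struct orphan {
	struct orphan *next;
	const char *name;
	size_t namelen;
	int grmode;
};

/* Find and unlink an orphan by resource name and granted mode.
 * -EAGAIN: a name matched but the mode didn't; -ENOENT: no match. */
static int adopt_orphan(struct orphan **head, const char *name,
			size_t namelen, int mode, struct orphan **out)
{
	int rv = -ENOENT;

	for (struct orphan **pp = head; *pp; pp = &(*pp)->next) {
		struct orphan *it = *pp;

		if (it->namelen != namelen || memcmp(it->name, name, namelen))
			continue;
		if (it->grmode != mode) {
			rv = -EAGAIN;	/* right name, wrong mode */
			continue;
		}
		*pp = it->next;		/* unlink from the orphan list */
		*out = it;
		return 0;
	}
	return rv;
}

int main(void)
{
	struct orphan o2 = { NULL, "resB", 4, 3 };
	struct orphan o1 = { &o2,  "resA", 4, 5 };
	struct orphan *head = &o1, *found = NULL;
	return adopt_orphan(&head, "resB", 4, 3, &found) ? 1 : 0;
}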
5960 ua = lkb->lkb_ua; in dlm_user_unlock()
5962 if (lvb_in && ua->lksb.sb_lvbptr) in dlm_user_unlock()
5963 memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN); in dlm_user_unlock()
5964 if (ua_tmp->castparam) in dlm_user_unlock()
5965 ua->castparam = ua_tmp->castparam; in dlm_user_unlock()
5966 ua->user_lksb = ua_tmp->user_lksb; in dlm_user_unlock()
5974 if (error == -DLM_EUNLOCK) in dlm_user_unlock()
5977 if (error == -EBUSY && (flags & DLM_LKF_FORCEUNLOCK)) in dlm_user_unlock()
5982 spin_lock_bh(&ua->proc->locks_spin); in dlm_user_unlock()
5984 if (!list_empty(&lkb->lkb_ownqueue)) in dlm_user_unlock()
5985 list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking); in dlm_user_unlock()
5986 spin_unlock_bh(&ua->proc->locks_spin); in dlm_user_unlock()
6012 ua = lkb->lkb_ua; in dlm_user_cancel()
6013 if (ua_tmp->castparam) in dlm_user_cancel()
6014 ua->castparam = ua_tmp->castparam; in dlm_user_cancel()
6015 ua->user_lksb = ua_tmp->user_lksb; in dlm_user_cancel()
6023 if (error == -DLM_ECANCEL) in dlm_user_cancel()
6026 if (error == -EBUSY) in dlm_user_cancel()
6053 ua = lkb->lkb_ua; in dlm_user_deadlock()
6061 r = lkb->lkb_resource; in dlm_user_deadlock()
6068 set_bit(DLM_IFL_DEADLOCK_CANCEL_BIT, &lkb->lkb_iflags); in dlm_user_deadlock()
6075 if (error == -DLM_ECANCEL) in dlm_user_deadlock()
6078 if (error == -EBUSY) in dlm_user_deadlock()
6097 spin_lock_bh(&ls->ls_orphans_lock); in orphan_proc_lock()
6098 list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans); in orphan_proc_lock()
6099 spin_unlock_bh(&ls->ls_orphans_lock); in orphan_proc_lock()
6101 set_unlock_args(0, lkb->lkb_ua, &args); in orphan_proc_lock()
6104 if (error == -DLM_ECANCEL) in orphan_proc_lock()
6120 lkb->lkb_ua, &args); in unlock_proc_lock()
6123 if (error == -DLM_EUNLOCK) in unlock_proc_lock()
6137 spin_lock_bh(&ls->ls_clear_proc_locks); in del_proc_lock()
6138 if (list_empty(&proc->locks)) in del_proc_lock()
6141 lkb = list_entry(proc->locks.next, struct dlm_lkb, lkb_ownqueue); in del_proc_lock()
6142 list_del_init(&lkb->lkb_ownqueue); in del_proc_lock()
6144 if (lkb->lkb_exflags & DLM_LKF_PERSISTENT) in del_proc_lock()
6145 set_bit(DLM_DFL_ORPHAN_BIT, &lkb->lkb_dflags); in del_proc_lock()
6147 set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags); in del_proc_lock()
6149 spin_unlock_bh(&ls->ls_clear_proc_locks); in del_proc_lock()
6154 1) references lkb->ua which we free here and 2) adds lkbs to proc->asts,
6157 /* proc CLOSING flag is set so no more device_reads should look at proc->asts
6158 list, and no more device_writes should add lkb's to proc->locks list; so we
6160 device reads/writes/closes are serialized -- FIXME: we may need to serialize
6174 if (lkb->lkb_exflags & DLM_LKF_PERSISTENT) in dlm_clear_proc_locks()
6179 /* this removes the reference for the proc->locks list in dlm_clear_proc_locks()
6186 spin_lock_bh(&ls->ls_clear_proc_locks); in dlm_clear_proc_locks()
6188 /* in-progress unlocks */ in dlm_clear_proc_locks()
6189 list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) { in dlm_clear_proc_locks()
6190 list_del_init(&lkb->lkb_ownqueue); in dlm_clear_proc_locks()
6191 set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags); in dlm_clear_proc_locks()
6195 list_for_each_entry_safe(cb, cb_safe, &proc->asts, list) { in dlm_clear_proc_locks()
6196 list_del(&cb->list); in dlm_clear_proc_locks()
6200 spin_unlock_bh(&ls->ls_clear_proc_locks); in dlm_clear_proc_locks()
6211 spin_lock_bh(&proc->locks_spin); in purge_proc_locks()
6212 if (!list_empty(&proc->locks)) { in purge_proc_locks()
6213 lkb = list_entry(proc->locks.next, struct dlm_lkb, in purge_proc_locks()
6215 list_del_init(&lkb->lkb_ownqueue); in purge_proc_locks()
6217 spin_unlock_bh(&proc->locks_spin); in purge_proc_locks()
6222 set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags); in purge_proc_locks()
6224 dlm_put_lkb(lkb); /* ref from proc->locks list */ in purge_proc_locks()
6227 spin_lock_bh(&proc->locks_spin); in purge_proc_locks()
6228 list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) { in purge_proc_locks()
6229 list_del_init(&lkb->lkb_ownqueue); in purge_proc_locks()
6230 set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags); in purge_proc_locks()
6233 spin_unlock_bh(&proc->locks_spin); in purge_proc_locks()
6235 spin_lock_bh(&proc->asts_spin); in purge_proc_locks()
6236 list_for_each_entry_safe(cb, cb_safe, &proc->asts, list) { in purge_proc_locks()
6237 list_del(&cb->list); in purge_proc_locks()
6240 spin_unlock_bh(&proc->asts_spin); in purge_proc_locks()
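del_proc_lock and purge_proc_locks together form the standard drain loop: detach one lkb from the process's lock list while holding the spinlock (or observe the list empty), drop the lock, then orphan or unlock the entry outside it, and repeat; none of the heavier per-lock work runs with the spinlock held. A pthread rendering of that loop, with simplified list types:

#include <pthread.h>
#include <stddef.h>

struct plock { struct plock *next; };

struct proc {
	pthread_mutex_t locks_spin;
	struct plock *locks;	/* singly linked stand-in for the list */
};

/* Detach the first entry under the lock; NULL means drained. */
static struct plock *del_proc_lock(struct proc *proc)
{
	pthread_mutex_lock(&proc->locks_spin);
	struct plock *lkb = proc->locks;
	if (lkb)
		proc->locks = lkb->next;
	pthread_mutex_unlock(&proc->locks_spin);
	return lkb;
}

static void orphan_or_unlock(struct plock *lkb) { (void)lkb; }

static void clear_proc_locks(struct proc *proc)
{
	struct plock *lkb;

	/* all per-lock work happens outside the spinlock */
	while ((lkb = del_proc_lock(proc)) != NULL)
		orphan_or_unlock(lkb);
}

int main(void)
{
	struct plock a = { NULL }, b = { &a };
	struct proc proc = { PTHREAD_MUTEX_INITIALIZER, &b };
	clear_proc_locks(&proc);
	return proc.locks != NULL;
}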
6249 spin_lock_bh(&ls->ls_orphans_lock); in do_purge()
6250 list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) { in do_purge()
6251 if (pid && lkb->lkb_ownpid != pid) in do_purge()
6254 list_del_init(&lkb->lkb_ownqueue); in do_purge()
6257 spin_unlock_bh(&ls->ls_orphans_lock); in do_purge()
6270 ms->m_nodeid = cpu_to_le32(nodeid); in send_purge()
6271 ms->m_pid = cpu_to_le32(pid); in send_purge()
6285 if (pid == current->pid) in dlm_user_purge()
6305 return -EOPNOTSUPP; in dlm_debug_add_lkb()
6309 return -ENOMEM; in dlm_debug_add_lkb()
6318 lkb->lkb_nodeid = lkb_nodeid; in dlm_debug_add_lkb()
6319 lkb->lkb_lksb = lksb; in dlm_debug_add_lkb()
6322 lkb->lkb_astparam = (void *)0xDEADBEEF; in dlm_debug_add_lkb()