Lines matching the identifier k (full matches); each entry gives the source line number, the matching line, and the enclosing function. Only lines that contain k are listed, so multi-line statements and comments appear as fragments.

23 bpos_cmp(l->k->k.p, r->k->k.p); in btree_insert_entry_cmp()
33 struct bkey_s_c k, in extent_front_merge() argument
44 update = bch2_bkey_make_mut_noupdate(trans, k); in extent_front_merge()
52 ret = bch2_key_has_snapshot_overwrites(trans, iter->btree_id, k.k->p) ?: in extent_front_merge()
53 bch2_key_has_snapshot_overwrites(trans, iter->btree_id, (*insert)->k.p); in extent_front_merge()
70 struct bkey_s_c k) in extent_back_merge() argument
78 ret = bch2_key_has_snapshot_overwrites(trans, iter->btree_id, insert->k.p) ?: in extent_back_merge()
79 bch2_key_has_snapshot_overwrites(trans, iter->btree_id, k.k->p); in extent_back_merge()
85 bch2_bkey_merge(c, bkey_i_to_s(insert), k); in extent_back_merge()
97 struct bkey_s_c k; in need_whiteout_for_snapshot() local
108 BTREE_ITER_nopreserve, k, ret) { in need_whiteout_for_snapshot()
109 if (!bkey_eq(k.k->p, pos)) in need_whiteout_for_snapshot()
113 k.k->p.snapshot)) { in need_whiteout_for_snapshot()
114 ret = !bkey_whiteout(k.k); in need_whiteout_for_snapshot()
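
The need_whiteout_for_snapshot() lines above scan the keys sitting at the same position in other snapshots and answer one question: would deleting at @pos expose a key from an ancestor snapshot? A sketch of the decision inside that loop, with the iterator macro plumbing omitted; the argument order of the ancestor check is inferred from the truncated line 113 and should be treated as an assumption:

    /* Sketch of the per-key check in need_whiteout_for_snapshot(). */
    if (!bkey_eq(k.k->p, pos))
            break;          /* scanned past @pos: nothing left to hide */

    if (bch2_snapshot_is_ancestor(c, pos.snapshot, k.k->p.snapshot)) {
            /*
             * An ancestor snapshot has a key here, so a plain deletion
             * at @pos would let it show through again; an explicit
             * KEY_TYPE_whiteout is needed unless that key is already
             * a whiteout itself.
             */
            ret = !bkey_whiteout(k.k);
            break;
    }
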
143 while ((old_k = bch2_btree_iter_prev(&old_iter)).k && in __bch2_insert_snapshot_whiteouts()
145 bkey_eq(old_pos, old_k.k->p)) { in __bch2_insert_snapshot_whiteouts()
147 SPOS(new_pos.inode, new_pos.offset, old_k.k->p.snapshot); in __bch2_insert_snapshot_whiteouts()
149 if (!bch2_snapshot_is_ancestor(c, old_k.k->p.snapshot, old_pos.snapshot) || in __bch2_insert_snapshot_whiteouts()
150 snapshot_list_has_ancestor(c, &s, old_k.k->p.snapshot)) in __bch2_insert_snapshot_whiteouts()
160 if (new_k.k->type == KEY_TYPE_deleted) { in __bch2_insert_snapshot_whiteouts()
166 bkey_init(&update->k); in __bch2_insert_snapshot_whiteouts()
167 update->k.p = whiteout_pos; in __bch2_insert_snapshot_whiteouts()
168 update->k.type = KEY_TYPE_whiteout; in __bch2_insert_snapshot_whiteouts()
175 ret = snapshot_list_add(c, &s, old_k.k->p.snapshot); in __bch2_insert_snapshot_whiteouts()
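
The __bch2_insert_snapshot_whiteouts() lines 160-175 show how such a whiteout is materialized: a freshly initialized key whose type is flipped to KEY_TYPE_whiteout at the position that must be shadowed (whiteout_pos, built on line 147 from the new position's inode/offset and the old key's snapshot). A minimal sketch of that construction, assuming transaction memory is used as in the listing; iterator setup and the snapshot-list bookkeeping are left out:

    /* Sketch: build a whiteout at @whiteout_pos in transaction memory. */
    struct bkey_i *update = bch2_trans_kmalloc(trans, sizeof(*update));
    int ret = PTR_ERR_OR_ZERO(update);
    if (ret)
            return ret;

    bkey_init(&update->k);                  /* zero-size, KEY_TYPE_deleted */
    update->k.p    = whiteout_pos;          /* position being shadowed */
    update->k.type = KEY_TYPE_whiteout;     /* hides the ancestor's key */
    /* ...then queued with bch2_trans_update() against an iterator at
     * whiteout_pos (the flags used there are not visible in this listing). */
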
194 struct bpos new_start = bkey_start_pos(new.k); in bch2_trans_update_extent_overwrite()
195 unsigned front_split = bkey_lt(bkey_start_pos(old.k), new_start); in bch2_trans_update_extent_overwrite()
196 unsigned back_split = bkey_gt(old.k->p, new.k->p); in bch2_trans_update_extent_overwrite()
198 old.k->p.snapshot != new.k->p.snapshot; in bch2_trans_update_extent_overwrite()
219 old.k->p, update->k.p) ?: in bch2_trans_update_extent_overwrite()
233 bch2_cut_back(new.k->p, update); in bch2_trans_update_extent_overwrite()
236 old.k->p, update->k.p) ?: in bch2_trans_update_extent_overwrite()
243 if (bkey_le(old.k->p, new.k->p)) { in bch2_trans_update_extent_overwrite()
248 bkey_init(&update->k); in bch2_trans_update_extent_overwrite()
249 update->k.p = old.k->p; in bch2_trans_update_extent_overwrite()
250 update->k.p.snapshot = new.k->p.snapshot; in bch2_trans_update_extent_overwrite()
252 if (new.k->p.snapshot != old.k->p.snapshot) { in bch2_trans_update_extent_overwrite()
253 update->k.type = KEY_TYPE_whiteout; in bch2_trans_update_extent_overwrite()
255 ret = need_whiteout_for_snapshot(trans, btree_id, update->k.p); in bch2_trans_update_extent_overwrite()
259 update->k.type = KEY_TYPE_whiteout; in bch2_trans_update_extent_overwrite()
273 bch2_cut_front(new.k->p, update); in bch2_trans_update_extent_overwrite()
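
Taken together, the bch2_trans_update_extent_overwrite() lines describe how an existing extent that overlaps the incoming one is trimmed: a front split keeps the part before the new extent, a back split keeps the part after it, and the overlapped span is removed. A rough sketch of just the trimming arithmetic, assuming old and new are the existing and incoming extents; front_copy and back_copy are hypothetical names for mutable copies of the old extent, and error handling is omitted:

    /* Split decisions, mirroring listing lines 194-196. */
    struct bpos new_start = bkey_start_pos(new.k);
    unsigned front_split  = bkey_lt(bkey_start_pos(old.k), new_start);
    unsigned back_split   = bkey_gt(old.k->p, new.k->p);

    if (front_split)
            /* keep the part of old that precedes the new extent */
            bch2_cut_back(new_start, front_copy);

    if (back_split)
            /* keep the part of old that follows the new extent */
            bch2_cut_front(new.k->p, back_copy);

Lines 243-259 then handle removing the overlapped portion of the old key itself: its position is rewritten into the new snapshot and, when the snapshots differ or need_whiteout_for_snapshot() says so, it is queued as a KEY_TYPE_whiteout rather than a plain deletion.
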
291 struct bkey_s_c k; in bch2_trans_update_extent() local
295 bch2_trans_iter_init(trans, &iter, btree_id, bkey_start_pos(&insert->k), in bch2_trans_update_extent()
299 k = bch2_btree_iter_peek_max(&iter, POS(insert->k.p.inode, U64_MAX)); in bch2_trans_update_extent()
300 if ((ret = bkey_err(k))) in bch2_trans_update_extent()
302 if (!k.k) in bch2_trans_update_extent()
305 if (bkey_eq(k.k->p, bkey_start_pos(&insert->k))) { in bch2_trans_update_extent()
306 if (bch2_bkey_maybe_mergable(k.k, &insert->k)) { in bch2_trans_update_extent()
307 ret = extent_front_merge(trans, &iter, k, &insert, flags); in bch2_trans_update_extent()
315 while (bkey_gt(insert->k.p, bkey_start_pos(k.k))) { in bch2_trans_update_extent()
316 bool done = bkey_lt(insert->k.p, k.k->p); in bch2_trans_update_extent()
318 ret = bch2_trans_update_extent_overwrite(trans, &iter, flags, k, bkey_i_to_s_c(insert)); in bch2_trans_update_extent()
326 k = bch2_btree_iter_peek_max(&iter, POS(insert->k.p.inode, U64_MAX)); in bch2_trans_update_extent()
327 if ((ret = bkey_err(k))) in bch2_trans_update_extent()
329 if (!k.k) in bch2_trans_update_extent()
333 if (bch2_bkey_maybe_mergable(&insert->k, k.k)) { in bch2_trans_update_extent()
334 ret = extent_back_merge(trans, &iter, insert, k); in bch2_trans_update_extent()
339 if (!bkey_deleted(&insert->k)) in bch2_trans_update_extent()
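
The bch2_trans_update_extent() lines outline the driver loop: position an iterator at the start of the insert, attempt a front merge if an existing extent ends exactly there (lines 305-307), then repeatedly peek the next key up to the end of the inode and hand each overlapping extent to bch2_trans_update_extent_overwrite() until the insert has been passed, finishing with an optional back merge (lines 333-334). A condensed sketch of that loop; the iterator flags are simplified to BTREE_ITER_intent, the advance between iterations is assumed (it is not among the matched lines), and error/restart handling is dropped:

    struct btree_iter iter;
    struct bkey_s_c k;
    int ret = 0;

    bch2_trans_iter_init(trans, &iter, btree_id, bkey_start_pos(&insert->k),
                         BTREE_ITER_intent);

    k = bch2_btree_iter_peek_max(&iter, POS(insert->k.p.inode, U64_MAX));

    while (!bkey_err(k) && k.k &&
           bkey_gt(insert->k.p, bkey_start_pos(k.k))) {
            /* true if this existing extent reaches past the end of the insert */
            bool done = bkey_lt(insert->k.p, k.k->p);

            ret = bch2_trans_update_extent_overwrite(trans, &iter, flags,
                                                     k, bkey_i_to_s_c(insert));
            if (ret || done)
                    break;

            bch2_btree_iter_advance(&iter);
            k = bch2_btree_iter_peek_max(&iter, POS(insert->k.p.inode, U64_MAX));
    }
    bch2_trans_iter_exit(trans, &iter);
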
352 struct bkey k; in flush_new_cached_update() local
370 bch2_btree_path_peek_slot_exact(btree_path, &k); in flush_new_cached_update()
371 if (!bkey_deleted(&k)) in flush_new_cached_update()
378 ret = bch2_trans_update_by_path(trans, path_idx, i->k, flags, ip); in flush_new_cached_update()
386 struct bkey_i *k, enum btree_iter_update_trigger_flags flags, in bch2_trans_update_by_path() argument
396 EBUG_ON(!bpos_eq(k->k.p, path->pos)); in bch2_trans_update_by_path()
405 .k = k, in bch2_trans_update_by_path()
433 i->k = n.k; in bch2_trans_update_by_path()
445 bch2_journal_keys_peek_slot(c, n.btree_id, n.level, k->k.p); in bch2_trans_update_by_path()
448 i->old_k = j_k->k; in bch2_trans_update_by_path()
512 struct bkey_i *k, enum btree_iter_update_trigger_flags flags) in bch2_trans_update() argument
518 return bch2_trans_update_extent(trans, iter, k, flags); in bch2_trans_update()
520 if (bkey_deleted(&k->k) && in bch2_trans_update()
523 ret = need_whiteout_for_snapshot(trans, iter->btree_id, k->k.p); in bch2_trans_update()
528 k->k.type = KEY_TYPE_whiteout; in bch2_trans_update()
546 return bch2_trans_update_by_path(trans, path_idx, k, flags, _RET_IP_); in bch2_trans_update()
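
bch2_trans_update() is the entry point the helpers below funnel into: extent keys are diverted to bch2_trans_update_extent() (line 518), and a deletion in a snapshot-aware btree is converted into an explicit whiteout when need_whiteout_for_snapshot() reports that an ancestor key would otherwise become visible again. Lines 520-528 reconstructed as a readable snippet (the conditions on lines 520-521 are only partially visible here):

    if (bkey_deleted(&k->k) /* plus the snapshot-filtering checks, lines 520-521 */) {
            ret = need_whiteout_for_snapshot(trans, iter->btree_id, k->k.p);
            if (ret < 0)
                    return ret;
            if (ret)
                    k->k.type = KEY_TYPE_whiteout;
    }
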
551 struct bkey_i *k) in bch2_btree_insert_clone_trans() argument
553 struct bkey_i *n = bch2_trans_kmalloc(trans, bkey_bytes(&k->k)); in bch2_btree_insert_clone_trans()
558 bkey_copy(n, k); in bch2_btree_insert_clone_trans()
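
The two bch2_btree_insert_clone_trans() lines show the idiom for handing a caller-owned key to a transaction: copy it into transaction-lifetime memory sized with bkey_bytes(), so the caller's buffer does not have to outlive the update. A sketch of that idiom (what the clone is subsequently inserted with is not visible in these lines):

    /* Clone @k into memory that lives as long as the transaction. */
    struct bkey_i *n = bch2_trans_kmalloc(trans, bkey_bytes(&k->k));
    int ret = PTR_ERR_OR_ZERO(n);
    if (ret)
            return ret;

    bkey_copy(n, k);        /* n can now be queued; the caller's k may go away */
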
592 struct bkey_s_c k = bch2_btree_iter_peek_prev(iter); in bch2_bkey_get_empty_slot() local
593 int ret = bkey_err(k); in bch2_bkey_get_empty_slot()
598 k = bch2_btree_iter_peek_slot(iter); in bch2_bkey_get_empty_slot()
599 ret = bkey_err(k); in bch2_bkey_get_empty_slot()
603 BUG_ON(k.k->type != KEY_TYPE_deleted); in bch2_bkey_get_empty_slot()
605 if (bkey_gt(k.k->p, end)) { in bch2_bkey_get_empty_slot()
624 enum btree_id btree, struct bkey_i *k, in bch2_btree_insert_nonextent() argument
630 bch2_trans_iter_init(trans, &iter, btree, k->k.p, in bch2_btree_insert_nonextent()
635 bch2_trans_update(trans, &iter, k, flags); in bch2_btree_insert_nonextent()
641 struct bkey_i *k, enum btree_iter_update_trigger_flags flags) in bch2_btree_insert_trans() argument
644 bch2_trans_iter_init(trans, &iter, id, bkey_start_pos(&k->k), in bch2_btree_insert_trans()
647 bch2_trans_update(trans, &iter, k, flags); in bch2_btree_insert_trans()
656 * @k: key to insert
664 int bch2_btree_insert(struct bch_fs *c, enum btree_id id, struct bkey_i *k, in bch2_btree_insert() argument
669 bch2_btree_insert_trans(trans, id, k, iter_flags)); in bch2_btree_insert()
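
bch2_btree_insert_trans() (lines 641-647) ties the pieces together inside an existing transaction, an iterator at bkey_start_pos(&k->k) followed by bch2_trans_update(), while bch2_btree_insert() (line 656 onward) is apparently the convenience wrapper that also creates the transaction and commits. A hedged caller-side sketch of the transactional form; KEY_TYPE_set, btree_id and pos stand in for whatever the caller actually writes, and the commit/restart loop is only indicated in a comment because its arguments are not shown here:

    struct bkey_i k;

    bkey_init(&k.k);
    k.k.type = KEY_TYPE_set;            /* placeholder key type */
    k.k.p    = pos;

    /*
     * Note: the update entry stores a pointer to the key (lines 405/433),
     * so @k must stay valid until the transaction commits, or be cloned
     * into transaction memory as bch2_btree_insert_clone_trans() does above.
     */
    int ret = bch2_btree_insert_trans(trans, btree_id, &k, 0);
    /* ...followed by the usual commit / transaction-restart handling */
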
675 struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k)); in bch2_btree_delete_at() local
676 int ret = PTR_ERR_OR_ZERO(k); in bch2_btree_delete_at()
680 bkey_init(&k->k); in bch2_btree_delete_at()
681 k->k.p = iter->pos; in bch2_btree_delete_at()
682 return bch2_trans_update(trans, iter, k, update_flags); in bch2_btree_delete_at()
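
bch2_btree_delete_at() shows that a deletion is just another update: bkey_init() leaves the key as a zero-size KEY_TYPE_deleted key, and placing it at iter->pos deletes whatever the iterator currently points at. The matched lines reassembled into a readable snippet:

    struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k));
    int ret = PTR_ERR_OR_ZERO(k);
    if (ret)
            return ret;

    bkey_init(&k->k);                   /* zero-size key, KEY_TYPE_deleted */
    k->k.p = iter->pos;                 /* delete at the iterator position */
    return bch2_trans_update(trans, iter, k, update_flags);
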
709 struct bkey_s_c k; in bch2_btree_delete_range_trans() local
713 while ((k = bch2_btree_iter_peek_max(&iter, end)).k) { in bch2_btree_delete_range_trans()
718 ret = bkey_err(k); in bch2_btree_delete_range_trans()
722 bkey_init(&delete.k); in bch2_btree_delete_range_trans()
730 * bkey_start_pos(k.k) (for non extents they always will be the in bch2_btree_delete_range_trans()
733 * of k. in bch2_btree_delete_range_trans()
736 * bkey_start_pos(k.k)). in bch2_btree_delete_range_trans()
738 delete.k.p = iter.pos; in bch2_btree_delete_range_trans()
741 bch2_key_resize(&delete.k, in bch2_btree_delete_range_trans()
742 bpos_min(end, k.k->p).offset - in bch2_btree_delete_range_trans()
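
The bch2_btree_delete_range_trans() lines sketch range deletion: peek the next key up to @end, then, as the comment fragments above explain, start the deletion at iter.pos rather than bkey_start_pos(k.k) (they can differ for extents when the range begins mid-key) and size it to cover the overlap. One iteration of that loop, condensed; in the real function the update is committed before the on-stack key goes out of scope:

    struct bkey_i delete;

    k = bch2_btree_iter_peek_max(&iter, end);
    ret = bkey_err(k);
    if (ret || !k.k)
            break;                      /* error, or nothing left in the range */

    bkey_init(&delete.k);
    delete.k.p = iter.pos;              /* not bkey_start_pos(k.k): the range
                                           may start in the middle of k */

    /* for extent btrees, cover k up to @end */
    bch2_key_resize(&delete.k,
                    bpos_min(end, k.k->p).offset - iter.pos.offset);

    ret = bch2_trans_update(trans, &iter, &delete, update_flags);
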
788 struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k)); in bch2_btree_bit_mod_iter() local
789 int ret = PTR_ERR_OR_ZERO(k); in bch2_btree_bit_mod_iter()
793 bkey_init(&k->k); in bch2_btree_bit_mod_iter()
794 k->k.type = set ? KEY_TYPE_set : KEY_TYPE_deleted; in bch2_btree_bit_mod_iter()
795 k->k.p = iter->pos; in bch2_btree_bit_mod_iter()
797 bch2_key_resize(&k->k, 1); in bch2_btree_bit_mod_iter()
799 return bch2_trans_update(trans, iter, k, 0); in bch2_btree_bit_mod_iter()
817 struct bkey_i k; in bch2_btree_bit_mod_buffered() local
819 bkey_init(&k.k); in bch2_btree_bit_mod_buffered()
820 k.k.type = set ? KEY_TYPE_set : KEY_TYPE_deleted; in bch2_btree_bit_mod_buffered()
821 k.k.p = pos; in bch2_btree_bit_mod_buffered()
823 return bch2_trans_update_buffered(trans, btree, &k); in bch2_btree_bit_mod_buffered()
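
The last two groups build the same minimal key, a zero-size KEY_TYPE_set (or KEY_TYPE_deleted to clear it), in two ways: bch2_btree_bit_mod_iter() allocates it from transaction memory, resizes it to 1 (line 797, apparently for extent-style iterators) and queues it through an iterator with bch2_trans_update(), while bch2_btree_bit_mod_buffered() builds it on the stack, which presumably works because the buffered path copies the key rather than keeping a pointer. The buffered form, reassembled:

    struct bkey_i k;

    bkey_init(&k.k);
    k.k.type = set ? KEY_TYPE_set : KEY_TYPE_deleted;
    k.k.p    = pos;

    return bch2_trans_update_buffered(trans, btree, &k);
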