Lines matching full:k — whole-identifier search results for k in the bcachefs extent code; each entry shows the source line number, the matching line, and the enclosing function ("argument" marks lines where k is declared as a parameter).
116 int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k, in bch2_bkey_pick_read_device() argument
120 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); in bch2_bkey_pick_read_device()
126 if (k.k->type == KEY_TYPE_error) in bch2_bkey_pick_read_device()
130 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) { in bch2_bkey_pick_read_device()
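The entries above show the shape of the read-path helper: an error key short-circuits, otherwise the decoded pointers are scanned and one is chosen. A minimal standalone model of that control flow, using stand-in types; the "first non-failed device" pick below is only an illustration, not the real selection heuristic in bch2_bkey_pick_read_device():

#include <stdbool.h>
#include <stddef.h>

/* Simplified stand-ins for the real bkey/pointer types (assumptions). */
enum key_type_model { KEY_TYPE_error_model, KEY_TYPE_extent_model };

struct ptr_model { unsigned dev; bool dev_failed; };

struct key_model {
        enum key_type_model type;
        unsigned nr_ptrs;
        struct ptr_model ptrs[4];
};

/*
 * Model of "pick a pointer to read from": error keys yield no pointer,
 * otherwise scan the pointers and take the first usable one.
 */
static const struct ptr_model *pick_read_ptr(const struct key_model *k)
{
        if (k->type == KEY_TYPE_error_model)
                return NULL;

        for (unsigned i = 0; i < k->nr_ptrs; i++)
                if (!k->ptrs[i].dev_failed)
                        return &k->ptrs[i];

        return NULL;
}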
180 int bch2_btree_ptr_validate(struct bch_fs *c, struct bkey_s_c k, in bch2_btree_ptr_validate() argument
185 bkey_fsck_err_on(bkey_val_u64s(k.k) > BCH_REPLICAS_MAX, in bch2_btree_ptr_validate()
187 "value too big (%zu > %u)", bkey_val_u64s(k.k), BCH_REPLICAS_MAX); in bch2_btree_ptr_validate()
189 ret = bch2_bkey_ptrs_validate(c, k, from); in bch2_btree_ptr_validate()
195 struct bkey_s_c k) in bch2_btree_ptr_to_text() argument
197 bch2_bkey_ptrs_to_text(out, c, k); in bch2_btree_ptr_to_text()
200 int bch2_btree_ptr_v2_validate(struct bch_fs *c, struct bkey_s_c k, in bch2_btree_ptr_v2_validate() argument
203 struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k); in bch2_btree_ptr_v2_validate()
206 bkey_fsck_err_on(bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX, in bch2_btree_ptr_v2_validate()
209 bkey_val_u64s(k.k), BKEY_BTREE_PTR_VAL_U64s_MAX); in bch2_btree_ptr_v2_validate()
211 bkey_fsck_err_on(bpos_ge(bp.v->min_key, bp.k->p), in bch2_btree_ptr_v2_validate()
221 ret = bch2_bkey_ptrs_validate(c, k, from); in bch2_btree_ptr_v2_validate()
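Both fsck checks visible in bch2_btree_ptr_v2_validate() reduce to comparing a property of the key against a bound: the value must fit in BKEY_BTREE_PTR_VAL_U64s_MAX u64s, and min_key must lie strictly before the key's position. A compact standalone model of those two checks, with stand-in types and a placeholder bound:

#include <stdbool.h>

/* Stand-ins for bpos / btree_ptr_v2 (assumptions, not the real layouts). */
struct bpos_m { unsigned long long inode, offset; };

struct btree_ptr_v2_m {
        struct bpos_m min_key;  /* lowest key the node may hold */
        struct bpos_m pos;      /* the bkey's position (bp.k->p) */
        unsigned val_u64s;      /* value size, in u64s */
};

#define VAL_U64S_MAX_M 16       /* placeholder for BKEY_BTREE_PTR_VAL_U64s_MAX */

static bool bpos_ge_m(struct bpos_m a, struct bpos_m b)
{
        return a.inode != b.inode ? a.inode > b.inode : a.offset >= b.offset;
}

/* Mirrors the two checks in the listing: value-size bound, min_key < pos. */
static bool btree_ptr_v2_ok(const struct btree_ptr_v2_m *bp)
{
        return bp->val_u64s <= VAL_U64S_MAX_M &&
               !bpos_ge_m(bp->min_key, bp->pos);
}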
227 struct bkey_s_c k) in bch2_btree_ptr_v2_to_text() argument
229 struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k); in bch2_btree_ptr_v2_to_text()
238 bch2_bkey_ptrs_to_text(out, c, k); in bch2_btree_ptr_v2_to_text()
243 struct bkey_s k) in bch2_btree_ptr_v2_compat() argument
245 struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(k); in bch2_btree_ptr_v2_compat()
283 lp.crc = bch2_extent_crc_unpack(l.k, NULL); in bch2_extent_merge()
284 rp.crc = bch2_extent_crc_unpack(r.k, NULL); in bch2_extent_merge()
286 while (__bkey_ptr_next_decode(l.k, l_ptrs.end, lp, en_l) && in bch2_extent_merge()
287 __bkey_ptr_next_decode(r.k, r_ptrs.end, rp, en_r)) { in bch2_extent_merge()
347 struct bch_extent_crc_unpacked crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l)); in bch2_extent_merge()
348 struct bch_extent_crc_unpacked crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r)); in bch2_extent_merge()
369 bch2_extent_crc_unpack(l.k, entry_to_crc(en_l)); in bch2_extent_merge()
371 bch2_extent_crc_unpack(r.k, entry_to_crc(en_r)); in bch2_extent_merge()
401 bch2_key_resize(l.k, l.k->size + r.k->size); in bch2_extent_merge()
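When two adjacent extents turn out to be mergeable, the merge ends by resizing the left key to cover both, as the bch2_key_resize(l.k, l.k->size + r.k->size) line shows. A standalone model of just that final step (stand-in types; the pointer and checksum compatibility checks that precede it in bch2_extent_merge() are elided):

#include <stdbool.h>

/* Stand-in for the parts of struct bkey this step touches (assumption). */
struct extent_key_m {
        unsigned long long end_offset;  /* k->p.offset: end of the extent */
        unsigned size;                  /* sectors covered */
};

static unsigned long long start_offset_m(const struct extent_key_m *k)
{
        return k->end_offset - k->size;
}

/*
 * Merge r into l if r starts exactly where l ends; mirrors the
 * bch2_key_resize(l.k, l.k->size + r.k->size) step in the listing.
 */
static bool try_merge_m(struct extent_key_m *l, const struct extent_key_m *r)
{
        if (start_offset_m(r) != l->end_offset)
                return false;

        l->size += r->size;
        l->end_offset = r->end_offset;
        return true;
}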
407 int bch2_reservation_validate(struct bch_fs *c, struct bkey_s_c k, in bch2_reservation_validate() argument
410 struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k); in bch2_reservation_validate()
421 struct bkey_s_c k) in bch2_reservation_to_text() argument
423 struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k); in bch2_reservation_to_text()
439 bch2_key_resize(l.k, l.k->size + r.k->size); in bch2_reservation_merge()
469 bool bch2_can_narrow_extent_crcs(struct bkey_s_c k, in bch2_can_narrow_extent_crcs() argument
472 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); in bch2_can_narrow_extent_crcs()
479 bkey_for_each_crc(k.k, ptrs, crc, i) in bch2_can_narrow_extent_crcs()
495 bool bch2_bkey_narrow_crcs(struct bkey_i *k, struct bch_extent_crc_unpacked n) in bch2_bkey_narrow_crcs() argument
497 struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k)); in bch2_bkey_narrow_crcs()
505 bkey_for_each_crc(&k->k, ptrs, u, i) in bch2_bkey_narrow_crcs()
517 BUG_ON(n.live_size != k->k.size); in bch2_bkey_narrow_crcs()
520 ptrs = bch2_bkey_ptrs(bkey_i_to_s(k)); in bch2_bkey_narrow_crcs()
522 bkey_for_each_ptr_decode(&k->k, ptrs, p, i) in bch2_bkey_narrow_crcs()
524 bch2_bkey_drop_ptr_noerror(bkey_i_to_s(k), &i->ptr); in bch2_bkey_narrow_crcs()
527 bch2_extent_ptr_decoded_append(k, &p); in bch2_bkey_narrow_crcs()
569 void bch2_extent_crc_append(struct bkey_i *k, in bch2_extent_crc_append() argument
572 struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k)); in bch2_extent_crc_append()
593 k->k.u64s += extent_entry_u64s(ptrs.end); in bch2_extent_crc_append()
595 EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX); in bch2_extent_crc_append()
600 unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k) in bch2_bkey_nr_ptrs() argument
602 return bch2_bkey_devs(k).nr; in bch2_bkey_nr_ptrs()
605 unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k) in bch2_bkey_nr_ptrs_allocated() argument
607 return k.k->type == KEY_TYPE_reservation in bch2_bkey_nr_ptrs_allocated()
608 ? bkey_s_c_to_reservation(k).v->nr_replicas in bch2_bkey_nr_ptrs_allocated()
609 : bch2_bkey_dirty_devs(k).nr; in bch2_bkey_nr_ptrs_allocated()
612 unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c k) in bch2_bkey_nr_ptrs_fully_allocated() argument
616 if (k.k->type == KEY_TYPE_reservation) { in bch2_bkey_nr_ptrs_fully_allocated()
617 ret = bkey_s_c_to_reservation(k).v->nr_replicas; in bch2_bkey_nr_ptrs_fully_allocated()
619 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); in bch2_bkey_nr_ptrs_fully_allocated()
623 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) in bch2_bkey_nr_ptrs_fully_allocated()
630 unsigned bch2_bkey_sectors_compressed(struct bkey_s_c k) in bch2_bkey_sectors_compressed() argument
632 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); in bch2_bkey_sectors_compressed()
637 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) in bch2_bkey_sectors_compressed()
644 bool bch2_bkey_is_incompressible(struct bkey_s_c k) in bch2_bkey_is_incompressible() argument
646 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); in bch2_bkey_is_incompressible()
650 bkey_for_each_crc(k.k, ptrs, crc, entry) in bch2_bkey_is_incompressible()
656 unsigned bch2_bkey_replicas(struct bch_fs *c, struct bkey_s_c k) in bch2_bkey_replicas() argument
658 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); in bch2_bkey_replicas()
663 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) { in bch2_bkey_replicas()
704 unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k) in bch2_bkey_durability() argument
706 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); in bch2_bkey_durability()
712 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) in bch2_bkey_durability()
719 static unsigned bch2_bkey_durability_safe(struct bch_fs *c, struct bkey_s_c k) in bch2_bkey_durability_safe() argument
721 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); in bch2_bkey_durability_safe()
727 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) in bch2_bkey_durability_safe()
735 void bch2_bkey_extent_entry_drop(struct bkey_i *k, union bch_extent_entry *entry) in bch2_bkey_extent_entry_drop() argument
737 union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k)); in bch2_bkey_extent_entry_drop()
741 k->k.u64s -= extent_entry_u64s(entry); in bch2_bkey_extent_entry_drop()
744 void bch2_extent_ptr_decoded_append(struct bkey_i *k, in bch2_extent_ptr_decoded_append() argument
747 struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k)); in bch2_extent_ptr_decoded_append()
749 bch2_extent_crc_unpack(&k->k, NULL); in bch2_extent_ptr_decoded_append()
757 bkey_for_each_crc(&k->k, ptrs, crc, pos) in bch2_extent_ptr_decoded_append()
763 bch2_extent_crc_append(k, p->crc); in bch2_extent_ptr_decoded_append()
764 pos = bkey_val_end(bkey_i_to_s(k)); in bch2_extent_ptr_decoded_append()
767 __extent_entry_insert(k, pos, to_entry(&p->ptr)); in bch2_extent_ptr_decoded_append()
771 __extent_entry_insert(k, pos, to_entry(&p->ec)); in bch2_extent_ptr_decoded_append()
791 void bch2_bkey_drop_ptr_noerror(struct bkey_s k, struct bch_extent_ptr *ptr) in bch2_bkey_drop_ptr_noerror() argument
793 struct bkey_ptrs ptrs = bch2_bkey_ptrs(k); in bch2_bkey_drop_ptr_noerror()
797 if (k.k->type == KEY_TYPE_stripe) { in bch2_bkey_drop_ptr_noerror()
817 extent_entry_drop(k, entry); in bch2_bkey_drop_ptr_noerror()
825 extent_entry_drop(k, entry); in bch2_bkey_drop_ptr_noerror()
829 void bch2_bkey_drop_ptr(struct bkey_s k, struct bch_extent_ptr *ptr) in bch2_bkey_drop_ptr() argument
831 if (k.k->type != KEY_TYPE_stripe) { in bch2_bkey_drop_ptr()
832 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k.s_c); in bch2_bkey_drop_ptr()
836 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) in bch2_bkey_drop_ptr()
843 bool have_dirty = bch2_bkey_dirty_devs(k.s_c).nr; in bch2_bkey_drop_ptr()
845 bch2_bkey_drop_ptr_noerror(k, ptr); in bch2_bkey_drop_ptr()
854 !bch2_bkey_dirty_devs(k.s_c).nr) { in bch2_bkey_drop_ptr()
855 k.k->type = KEY_TYPE_error; in bch2_bkey_drop_ptr()
856 set_bkey_val_u64s(k.k, 0); in bch2_bkey_drop_ptr()
857 } else if (!bch2_bkey_nr_ptrs(k.s_c)) { in bch2_bkey_drop_ptr()
858 k.k->type = KEY_TYPE_deleted; in bch2_bkey_drop_ptr()
859 set_bkey_val_u64s(k.k, 0); in bch2_bkey_drop_ptr()
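The tail of bch2_bkey_drop_ptr() encodes a policy worth spelling out: if the key had dirty pointers before the drop and has none afterwards, data has been lost and the key becomes KEY_TYPE_error with an empty value; if it simply has no pointers left at all, it becomes KEY_TYPE_deleted. A standalone model of that decision (stand-in types and counters rather than the real bkey accessors):

#include <stdbool.h>

/* Stand-ins for the key types involved (assumption). */
enum key_type_m { KT_EXTENT, KT_ERROR, KT_DELETED };

struct key_state_m {
        enum key_type_m type;
        unsigned nr_ptrs;       /* all pointers */
        unsigned nr_dirty;      /* non-cached pointers */
};

/*
 * Apply the post-drop policy visible in the listing: losing the last
 * dirty pointer turns the key into an error key; losing the last
 * pointer of any kind turns it into a deleted key.
 */
static void after_drop_ptr_m(struct key_state_m *k, bool had_dirty)
{
        if (had_dirty && !k->nr_dirty) {
                k->type = KT_ERROR;
                k->nr_ptrs = 0;         /* models set_bkey_val_u64s(k.k, 0) */
        } else if (!k->nr_ptrs) {
                k->type = KT_DELETED;
        }
}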
863 void bch2_bkey_drop_device(struct bkey_s k, unsigned dev) in bch2_bkey_drop_device() argument
865 bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev); in bch2_bkey_drop_device()
868 void bch2_bkey_drop_device_noerror(struct bkey_s k, unsigned dev) in bch2_bkey_drop_device_noerror() argument
870 bch2_bkey_drop_ptrs_noerror(k, ptr, ptr->dev == dev); in bch2_bkey_drop_device_noerror()
873 const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c k, unsigned dev) in bch2_bkey_has_device_c() argument
875 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); in bch2_bkey_has_device_c()
884 bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target) in bch2_bkey_has_target() argument
886 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); in bch2_bkey_has_target()
904 bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k, in bch2_bkey_matches_ptr() argument
907 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); in bch2_bkey_matches_ptr()
911 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) in bch2_bkey_matches_ptr()
914 (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) == in bch2_bkey_matches_ptr()
926 if (k1.k->type != k2.k->type) in bch2_extents_match()
929 if (bkey_extent_is_direct_data(k1.k)) { in bch2_extents_match()
938 bkey_for_each_ptr_decode(k1.k, ptrs1, p1, entry1) in bch2_extents_match()
939 bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2) in bch2_extents_match()
949 (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) == in bch2_extents_match()
950 (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k) && in bch2_extents_match()
981 bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2) in bch2_extent_has_ptr()
984 (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) == in bch2_extent_has_ptr()
985 (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k)) in bch2_extent_has_ptr()
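bch2_bkey_matches_ptr(), bch2_extents_match() and bch2_extent_has_ptr() all compare pointers after normalizing them to the start of their key: two pointers refer to the same data if they are on the same device and ptr.offset + crc.offset - bkey_start_offset(k) agrees. A standalone model of that normalized-offset comparison (stand-in types; the generation and cached-ness checks present in the real code are omitted):

#include <stdbool.h>

/* Stand-in for a decoded extent pointer plus its key's start (assumption). */
struct decoded_ptr_m {
        unsigned dev;                   /* device index */
        unsigned long long ptr_offset;  /* p.ptr.offset */
        unsigned crc_offset;            /* p.crc.offset */
        unsigned long long key_start;   /* bkey_start_offset(k.k) */
};

/* Offset of the pointed-to data relative to the start of the key. */
static long long normalized_offset_m(const struct decoded_ptr_m *p)
{
        return (long long) (p->ptr_offset + p->crc_offset) -
               (long long) p->key_start;
}

/* Mirrors the device + normalized-offset comparison in the listing. */
static bool ptrs_match_m(const struct decoded_ptr_m *a,
                         const struct decoded_ptr_m *b)
{
        return a->dev == b->dev &&
               normalized_offset_m(a) == normalized_offset_m(b);
}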
1005 struct bkey_s k, in bch2_extent_ptr_set_cached() argument
1008 struct bkey_ptrs ptrs = bch2_bkey_ptrs(k); in bch2_extent_ptr_set_cached()
1014 bch2_bkey_drop_ptr_noerror(k, ptr); in bch2_extent_ptr_set_cached()
1023 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) in bch2_extent_ptr_set_cached()
1026 bch2_bkey_drop_ptr_noerror(k, ptr); in bch2_extent_ptr_set_cached()
1040 * Returns true if @k should be dropped entirely
1045 bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k) in bch2_extent_normalize() argument
1050 bch2_bkey_drop_ptrs(k, ptr, in bch2_extent_normalize()
1056 return bkey_deleted(k.k); in bch2_extent_normalize()
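As the comment fragment above notes, bch2_extent_normalize() returns true when @k should be dropped entirely: it drops pointers that no longer carry useful data and then reports whether the key ended up empty (bkey_deleted); the _by_opts variant applies the same idea using the inode's options. A standalone model of that drop-then-report shape, with stand-in types and a simplified "stale cached pointer" predicate standing in for the real condition:

#include <stdbool.h>

/* Stand-ins for a key's pointer list (assumption). */
struct ptr_m { bool cached; bool stale; };

struct key_m {
        unsigned nr_ptrs;
        struct ptr_m ptrs[8];
        bool deleted;
};

/* Drop every pointer the predicate flags; mirrors bch2_bkey_drop_ptrs(). */
static void drop_ptrs_m(struct key_m *k, bool (*drop)(const struct ptr_m *))
{
        unsigned dst = 0;

        for (unsigned src = 0; src < k->nr_ptrs; src++)
                if (!drop(&k->ptrs[src]))
                        k->ptrs[dst++] = k->ptrs[src];

        k->nr_ptrs = dst;
        if (!k->nr_ptrs)
                k->deleted = true;      /* nothing left: key should go away */
}

static bool drop_stale_cached_m(const struct ptr_m *p)
{
        return p->cached && p->stale;
}

/* Model of bch2_extent_normalize(): returns true if @k should be dropped. */
static bool extent_normalize_m(struct key_m *k)
{
        drop_ptrs_m(k, drop_stale_cached_m);
        return k->deleted;              /* mirrors "return bkey_deleted(k.k)" */
}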
1067 struct bkey_s k) in bch2_extent_normalize_by_opts() argument
1074 ptrs = bch2_bkey_ptrs(k); in bch2_extent_normalize_by_opts()
1080 bch2_bkey_drop_ptr(k, ptr); in bch2_extent_normalize_by_opts()
1087 return bkey_deleted(k.k); in bch2_extent_normalize_by_opts()
1185 struct bkey_s_c k) in bch2_bkey_ptrs_to_text() argument
1187 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); in bch2_bkey_ptrs_to_text()
1192 prt_printf(out, "durability: %u ", bch2_bkey_durability_safe(c, k)); in bch2_bkey_ptrs_to_text()
1207 bch2_extent_crc_unpack(k.k, entry_to_crc(entry)); in bch2_bkey_ptrs_to_text()
1233 struct bkey_s_c k, in extent_ptr_validate() argument
1241 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); in extent_ptr_validate()
1275 int bch2_bkey_ptrs_validate(struct bch_fs *c, struct bkey_s_c k, in bch2_bkey_ptrs_validate() argument
1278 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); in bch2_bkey_ptrs_validate()
1281 unsigned size_ondisk = k.k->size; in bch2_bkey_ptrs_validate()
1287 if (bkey_is_btree_ptr(k.k)) in bch2_bkey_ptrs_validate()
1296 bkey_fsck_err_on(bkey_is_btree_ptr(k.k) && in bch2_bkey_ptrs_validate()
1303 ret = extent_ptr_validate(c, k, from, &entry->ptr, size_ondisk, false); in bch2_bkey_ptrs_validate()
1323 crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry)); in bch2_bkey_ptrs_validate()
1396 bkey_fsck_err_on(k.k->type != KEY_TYPE_extent && have_unwritten, in bch2_bkey_ptrs_validate()
1409 void bch2_ptr_swab(struct bkey_s k) in bch2_ptr_swab() argument
1411 struct bkey_ptrs ptrs = bch2_bkey_ptrs(k); in bch2_ptr_swab()
1452 int bch2_cut_front_s(struct bpos where, struct bkey_s k) in bch2_cut_front_s() argument
1454 unsigned new_val_u64s = bkey_val_u64s(k.k); in bch2_cut_front_s()
1458 if (bkey_le(where, bkey_start_pos(k.k))) in bch2_cut_front_s()
1461 EBUG_ON(bkey_gt(where, k.k->p)); in bch2_cut_front_s()
1463 sub = where.offset - bkey_start_offset(k.k); in bch2_cut_front_s()
1465 k.k->size -= sub; in bch2_cut_front_s()
1467 if (!k.k->size) { in bch2_cut_front_s()
1468 k.k->type = KEY_TYPE_deleted; in bch2_cut_front_s()
1472 switch (k.k->type) { in bch2_cut_front_s()
1475 struct bkey_ptrs ptrs = bch2_bkey_ptrs(k); in bch2_cut_front_s()
1507 struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k); in bch2_cut_front_s()
1514 void *p = bkey_inline_data_p(k); in bch2_cut_front_s()
1515 unsigned bytes = bkey_inline_data_bytes(k.k); in bch2_cut_front_s()
1526 val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s; in bch2_cut_front_s()
1529 set_bkey_val_u64s(k.k, new_val_u64s); in bch2_cut_front_s()
1530 memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64)); in bch2_cut_front_s()
1534 int bch2_cut_back_s(struct bpos where, struct bkey_s k) in bch2_cut_back_s() argument
1536 unsigned new_val_u64s = bkey_val_u64s(k.k); in bch2_cut_back_s()
1540 if (bkey_ge(where, k.k->p)) in bch2_cut_back_s()
1543 EBUG_ON(bkey_lt(where, bkey_start_pos(k.k))); in bch2_cut_back_s()
1545 len = where.offset - bkey_start_offset(k.k); in bch2_cut_back_s()
1547 k.k->p.offset = where.offset; in bch2_cut_back_s()
1548 k.k->size = len; in bch2_cut_back_s()
1551 k.k->type = KEY_TYPE_deleted; in bch2_cut_back_s()
1555 switch (k.k->type) { in bch2_cut_back_s()
1558 new_val_u64s = (bkey_inline_data_offset(k.k) + in bch2_cut_back_s()
1559 min(bkey_inline_data_bytes(k.k), k.k->size << 9)) >> 3; in bch2_cut_back_s()
1563 val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s; in bch2_cut_back_s()
1566 set_bkey_val_u64s(k.k, new_val_u64s); in bch2_cut_back_s()
1567 memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64)); in bch2_cut_back_s()
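bch2_cut_front_s() and bch2_cut_back_s() trim an extent to a new front or back boundary. The arithmetic visible above: cutting the front subtracts where - bkey_start_offset(k) from the size while the end position stays put, cutting the back moves the end position to where and sets the size to where - bkey_start_offset(k), and a key whose size reaches zero becomes KEY_TYPE_deleted, with the value shrunk and its tail zeroed. A standalone model of just the offset arithmetic (stand-in types; the per-key-type value adjustments are elided):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the fields of struct bkey that the cut helpers touch. */
struct ekey_m {
        unsigned long long end;         /* k.k->p.offset */
        unsigned size;                  /* sectors */
        bool deleted;
};

static unsigned long long start_m(const struct ekey_m *k)
{
        return k->end - k->size;        /* bkey_start_offset() */
}

/* Model of bch2_cut_front_s(): drop everything before @where. */
static void cut_front_m(struct ekey_m *k, unsigned long long where)
{
        if (where <= start_m(k))
                return;
        assert(where <= k->end);

        k->size -= where - start_m(k);
        if (!k->size)
                k->deleted = true;      /* k.k->type = KEY_TYPE_deleted */
}

/* Model of bch2_cut_back_s(): drop everything at or after @where. */
static void cut_back_m(struct ekey_m *k, unsigned long long where)
{
        if (where >= k->end)
                return;
        assert(where >= start_m(k));

        k->size = where - start_m(k);
        k->end = where;
        if (!k->size)
                k->deleted = true;
}

int main(void)
{
        struct ekey_m k = { .end = 128, .size = 64 };   /* covers [64, 128) */

        cut_front_m(&k, 80);    /* now covers [80, 128), size 48 */
        cut_back_m(&k, 100);    /* now covers [80, 100), size 20 */
        printf("start %llu end %llu size %u\n", start_m(&k), k.end, k.size);
        return 0;
}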