Lines matching "9" and "k" in the bcachefs read path (fs/bcachefs/io_read.c), grouped by the function containing each match:
should_promote():
    99  static inline int should_promote(struct bch_fs *c, struct bkey_s_c k,
   111  if (bch2_bkey_has_target(c, k, opts.promote_target))
   114  if (bkey_extent_is_unwritten(k))
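The two matches at 111 and 114 are early-out eligibility checks: an extent that already has a copy on the promote target, or one that is unwritten (allocated but never written), is never promoted. A minimal sketch of that shape, assuming a trimmed parameter list (the real function also takes the read position, read flags, and the set of failed devices) and placeholder error values:

        /* sketch only: bcachefs has dedicated BCH_ERR_nopromote_* codes for these */
        static inline int should_promote_sketch(struct bch_fs *c, struct bkey_s_c k,
                                                struct bch_io_opts opts)
        {
                if (!opts.promote_target)
                        return -ENODATA;        /* no promote target configured */

                if (bch2_bkey_has_target(c, k, opts.promote_target))
                        return -ENODATA;        /* already on the promote target */

                if (bkey_extent_is_unwritten(k))
                        return -ENODATA;        /* nothing on disk to copy yet */

                return 0;
        }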
__promote_alloc():
   171  struct bkey_s_c k,
   212  if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9, GFP_KERNEL)) {
   239  struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
   252  btree_id, k);
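Line 239's bch2_bkey_ptrs_c() is the standard entry point for walking an extent's data pointers. The usual iteration idiom looks like this (a sketch, not what __promote_alloc() does with the list; in recent trees the bkey_for_each_ptr() macro declares the loop variable itself):

        unsigned cached = 0;
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

        bkey_for_each_ptr(ptrs, ptr)
                cached += ptr->cached;  /* count cached (i.e. already-promoted) copies */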
promote_alloc():
   280  struct bkey_s_c k,
   302  ? bkey_start_pos(k.k)
   303  : POS(k.k->p.inode, iter.bi_sector);
   307  ret = should_promote(c, k, pos, opts, flags, failed);
   312  k.k->type == KEY_TYPE_reflink_v
   315  k, pos, pick, opts, sectors, rbio, failed);
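The matches at 302-312 make two decisions: which range to promote (the whole extent, or only the sectors being read) and which btree the promoted data belongs to, since a read that resolved through a reflink_v key must target the reflink btree rather than the extents btree. Condensed, with the promote-the-whole-extent condition abbreviated to a flag:

        struct bpos pos = promote_full
                ? bkey_start_pos(k.k)                   /* promote the entire extent */
                : POS(k.k->p.inode, iter.bi_sector);    /* only the range being read */

        enum btree_id btree = k.k->type == KEY_TYPE_reflink_v
                ? BTREE_ID_reflink                      /* indirect extent */
                : BTREE_ID_extents;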
bch2_read_err_msg_trans():
   335  read_pos.offset << 9);
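The << 9 shifts that recur throughout this listing (212, 335, 694, 732-733, 965, 1021, 1023, 1034, 1266, 1295) are all the same conversion: block-layer sectors are 512 bytes, so shifting a sector count left by 9 turns it into bytes, and shifting right by 9 goes back. For example:

        u64 sector      = 12345;
        u64 byte_offset = sector << 9;          /* 12345 * 512 = 6320640 bytes */
        u64 sectors     = byte_offset >> 9;     /* back to 12345 sectors */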
bch2_read_retry_nodecode():
   419  struct bkey_s_c k;
   433  ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
   437  bch2_bkey_buf_reassemble(&sk, c, k);
   438  k = bkey_i_to_s_c(sk.k);
   440  if (!bch2_bkey_matches_ptr(c, k,
   452  k, 0, failed, flags);
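The retry path's shape: re-peek the slot under lockrestart_do(), which replays the operation across transaction restarts; copy the key into a caller-owned buffer (bch2_bkey_buf_reassemble()) so it stays valid after the iterator moves; then confirm the key still references the pointer the failed read used, since an extent moved or overwritten in the meantime makes the retry moot. A condensed sketch, with the arguments that fell outside the matched lines filled in by assumption:

        struct bkey_s_c k;
        int ret = lockrestart_do(trans,
                        bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
        if (ret)
                goto err;

        bch2_bkey_buf_reassemble(&sk, c, k);    /* sk: a bkey_buf set up by the caller */
        k = bkey_i_to_s_c(sk.k);

        if (!bch2_bkey_matches_ptr(c, k, rbio->pick.ptr,
                                   rbio->data_pos.offset))
                goto out;       /* extent moved; the old read target is gone */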
__bch2_rbio_narrow_crcs():
   553  struct bkey_s_c k;
   559  k = bch2_bkey_get_iter(trans, &iter, rbio->data_btree, rbio->data_pos,
   561  if ((ret = bkey_err(k)))
   564  if (bversion_cmp(k.k->bversion, rbio->version) ||
   565      !bch2_bkey_matches_ptr(c, k, rbio->pick.ptr, data_offset))
   569  if (bkey_start_offset(k.k) < data_offset ||
   570      k.k->p.offset > data_offset + rbio->pick.crc.uncompressed_size)
   575  bkey_start_offset(k.k) - data_offset, k.k->size,
   585  new = bch2_trans_kmalloc(trans, bkey_bytes(k.k) +
   590  bkey_reassemble(new, k);
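CRC narrowing: when a read verified a checksum covering more data than the key currently references (typically after a partial overwrite), the key's crc entry can be replaced with one covering just the live range, so future reads verify less. Before committing, the code re-looks the key up and bails out unless it is still the same version, still points at the device offset that was read, and still fits inside the checksummed region (the matches at 564-570). The update itself builds a replacement key in transaction memory; a sketch, where the extra allocation for a worst-case crc field is an assumption completing line 585's truncated match:

        struct bkey_i *new = bch2_trans_kmalloc(trans,
                        bkey_bytes(k.k) + sizeof(struct bch_extent_crc128));
        ret = PTR_ERR_OR_ZERO(new);
        if (ret)
                goto out;

        bkey_reassemble(new, k);        /* copy the key, then rewrite its crc entry */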
__bch2_read_endio():
   694  src->bi_iter.bi_size = crc.compressed_size << 9;
   732  nonce = nonce_add(nonce, crc.offset << 9);
   733  bio_advance(src, crc.offset << 9);
read_from_stale_dirty_pointer():
   837  struct bkey_s_c k,
   854  bch2_bkey_val_to_text(&buf, c, k);
   859  ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
   862  bch2_bkey_val_to_text(&buf, c, k);
   872  bch2_bkey_val_to_text(&buf, c, k);
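All three matches in this function render the offending key into an error message. The standard printbuf idiom in bcachefs, sketched with an illustrative message string:

        struct printbuf buf = PRINTBUF;

        prt_str(&buf, "stale dirty pointer: ");
        bch2_bkey_val_to_text(&buf, c, k);      /* append the key and its value as text */
        bch_err(c, "%s", buf.buf);
        printbuf_exit(&buf);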
__bch2_read_extent():
   884  enum btree_id data_btree, struct bkey_s_c k,
   893  struct bpos data_pos = bkey_start_pos(k.k);
   896  if (bkey_extent_is_inline_data(k.k)) {
   898  bkey_inline_data_bytes(k.k));
   901  memcpy_to_bio(&orig->bio, iter, bkey_inline_data_p(k));
   908  pick_ret = bch2_bkey_pick_read_device(c, k, failed, &pick);
   918  bch2_bkey_val_to_text(&buf, c, k);
   929  bch2_bkey_val_to_text(&buf, c, k);
   948  read_from_stale_dirty_pointer(trans, ca, k, pick.ptr);
   965  iter.bi_size = pick.crc.compressed_size << 9;
   974  bch2_can_narrow_extent_crcs(k, pick.crc);
   979  EBUG_ON(offset_into_extent + bvec_iter_sectors(iter) > k.k->size);
   992  promote = promote_alloc(trans, iter, k, &pick, orig->opts, flags,
  1021  pick.crc.compressed_size << 9);
  1023  pick.crc.compressed_size << 9;
  1034  bch2_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9);
  1074  rbio->devs_have = bch2_bkey_devs(k);
  1080  rbio->version = k.k->bversion;
  1124  bch2_bkey_val_to_text(&buf, c, k);
  1154  if (bch2_ec_read_extent(trans, rbio, k)) {
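Lines 896-901 are the inline-data fast path: a small extent stores its payload directly in the key's value, so the read completes with a memcpy into the bio and never touches a device. A sketch of that branch, assuming the usual goto-out flow and that anything requested beyond the inline payload reads back as zeroes:

        if (bkey_extent_is_inline_data(k.k)) {
                unsigned bytes = min_t(unsigned, iter.bi_size,
                                       bkey_inline_data_bytes(k.k));

                swap(iter.bi_size, bytes);
                memcpy_to_bio(&orig->bio, iter, bkey_inline_data_p(k));
                swap(iter.bi_size, bytes);
                bio_advance_iter(&orig->bio, &iter, bytes);
                zero_fill_bio_iter(&orig->bio, iter);   /* remainder reads as zeroes */
                goto out_read_done;
        }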
__bch2_read():
  1217  struct bkey_s_c k;
  1242  k = bch2_btree_iter_peek_slot(&iter);
  1243  ret = bkey_err(k);
  1248  bkey_start_offset(k.k);
  1249  unsigned sectors = k.k->size - offset_into_extent;
  1251  bch2_bkey_buf_reassemble(&sk, c, k);
  1258  k = bkey_i_to_s_c(sk.k);
  1264  sectors = min_t(unsigned, sectors, k.k->size - offset_into_extent);
  1266  unsigned bytes = min(sectors, bvec_iter_sectors(bvec_iter)) << 9;
  1273  data_btree, k,
  1295  bch2_inum_offset_err_msg_trans(trans, &buf, inum, bvec_iter.bi_sector << 9);
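The loop math at 1248-1266: a read may begin partway into the extent it landed on, and each pass can service at most what the extent still holds and what the caller's bvec iterator still wants. After an indirect (reflink) key is resolved the offset into the extent can change, hence the re-clamp at 1264. In outline:

        /* how far into this extent does the read start? */
        u64 offset_into_extent = iter.pos.offset - bkey_start_offset(k.k);

        /* sectors this extent can still supply */
        unsigned sectors = k.k->size - offset_into_extent;

        /* capped by what the caller still wants, converted to bytes */
        unsigned bytes = min(sectors, bvec_iter_sectors(bvec_iter)) << 9;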