Lines Matching +full:- +full:b
1 // SPDX-License-Identifier: GPL-2.0
21 #include "super-io.h"
29 prt_printf(out, " seq %llx %llu\n", bn->keys.seq, BTREE_NODE_SEQ(bn)); in bch2_btree_node_header_to_text()
31 bch2_bpos_to_text(out, bn->min_key); in bch2_btree_node_header_to_text()
34 bch2_bpos_to_text(out, bn->max_key); in bch2_btree_node_header_to_text()
37 void bch2_btree_node_io_unlock(struct btree *b) in bch2_btree_node_io_unlock() argument
39 EBUG_ON(!btree_node_write_in_flight(b)); in bch2_btree_node_io_unlock()
41 clear_btree_node_write_in_flight_inner(b); in bch2_btree_node_io_unlock()
42 clear_btree_node_write_in_flight(b); in bch2_btree_node_io_unlock()
43 wake_up_bit(&b->flags, BTREE_NODE_write_in_flight); in bch2_btree_node_io_unlock()
46 void bch2_btree_node_io_lock(struct btree *b) in bch2_btree_node_io_lock() argument
48 wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight, in bch2_btree_node_io_lock()
52 void __bch2_btree_node_wait_on_read(struct btree *b) in __bch2_btree_node_wait_on_read() argument
54 wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight, in __bch2_btree_node_wait_on_read()
58 void __bch2_btree_node_wait_on_write(struct btree *b) in __bch2_btree_node_wait_on_write() argument
60 wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight, in __bch2_btree_node_wait_on_write()
64 void bch2_btree_node_wait_on_read(struct btree *b) in bch2_btree_node_wait_on_read() argument
66 wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight, in bch2_btree_node_wait_on_read()
70 void bch2_btree_node_wait_on_write(struct btree *b) in bch2_btree_node_wait_on_write() argument
72 wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight, in bch2_btree_node_wait_on_write()
76 static void verify_no_dups(struct btree *b, in verify_no_dups() argument
89 struct bkey l = bkey_unpack_key(b, p); in verify_no_dups()
90 struct bkey r = bkey_unpack_key(b, k); in verify_no_dups()
101 for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k)) in set_needs_whiteout()
102 k->needs_whiteout = v; in set_needs_whiteout()
109 mempool_free(p, &c->btree_bounce_pool); in btree_bounce_free()
120 BUG_ON(size > c->opts.btree_node_size); in btree_bounce_alloc()
126 p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS); in btree_bounce_alloc()
135 unsigned n = nr, a = nr / 2, b, c, d; in sort_bkey_ptrs() local
143 a--; in sort_bkey_ptrs()
144 else if (--n) in sort_bkey_ptrs()
149 for (b = a; c = 2 * b + 1, (d = c + 1) < n;) in sort_bkey_ptrs()
150 b = bch2_bkey_cmp_packed(bt, in sort_bkey_ptrs()
154 b = c; in sort_bkey_ptrs()
156 while (b != a && in sort_bkey_ptrs()
159 ptrs[b]) >= 0) in sort_bkey_ptrs()
160 b = (b - 1) / 2; in sort_bkey_ptrs()
161 c = b; in sort_bkey_ptrs()
162 while (b != a) { in sort_bkey_ptrs()
163 b = (b - 1) / 2; in sort_bkey_ptrs()
164 swap(ptrs[b], ptrs[c]); in sort_bkey_ptrs()
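The sort_bkey_ptrs() fragment above is a bottom-up heapsort over an array of key pointers. A minimal standalone sketch of the same heap index arithmetic (children of node i at 2*i+1 and 2*i+2, parent at (i-1)/2), shown on a plain int array rather than bkey pointers; names here are made up for illustration:

/* illustrative only - not part of btree_io.c */
#include <stddef.h>

static void swap_int(int *a, int *b) { int t = *a; *a = *b; *b = t; }

static void sift_down(int *v, size_t n, size_t i)
{
	for (;;) {
		size_t l = 2 * i + 1, r = l + 1, largest = i;

		if (l < n && v[l] > v[largest])
			largest = l;
		if (r < n && v[r] > v[largest])
			largest = r;
		if (largest == i)
			return;
		swap_int(&v[i], &v[largest]);
		i = largest;
	}
}

static void heap_sort(int *v, size_t n)
{
	/* heapify: start from the last parent and work back to the root */
	for (size_t i = n / 2; i-- > 0;)
		sift_down(v, n, i);

	/* pop the max to the end, shrink the heap, repeat */
	for (size_t end = n; end > 1; end--) {
		swap_int(&v[0], &v[end - 1]);
		sift_down(v, end - 1, 0);
	}
}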
169 static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b) in bch2_sort_whiteouts() argument
173 size_t bytes = b->whiteout_u64s * sizeof(u64); in bch2_sort_whiteouts()
175 if (!b->whiteout_u64s) in bch2_sort_whiteouts()
182 for (k = unwritten_whiteouts_start(b); in bch2_sort_whiteouts()
183 k != unwritten_whiteouts_end(b); in bch2_sort_whiteouts()
185 *--ptrs = k; in bch2_sort_whiteouts()
187 sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs); in bch2_sort_whiteouts()
197 verify_no_dups(b, new_whiteouts, in bch2_sort_whiteouts()
198 (void *) ((u64 *) new_whiteouts + b->whiteout_u64s)); in bch2_sort_whiteouts()
200 memcpy_u64s(unwritten_whiteouts_start(b), in bch2_sort_whiteouts()
201 new_whiteouts, b->whiteout_u64s); in bch2_sort_whiteouts()
206 static bool should_compact_bset(struct btree *b, struct bset_tree *t, in should_compact_bset() argument
209 if (!bset_dead_u64s(b, t)) in should_compact_bset()
214 return should_compact_bset_lazy(b, t) || in should_compact_bset()
215 (compacting && !bset_written(b, bset(b, t))); in should_compact_bset()
223 static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode) in bch2_drop_whiteouts() argument
227 for_each_bset(b, t) { in bch2_drop_whiteouts()
228 struct bset *i = bset(b, t); in bch2_drop_whiteouts()
232 if (t != b->set && !bset_written(b, i)) { in bch2_drop_whiteouts()
234 dst = max(write_block(b), in bch2_drop_whiteouts()
235 (void *) btree_bkey_last(b, t - 1)); in bch2_drop_whiteouts()
241 if (!should_compact_bset(b, t, ret, mode)) { in bch2_drop_whiteouts()
244 le16_to_cpu(src->keys.u64s) * in bch2_drop_whiteouts()
246 i = &dst->keys; in bch2_drop_whiteouts()
247 set_btree_bset(b, t, i); in bch2_drop_whiteouts()
252 start = btree_bkey_first(b, t); in bch2_drop_whiteouts()
253 end = btree_bkey_last(b, t); in bch2_drop_whiteouts()
257 i = &dst->keys; in bch2_drop_whiteouts()
258 set_btree_bset(b, t, i); in bch2_drop_whiteouts()
261 out = i->start; in bch2_drop_whiteouts()
270 BUG_ON(k->needs_whiteout); in bch2_drop_whiteouts()
274 i->u64s = cpu_to_le16((u64 *) out - i->_data); in bch2_drop_whiteouts()
275 set_btree_bset_end(b, t); in bch2_drop_whiteouts()
276 bch2_bset_set_no_aux_tree(b, t); in bch2_drop_whiteouts()
280 bch2_verify_btree_nr_keys(b); in bch2_drop_whiteouts()
282 bch2_btree_build_aux_trees(b); in bch2_drop_whiteouts()
287 bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b, in bch2_compact_whiteouts() argument
290 return bch2_drop_whiteouts(b, mode); in bch2_compact_whiteouts()
293 static void btree_node_sort(struct bch_fs *c, struct btree *b, in btree_node_sort() argument
300 struct bset *start_bset = bset(b, &b->set[start_idx]); in btree_node_sort()
303 unsigned i, u64s = 0, bytes, shift = end_idx - start_idx - 1; in btree_node_sort()
305 end_idx == b->nsets; in btree_node_sort()
307 sort_iter_stack_init(&sort_iter, b); in btree_node_sort()
309 for (t = b->set + start_idx; in btree_node_sort()
310 t < b->set + end_idx; in btree_node_sort()
312 u64s += le16_to_cpu(bset(b, t)->u64s); in btree_node_sort()
314 btree_bkey_first(b, t), in btree_node_sort()
315 btree_bkey_last(b, t)); in btree_node_sort()
319 ? btree_buf_bytes(b) in btree_node_sort()
326 u64s = bch2_sort_keys(out->keys.start, &sort_iter.iter); in btree_node_sort()
328 out->keys.u64s = cpu_to_le16(u64s); in btree_node_sort()
330 BUG_ON(vstruct_end(&out->keys) > (void *) out + bytes); in btree_node_sort()
333 bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort], in btree_node_sort()
337 for (t = b->set + start_idx; t < b->set + end_idx; t++) in btree_node_sort()
338 seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq)); in btree_node_sort()
339 start_bset->journal_seq = cpu_to_le64(seq); in btree_node_sort()
342 u64s = le16_to_cpu(out->keys.u64s); in btree_node_sort()
344 BUG_ON(bytes != btree_buf_bytes(b)); in btree_node_sort()
351 *out = *b->data; in btree_node_sort()
352 out->keys.u64s = cpu_to_le16(u64s); in btree_node_sort()
353 swap(out, b->data); in btree_node_sort()
354 set_btree_bset(b, b->set, &b->data->keys); in btree_node_sort()
356 start_bset->u64s = out->keys.u64s; in btree_node_sort()
357 memcpy_u64s(start_bset->start, in btree_node_sort()
358 out->keys.start, in btree_node_sort()
359 le16_to_cpu(out->keys.u64s)); in btree_node_sort()
363 b->nr.bset_u64s[start_idx] += in btree_node_sort()
364 b->nr.bset_u64s[i]; in btree_node_sort()
366 b->nsets -= shift; in btree_node_sort()
368 for (i = start_idx + 1; i < b->nsets; i++) { in btree_node_sort()
369 b->nr.bset_u64s[i] = b->nr.bset_u64s[i + shift]; in btree_node_sort()
370 b->set[i] = b->set[i + shift]; in btree_node_sort()
373 for (i = b->nsets; i < MAX_BSETS; i++) in btree_node_sort()
374 b->nr.bset_u64s[i] = 0; in btree_node_sort()
376 set_btree_bset_end(b, &b->set[start_idx]); in btree_node_sort()
377 bch2_bset_set_no_aux_tree(b, &b->set[start_idx]); in btree_node_sort()
381 bch2_verify_btree_nr_keys(b); in btree_node_sort()
392 BUG_ON(dst->nsets != 1); in bch2_btree_sort_into()
394 bch2_bset_set_no_aux_tree(dst, dst->set); in bch2_btree_sort_into()
400 &dst->format, in bch2_btree_sort_into()
403 bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort], in bch2_btree_sort_into()
406 set_btree_bset_end(dst, dst->set); in bch2_btree_sort_into()
408 dst->nr.live_u64s += nr.live_u64s; in bch2_btree_sort_into()
409 dst->nr.bset_u64s[0] += nr.bset_u64s[0]; in bch2_btree_sort_into()
410 dst->nr.packed_keys += nr.packed_keys; in bch2_btree_sort_into()
411 dst->nr.unpacked_keys += nr.unpacked_keys; in bch2_btree_sort_into()
418 * too many bsets - sort some of them together:
420 static bool btree_node_compact(struct bch_fs *c, struct btree *b) in btree_node_compact() argument
426 unwritten_idx < b->nsets; in btree_node_compact()
428 if (!bset_written(b, bset(b, &b->set[unwritten_idx]))) in btree_node_compact()
431 if (b->nsets - unwritten_idx > 1) { in btree_node_compact()
432 btree_node_sort(c, b, unwritten_idx, b->nsets); in btree_node_compact()
437 btree_node_sort(c, b, 0, unwritten_idx); in btree_node_compact()
444 void bch2_btree_build_aux_trees(struct btree *b) in bch2_btree_build_aux_trees() argument
446 for_each_bset(b, t) in bch2_btree_build_aux_trees()
447 bch2_bset_build_aux_tree(b, t, in bch2_btree_build_aux_trees()
448 !bset_written(b, bset(b, t)) && in bch2_btree_build_aux_trees()
449 t == bset_tree_last(b)); in bch2_btree_build_aux_trees()
462 static inline bool should_compact_all(struct bch_fs *c, struct btree *b) in should_compact_all() argument
467 return bset_u64s(&b->set[1]) > 1U << mid_u64s_bits; in should_compact_all()
471 * bch2_btree_init_next() - initialize a new (unwritten) bset that can then be
474 * Safe to call if there already is an unwritten bset - will only add a new bset
475 * if @b doesn't already have one.
479 void bch2_btree_init_next(struct btree_trans *trans, struct btree *b) in bch2_btree_init_next() argument
481 struct bch_fs *c = trans->c; in bch2_btree_init_next()
485 EBUG_ON(!six_lock_counts(&b->c.lock).n[SIX_LOCK_write]); in bch2_btree_init_next()
486 BUG_ON(bset_written(b, bset(b, &b->set[1]))); in bch2_btree_init_next()
487 BUG_ON(btree_node_just_written(b)); in bch2_btree_init_next()
489 if (b->nsets == MAX_BSETS && in bch2_btree_init_next()
490 !btree_node_write_in_flight(b) && in bch2_btree_init_next()
491 should_compact_all(c, b)) { in bch2_btree_init_next()
492 bch2_btree_node_write_trans(trans, b, SIX_LOCK_write, in bch2_btree_init_next()
497 if (b->nsets == MAX_BSETS && in bch2_btree_init_next()
498 btree_node_compact(c, b)) in bch2_btree_init_next()
501 BUG_ON(b->nsets >= MAX_BSETS); in bch2_btree_init_next()
503 bne = want_new_bset(c, b); in bch2_btree_init_next()
505 bch2_bset_init_next(b, bne); in bch2_btree_init_next()
507 bch2_btree_build_aux_trees(b); in bch2_btree_init_next()
510 bch2_trans_node_reinit_iter(trans, b); in bch2_btree_init_next()
515 struct btree *b, struct bset *i, struct bkey_packed *k, in btree_err_msg() argument
523 prt_printf(out, "on %s ", ca->name); in btree_err_msg()
525 bch2_btree_pos_to_text(out, c, b); in btree_err_msg()
530 b->written, btree_ptr_sectors_written(bkey_i_to_s_c(&b->key))); in btree_err_msg()
532 prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s)); in btree_err_msg()
535 (unsigned long)(void *)k - in btree_err_msg()
544 struct btree *b, in __btree_err() argument
553 bool silent = c->curr_recovery_pass == BCH_RECOVERY_PASS_scan_for_btree_nodes; in __btree_err()
556 btree_err_msg(&out, c, ca, b, i, k, b->written, write); in __btree_err()
564 ret = c->opts.errors == BCH_ON_ERROR_continue in __btree_err()
566 : -BCH_ERR_fsck_errors_not_fixed; in __btree_err()
570 if (!have_retry && ret == -BCH_ERR_btree_node_read_err_want_retry) in __btree_err()
571 ret = -BCH_ERR_btree_node_read_err_fixable; in __btree_err()
572 if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry) in __btree_err()
573 ret = -BCH_ERR_btree_node_read_err_bad_node; in __btree_err()
575 if (!silent && ret != -BCH_ERR_btree_node_read_err_fixable) in __btree_err()
579 case -BCH_ERR_btree_node_read_err_fixable: in __btree_err()
582 : -BCH_ERR_fsck_fix; in __btree_err()
583 if (ret != -BCH_ERR_fsck_fix && in __btree_err()
584 ret != -BCH_ERR_fsck_ignore) in __btree_err()
586 ret = -BCH_ERR_fsck_fix; in __btree_err()
588 case -BCH_ERR_btree_node_read_err_want_retry: in __btree_err()
589 case -BCH_ERR_btree_node_read_err_must_retry: in __btree_err()
593 case -BCH_ERR_btree_node_read_err_bad_node: in __btree_err()
598 case -BCH_ERR_btree_node_read_err_incompatible: in __btree_err()
601 ret = -BCH_ERR_fsck_errors_not_fixed; in __btree_err()
612 #define btree_err(type, c, ca, b, i, k, _err_type, msg, ...) \ argument
614 int _ret = __btree_err(type, c, ca, b, i, k, write, have_retry, \
618 if (_ret != -BCH_ERR_fsck_fix) { \
633 void bch2_btree_node_drop_keys_outside_node(struct btree *b) in bch2_btree_node_drop_keys_outside_node() argument
635 for_each_bset(b, t) { in bch2_btree_node_drop_keys_outside_node()
636 struct bset *i = bset(b, t); in bch2_btree_node_drop_keys_outside_node()
639 for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k)) in bch2_btree_node_drop_keys_outside_node()
640 if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0) in bch2_btree_node_drop_keys_outside_node()
643 if (k != i->start) { in bch2_btree_node_drop_keys_outside_node()
644 unsigned shift = (u64 *) k - (u64 *) i->start; in bch2_btree_node_drop_keys_outside_node()
646 memmove_u64s_down(i->start, k, in bch2_btree_node_drop_keys_outside_node()
647 (u64 *) vstruct_end(i) - (u64 *) k); in bch2_btree_node_drop_keys_outside_node()
648 i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - shift); in bch2_btree_node_drop_keys_outside_node()
649 set_btree_bset_end(b, t); in bch2_btree_node_drop_keys_outside_node()
652 for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k)) in bch2_btree_node_drop_keys_outside_node()
653 if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0) in bch2_btree_node_drop_keys_outside_node()
657 i->u64s = cpu_to_le16((u64 *) k - (u64 *) i->start); in bch2_btree_node_drop_keys_outside_node()
658 set_btree_bset_end(b, t); in bch2_btree_node_drop_keys_outside_node()
666 bch2_bset_set_no_aux_tree(b, b->set); in bch2_btree_node_drop_keys_outside_node()
667 bch2_btree_build_aux_trees(b); in bch2_btree_node_drop_keys_outside_node()
668 b->nr = bch2_btree_node_count_keys(b); in bch2_btree_node_drop_keys_outside_node()
673 for_each_btree_node_key_unpack(b, k, &iter, &unpacked) { in bch2_btree_node_drop_keys_outside_node()
674 BUG_ON(bpos_lt(k.k->p, b->data->min_key)); in bch2_btree_node_drop_keys_outside_node()
675 BUG_ON(bpos_gt(k.k->p, b->data->max_key)); in bch2_btree_node_drop_keys_outside_node()
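bch2_btree_node_drop_keys_outside_node() above trims the bset in place: keys before min_key are dropped by memmoving the remainder down and shrinking u64s, and keys past max_key by truncating the end. A hypothetical sketch of the same trim-to-range pattern on a sorted int array (names and types are assumptions, not the bcachefs key layout):

/* illustrative only */
#include <stddef.h>
#include <string.h>

static size_t trim_to_range(int *v, size_t n, int lo, int hi)
{
	size_t first = 0, last = n;

	/* drop leading entries below the range, like keys before min_key */
	while (first < n && v[first] < lo)
		first++;

	/* drop trailing entries above the range, like keys past max_key */
	while (last > first && v[last - 1] > hi)
		last--;

	if (first)
		memmove(v, v + first, (last - first) * sizeof(*v));

	return last - first;
}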
680 struct btree *b, struct bset *i, in validate_bset() argument
684 unsigned version = le16_to_cpu(i->version); in validate_bset()
685 unsigned ptr_written = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key)); in validate_bset()
691 -BCH_ERR_btree_node_read_err_incompatible, in validate_bset()
692 c, ca, b, i, NULL, in validate_bset()
698 if (btree_err_on(version < c->sb.version_min, in validate_bset()
699 -BCH_ERR_btree_node_read_err_fixable, in validate_bset()
700 c, NULL, b, i, NULL, in validate_bset()
703 version, c->sb.version_min)) { in validate_bset()
704 mutex_lock(&c->sb_lock); in validate_bset()
705 c->disk_sb.sb->version_min = cpu_to_le16(version); in validate_bset()
707 mutex_unlock(&c->sb_lock); in validate_bset()
711 BCH_VERSION_MAJOR(c->sb.version), in validate_bset()
712 -BCH_ERR_btree_node_read_err_fixable, in validate_bset()
713 c, NULL, b, i, NULL, in validate_bset()
716 version, c->sb.version)) { in validate_bset()
717 mutex_lock(&c->sb_lock); in validate_bset()
718 c->disk_sb.sb->version = cpu_to_le16(version); in validate_bset()
720 mutex_unlock(&c->sb_lock); in validate_bset()
724 -BCH_ERR_btree_node_read_err_incompatible, in validate_bset()
725 c, ca, b, i, NULL, in validate_bset()
731 -BCH_ERR_btree_node_read_err_fixable, in validate_bset()
732 c, ca, b, i, NULL, in validate_bset()
736 i->u64s = 0; in validate_bset()
738 btree_err_on(offset && !i->u64s, in validate_bset()
739 -BCH_ERR_btree_node_read_err_fixable, in validate_bset()
740 c, ca, b, i, NULL, in validate_bset()
745 -BCH_ERR_btree_node_read_err_want_retry, in validate_bset()
746 c, ca, b, i, NULL, in validate_bset()
755 if (b->key.k.type == KEY_TYPE_btree_ptr_v2) { in validate_bset()
757 &bkey_i_to_btree_ptr_v2(&b->key)->v; in validate_bset()
760 btree_err_on(bp->seq != bn->keys.seq, in validate_bset()
761 -BCH_ERR_btree_node_read_err_must_retry, in validate_bset()
762 c, ca, b, NULL, NULL, in validate_bset()
767 btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id, in validate_bset()
768 -BCH_ERR_btree_node_read_err_must_retry, in validate_bset()
769 c, ca, b, i, NULL, in validate_bset()
773 btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level, in validate_bset()
774 -BCH_ERR_btree_node_read_err_must_retry, in validate_bset()
775 c, ca, b, i, NULL, in validate_bset()
780 compat_btree_node(b->c.level, b->c.btree_id, version, in validate_bset()
783 if (b->key.k.type == KEY_TYPE_btree_ptr_v2) { in validate_bset()
785 &bkey_i_to_btree_ptr_v2(&b->key)->v; in validate_bset()
788 b->data->min_key = bp->min_key; in validate_bset()
789 b->data->max_key = b->key.k.p; in validate_bset()
792 btree_err_on(!bpos_eq(b->data->min_key, bp->min_key), in validate_bset()
793 -BCH_ERR_btree_node_read_err_must_retry, in validate_bset()
794 c, ca, b, NULL, NULL, in validate_bset()
798 bch2_bpos_to_text(&buf1, bn->min_key), buf1.buf), in validate_bset()
800 bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf)); in validate_bset()
803 btree_err_on(!bpos_eq(bn->max_key, b->key.k.p), in validate_bset()
804 -BCH_ERR_btree_node_read_err_must_retry, in validate_bset()
805 c, ca, b, i, NULL, in validate_bset()
809 bch2_bpos_to_text(&buf1, bn->max_key), buf1.buf)); in validate_bset()
812 compat_btree_node(b->c.level, b->c.btree_id, version, in validate_bset()
815 btree_err_on(bch2_bkey_format_invalid(c, &bn->format, write, &buf1), in validate_bset()
816 -BCH_ERR_btree_node_read_err_bad_node, in validate_bset()
817 c, ca, b, i, NULL, in validate_bset()
821 bch2_bkey_format_to_text(&buf2, &bn->format), buf2.buf)); in validate_bset()
824 compat_bformat(b->c.level, b->c.btree_id, version, in validate_bset()
826 &bn->format); in validate_bset()
834 static int btree_node_bkey_val_validate(struct bch_fs *c, struct btree *b, in btree_node_bkey_val_validate() argument
840 .level = b->c.level, in btree_node_bkey_val_validate()
841 .btree = b->c.btree_id, in btree_node_bkey_val_validate()
846 static int bset_key_validate(struct bch_fs *c, struct btree *b, in bset_key_validate() argument
853 .level = b->c.level, in bset_key_validate()
854 .btree = b->c.btree_id, in bset_key_validate()
858 (!updated_range ? bch2_bkey_in_btree_node(c, b, k, from) : 0) ?: in bset_key_validate()
859 (flags & BCH_VALIDATE_write ? btree_node_bkey_val_validate(c, b, k, flags) : 0); in bset_key_validate()
862 static bool bkey_packed_valid(struct bch_fs *c, struct btree *b, in bkey_packed_valid() argument
868 if (k->format > KEY_FORMAT_CURRENT) in bkey_packed_valid()
871 if (!bkeyp_u64s_valid(&b->format, k)) in bkey_packed_valid()
875 struct bkey_s u = __bkey_disassemble(b, k, &tmp); in bkey_packed_valid()
879 .level = b->c.level, in bkey_packed_valid()
880 .btree = b->c.btree_id, in bkey_packed_valid()
885 static inline int btree_node_read_bkey_cmp(const struct btree *b, in btree_node_read_bkey_cmp() argument
889 return bch2_bkey_cmp_packed(b, l, r) in btree_node_read_bkey_cmp()
890 ?: (int) bkey_deleted(r) - (int) bkey_deleted(l); in btree_node_read_bkey_cmp()
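btree_node_read_bkey_cmp() above compares packed keys and, on a tie, uses the deleted flags so that a deleted key orders ahead of a live key at the same position. A toy comparator with the same shape (struct and field names are assumptions, not the bcachefs types):

/* illustrative only */
struct rec {
	int	key;
	int	deleted;	/* nonzero for a whiteout/tombstone */
};

static int rec_cmp(const struct rec *l, const struct rec *r)
{
	if (l->key != r->key)
		return l->key < r->key ? -1 : 1;

	/* tie: a deleted record orders before a live one at the same key */
	return (int) !!r->deleted - (int) !!l->deleted;
}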
893 static int validate_bset_keys(struct bch_fs *c, struct btree *b, in validate_bset_keys() argument
897 unsigned version = le16_to_cpu(i->version); in validate_bset_keys()
900 bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 && in validate_bset_keys()
901 BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v); in validate_bset_keys()
904 for (k = i->start; in validate_bset_keys()
911 -BCH_ERR_btree_node_read_err_fixable, in validate_bset_keys()
912 c, NULL, b, i, k, in validate_bset_keys()
915 i->u64s = cpu_to_le16((u64 *) k - i->_data); in validate_bset_keys()
919 if (btree_err_on(k->format > KEY_FORMAT_CURRENT, in validate_bset_keys()
920 -BCH_ERR_btree_node_read_err_fixable, in validate_bset_keys()
921 c, NULL, b, i, k, in validate_bset_keys()
923 "invalid bkey format %u", k->format)) in validate_bset_keys()
926 if (btree_err_on(!bkeyp_u64s_valid(&b->format, k), in validate_bset_keys()
927 -BCH_ERR_btree_node_read_err_fixable, in validate_bset_keys()
928 c, NULL, b, i, k, in validate_bset_keys()
930 "bad k->u64s %u (min %u max %zu)", k->u64s, in validate_bset_keys()
931 bkeyp_key_u64s(&b->format, k), in validate_bset_keys()
932 U8_MAX - BKEY_U64s + bkeyp_key_u64s(&b->format, k))) in validate_bset_keys()
936 bch2_bkey_compat(b->c.level, b->c.btree_id, version, in validate_bset_keys()
938 &b->format, k); in validate_bset_keys()
940 u = __bkey_disassemble(b, k, &tmp); in validate_bset_keys()
942 ret = bset_key_validate(c, b, u.s_c, updated_range, write); in validate_bset_keys()
943 if (ret == -BCH_ERR_fsck_delete_bkey) in validate_bset_keys()
949 bch2_bkey_compat(b->c.level, b->c.btree_id, version, in validate_bset_keys()
951 &b->format, k); in validate_bset_keys()
953 if (prev && btree_node_read_bkey_cmp(b, prev, k) >= 0) { in validate_bset_keys()
954 struct bkey up = bkey_unpack_key(b, prev); in validate_bset_keys()
962 if (btree_err(-BCH_ERR_btree_node_read_err_fixable, in validate_bset_keys()
963 c, NULL, b, i, k, in validate_bset_keys()
973 next_good_key = k->u64s; in validate_bset_keys()
983 if (!bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key))) { in validate_bset_keys()
985 next_good_key < (u64 *) vstruct_last(i) - (u64 *) k; in validate_bset_keys()
987 if (bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key))) in validate_bset_keys()
995 next_good_key = (u64 *) vstruct_last(i) - (u64 *) k; in validate_bset_keys()
998 le16_add_cpu(&i->u64s, -next_good_key); in validate_bset_keys()
999 memmove_u64s_down(k, (u64 *) k + next_good_key, (u64 *) vstruct_end(i) - (u64 *) k); in validate_bset_keys()
1000 set_btree_node_need_rewrite(b); in validate_bset_keys()
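When a key fails validation, the code above advances past it (next_good_key), shrinks the bset's u64s count, and memmoves the remaining keys down over the bad one. The same buffer surgery in a toy form, on a packed buffer of u64 words with made-up names:

/* illustrative only - removing one bad variable-length record from a packed buffer */
#include <stddef.h>
#include <string.h>

/*
 * buf holds 'total' u64 words of packed records; the bad record starts at
 * word offset 'bad' and is 'bad_len' words long.  Returns the new total.
 */
static size_t drop_record(unsigned long long *buf, size_t total,
			  size_t bad, size_t bad_len)
{
	memmove(buf + bad, buf + bad + bad_len,
		(total - bad - bad_len) * sizeof(*buf));
	return total - bad_len;
}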
1008 struct btree *b, bool have_retry, bool *saw_error) in bch2_btree_node_read_done() argument
1016 bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 && in bch2_btree_node_read_done()
1017 BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v); in bch2_btree_node_read_done()
1019 unsigned ptr_written = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key)); in bch2_btree_node_read_done()
1025 b->version_ondisk = U16_MAX; in bch2_btree_node_read_done()
1027 b->written = 0; in bch2_btree_node_read_done()
1029 iter = mempool_alloc(&c->fill_iter, GFP_NOFS); in bch2_btree_node_read_done()
1030 sort_iter_init(iter, b, (btree_blocks(c) + 1) * 2); in bch2_btree_node_read_done()
1033 btree_err(-BCH_ERR_btree_node_read_err_must_retry, in bch2_btree_node_read_done()
1034 c, ca, b, NULL, NULL, in bch2_btree_node_read_done()
1038 btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c), in bch2_btree_node_read_done()
1039 -BCH_ERR_btree_node_read_err_must_retry, in bch2_btree_node_read_done()
1040 c, ca, b, NULL, NULL, in bch2_btree_node_read_done()
1043 bset_magic(c), le64_to_cpu(b->data->magic)); in bch2_btree_node_read_done()
1045 if (b->key.k.type == KEY_TYPE_btree_ptr_v2) { in bch2_btree_node_read_done()
1047 &bkey_i_to_btree_ptr_v2(&b->key)->v; in bch2_btree_node_read_done()
1049 bch2_bpos_to_text(&buf, b->data->min_key); in bch2_btree_node_read_done()
1050 prt_str(&buf, "-"); in bch2_btree_node_read_done()
1051 bch2_bpos_to_text(&buf, b->data->max_key); in bch2_btree_node_read_done()
1053 btree_err_on(b->data->keys.seq != bp->seq, in bch2_btree_node_read_done()
1054 -BCH_ERR_btree_node_read_err_must_retry, in bch2_btree_node_read_done()
1055 c, ca, b, NULL, NULL, in bch2_btree_node_read_done()
1059 bch2_btree_node_header_to_text(&buf, b->data), in bch2_btree_node_read_done()
1062 btree_err_on(!b->data->keys.seq, in bch2_btree_node_read_done()
1063 -BCH_ERR_btree_node_read_err_must_retry, in bch2_btree_node_read_done()
1064 c, ca, b, NULL, NULL, in bch2_btree_node_read_done()
1068 bch2_btree_node_header_to_text(&buf, b->data), in bch2_btree_node_read_done()
1072 while (b->written < (ptr_written ?: btree_sectors(c))) { in bch2_btree_node_read_done()
1074 bool first = !b->written; in bch2_btree_node_read_done()
1078 i = &b->data->keys; in bch2_btree_node_read_done()
1080 bne = write_block(b); in bch2_btree_node_read_done()
1081 i = &bne->keys; in bch2_btree_node_read_done()
1083 if (i->seq != b->data->keys.seq) in bch2_btree_node_read_done()
1087 struct nonce nonce = btree_nonce(i, b->written << 9); in bch2_btree_node_read_done()
1092 ? -BCH_ERR_btree_node_read_err_must_retry in bch2_btree_node_read_done()
1093 : -BCH_ERR_btree_node_read_err_want_retry, in bch2_btree_node_read_done()
1094 c, ca, b, i, NULL, in bch2_btree_node_read_done()
1100 struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data); in bch2_btree_node_read_done()
1101 bool csum_bad = bch2_crc_cmp(b->data->csum, csum); in bch2_btree_node_read_done()
1106 -BCH_ERR_btree_node_read_err_want_retry, in bch2_btree_node_read_done()
1107 c, ca, b, i, NULL, in bch2_btree_node_read_done()
1111 bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), b->data->csum, csum), in bch2_btree_node_read_done()
1114 ret = bset_encrypt(c, i, b->written << 9); in bch2_btree_node_read_done()
1120 btree_err_on(btree_node_type_is_extents(btree_node_type(b)) && in bch2_btree_node_read_done()
1121 !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data), in bch2_btree_node_read_done()
1122 -BCH_ERR_btree_node_read_err_incompatible, in bch2_btree_node_read_done()
1123 c, NULL, b, NULL, NULL, in bch2_btree_node_read_done()
1127 sectors = vstruct_sectors(b->data, c->block_bits); in bch2_btree_node_read_done()
1131 bool csum_bad = bch2_crc_cmp(bne->csum, csum); in bch2_btree_node_read_done()
1136 -BCH_ERR_btree_node_read_err_want_retry, in bch2_btree_node_read_done()
1137 c, ca, b, i, NULL, in bch2_btree_node_read_done()
1141 bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), bne->csum, csum), in bch2_btree_node_read_done()
1144 ret = bset_encrypt(c, i, b->written << 9); in bch2_btree_node_read_done()
1150 sectors = vstruct_sectors(bne, c->block_bits); in bch2_btree_node_read_done()
1153 b->version_ondisk = min(b->version_ondisk, in bch2_btree_node_read_done()
1154 le16_to_cpu(i->version)); in bch2_btree_node_read_done()
1156 ret = validate_bset(c, ca, b, i, b->written, sectors, in bch2_btree_node_read_done()
1161 if (!b->written) in bch2_btree_node_read_done()
1162 btree_node_set_format(b, b->data->format); in bch2_btree_node_read_done()
1164 ret = validate_bset_keys(c, b, i, READ, have_retry, saw_error); in bch2_btree_node_read_done()
1171 le64_to_cpu(i->journal_seq), in bch2_btree_node_read_done()
1175 -BCH_ERR_btree_node_read_err_fixable, in bch2_btree_node_read_done()
1176 c, ca, b, i, NULL, in bch2_btree_node_read_done()
1179 le64_to_cpu(i->journal_seq)); in bch2_btree_node_read_done()
1182 -BCH_ERR_btree_node_read_err_fixable, in bch2_btree_node_read_done()
1183 c, ca, b, i, NULL, in bch2_btree_node_read_done()
1185 "found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u", in bch2_btree_node_read_done()
1186 le64_to_cpu(i->journal_seq), in bch2_btree_node_read_done()
1187 b->written, b->written + sectors, ptr_written); in bch2_btree_node_read_done()
1189 b->written = min(b->written + sectors, btree_sectors(c)); in bch2_btree_node_read_done()
1198 max_journal_seq = max(max_journal_seq, le64_to_cpu(i->journal_seq)); in bch2_btree_node_read_done()
1202 btree_err_on(b->written < ptr_written, in bch2_btree_node_read_done()
1203 -BCH_ERR_btree_node_read_err_want_retry, in bch2_btree_node_read_done()
1204 c, ca, b, NULL, NULL, in bch2_btree_node_read_done()
1207 ptr_written, b->written); in bch2_btree_node_read_done()
1209 for (bne = write_block(b); in bch2_btree_node_read_done()
1210 bset_byte_offset(b, bne) < btree_buf_bytes(b); in bch2_btree_node_read_done()
1212 btree_err_on(bne->keys.seq == b->data->keys.seq && in bch2_btree_node_read_done()
1214 le64_to_cpu(bne->keys.journal_seq), in bch2_btree_node_read_done()
1216 -BCH_ERR_btree_node_read_err_want_retry, in bch2_btree_node_read_done()
1217 c, ca, b, NULL, NULL, in bch2_btree_node_read_done()
1222 sorted = btree_bounce_alloc(c, btree_buf_bytes(b), &used_mempool); in bch2_btree_node_read_done()
1223 sorted->keys.u64s = 0; in bch2_btree_node_read_done()
1225 set_btree_bset(b, b->set, &b->data->keys); in bch2_btree_node_read_done()
1227 b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter); in bch2_btree_node_read_done()
1228 memset((uint8_t *)(sorted + 1) + b->nr.live_u64s * sizeof(u64), 0, in bch2_btree_node_read_done()
1229 btree_buf_bytes(b) - in bch2_btree_node_read_done()
1230 sizeof(struct btree_node) - in bch2_btree_node_read_done()
1231 b->nr.live_u64s * sizeof(u64)); in bch2_btree_node_read_done()
1233 u64s = le16_to_cpu(sorted->keys.u64s); in bch2_btree_node_read_done()
1234 *sorted = *b->data; in bch2_btree_node_read_done()
1235 sorted->keys.u64s = cpu_to_le16(u64s); in bch2_btree_node_read_done()
1236 swap(sorted, b->data); in bch2_btree_node_read_done()
1237 set_btree_bset(b, b->set, &b->data->keys); in bch2_btree_node_read_done()
1238 b->nsets = 1; in bch2_btree_node_read_done()
1239 b->data->keys.journal_seq = cpu_to_le64(max_journal_seq); in bch2_btree_node_read_done()
1241 BUG_ON(b->nr.live_u64s != u64s); in bch2_btree_node_read_done()
1243 btree_bounce_free(c, btree_buf_bytes(b), used_mempool, sorted); in bch2_btree_node_read_done()
1246 bch2_btree_node_drop_keys_outside_node(b); in bch2_btree_node_read_done()
1248 i = &b->data->keys; in bch2_btree_node_read_done()
1249 for (k = i->start; k != vstruct_last(i);) { in bch2_btree_node_read_done()
1251 struct bkey_s u = __bkey_disassemble(b, k, &tmp); in bch2_btree_node_read_done()
1253 ret = btree_node_bkey_val_validate(c, b, u.s_c, READ); in bch2_btree_node_read_done()
1254 if (ret == -BCH_ERR_fsck_delete_bkey || in bch2_btree_node_read_done()
1256 !bversion_cmp(u.k->bversion, MAX_VERSION))) { in bch2_btree_node_read_done()
1257 btree_keys_account_key_drop(&b->nr, 0, k); in bch2_btree_node_read_done()
1259 i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s); in bch2_btree_node_read_done()
1261 (u64 *) vstruct_end(i) - (u64 *) k); in bch2_btree_node_read_done()
1262 set_btree_bset_end(b, b->set); in bch2_btree_node_read_done()
1263 set_btree_node_need_rewrite(b); in bch2_btree_node_read_done()
1269 if (u.k->type == KEY_TYPE_btree_ptr_v2) { in bch2_btree_node_read_done()
1272 bp.v->mem_ptr = 0; in bch2_btree_node_read_done()
1278 bch2_bset_build_aux_tree(b, b->set, false); in bch2_btree_node_read_done()
1280 set_needs_whiteout(btree_bset_first(b), true); in bch2_btree_node_read_done()
1282 btree_node_reset_sib_u64s(b); in bch2_btree_node_read_done()
1285 bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) { in bch2_btree_node_read_done()
1286 struct bch_dev *ca2 = bch2_dev_rcu(c, ptr->dev); in bch2_btree_node_read_done()
1288 if (!ca2 || ca2->mi.state != BCH_MEMBER_STATE_rw) in bch2_btree_node_read_done()
1289 set_btree_node_need_rewrite(b); in bch2_btree_node_read_done()
1294 set_btree_node_need_rewrite(b); in bch2_btree_node_read_done()
1296 mempool_free(iter, &c->fill_iter); in bch2_btree_node_read_done()
1298 bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read_done], start_time); in bch2_btree_node_read_done()
1301 if (ret == -BCH_ERR_btree_node_read_err_want_retry || in bch2_btree_node_read_done()
1302 ret == -BCH_ERR_btree_node_read_err_must_retry) { in bch2_btree_node_read_done()
1305 set_btree_node_read_error(b); in bch2_btree_node_read_done()
1306 bch2_btree_lost_data(c, b->c.btree_id); in bch2_btree_node_read_done()
1315 struct bch_fs *c = rb->c; in btree_node_read_work()
1316 struct bch_dev *ca = rb->have_ioref ? bch2_dev_have_ref(c, rb->pick.ptr.dev) : NULL; in btree_node_read_work()
1317 struct btree *b = rb->b; in btree_node_read_work() local
1318 struct bio *bio = &rb->bio; in btree_node_read_work()
1329 ca = bch2_dev_get_ioref(c, rb->pick.ptr.dev, READ); in btree_node_read_work()
1330 rb->have_ioref = ca != NULL; in btree_node_read_work()
1332 bio->bi_iter.bi_sector = rb->pick.ptr.offset; in btree_node_read_work()
1333 bio->bi_iter.bi_size = btree_buf_bytes(b); in btree_node_read_work()
1335 if (rb->have_ioref) { in btree_node_read_work()
1336 bio_set_dev(bio, ca->disk_sb.bdev); in btree_node_read_work()
1339 bio->bi_status = BLK_STS_REMOVED; in btree_node_read_work()
1343 bch2_btree_pos_to_text(&buf, c, b); in btree_node_read_work()
1344 bch2_dev_io_err_on(ca && bio->bi_status, ca, BCH_MEMBER_ERROR_read, in btree_node_read_work()
1346 bch2_blk_status_to_str(bio->bi_status), buf.buf); in btree_node_read_work()
1347 if (rb->have_ioref) in btree_node_read_work()
1348 percpu_ref_put(&ca->io_ref); in btree_node_read_work()
1349 rb->have_ioref = false; in btree_node_read_work()
1351 bch2_mark_io_failure(&failed, &rb->pick); in btree_node_read_work()
1354 bkey_i_to_s_c(&b->key), in btree_node_read_work()
1355 &failed, &rb->pick) > 0; in btree_node_read_work()
1357 if (!bio->bi_status && in btree_node_read_work()
1358 !bch2_btree_node_read_done(c, ca, b, can_retry, &saw_error)) { in btree_node_read_work()
1367 set_btree_node_read_error(b); in btree_node_read_work()
1368 bch2_btree_lost_data(c, b->c.btree_id); in btree_node_read_work()
1373 bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read], in btree_node_read_work()
1374 rb->start_time); in btree_node_read_work()
1375 bio_put(&rb->bio); in btree_node_read_work()
1378 btree_node_need_rewrite(b)) && in btree_node_read_work()
1379 !btree_node_read_error(b) && in btree_node_read_work()
1380 c->curr_recovery_pass != BCH_RECOVERY_PASS_scan_for_btree_nodes) { in btree_node_read_work()
1383 bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level); in btree_node_read_work()
1385 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key)); in btree_node_read_work()
1390 bch2_btree_node_rewrite_async(c, b); in btree_node_read_work()
1394 clear_btree_node_read_in_flight(b); in btree_node_read_work()
1395 wake_up_bit(&b->flags, BTREE_NODE_read_in_flight); in btree_node_read_work()
1402 struct bch_fs *c = rb->c; in btree_node_read_endio()
1404 if (rb->have_ioref) { in btree_node_read_endio()
1405 struct bch_dev *ca = bch2_dev_have_ref(c, rb->pick.ptr.dev); in btree_node_read_endio()
1407 bch2_latency_acct(ca, rb->start_time, READ); in btree_node_read_endio()
1410 queue_work(c->btree_read_complete_wq, &rb->work); in btree_node_read_endio()
1416 struct btree *b; member
1429 if (le64_to_cpu(bn->magic) != bset_magic(c)) in btree_node_sectors_written()
1434 offset += vstruct_sectors(bn, c->block_bits); in btree_node_sectors_written()
1437 if (bne->keys.seq != bn->keys.seq) in btree_node_sectors_written()
1439 offset += vstruct_sectors(bne, c->block_bits); in btree_node_sectors_written()
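btree_node_sectors_written() above walks the on-disk node: the header block first, then each following bset entry, accumulating vstruct_sectors() until it hits a block whose sequence number no longer matches. A toy version of that walk over length-prefixed records (the record format here is assumed, not the real on-disk layout):

/* illustrative only */
#include <stddef.h>
#include <stdint.h>

struct rec_hdr {
	uint64_t seq;	/* must match the first record to belong to this node */
	uint32_t bytes;	/* total size of the record, header included */
};

static size_t bytes_written(const void *buf, size_t buf_bytes)
{
	const struct rec_hdr *first = buf;
	size_t offset = 0;

	while (offset + sizeof(struct rec_hdr) <= buf_bytes) {
		const struct rec_hdr *r =
			(const struct rec_hdr *) ((const char *) buf + offset);

		if (r->seq != first->seq || !r->bytes)
			break;
		offset += r->bytes;
	}

	return offset;
}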
1456 if (bne->keys.seq == bn->keys.seq) in btree_node_has_extra_bsets()
1468 struct bch_fs *c = ra->c; in CLOSURE_CALLBACK()
1469 struct btree *b = ra->b; in CLOSURE_CALLBACK() local
1473 int ret = 0, best = -1, write = READ; in CLOSURE_CALLBACK()
1475 __le64 seq = b->key.k.type == KEY_TYPE_btree_ptr_v2 in CLOSURE_CALLBACK()
1476 ? bkey_i_to_btree_ptr_v2(&b->key)->v.seq : 0; in CLOSURE_CALLBACK()
1479 for (i = 0; i < ra->nr; i++) { in CLOSURE_CALLBACK()
1480 struct btree_node *bn = ra->buf[i]; in CLOSURE_CALLBACK()
1482 if (ra->err[i]) in CLOSURE_CALLBACK()
1485 if (le64_to_cpu(bn->magic) != bset_magic(c) || in CLOSURE_CALLBACK()
1486 (seq && seq != bn->keys.seq)) in CLOSURE_CALLBACK()
1495 written2 = btree_node_sectors_written(c, ra->buf[i]); in CLOSURE_CALLBACK()
1496 if (btree_err_on(written2 != written, -BCH_ERR_btree_node_read_err_fixable, in CLOSURE_CALLBACK()
1497 c, NULL, b, NULL, NULL, in CLOSURE_CALLBACK()
1501 btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]), in CLOSURE_CALLBACK()
1502 -BCH_ERR_btree_node_read_err_fixable, in CLOSURE_CALLBACK()
1503 c, NULL, b, NULL, NULL, in CLOSURE_CALLBACK()
1506 btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9), in CLOSURE_CALLBACK()
1507 -BCH_ERR_btree_node_read_err_fixable, in CLOSURE_CALLBACK()
1508 c, NULL, b, NULL, NULL, in CLOSURE_CALLBACK()
1520 for (i = 0; i < ra->nr; i++) { in CLOSURE_CALLBACK()
1521 struct btree_node *bn = ra->buf[i]; in CLOSURE_CALLBACK()
1526 if (ra->err[i]) in CLOSURE_CALLBACK()
1533 sectors = vstruct_sectors(bn, c->block_bits); in CLOSURE_CALLBACK()
1535 bne = ra->buf[i] + (offset << 9); in CLOSURE_CALLBACK()
1536 if (bne->keys.seq != bn->keys.seq) in CLOSURE_CALLBACK()
1538 sectors = vstruct_sectors(bne, c->block_bits); in CLOSURE_CALLBACK()
1541 prt_printf(&buf, " %u-%u", offset, offset + sectors); in CLOSURE_CALLBACK()
1543 le64_to_cpu(bne->keys.journal_seq), false)) in CLOSURE_CALLBACK()
1549 bne = ra->buf[i] + (offset << 9); in CLOSURE_CALLBACK()
1550 if (bne->keys.seq == bn->keys.seq) { in CLOSURE_CALLBACK()
1555 sectors = vstruct_sectors(bne, c->block_bits); in CLOSURE_CALLBACK()
1556 prt_printf(&buf, " %u-%u", offset, offset + sectors); in CLOSURE_CALLBACK()
1558 le64_to_cpu(bne->keys.journal_seq), false)) in CLOSURE_CALLBACK()
1569 memcpy(b->data, ra->buf[best], btree_buf_bytes(b)); in CLOSURE_CALLBACK()
1570 ret = bch2_btree_node_read_done(c, NULL, b, false, saw_error); in CLOSURE_CALLBACK()
1572 ret = -1; in CLOSURE_CALLBACK()
1576 set_btree_node_read_error(b); in CLOSURE_CALLBACK()
1577 bch2_btree_lost_data(c, b->c.btree_id); in CLOSURE_CALLBACK()
1579 bch2_btree_node_rewrite_async(c, b); in CLOSURE_CALLBACK()
1581 for (i = 0; i < ra->nr; i++) { in CLOSURE_CALLBACK()
1582 mempool_free(ra->buf[i], &c->btree_bounce_pool); in CLOSURE_CALLBACK()
1583 bio_put(ra->bio[i]); in CLOSURE_CALLBACK()
1586 closure_debug_destroy(&ra->cl); in CLOSURE_CALLBACK()
1590 clear_btree_node_read_in_flight(b); in CLOSURE_CALLBACK()
1591 wake_up_bit(&b->flags, BTREE_NODE_read_in_flight); in CLOSURE_CALLBACK()
1598 struct bch_fs *c = rb->c; in btree_node_read_all_replicas_endio()
1599 struct btree_node_read_all *ra = rb->ra; in btree_node_read_all_replicas_endio()
1601 if (rb->have_ioref) { in btree_node_read_all_replicas_endio()
1602 struct bch_dev *ca = bch2_dev_have_ref(c, rb->pick.ptr.dev); in btree_node_read_all_replicas_endio()
1604 bch2_latency_acct(ca, rb->start_time, READ); in btree_node_read_all_replicas_endio()
1607 ra->err[rb->idx] = bio->bi_status; in btree_node_read_all_replicas_endio()
1608 closure_put(&ra->cl); in btree_node_read_all_replicas_endio()
1615 static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool sync) in btree_node_read_all_replicas() argument
1617 struct bkey_s_c k = bkey_i_to_s_c(&b->key); in btree_node_read_all_replicas()
1626 return -BCH_ERR_ENOMEM_btree_node_read_all_replicas; in btree_node_read_all_replicas()
1628 closure_init(&ra->cl, NULL); in btree_node_read_all_replicas()
1629 ra->c = c; in btree_node_read_all_replicas()
1630 ra->b = b; in btree_node_read_all_replicas()
1631 ra->nr = bch2_bkey_nr_ptrs(k); in btree_node_read_all_replicas()
1633 for (i = 0; i < ra->nr; i++) { in btree_node_read_all_replicas()
1634 ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS); in btree_node_read_all_replicas()
1635 ra->bio[i] = bio_alloc_bioset(NULL, in btree_node_read_all_replicas()
1636 buf_pages(ra->buf[i], btree_buf_bytes(b)), in btree_node_read_all_replicas()
1639 &c->btree_bio); in btree_node_read_all_replicas()
1646 container_of(ra->bio[i], struct btree_read_bio, bio); in btree_node_read_all_replicas()
1647 rb->c = c; in btree_node_read_all_replicas()
1648 rb->b = b; in btree_node_read_all_replicas()
1649 rb->ra = ra; in btree_node_read_all_replicas()
1650 rb->start_time = local_clock(); in btree_node_read_all_replicas()
1651 rb->have_ioref = ca != NULL; in btree_node_read_all_replicas()
1652 rb->idx = i; in btree_node_read_all_replicas()
1653 rb->pick = pick; in btree_node_read_all_replicas()
1654 rb->bio.bi_iter.bi_sector = pick.ptr.offset; in btree_node_read_all_replicas()
1655 rb->bio.bi_end_io = btree_node_read_all_replicas_endio; in btree_node_read_all_replicas()
1656 bch2_bio_map(&rb->bio, ra->buf[i], btree_buf_bytes(b)); in btree_node_read_all_replicas()
1658 if (rb->have_ioref) { in btree_node_read_all_replicas()
1659 this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree], in btree_node_read_all_replicas()
1660 bio_sectors(&rb->bio)); in btree_node_read_all_replicas()
1661 bio_set_dev(&rb->bio, ca->disk_sb.bdev); in btree_node_read_all_replicas()
1663 closure_get(&ra->cl); in btree_node_read_all_replicas()
1664 submit_bio(&rb->bio); in btree_node_read_all_replicas()
1666 ra->err[i] = BLK_STS_REMOVED; in btree_node_read_all_replicas()
1673 closure_sync(&ra->cl); in btree_node_read_all_replicas()
1674 btree_node_read_all_replicas_done(&ra->cl.work); in btree_node_read_all_replicas()
1676 continue_at(&ra->cl, btree_node_read_all_replicas_done, in btree_node_read_all_replicas()
1677 c->btree_read_complete_wq); in btree_node_read_all_replicas()
1683 void bch2_btree_node_read(struct btree_trans *trans, struct btree *b, in bch2_btree_node_read() argument
1686 struct bch_fs *c = trans->c; in bch2_btree_node_read()
1693 trace_and_count(c, btree_node_read, trans, b); in bch2_btree_node_read()
1696 !btree_node_read_all_replicas(c, b, sync)) in bch2_btree_node_read()
1699 ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key), in bch2_btree_node_read()
1706 bch2_btree_pos_to_text(&buf, c, b); in bch2_btree_node_read()
1709 if (c->opts.recovery_passes & BIT_ULL(BCH_RECOVERY_PASS_check_topology) && in bch2_btree_node_read()
1710 c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology) in bch2_btree_node_read()
1713 set_btree_node_read_error(b); in bch2_btree_node_read()
1714 bch2_btree_lost_data(c, b->c.btree_id); in bch2_btree_node_read()
1715 clear_btree_node_read_in_flight(b); in bch2_btree_node_read()
1716 wake_up_bit(&b->flags, BTREE_NODE_read_in_flight); in bch2_btree_node_read()
1724 buf_pages(b->data, btree_buf_bytes(b)), in bch2_btree_node_read()
1727 &c->btree_bio); in bch2_btree_node_read()
1729 rb->c = c; in bch2_btree_node_read()
1730 rb->b = b; in bch2_btree_node_read()
1731 rb->ra = NULL; in bch2_btree_node_read()
1732 rb->start_time = local_clock(); in bch2_btree_node_read()
1733 rb->have_ioref = ca != NULL; in bch2_btree_node_read()
1734 rb->pick = pick; in bch2_btree_node_read()
1735 INIT_WORK(&rb->work, btree_node_read_work); in bch2_btree_node_read()
1736 bio->bi_iter.bi_sector = pick.ptr.offset; in bch2_btree_node_read()
1737 bio->bi_end_io = btree_node_read_endio; in bch2_btree_node_read()
1738 bch2_bio_map(bio, b->data, btree_buf_bytes(b)); in bch2_btree_node_read()
1740 if (rb->have_ioref) { in bch2_btree_node_read()
1741 this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree], in bch2_btree_node_read()
1743 bio_set_dev(bio, ca->disk_sb.bdev); in bch2_btree_node_read()
1747 bch2_latency_acct(ca, rb->start_time, READ); in bch2_btree_node_read()
1748 btree_node_read_work(&rb->work); in bch2_btree_node_read()
1753 bio->bi_status = BLK_STS_REMOVED; in bch2_btree_node_read()
1756 btree_node_read_work(&rb->work); in bch2_btree_node_read()
1758 queue_work(c->btree_read_complete_wq, &rb->work); in bch2_btree_node_read()
1765 struct bch_fs *c = trans->c; in __bch2_btree_root_read()
1767 struct btree *b; in __bch2_btree_root_read() local
1777 b = bch2_btree_node_mem_alloc(trans, level != 0); in __bch2_btree_root_read()
1780 BUG_ON(IS_ERR(b)); in __bch2_btree_root_read()
1782 bkey_copy(&b->key, k); in __bch2_btree_root_read()
1783 BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id)); in __bch2_btree_root_read()
1785 set_btree_node_read_in_flight(b); in __bch2_btree_root_read()
1789 bch2_btree_node_read(trans, b, true); in __bch2_btree_root_read()
1791 if (btree_node_read_error(b)) { in __bch2_btree_root_read()
1792 mutex_lock(&c->btree_cache.lock); in __bch2_btree_root_read()
1793 bch2_btree_node_hash_remove(&c->btree_cache, b); in __bch2_btree_root_read()
1794 mutex_unlock(&c->btree_cache.lock); in __bch2_btree_root_read()
1796 ret = -BCH_ERR_btree_node_read_error; in __bch2_btree_root_read()
1800 bch2_btree_set_root_for_read(c, b); in __bch2_btree_root_read()
1802 six_unlock_write(&b->c.lock); in __bch2_btree_root_read()
1803 six_unlock_intent(&b->c.lock); in __bch2_btree_root_read()
1814 static void bch2_btree_complete_write(struct bch_fs *c, struct btree *b, in bch2_btree_complete_write() argument
1819 old = READ_ONCE(b->will_make_reachable); in bch2_btree_complete_write()
1826 } while (!try_cmpxchg(&b->will_make_reachable, &old, new)); in bch2_btree_complete_write()
1829 closure_put(&((struct btree_update *) new)->cl); in bch2_btree_complete_write()
1831 bch2_journal_pin_drop(&c->journal, &w->journal); in bch2_btree_complete_write()
1834 static void __btree_node_write_done(struct bch_fs *c, struct btree *b) in __btree_node_write_done() argument
1836 struct btree_write *w = btree_prev_write(b); in __btree_node_write_done()
1840 bch2_btree_complete_write(c, b, w); in __btree_node_write_done()
1842 old = READ_ONCE(b->flags); in __btree_node_write_done()
1864 } while (!try_cmpxchg(&b->flags, &old, new)); in __btree_node_write_done()
1867 __bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED|type); in __btree_node_write_done()
1869 wake_up_bit(&b->flags, BTREE_NODE_write_in_flight); in __btree_node_write_done()
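__btree_node_write_done() updates b->flags with a read-compute-retry loop around try_cmpxchg(). A minimal sketch of that lockless pattern using the GCC/Clang __atomic builtins instead of the kernel helpers (assumed here purely for illustration):

/* illustrative only */
static void clear_flag_bits(unsigned long *flags, unsigned long mask)
{
	unsigned long old = __atomic_load_n(flags, __ATOMIC_RELAXED);
	unsigned long new;

	do {
		new = old & ~mask;	/* recompute from the freshly observed value */
	} while (!__atomic_compare_exchange_n(flags, &old, new, false,
					      __ATOMIC_RELEASE, __ATOMIC_RELAXED));
}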
1872 static void btree_node_write_done(struct bch_fs *c, struct btree *b) in btree_node_write_done() argument
1876 btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read); in btree_node_write_done()
1880 __btree_node_write_done(c, b); in btree_node_write_done()
1881 six_unlock_read(&b->c.lock); in btree_node_write_done()
1888 struct bch_fs *c = wbio->wbio.c; in btree_node_write_work()
1889 struct btree *b = wbio->wbio.bio.bi_private; in btree_node_write_work() local
1893 wbio->data_bytes, in btree_node_write_work()
1894 wbio->wbio.used_mempool, in btree_node_write_work()
1895 wbio->data); in btree_node_write_work()
1897 bch2_bkey_drop_ptrs(bkey_i_to_s(&wbio->key), ptr, in btree_node_write_work()
1898 bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev)); in btree_node_write_work()
1900 if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&wbio->key))) { in btree_node_write_work()
1901 ret = -BCH_ERR_btree_node_write_all_failed; in btree_node_write_work()
1905 if (wbio->wbio.first_btree_write) { in btree_node_write_work()
1906 if (wbio->wbio.failed.nr) { in btree_node_write_work()
1911 bch2_btree_node_update_key_get_iter(trans, b, &wbio->key, in btree_node_write_work()
1916 !wbio->wbio.failed.nr)); in btree_node_write_work()
1921 bio_put(&wbio->wbio.bio); in btree_node_write_work()
1922 btree_node_write_done(c, b); in btree_node_write_work()
1925 set_btree_node_noevict(b); in btree_node_write_work()
1934 struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL; in btree_node_write_endio()
1937 struct bch_fs *c = wbio->c; in btree_node_write_endio()
1938 struct btree *b = wbio->bio.bi_private; in btree_node_write_endio() local
1939 struct bch_dev *ca = wbio->have_ioref ? bch2_dev_have_ref(c, wbio->dev) : NULL; in btree_node_write_endio()
1942 if (wbio->have_ioref) in btree_node_write_endio()
1943 bch2_latency_acct(ca, wbio->submit_time, WRITE); in btree_node_write_endio()
1946 bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write, in btree_node_write_endio()
1948 bch2_blk_status_to_str(bio->bi_status)) || in btree_node_write_endio()
1950 spin_lock_irqsave(&c->btree_write_error_lock, flags); in btree_node_write_endio()
1951 bch2_dev_list_add_dev(&orig->failed, wbio->dev); in btree_node_write_endio()
1952 spin_unlock_irqrestore(&c->btree_write_error_lock, flags); in btree_node_write_endio()
1955 if (wbio->have_ioref) in btree_node_write_endio()
1956 percpu_ref_put(&ca->io_ref); in btree_node_write_endio()
1960 bio_endio(&parent->bio); in btree_node_write_endio()
1964 clear_btree_node_write_in_flight_inner(b); in btree_node_write_endio()
1965 wake_up_bit(&b->flags, BTREE_NODE_write_in_flight_inner); in btree_node_write_endio()
1966 INIT_WORK(&wb->work, btree_node_write_work); in btree_node_write_endio()
1967 queue_work(c->btree_io_complete_wq, &wb->work); in btree_node_write_endio()
1970 static int validate_bset_for_write(struct bch_fs *c, struct btree *b, in validate_bset_for_write() argument
1975 int ret = bch2_bkey_validate(c, bkey_i_to_s_c(&b->key), in validate_bset_for_write()
1978 .level = b->c.level + 1, in validate_bset_for_write()
1979 .btree = b->c.btree_id, in validate_bset_for_write()
1987 ret = validate_bset_keys(c, b, i, WRITE, false, &saw_error) ?: in validate_bset_for_write()
1988 validate_bset(c, NULL, b, i, b->written, sectors, WRITE, false, &saw_error); in validate_bset_for_write()
2002 bkey_copy(&tmp.k, &wbio->key); in btree_write_submit()
2005 ptr->offset += wbio->sector_offset; in btree_write_submit()
2007 bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree, in btree_write_submit()
2011 void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags) in __bch2_btree_node_write() argument
2032 * We may only have a read lock on the btree node - the dirty bit is our in __bch2_btree_node_write()
2038 old = READ_ONCE(b->flags); in __bch2_btree_node_write()
2054 if (b->written && in __bch2_btree_node_write()
2071 } while (!try_cmpxchg_acquire(&b->flags, &old, new)); in __bch2_btree_node_write()
2076 BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0)); in __bch2_btree_node_write()
2078 atomic_long_dec(&c->btree_cache.nr_dirty); in __bch2_btree_node_write()
2080 BUG_ON(btree_node_fake(b)); in __bch2_btree_node_write()
2081 BUG_ON((b->will_make_reachable != 0) != !b->written); in __bch2_btree_node_write()
2083 BUG_ON(b->written >= btree_sectors(c)); in __bch2_btree_node_write()
2084 BUG_ON(b->written & (block_sectors(c) - 1)); in __bch2_btree_node_write()
2085 BUG_ON(bset_written(b, btree_bset_last(b))); in __bch2_btree_node_write()
2086 BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c)); in __bch2_btree_node_write()
2087 BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format))); in __bch2_btree_node_write()
2089 bch2_sort_whiteouts(c, b); in __bch2_btree_node_write()
2091 sort_iter_stack_init(&sort_iter, b); in __bch2_btree_node_write()
2093 bytes = !b->written in __bch2_btree_node_write()
2097 bytes += b->whiteout_u64s * sizeof(u64); in __bch2_btree_node_write()
2099 for_each_bset(b, t) { in __bch2_btree_node_write()
2100 i = bset(b, t); in __bch2_btree_node_write()
2102 if (bset_written(b, i)) in __bch2_btree_node_write()
2105 bytes += le16_to_cpu(i->u64s) * sizeof(u64); in __bch2_btree_node_write()
2107 btree_bkey_first(b, t), in __bch2_btree_node_write()
2108 btree_bkey_last(b, t)); in __bch2_btree_node_write()
2109 seq = max(seq, le64_to_cpu(i->journal_seq)); in __bch2_btree_node_write()
2112 BUG_ON(b->written && !seq); in __bch2_btree_node_write()
2122 if (!b->written) { in __bch2_btree_node_write()
2124 *bn = *b->data; in __bch2_btree_node_write()
2125 i = &bn->keys; in __bch2_btree_node_write()
2128 bne->keys = b->data->keys; in __bch2_btree_node_write()
2129 i = &bne->keys; in __bch2_btree_node_write()
2132 i->journal_seq = cpu_to_le64(seq); in __bch2_btree_node_write()
2133 i->u64s = 0; in __bch2_btree_node_write()
2136 unwritten_whiteouts_start(b), in __bch2_btree_node_write()
2137 unwritten_whiteouts_end(b)); in __bch2_btree_node_write()
2140 u64s = bch2_sort_keys_keep_unwritten_whiteouts(i->start, &sort_iter.iter); in __bch2_btree_node_write()
2141 le16_add_cpu(&i->u64s, u64s); in __bch2_btree_node_write()
2143 b->whiteout_u64s = 0; in __bch2_btree_node_write()
2145 BUG_ON(!b->written && i->u64s != b->data->keys.u64s); in __bch2_btree_node_write()
2150 if (b->written && !i->u64s) in __bch2_btree_node_write()
2153 bytes_to_write = vstruct_end(i) - data; in __bch2_btree_node_write()
2156 if (!b->written && in __bch2_btree_node_write()
2157 b->key.k.type == KEY_TYPE_btree_ptr_v2) in __bch2_btree_node_write()
2158 BUG_ON(btree_ptr_sectors_written(bkey_i_to_s_c(&b->key)) != sectors_to_write); in __bch2_btree_node_write()
2161 (sectors_to_write << 9) - bytes_to_write); in __bch2_btree_node_write()
2163 BUG_ON(b->written + sectors_to_write > btree_sectors(c)); in __bch2_btree_node_write()
2165 BUG_ON(i->seq != b->data->keys.seq); in __bch2_btree_node_write()
2167 i->version = cpu_to_le16(c->sb.version); in __bch2_btree_node_write()
2168 SET_BSET_OFFSET(i, b->written); in __bch2_btree_node_write()
2175 if (le16_to_cpu(i->version) < bcachefs_metadata_version_current) in __bch2_btree_node_write()
2180 validate_bset_for_write(c, b, i, sectors_to_write)) in __bch2_btree_node_write()
2183 ret = bset_encrypt(c, i, b->written << 9); in __bch2_btree_node_write()
2188 nonce = btree_nonce(i, b->written << 9); in __bch2_btree_node_write()
2191 bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn); in __bch2_btree_node_write()
2193 bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne); in __bch2_btree_node_write()
2197 validate_bset_for_write(c, b, i, sectors_to_write)) in __bch2_btree_node_write()
2201 * We handle btree write errors by immediately halting the journal - in __bch2_btree_node_write()
2211 * never journalled (interior nodes, see btree_update_nodes_written()) - in __bch2_btree_node_write()
2215 * Make sure to update b->written so bch2_btree_init_next() doesn't in __bch2_btree_node_write()
2218 if (bch2_journal_error(&c->journal) || in __bch2_btree_node_write()
2219 c->opts.nochanges) in __bch2_btree_node_write()
2222 trace_and_count(c, btree_node_write, b, bytes_to_write, sectors_to_write); in __bch2_btree_node_write()
2228 &c->btree_bio), in __bch2_btree_node_write()
2230 wbio_init(&wbio->wbio.bio); in __bch2_btree_node_write()
2231 wbio->data = data; in __bch2_btree_node_write()
2232 wbio->data_bytes = bytes; in __bch2_btree_node_write()
2233 wbio->sector_offset = b->written; in __bch2_btree_node_write()
2234 wbio->wbio.c = c; in __bch2_btree_node_write()
2235 wbio->wbio.used_mempool = used_mempool; in __bch2_btree_node_write()
2236 wbio->wbio.first_btree_write = !b->written; in __bch2_btree_node_write()
2237 wbio->wbio.bio.bi_end_io = btree_node_write_endio; in __bch2_btree_node_write()
2238 wbio->wbio.bio.bi_private = b; in __bch2_btree_node_write()
2240 bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9); in __bch2_btree_node_write()
2242 bkey_copy(&wbio->key, &b->key); in __bch2_btree_node_write()
2244 b->written += sectors_to_write; in __bch2_btree_node_write()
2246 if (wbio->key.k.type == KEY_TYPE_btree_ptr_v2) in __bch2_btree_node_write()
2247 bkey_i_to_btree_ptr_v2(&wbio->key)->v.sectors_written = in __bch2_btree_node_write()
2248 cpu_to_le16(b->written); in __bch2_btree_node_write()
2250 atomic64_inc(&c->btree_write_stats[type].nr); in __bch2_btree_node_write()
2251 atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes); in __bch2_btree_node_write()
2253 INIT_WORK(&wbio->work, btree_write_submit); in __bch2_btree_node_write()
2254 queue_work(c->btree_write_submit_wq, &wbio->work); in __bch2_btree_node_write()
2257 set_btree_node_noevict(b); in __bch2_btree_node_write()
2258 b->written += sectors_to_write; in __bch2_btree_node_write()
2261 __btree_node_write_done(c, b); in __bch2_btree_node_write()
2267 bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b) in bch2_btree_post_write_cleanup() argument
2272 if (!btree_node_just_written(b)) in bch2_btree_post_write_cleanup()
2275 BUG_ON(b->whiteout_u64s); in bch2_btree_post_write_cleanup()
2277 clear_btree_node_just_written(b); in bch2_btree_post_write_cleanup()
2280 * Note: immediately after write, bset_written() doesn't work - the in bch2_btree_post_write_cleanup()
2292 if (b->nsets > 1) { in bch2_btree_post_write_cleanup()
2293 btree_node_sort(c, b, 0, b->nsets); in bch2_btree_post_write_cleanup()
2296 invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL); in bch2_btree_post_write_cleanup()
2299 for_each_bset(b, t) in bch2_btree_post_write_cleanup()
2300 set_needs_whiteout(bset(b, t), true); in bch2_btree_post_write_cleanup()
2302 bch2_btree_verify(c, b); in bch2_btree_post_write_cleanup()
2308 BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b)); in bch2_btree_post_write_cleanup()
2310 bne = want_new_bset(c, b); in bch2_btree_post_write_cleanup()
2312 bch2_bset_init_next(b, bne); in bch2_btree_post_write_cleanup()
2314 bch2_btree_build_aux_trees(b); in bch2_btree_post_write_cleanup()
2322 void bch2_btree_node_write(struct bch_fs *c, struct btree *b, in bch2_btree_node_write() argument
2328 six_lock_tryupgrade(&b->c.lock))) { in bch2_btree_node_write()
2329 __bch2_btree_node_write(c, b, flags); in bch2_btree_node_write()
2332 if (btree_node_just_written(b) && in bch2_btree_node_write()
2333 six_trylock_write(&b->c.lock)) { in bch2_btree_node_write()
2334 bch2_btree_post_write_cleanup(c, b); in bch2_btree_node_write()
2335 six_unlock_write(&b->c.lock); in bch2_btree_node_write()
2339 six_lock_downgrade(&b->c.lock); in bch2_btree_node_write()
2341 __bch2_btree_node_write(c, b, flags); in bch2_btree_node_write()
2343 btree_node_just_written(b)) in bch2_btree_node_write()
2344 bch2_btree_post_write_cleanup(c, b); in bch2_btree_node_write()
2348 void bch2_btree_node_write_trans(struct btree_trans *trans, struct btree *b, in bch2_btree_node_write_trans() argument
2352 struct bch_fs *c = trans->c; in bch2_btree_node_write_trans()
2356 six_lock_tryupgrade(&b->c.lock))) { in bch2_btree_node_write_trans()
2357 __bch2_btree_node_write(c, b, flags); in bch2_btree_node_write_trans()
2360 if (btree_node_just_written(b) && in bch2_btree_node_write_trans()
2361 six_trylock_write(&b->c.lock)) { in bch2_btree_node_write_trans()
2362 bch2_btree_post_write_cleanup(c, b); in bch2_btree_node_write_trans()
2363 __bch2_btree_node_unlock_write(trans, b); in bch2_btree_node_write_trans()
2367 six_lock_downgrade(&b->c.lock); in bch2_btree_node_write_trans()
2369 __bch2_btree_node_write(c, b, flags); in bch2_btree_node_write_trans()
2371 btree_node_just_written(b)) in bch2_btree_node_write_trans()
2372 bch2_btree_post_write_cleanup(c, b); in bch2_btree_node_write_trans()
2380 struct btree *b; in __bch2_btree_flush_all() local
2385 for_each_cached_btree(b, c, tbl, i, pos) in __bch2_btree_flush_all()
2386 if (test_bit(flag, &b->flags)) { in __bch2_btree_flush_all()
2388 wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE); in __bch2_btree_flush_all()
2421 u64 nr = atomic64_read(&c->btree_write_stats[i].nr); in bch2_btree_write_stats_to_text()
2422 u64 bytes = atomic64_read(&c->btree_write_stats[i].bytes); in bch2_btree_write_stats_to_text()