1 // SPDX-License-Identifier: GPL-2.0
24 return (cmp_int(l->hi, r->hi) ?: in __wb_key_ref_cmp()
25 cmp_int(l->mi, r->mi) ?: in __wb_key_ref_cmp()
26 cmp_int(l->lo, r->lo)) >= 0; in __wb_key_ref_cmp()
56 return cmp_int(l->journal_seq, r->journal_seq); in wb_key_seq_cmp()
65 return !((l->hi ^ r->hi)| in wb_key_eq()
66 (l->mi ^ r->mi)| in wb_key_eq()
67 ((l->lo >> 24) ^ (r->lo >> 24))); in wb_key_eq()
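The three-word compares above work because a wb_key_ref packs the key's index into the low 24 bits and the btree id and position into the higher bits (assuming the packed layout defined alongside these helpers); comparing hi, then mi, then lo therefore orders refs by (btree, position, index), and wb_key_eq() tests "same btree and position" by ignoring the low 24 index bits. A simplified, self-contained sketch of that idea, using two words instead of three and an illustrative layout rather than the real struct wb_key_ref:

#include <stdbool.h>
#include <stdint.h>

struct ref {
	uint64_t lo;	/* low 24 bits: index into the keys array */
	uint64_t hi;	/* btree id (top byte), then position */
};

static inline struct ref make_ref(uint8_t btree, uint64_t pos, uint32_t idx)
{
	return (struct ref) {
		.hi = ((uint64_t) btree << 56) | (pos >> 8),
		.lo = (pos << 56) | (idx & 0xffffff),
	};
}

/* true if l sorts at or after r: (btree, pos, idx) lexicographically */
static inline bool ref_ge(const struct ref *l, const struct ref *r)
{
	return l->hi != r->hi ? l->hi > r->hi : l->lo >= r->lo;
}

/* same btree and position, ignoring the low index bits */
static inline bool ref_eq(const struct ref *l, const struct ref *r)
{
	return !((l->hi ^ r->hi) | ((l->lo >> 24) ^ (r->lo >> 24)));
}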
80 if (a) /* Building heap: sift down --a */ in wb_sort()
81 --a; in wb_sort()
82 else if (--n) /* Sorting: Extract root to --n */ in wb_sort()
89 * "bottom-up" variant, which significantly reduces in wb_sort()
90 * calls to cmp_func(): we find the sift-down path all in wb_sort()
91 * the way to the leaves (one compare per level), then in wb_sort()
92 * backtrack to find where to insert the target element. in wb_sort()
94 * Because elements tend to sift down close to the leaves, in wb_sort()
97 * average, 3/4 worst-case.) in wb_sort()
104 /* Now backtrack from "b" to the correct location for "a" */ in wb_sort()
106 b = (b - 1) / 2; in wb_sort()
109 b = (b - 1) / 2; in wb_sort()
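wb_sort() is the bottom-up heapsort the comment describes: descend along the larger child with one compare per level, then backtrack to find where the sifted element belongs. A self-contained sketch of the same algorithm on a plain int array (illustrative, not the source's wb_key_ref version):

#include <stddef.h>

static void swap_ints(int *x, int *y)
{
	int tmp = *x;
	*x = *y;
	*y = tmp;
}

static void bottom_up_heapsort(int *base, size_t num)
{
	size_t n = num, a = num / 2;

	if (!a)			/* nothing to do for 0 or 1 elements */
		return;

	for (;;) {
		size_t b, c, d;

		if (a)			/* building heap: sift down --a */
			--a;
		else if (--n)		/* sorting: extract root to --n */
			swap_ints(base, base + n);
		else			/* sort complete */
			break;

		/* descend along the larger child, one compare per level */
		for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
			b = base[c] >= base[d] ? c : d;
		if (d == n)		/* last leaf has no sibling */
			b = c;

		/* backtrack from the leaf to where base[a] belongs */
		while (b != a && base[a] >= base[b])
			b = (b - 1) / 2;

		/* rotate: path entries move up one level, base[a] drops into c */
		c = b;
		while (b != a) {
			b = (b - 1) / 2;
			swap_ints(base + b, base + c);
		}
	}
}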
121 bch2_btree_node_unlock_write(trans, path, path->l[0].b); in wb_flush_one_slowpath()
123 trans->journal_res.seq = wb->journal_seq; in wb_flush_one_slowpath()
125 return bch2_trans_update(trans, iter, &wb->k, in wb_flush_one_slowpath()
143 EBUG_ON(!wb->journal_seq); in wb_flush_one()
144 EBUG_ON(!trans->c->btree_write_buffer.flushing.pin.seq); in wb_flush_one()
145 EBUG_ON(trans->c->btree_write_buffer.flushing.pin.seq > wb->journal_seq); in wb_flush_one()
151 if (!*accounting_accumulated && wb->k.k.type == KEY_TYPE_accounting) { in wb_flush_one()
153 struct bkey_s_c k = bch2_btree_path_peek_slot_exact(btree_iter_path(trans, iter), &u); in wb_flush_one() local
155 if (k.k->type == KEY_TYPE_accounting) in wb_flush_one()
156 bch2_accounting_accumulate(bkey_i_to_accounting(&wb->k), in wb_flush_one()
157 bkey_s_c_to_accounting(k)); in wb_flush_one()
165 if (btree_iter_path(trans, iter)->ref > 1) in wb_flush_one()
166 iter->path = __bch2_btree_path_make_mut(trans, iter->path, true, _THIS_IP_); in wb_flush_one()
171 ret = bch2_btree_node_lock_write(trans, path, &path->l[0].b->c); in wb_flush_one()
175 bch2_btree_node_prep_for_write(trans, path, path->l[0].b); in wb_flush_one()
179 if (unlikely(!bch2_btree_node_insert_fits(path->l[0].b, wb->k.k.u64s))) { in wb_flush_one()
184 bch2_btree_insert_key_leaf(trans, path, &wb->k, wb->journal_seq); in wb_flush_one()
193 * It is not safe to rejournal the key once it has been inserted into the write
196 * before the current transaction. If we were to journal this key again and
206 bch2_trans_iter_init(trans, &iter, wb->btree, bkey_start_pos(&wb->k.k), in btree_write_buffered_insert()
209 trans->journal_res.seq = wb->journal_seq; in btree_write_buffered_insert()
212 bch2_trans_update(trans, &iter, &wb->k, in btree_write_buffered_insert()
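The comment above is about replay ordering: the key already sits in the journal at wb->journal_seq, so the slowpath sets trans->journal_res.seq to that value instead of taking a new journal reservation. A small standalone illustration (not from the source) of one way re-journaling a buffered key under a later seq could let a stale value win on recovery:

#include <stdio.h>

struct entry {
	unsigned long	seq;	/* journal sequence number */
	int		key;
	int		val;
};

/* recovery applies entries in increasing seq order (log already sorted) */
static void replay(const struct entry *log, int n, int *table)
{
	for (int i = 0; i < n; i++)
		table[log[i].key] = log[i].val;
}

int main(void)
{
	int table[4] = { 0 };

	/* seq 1: buffered update key 2 = 10; seq 2: newer update key 2 = 20 */
	const struct entry good[] = { { 1, 2, 10 }, { 2, 2, 20 } };
	replay(good, 2, table);
	printf("flush with original seq: key 2 = %d\n", table[2]);	/* 20 */

	/* if flushing re-journaled the buffered update at seq 3 ... */
	const struct entry bad[] = { { 1, 2, 10 }, { 2, 2, 20 }, { 3, 2, 10 } };
	replay(bad, 3, table);
	printf("re-journaled at seq 3: key 2 = %d\n", table[2]);	/* 10: stale */
	return 0;
}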
221 struct journal *j = &c->journal; in move_keys_from_inc_to_flushing() local
223 if (!wb->inc.keys.nr) in move_keys_from_inc_to_flushing()
226 bch2_journal_pin_add(j, wb->inc.keys.data[0].journal_seq, &wb->flushing.pin, in move_keys_from_inc_to_flushing()
229 darray_resize(&wb->flushing.keys, min_t(size_t, 1U << 20, wb->flushing.keys.nr + wb->inc.keys.nr)); in move_keys_from_inc_to_flushing()
230 darray_resize(&wb->sorted, wb->flushing.keys.size); in move_keys_from_inc_to_flushing()
232 if (!wb->flushing.keys.nr && wb->sorted.size >= wb->inc.keys.nr) { in move_keys_from_inc_to_flushing()
233 swap(wb->flushing.keys, wb->inc.keys); in move_keys_from_inc_to_flushing()
237 size_t nr = min(darray_room(wb->flushing.keys), in move_keys_from_inc_to_flushing()
238 wb->sorted.size - wb->flushing.keys.nr); in move_keys_from_inc_to_flushing()
239 nr = min(nr, wb->inc.keys.nr); in move_keys_from_inc_to_flushing()
241 memcpy(&darray_top(wb->flushing.keys), in move_keys_from_inc_to_flushing()
242 wb->inc.keys.data, in move_keys_from_inc_to_flushing()
243 sizeof(wb->inc.keys.data[0]) * nr); in move_keys_from_inc_to_flushing()
245 memmove(wb->inc.keys.data, in move_keys_from_inc_to_flushing()
246 wb->inc.keys.data + nr, in move_keys_from_inc_to_flushing()
247 sizeof(wb->inc.keys.data[0]) * (wb->inc.keys.nr - nr)); in move_keys_from_inc_to_flushing()
249 wb->flushing.keys.nr += nr; in move_keys_from_inc_to_flushing()
250 wb->inc.keys.nr -= nr; in move_keys_from_inc_to_flushing()
252 if (!wb->inc.keys.nr) in move_keys_from_inc_to_flushing()
253 bch2_journal_pin_drop(j, &wb->inc.pin); in move_keys_from_inc_to_flushing()
255 bch2_journal_pin_update(j, wb->inc.keys.data[0].journal_seq, &wb->inc.pin, in move_keys_from_inc_to_flushing()
258 if (j->watermark) { in move_keys_from_inc_to_flushing()
259 spin_lock(&j->lock); in move_keys_from_inc_to_flushing()
260 bch2_journal_set_watermark(j); in move_keys_from_inc_to_flushing()
261 spin_unlock(&j->lock); in move_keys_from_inc_to_flushing()
264 BUG_ON(wb->sorted.size < wb->flushing.keys.nr); in move_keys_from_inc_to_flushing()
268 enum btree_id btree, struct bkey_i *k) in bch2_btree_write_buffer_insert_err() argument
270 struct bch_fs *c = trans->c; in bch2_btree_write_buffer_insert_err()
273 prt_printf(&buf, "attempting to do write buffer update on non wb btree="); in bch2_btree_write_buffer_insert_err()
276 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k)); in bch2_btree_write_buffer_insert_err()
280 return -EROFS; in bch2_btree_write_buffer_insert_err()
285 struct bch_fs *c = trans->c; in bch2_btree_write_buffer_flush_locked()
286 struct journal *j = &c->journal; in bch2_btree_write_buffer_flush_locked() local
287 struct btree_write_buffer *wb = &c->btree_write_buffer; in bch2_btree_write_buffer_flush_locked()
291 bool accounting_replay_done = test_bit(BCH_FS_accounting_replay_done, &c->flags); in bch2_btree_write_buffer_flush_locked()
294 ret = bch2_journal_error(&c->journal); in bch2_btree_write_buffer_flush_locked()
301 mutex_lock(&wb->inc.lock); in bch2_btree_write_buffer_flush_locked()
303 mutex_unlock(&wb->inc.lock); in bch2_btree_write_buffer_flush_locked()
305 for (size_t i = 0; i < wb->flushing.keys.nr; i++) { in bch2_btree_write_buffer_flush_locked()
306 wb->sorted.data[i].idx = i; in bch2_btree_write_buffer_flush_locked()
307 wb->sorted.data[i].btree = wb->flushing.keys.data[i].btree; in bch2_btree_write_buffer_flush_locked()
308 memcpy(&wb->sorted.data[i].pos, &wb->flushing.keys.data[i].k.k.p, sizeof(struct bpos)); in bch2_btree_write_buffer_flush_locked()
310 wb->sorted.nr = wb->flushing.keys.nr; in bch2_btree_write_buffer_flush_locked()
314 * then we attempt to flush in sorted btree order, as this is most in bch2_btree_write_buffer_flush_locked()
318 * journal we won't be able to drop our journal pin until everything is in bch2_btree_write_buffer_flush_locked()
319 * flushed - which means this could deadlock the journal if we weren't in bch2_btree_write_buffer_flush_locked()
320 * passing BCH_TRANS_COMMIT_journal_reclaim. This causes the update to fail in bch2_btree_write_buffer_flush_locked()
326 wb_sort(wb->sorted.data, wb->sorted.nr); in bch2_btree_write_buffer_flush_locked()
328 darray_for_each(wb->sorted, i) { in bch2_btree_write_buffer_flush_locked()
329 struct btree_write_buffered_key *k = &wb->flushing.keys.data[i->idx]; in bch2_btree_write_buffer_flush_locked() local
331 if (unlikely(!btree_type_uses_write_buffer(k->btree))) { in bch2_btree_write_buffer_flush_locked()
332 ret = bch2_btree_write_buffer_insert_err(trans, k->btree, &k->k); in bch2_btree_write_buffer_flush_locked()
336 for (struct wb_key_ref *n = i + 1; n < min(i + 4, &darray_top(wb->sorted)); n++) in bch2_btree_write_buffer_flush_locked()
337 prefetch(&wb->flushing.keys.data[n->idx]); in bch2_btree_write_buffer_flush_locked()
339 BUG_ON(!k->journal_seq); in bch2_btree_write_buffer_flush_locked()
342 k->k.k.type == KEY_TYPE_accounting) { in bch2_btree_write_buffer_flush_locked()
347 if (i + 1 < &darray_top(wb->sorted) && in bch2_btree_write_buffer_flush_locked()
349 struct btree_write_buffered_key *n = &wb->flushing.keys.data[i[1].idx]; in bch2_btree_write_buffer_flush_locked()
351 if (k->k.k.type == KEY_TYPE_accounting && in bch2_btree_write_buffer_flush_locked()
352 n->k.k.type == KEY_TYPE_accounting) in bch2_btree_write_buffer_flush_locked()
353 bch2_accounting_accumulate(bkey_i_to_accounting(&n->k), in bch2_btree_write_buffer_flush_locked()
354 bkey_i_to_s_c_accounting(&k->k)); in bch2_btree_write_buffer_flush_locked()
357 n->journal_seq = min_t(u64, n->journal_seq, k->journal_seq); in bch2_btree_write_buffer_flush_locked()
358 k->journal_seq = 0; in bch2_btree_write_buffer_flush_locked()
365 if (path->btree_id != i->btree || in bch2_btree_write_buffer_flush_locked()
366 bpos_gt(k->k.k.p, path->l[0].b->key.k.p)) { in bch2_btree_write_buffer_flush_locked()
367 bch2_btree_node_unlock_write(trans, path, path->l[0].b); in bch2_btree_write_buffer_flush_locked()
382 if (!iter.path || iter.btree_id != k->btree) { in bch2_btree_write_buffer_flush_locked()
384 bch2_trans_iter_init(trans, &iter, k->btree, k->k.k.p, in bch2_btree_write_buffer_flush_locked()
388 bch2_btree_iter_set_pos(&iter, k->k.k.p); in bch2_btree_write_buffer_flush_locked()
389 btree_iter_path(trans, &iter)->preserve = false; in bch2_btree_write_buffer_flush_locked()
394 ret = -BCH_ERR_journal_reclaim_would_deadlock; in bch2_btree_write_buffer_flush_locked()
398 ret = wb_flush_one(trans, &iter, k, &write_locked, in bch2_btree_write_buffer_flush_locked()
405 k->journal_seq = 0; in bch2_btree_write_buffer_flush_locked()
406 } else if (ret == -BCH_ERR_journal_reclaim_would_deadlock) { in bch2_btree_write_buffer_flush_locked()
415 bch2_btree_node_unlock_write(trans, path, path->l[0].b); in bch2_btree_write_buffer_flush_locked()
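In the fast-path loop above, adjacent sorted refs that compare equal describe the same position: the older entry's journal_seq is zeroed so it will be skipped, the surviving entry inherits the older journal pin (the min of the two seqs), and accounting keys are accumulated rather than simply overwritten. A standalone sketch of that merge step (types illustrative):

#include <stddef.h>

struct rec {
	unsigned long	seq;		/* 0 means "merged away, skip me" */
	unsigned	id;		/* stands in for (btree, pos) */
	int		is_counter;	/* accounting-style delta? */
	long		v;
};

/* r[] is sorted by id, oldest first within an id */
static void merge_adjacent(struct rec *r, size_t nr)
{
	for (size_t i = 0; i + 1 < nr; i++) {
		if (!r[i].seq || r[i].id != r[i + 1].id)
			continue;

		if (r[i].is_counter && r[i + 1].is_counter)
			r[i + 1].v += r[i].v;		/* deltas accumulate */
		/* otherwise the newer value simply wins */

		if (r[i].seq < r[i + 1].seq)		/* keep the oldest pin */
			r[i + 1].seq = r[i].seq;
		r[i].seq = 0;
	}
}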
429 trace_and_count(c, write_buffer_flush_slowpath, trans, slowpath, wb->flushing.keys.nr); in bch2_btree_write_buffer_flush_locked()
431 sort(wb->flushing.keys.data, in bch2_btree_write_buffer_flush_locked()
432 wb->flushing.keys.nr, in bch2_btree_write_buffer_flush_locked()
433 sizeof(wb->flushing.keys.data[0]), in bch2_btree_write_buffer_flush_locked()
436 darray_for_each(wb->flushing.keys, i) { in bch2_btree_write_buffer_flush_locked()
437 if (!i->journal_seq) in bch2_btree_write_buffer_flush_locked()
441 i->k.k.type == KEY_TYPE_accounting) { in bch2_btree_write_buffer_flush_locked()
447 bch2_journal_pin_update(j, i->journal_seq, &wb->flushing.pin, in bch2_btree_write_buffer_flush_locked()
462 i->journal_seq = 0; in bch2_btree_write_buffer_flush_locked()
467 * can't flush accounting keys at all - condense them and leave in bch2_btree_write_buffer_flush_locked()
473 * going to be generated by interior btree node updates as in bch2_btree_write_buffer_flush_locked()
474 * journal replay has to split/rewrite nodes to make room for in bch2_btree_write_buffer_flush_locked()
477 And for those new accounting updates, updates to the same in bch2_btree_write_buffer_flush_locked()
479 to the write buffer - see the patch for eytzinger tree in bch2_btree_write_buffer_flush_locked()
484 struct btree_write_buffered_key *dst = wb->flushing.keys.data; in bch2_btree_write_buffer_flush_locked()
486 darray_for_each(wb->flushing.keys, i) in bch2_btree_write_buffer_flush_locked()
487 if (i->journal_seq) in bch2_btree_write_buffer_flush_locked()
489 wb->flushing.keys.nr = dst - wb->flushing.keys.data; in bch2_btree_write_buffer_flush_locked()
494 bch2_journal_pin_drop(j, &wb->flushing.pin); in bch2_btree_write_buffer_flush_locked()
495 wb->flushing.keys.nr = 0; in bch2_btree_write_buffer_flush_locked()
499 trace_write_buffer_flush(trans, wb->flushing.keys.nr, overwritten, fast, 0); in bch2_btree_write_buffer_flush_locked()
508 bch2_journal_keys_to_write_buffer_start(c, &dst, le64_to_cpu(buf->data->seq)); in bch2_journal_keys_to_write_buffer()
510 for_each_jset_entry_type(entry, buf->data, BCH_JSET_ENTRY_write_buffer_keys) { in bch2_journal_keys_to_write_buffer()
511 jset_entry_for_each_key(entry, k) { in bch2_journal_keys_to_write_buffer()
512 ret = bch2_journal_key_to_wb(c, &dst, entry->btree_id, k); in bch2_journal_keys_to_write_buffer()
517 entry->type = BCH_JSET_ENTRY_btree_keys; in bch2_journal_keys_to_write_buffer()
526 struct journal *j = &c->journal; in fetch_wb_keys_from_journal() local
531 while (!ret && (buf = bch2_next_write_buffer_flush_journal_buf(j, max_seq, &blocked))) { in fetch_wb_keys_from_journal()
535 spin_lock(&j->lock); in fetch_wb_keys_from_journal()
536 buf->need_flush_to_write_buffer = false; in fetch_wb_keys_from_journal()
537 spin_unlock(&j->lock); in fetch_wb_keys_from_journal()
540 mutex_unlock(&j->buf_lock); in fetch_wb_keys_from_journal()
543 bch2_journal_unblock(j); in fetch_wb_keys_from_journal()
554 struct bch_fs *c = trans->c; in btree_write_buffer_flush_seq()
555 struct btree_write_buffer *wb = &c->btree_write_buffer; in btree_write_buffer_flush_seq()
563 *did_work |= wb->inc.keys.nr || wb->flushing.keys.nr; in btree_write_buffer_flush_seq()
567 * is not guaranteed to empty wb->inc: in btree_write_buffer_flush_seq()
569 mutex_lock(&wb->flushing.lock); in btree_write_buffer_flush_seq()
571 mutex_unlock(&wb->flushing.lock); in btree_write_buffer_flush_seq()
574 (wb->inc.pin.seq && wb->inc.pin.seq <= max_seq) || in btree_write_buffer_flush_seq()
575 (wb->flushing.pin.seq && wb->flushing.pin.seq <= max_seq))); in btree_write_buffer_flush_seq()
580 static int bch2_btree_write_buffer_journal_flush(struct journal *j, in bch2_btree_write_buffer_journal_flush() argument
583 struct bch_fs *c = container_of(j, struct bch_fs, journal); in bch2_btree_write_buffer_journal_flush()
591 struct bch_fs *c = trans->c; in bch2_btree_write_buffer_flush_sync()
596 return btree_write_buffer_flush_seq(trans, journal_cur_seq(&c->journal), &did_work); in bch2_btree_write_buffer_flush_sync()
605 if (bch2_journal_error(&c->journal)) in bch2_btree_write_buffer_flush_going_ro()
610 journal_cur_seq(&c->journal), &did_work)); in bch2_btree_write_buffer_flush_going_ro()
616 struct bch_fs *c = trans->c; in bch2_btree_write_buffer_flush_nocheck_rw()
617 struct btree_write_buffer *wb = &c->btree_write_buffer; in bch2_btree_write_buffer_flush_nocheck_rw()
620 if (mutex_trylock(&wb->flushing.lock)) { in bch2_btree_write_buffer_flush_nocheck_rw()
622 mutex_unlock(&wb->flushing.lock); in bch2_btree_write_buffer_flush_nocheck_rw()
630 struct bch_fs *c = trans->c; in bch2_btree_write_buffer_tryflush()
633 return -BCH_ERR_erofs_no_writes; in bch2_btree_write_buffer_tryflush()
641 * In check and repair code, when checking references to write buffer btrees we
642 * need to issue a flush before we have a definitive error: this issues a flush
649 struct bch_fs *c = trans->c; in bch2_btree_write_buffer_maybe_flush()
655 if (!bkey_and_val_eq(referring_k, bkey_i_to_s_c(last_flushed->k))) { in bch2_btree_write_buffer_maybe_flush()
666 if (bkey_is_btree_ptr(referring_k.k)) { in bch2_btree_write_buffer_maybe_flush()
675 bch2_bkey_buf_copy(last_flushed, c, tmp.k); in bch2_btree_write_buffer_maybe_flush()
676 ret = -BCH_ERR_transaction_restart_write_buffer_flush; in bch2_btree_write_buffer_maybe_flush()
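bch2_btree_write_buffer_maybe_flush() implements the "flush once, then re-check" rule from the comment: last_flushed remembers the referring key we already flushed for, so a dangling reference triggers at most one flush and transaction restart before being treated as a real inconsistency. A standalone sketch of that memoization (types illustrative, not the bcachefs API):

#include <stdbool.h>
#include <string.h>

struct key { unsigned char bytes[32]; };

struct flush_memo {
	struct key	last_flushed;
	bool		valid;
};

/*
 * Called when a reference looks dangling: returns true if the caller
 * should flush the write buffer and retry the check, false if we already
 * flushed for this exact key and the error is real.
 */
static bool should_flush_and_retry(struct flush_memo *m, const struct key *referring)
{
	if (m->valid && !memcmp(&m->last_flushed, referring, sizeof(*referring)))
		return false;

	m->last_flushed = *referring;
	m->valid = true;
	return true;
}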
686 struct btree_write_buffer *wb = &c->btree_write_buffer; in bch2_btree_write_buffer_flush_work()
689 mutex_lock(&wb->flushing.lock); in bch2_btree_write_buffer_flush_work()
693 mutex_unlock(&wb->flushing.lock); in bch2_btree_write_buffer_flush_work()
700 eytzinger0_sort(wb->accounting.data, wb->accounting.nr, in wb_accounting_sort()
701 sizeof(wb->accounting.data[0]), in wb_accounting_sort()
706 struct bkey_i_accounting *k) in bch2_accounting_key_to_wb_slowpath() argument
708 struct btree_write_buffer *wb = &c->btree_write_buffer; in bch2_accounting_key_to_wb_slowpath()
711 bkey_copy(&new.k, &k->k_i); in bch2_accounting_key_to_wb_slowpath()
713 int ret = darray_push(&wb->accounting, new); in bch2_accounting_key_to_wb_slowpath()
723 enum btree_id btree, struct bkey_i *k) in bch2_journal_key_to_wb_slowpath() argument
725 struct btree_write_buffer *wb = &c->btree_write_buffer; in bch2_journal_key_to_wb_slowpath()
728 ret = darray_make_room_gfp(&dst->wb->keys, 1, GFP_KERNEL); in bch2_journal_key_to_wb_slowpath()
729 if (!ret && dst->wb == &wb->flushing) in bch2_journal_key_to_wb_slowpath()
730 ret = darray_resize(&wb->sorted, wb->flushing.keys.size); in bch2_journal_key_to_wb_slowpath()
733 if (dst->wb == &c->btree_write_buffer.flushing) { in bch2_journal_key_to_wb_slowpath()
734 mutex_unlock(&dst->wb->lock); in bch2_journal_key_to_wb_slowpath()
735 dst->wb = &c->btree_write_buffer.inc; in bch2_journal_key_to_wb_slowpath()
736 bch2_journal_pin_add(&c->journal, dst->seq, &dst->wb->pin, in bch2_journal_key_to_wb_slowpath()
744 dst->room = darray_room(dst->wb->keys); in bch2_journal_key_to_wb_slowpath()
745 if (dst->wb == &wb->flushing) in bch2_journal_key_to_wb_slowpath()
746 dst->room = min(dst->room, wb->sorted.size - wb->flushing.keys.nr); in bch2_journal_key_to_wb_slowpath()
747 BUG_ON(!dst->room); in bch2_journal_key_to_wb_slowpath()
748 BUG_ON(!dst->seq); in bch2_journal_key_to_wb_slowpath()
750 struct btree_write_buffered_key *wb_k = &darray_top(dst->wb->keys); in bch2_journal_key_to_wb_slowpath()
751 wb_k->journal_seq = dst->seq; in bch2_journal_key_to_wb_slowpath()
752 wb_k->btree = btree; in bch2_journal_key_to_wb_slowpath()
753 bkey_copy(&wb_k->k, k); in bch2_journal_key_to_wb_slowpath()
754 dst->wb->keys.nr++; in bch2_journal_key_to_wb_slowpath()
755 dst->room--; in bch2_journal_key_to_wb_slowpath()
761 struct btree_write_buffer *wb = &c->btree_write_buffer; in bch2_journal_keys_to_write_buffer_start()
763 if (mutex_trylock(&wb->flushing.lock)) { in bch2_journal_keys_to_write_buffer_start()
764 mutex_lock(&wb->inc.lock); in bch2_journal_keys_to_write_buffer_start()
768 * Attempt to skip wb->inc, and add keys directly to in bch2_journal_keys_to_write_buffer_start()
769 * wb->flushing, saving us a copy later: in bch2_journal_keys_to_write_buffer_start()
772 if (!wb->inc.keys.nr) { in bch2_journal_keys_to_write_buffer_start()
773 dst->wb = &wb->flushing; in bch2_journal_keys_to_write_buffer_start()
775 mutex_unlock(&wb->flushing.lock); in bch2_journal_keys_to_write_buffer_start()
776 dst->wb = &wb->inc; in bch2_journal_keys_to_write_buffer_start()
779 mutex_lock(&wb->inc.lock); in bch2_journal_keys_to_write_buffer_start()
780 dst->wb = &wb->inc; in bch2_journal_keys_to_write_buffer_start()
783 dst->room = darray_room(dst->wb->keys); in bch2_journal_keys_to_write_buffer_start()
784 if (dst->wb == &wb->flushing) in bch2_journal_keys_to_write_buffer_start()
785 dst->room = min(dst->room, wb->sorted.size - wb->flushing.keys.nr); in bch2_journal_keys_to_write_buffer_start()
786 dst->seq = seq; in bch2_journal_keys_to_write_buffer_start()
788 bch2_journal_pin_add(&c->journal, seq, &dst->wb->pin, in bch2_journal_keys_to_write_buffer_start()
791 darray_for_each(wb->accounting, i) in bch2_journal_keys_to_write_buffer_start()
792 memset(&i->k.v, 0, bkey_val_bytes(&i->k.k)); in bch2_journal_keys_to_write_buffer_start()
797 struct btree_write_buffer *wb = &c->btree_write_buffer; in bch2_journal_keys_to_write_buffer_end()
801 darray_for_each(wb->accounting, i) in bch2_journal_keys_to_write_buffer_end()
802 if (!bch2_accounting_key_is_zero(bkey_i_to_s_c_accounting(&i->k))) { in bch2_journal_keys_to_write_buffer_end()
803 i->journal_seq = dst->seq; in bch2_journal_keys_to_write_buffer_end()
805 ret = __bch2_journal_key_to_wb(c, dst, i->btree, &i->k); in bch2_journal_keys_to_write_buffer_end()
810 if (live_accounting_keys * 2 < wb->accounting.nr) { in bch2_journal_keys_to_write_buffer_end()
811 struct btree_write_buffered_key *dst = wb->accounting.data; in bch2_journal_keys_to_write_buffer_end()
813 darray_for_each(wb->accounting, src) in bch2_journal_keys_to_write_buffer_end()
814 if (!bch2_accounting_key_is_zero(bkey_i_to_s_c_accounting(&src->k))) in bch2_journal_keys_to_write_buffer_end()
816 wb->accounting.nr = dst - wb->accounting.data; in bch2_journal_keys_to_write_buffer_end()
820 if (!dst->wb->keys.nr) in bch2_journal_keys_to_write_buffer_end()
821 bch2_journal_pin_drop(&c->journal, &dst->wb->pin); in bch2_journal_keys_to_write_buffer_end()
825 !queue_work(system_unbound_wq, &c->btree_write_buffer.flush_work)) in bch2_journal_keys_to_write_buffer_end()
828 if (dst->wb == &wb->flushing) in bch2_journal_keys_to_write_buffer_end()
829 mutex_unlock(&wb->flushing.lock); in bch2_journal_keys_to_write_buffer_end()
830 mutex_unlock(&wb->inc.lock); in bch2_journal_keys_to_write_buffer_end()
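Taken together, the start/end helpers give accounting keys a delta lifecycle per flush window: deltas are zeroed in _start, accumulated as journal keys stream in, only the non-zero ones are emitted to the write buffer in _end, and the accounting array is compacted once more than half its entries are dead. A standalone sketch of that lifecycle (types illustrative):

#include <stddef.h>

struct delta  { unsigned id; long v; };
struct deltas { struct delta *data; size_t nr; };

/* flush window opens: reset every accumulated delta to zero */
static void window_start(struct deltas *d)
{
	for (size_t i = 0; i < d->nr; i++)
		d->data[i].v = 0;
}

/* keys streaming in during the window add to their delta */
static void accumulate(struct deltas *d, size_t i, long v)
{
	d->data[i].v += v;
}

/* window closes: emit only non-zero deltas, compact if mostly dead */
static size_t window_end(struct deltas *d, void (*emit)(const struct delta *))
{
	size_t live = 0;

	for (size_t i = 0; i < d->nr; i++)
		if (d->data[i].v) {
			emit(&d->data[i]);
			live++;
		}

	if (live * 2 < d->nr) {
		struct delta *dst = d->data;

		for (size_t i = 0; i < d->nr; i++)
			if (d->data[i].v)
				*dst++ = d->data[i];
		d->nr = dst - d->data;
	}
	return live;
}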
837 if (wb->keys.size >= new_size) in wb_keys_resize()
840 if (!mutex_trylock(&wb->lock)) in wb_keys_resize()
841 return -EINTR; in wb_keys_resize()
843 int ret = darray_resize(&wb->keys, new_size); in wb_keys_resize()
844 mutex_unlock(&wb->lock); in wb_keys_resize()
850 struct btree_write_buffer *wb = &c->btree_write_buffer; in bch2_btree_write_buffer_resize()
852 return wb_keys_resize(&wb->flushing, new_size) ?: in bch2_btree_write_buffer_resize()
853 wb_keys_resize(&wb->inc, new_size); in bch2_btree_write_buffer_resize()
858 struct btree_write_buffer *wb = &c->btree_write_buffer; in bch2_fs_btree_write_buffer_exit()
860 BUG_ON((wb->inc.keys.nr || wb->flushing.keys.nr) && in bch2_fs_btree_write_buffer_exit()
861 !bch2_journal_error(&c->journal)); in bch2_fs_btree_write_buffer_exit()
863 darray_exit(&wb->accounting); in bch2_fs_btree_write_buffer_exit()
864 darray_exit(&wb->sorted); in bch2_fs_btree_write_buffer_exit()
865 darray_exit(&wb->flushing.keys); in bch2_fs_btree_write_buffer_exit()
866 darray_exit(&wb->inc.keys); in bch2_fs_btree_write_buffer_exit()
871 struct btree_write_buffer *wb = &c->btree_write_buffer; in bch2_fs_btree_write_buffer_init()
873 mutex_init(&wb->inc.lock); in bch2_fs_btree_write_buffer_init()
874 mutex_init(&wb->flushing.lock); in bch2_fs_btree_write_buffer_init()
875 INIT_WORK(&wb->flush_work, bch2_btree_write_buffer_flush_work); in bch2_fs_btree_write_buffer_init()
880 return darray_make_room(&wb->inc.keys, initial_size) ?: in bch2_fs_btree_write_buffer_init()
881 darray_make_room(&wb->flushing.keys, initial_size) ?: in bch2_fs_btree_write_buffer_init()
882 darray_make_room(&wb->sorted, initial_size); in bch2_fs_btree_write_buffer_init()