Lines Matching full:flushing
144 EBUG_ON(!trans->c->btree_write_buffer.flushing.pin.seq); in wb_flush_one()
145 EBUG_ON(trans->c->btree_write_buffer.flushing.pin.seq > wb->journal_seq); in wb_flush_one()
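The two EBUG_ON() checks above (a bcachefs debug-build assertion) encode the ordering invariant the flush path relies on: the flushing journal pin is held, and never at a sequence newer than the buffered key being flushed. A minimal stand-alone sketch of that check, using illustrative types rather than the bcachefs structures:

/* Stand-in types (wb_pin_sketch, wb_key_sketch) are illustrative only. */
#include <assert.h>
#include <stdint.h>

struct wb_key_sketch { uint64_t journal_seq; };
struct wb_pin_sketch { uint64_t seq; };         /* 0 means "pin not held" */

static void check_flush_invariant(const struct wb_pin_sketch *pin,
                                  const struct wb_key_sketch *k)
{
        assert(pin->seq);                       /* pin must be held...           */
        assert(pin->seq <= k->journal_seq);     /* ...and not newer than the key */
}
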
226 bch2_journal_pin_add(j, wb->inc.keys.data[0].journal_seq, &wb->flushing.pin, in move_keys_from_inc_to_flushing()
229 darray_resize(&wb->flushing.keys, min_t(size_t, 1U << 20, wb->flushing.keys.nr + wb->inc.keys.nr)); in move_keys_from_inc_to_flushing()
230 darray_resize(&wb->sorted, wb->flushing.keys.size); in move_keys_from_inc_to_flushing()
232 if (!wb->flushing.keys.nr && wb->sorted.size >= wb->inc.keys.nr) { in move_keys_from_inc_to_flushing()
233 swap(wb->flushing.keys, wb->inc.keys); in move_keys_from_inc_to_flushing()
237 size_t nr = min(darray_room(wb->flushing.keys), in move_keys_from_inc_to_flushing()
238 wb->sorted.size - wb->flushing.keys.nr); in move_keys_from_inc_to_flushing()
241 memcpy(&darray_top(wb->flushing.keys), in move_keys_from_inc_to_flushing()
249 wb->flushing.keys.nr += nr; in move_keys_from_inc_to_flushing()
264 BUG_ON(wb->sorted.size < wb->flushing.keys.nr); in move_keys_from_inc_to_flushing()
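The move_keys_from_inc_to_flushing() matches above show the two-buffer handoff: the journal pin is taken at the oldest incoming key's sequence, the flushing buffer (capped at 1U << 20 entries) and the auxiliary sort array are grown, and the incoming keys are then either swapped in wholesale (when the flushing buffer is empty and the sort array is large enough) or copied across up to the available room. A simplified sketch of that swap-or-copy step, using a plain dynamic-array stand-in rather than the kernel's darray:

/* Swap-or-copy handoff between an "incoming" and a "flushing" buffer,
 * modeled loosely on move_keys_from_inc_to_flushing(); all names here
 * are stand-ins, not bcachefs types. */
#include <string.h>
#include <assert.h>
#include <stddef.h>

struct key  { unsigned long long journal_seq; };
struct darr { struct key *data; size_t nr, size; };

#define SWAP_DARR(a, b) do { struct darr tmp = (a); (a) = (b); (b) = tmp; } while (0)

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

/* Move keys from @inc to @flushing, bounded by @sorted_size (the size of
 * the auxiliary sort index, which must cover every flushing key). */
static void move_inc_to_flushing(struct darr *flushing, struct darr *inc,
                                 size_t sorted_size)
{
        if (!flushing->nr && sorted_size >= inc->nr) {
                /* Fast path: flushing is empty, just exchange the buffers. */
                SWAP_DARR(*flushing, *inc);
        } else {
                /* Slow path: copy as many keys as both the destination
                 * capacity and the sort index allow. */
                size_t room = flushing->size - flushing->nr;
                size_t nr = min_sz(room, sorted_size - flushing->nr);

                nr = min_sz(nr, inc->nr);
                memcpy(flushing->data + flushing->nr, inc->data,
                       nr * sizeof(inc->data[0]));
                memmove(inc->data, inc->data + nr,
                        (inc->nr - nr) * sizeof(inc->data[0]));
                inc->nr -= nr;
                flushing->nr += nr;
        }

        assert(sorted_size >= flushing->nr);
}
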
305 for (size_t i = 0; i < wb->flushing.keys.nr; i++) { in bch2_btree_write_buffer_flush_locked()
307 wb->sorted.data[i].btree = wb->flushing.keys.data[i].btree; in bch2_btree_write_buffer_flush_locked()
308 memcpy(&wb->sorted.data[i].pos, &wb->flushing.keys.data[i].k.k.p, sizeof(struct bpos)); in bch2_btree_write_buffer_flush_locked()
310 wb->sorted.nr = wb->flushing.keys.nr; in bch2_btree_write_buffer_flush_locked()
317 * However, since we're not flushing in the order they appear in the in bch2_btree_write_buffer_flush_locked()
329 struct btree_write_buffered_key *k = &wb->flushing.keys.data[i->idx]; in bch2_btree_write_buffer_flush_locked()
337 prefetch(&wb->flushing.keys.data[n->idx]); in bch2_btree_write_buffer_flush_locked()
349 struct btree_write_buffered_key *n = &wb->flushing.keys.data[i[1].idx]; in bch2_btree_write_buffer_flush_locked()
429 trace_and_count(c, write_buffer_flush_slowpath, trans, slowpath, wb->flushing.keys.nr); in bch2_btree_write_buffer_flush_locked()
431 sort(wb->flushing.keys.data, in bch2_btree_write_buffer_flush_locked()
432 wb->flushing.keys.nr, in bch2_btree_write_buffer_flush_locked()
433 sizeof(wb->flushing.keys.data[0]), in bch2_btree_write_buffer_flush_locked()
436 darray_for_each(wb->flushing.keys, i) { in bch2_btree_write_buffer_flush_locked()
447 bch2_journal_pin_update(j, i->journal_seq, &wb->flushing.pin, in bch2_btree_write_buffer_flush_locked()
484 struct btree_write_buffered_key *dst = wb->flushing.keys.data; in bch2_btree_write_buffer_flush_locked()
486 darray_for_each(wb->flushing.keys, i) in bch2_btree_write_buffer_flush_locked()
489 wb->flushing.keys.nr = dst - wb->flushing.keys.data; in bch2_btree_write_buffer_flush_locked()
494 bch2_journal_pin_drop(j, &wb->flushing.pin); in bch2_btree_write_buffer_flush_locked()
495 wb->flushing.keys.nr = 0; in bch2_btree_write_buffer_flush_locked()
499 trace_write_buffer_flush(trans, wb->flushing.keys.nr, overwritten, fast, 0); in bch2_btree_write_buffer_flush_locked()
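Lines 305-499 are the core flush path: a small index entry (btree id, position, index into flushing.keys) is built per key so the flush can proceed in btree-key order while the backing array stays in journal order, prefetching the next key each iteration; keys that could not be flushed on the fast path are re-sorted and retried on the slowpath, with the journal pin updated as it goes and dropped once the buffer is emptied. A hedged sketch of the underlying "sort an index array, walk the payload indirectly with prefetch" idea, in generic C with illustrative types:

/* Sort a compact index array instead of the (larger) payload array, then
 * walk the payload through the index, prefetching the next element.
 * Types and names are stand-ins; __builtin_prefetch() corresponds to the
 * prefetch() call visible in the listing. */
#include <stdlib.h>

struct payload { unsigned btree; unsigned long long pos; char big[64]; };
struct idx_ent { unsigned btree; unsigned long long pos; size_t idx; };

static int idx_cmp(const void *_a, const void *_b)
{
        const struct idx_ent *a = _a, *b = _b;

        if (a->btree != b->btree)
                return a->btree < b->btree ? -1 : 1;
        if (a->pos != b->pos)
                return a->pos < b->pos ? -1 : 1;
        return 0;
}

static void flush_in_key_order(struct payload *keys, size_t nr,
                               void (*flush_one)(struct payload *))
{
        struct idx_ent *sorted = malloc(nr * sizeof(*sorted));

        if (!sorted)
                return;

        for (size_t i = 0; i < nr; i++) {
                sorted[i].btree = keys[i].btree;
                sorted[i].pos   = keys[i].pos;
                sorted[i].idx   = i;
        }

        qsort(sorted, nr, sizeof(sorted[0]), idx_cmp);

        for (size_t i = 0; i < nr; i++) {
                if (i + 1 < nr)
                        __builtin_prefetch(&keys[sorted[i + 1].idx]);
                flush_one(&keys[sorted[i].idx]);
        }

        free(sorted);
}
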
563 *did_work |= wb->inc.keys.nr || wb->flushing.keys.nr; in btree_write_buffer_flush_seq()
569 mutex_lock(&wb->flushing.lock); in btree_write_buffer_flush_seq()
571 mutex_unlock(&wb->flushing.lock); in btree_write_buffer_flush_seq()
575 (wb->flushing.pin.seq && wb->flushing.pin.seq <= max_seq))); in btree_write_buffer_flush_seq()
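btree_write_buffer_flush_seq() (lines 563-575) loops, flushing under wb->flushing.lock until no buffered key remains pinned at or below the requested journal sequence, and reports whether it did any work. A stand-in sketch of that loop shape; the stub flush and the field names here are assumptions, not the bcachefs implementation:

/* Loop shape only: keep flushing while any buffered key is pinned at or
 * below the target journal sequence. */
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

struct wb_state {
        size_t   inc_nr;        /* keys waiting in the incoming buffer   */
        size_t   flushing_nr;   /* keys in the buffer being flushed      */
        uint64_t inc_min_seq;   /* oldest journal seq in the inc buffer  */
        uint64_t flushing_pin;  /* seq the flushing pin is held at, or 0 */
};

/* Stub: a real implementation would flush one batch under flushing_lock. */
static void flush_one_batch(struct wb_state *wb)
{
        wb->inc_nr = wb->flushing_nr = 0;
        wb->flushing_pin = 0;
}

static bool flush_to_seq(struct wb_state *wb, uint64_t max_seq)
{
        bool did_work = false;

        do {
                did_work |= wb->inc_nr || wb->flushing_nr;
                flush_one_batch(wb);
        } while ((wb->inc_nr && wb->inc_min_seq <= max_seq) ||
                 (wb->flushing_pin && wb->flushing_pin <= max_seq));

        return did_work;
}
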
600 * The write buffer requires flushing when going RO: keys in the journal for the
620 if (mutex_trylock(&wb->flushing.lock)) { in bch2_btree_write_buffer_flush_nocheck_rw()
622 mutex_unlock(&wb->flushing.lock); in bch2_btree_write_buffer_flush_nocheck_rw()
689 mutex_lock(&wb->flushing.lock); in bch2_btree_write_buffer_flush_work()
693 mutex_unlock(&wb->flushing.lock); in bch2_btree_write_buffer_flush_work()
729 if (!ret && dst->wb == &wb->flushing) in bch2_journal_key_to_wb_slowpath()
730 ret = darray_resize(&wb->sorted, wb->flushing.keys.size); in bch2_journal_key_to_wb_slowpath()
733 if (dst->wb == &c->btree_write_buffer.flushing) { in bch2_journal_key_to_wb_slowpath()
745 if (dst->wb == &wb->flushing) in bch2_journal_key_to_wb_slowpath()
746 dst->room = min(dst->room, wb->sorted.size - wb->flushing.keys.nr); in bch2_journal_key_to_wb_slowpath()
763 if (mutex_trylock(&wb->flushing.lock)) { in bch2_journal_keys_to_write_buffer_start()
769 * wb->flushing, saving us a copy later: in bch2_journal_keys_to_write_buffer_start()
773 dst->wb = &wb->flushing; in bch2_journal_keys_to_write_buffer_start()
775 mutex_unlock(&wb->flushing.lock); in bch2_journal_keys_to_write_buffer_start()
784 if (dst->wb == &wb->flushing) in bch2_journal_keys_to_write_buffer_start()
785 dst->room = min(dst->room, wb->sorted.size - wb->flushing.keys.nr); in bch2_journal_keys_to_write_buffer_start()
828 if (dst->wb == &wb->flushing) in bch2_journal_keys_to_write_buffer_end()
829 mutex_unlock(&wb->flushing.lock); in bch2_journal_keys_to_write_buffer_end()
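The start/end pair above (lines 763-829) picks a destination for keys coming out of the journal: if wb->flushing.lock can be taken with a trylock, keys are appended straight into the flushing buffer, saving a later copy, with the available room additionally capped by the size of the sort array; otherwise they go into the incoming buffer. A rough userspace sketch of that trylock-based destination choice (the pthread mutex and struct names are illustrative, not the kernel code):

/* Pick a destination buffer with trylock: write straight into the flushing
 * buffer when its lock is free, otherwise fall back to the incoming buffer. */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct buf  { size_t nr, size; };
struct wbuf {
        struct buf      inc, flushing;
        pthread_mutex_t flushing_lock;
        size_t          sorted_size;    /* capacity of the sort index */
};
struct dest { struct buf *wb; size_t room; bool locked_flushing; };

static size_t min_room(size_t a, size_t b) { return a < b ? a : b; }

static void pick_dest(struct wbuf *wb, struct dest *dst)
{
        if (pthread_mutex_trylock(&wb->flushing_lock) == 0) {
                /* Flushing buffer is idle: append directly, saving a copy. */
                dst->wb = &wb->flushing;
                dst->locked_flushing = true;
        } else {
                dst->wb = &wb->inc;
                dst->locked_flushing = false;
        }

        dst->room = dst->wb->size - dst->wb->nr;
        if (dst->wb == &wb->flushing)
                /* The sort index must cover every flushing key. */
                dst->room = min_room(dst->room,
                                     wb->sorted_size - wb->flushing.nr);
}

static void finish_dest(struct wbuf *wb, struct dest *dst)
{
        if (dst->locked_flushing)
                pthread_mutex_unlock(&wb->flushing_lock);
}
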
852 return wb_keys_resize(&wb->flushing, new_size) ?: in bch2_btree_write_buffer_resize()
860 BUG_ON((wb->inc.keys.nr || wb->flushing.keys.nr) && in bch2_fs_btree_write_buffer_exit()
865 darray_exit(&wb->flushing.keys); in bch2_fs_btree_write_buffer_exit()
874 mutex_init(&wb->flushing.lock); in bch2_fs_btree_write_buffer_init()
881 darray_make_room(&wb->flushing.keys, initial_size) ?: in bch2_fs_btree_write_buffer_init()
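The resize/exit/init matches (lines 852-881) show the two buffers being managed as a pair: resizes apply to both, teardown asserts no keys are left behind on a clean shutdown, and init pre-sizes the flushing buffer. A small lifecycle sketch, assuming plain realloc-backed buffers in place of the kernel darray (the `?:` error-chaining follows the idiom visible at line 852):

/* Paired resize/teardown of the two key buffers; a stand-in sketch only. */
#include <stdlib.h>
#include <assert.h>
#include <stddef.h>

struct keybuf { void *data; size_t nr, size; };
struct wbufs  { struct keybuf inc, flushing; };

static int keybuf_resize(struct keybuf *b, size_t new_size, size_t elem)
{
        void *p = realloc(b->data, new_size * elem);

        if (!p)
                return -1;
        b->data = p;
        b->size = new_size;
        return 0;
}

static int wbufs_resize(struct wbufs *wb, size_t new_size, size_t elem)
{
        /* Resize both buffers; the first failure is returned (GNU ?:). */
        return keybuf_resize(&wb->flushing, new_size, elem) ?:
               keybuf_resize(&wb->inc, new_size, elem);
}

static void wbufs_exit(struct wbufs *wb)
{
        /* Mirrors the teardown assertion: no keys should remain here. */
        assert(!wb->inc.nr && !wb->flushing.nr);
        free(wb->inc.data);
        free(wb->flushing.data);
}
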