Lines matching: k - to - j (full-text search)
1 // SPDX-License-Identifier: GPL-2.0
17 #include "sb-clean.h"
25 lockdep_assert_held(&c->sb_lock); in bch2_journal_pos_from_member_info_set()
28 struct bch_member *m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx); in bch2_journal_pos_from_member_info_set()
30 m->last_journal_bucket = cpu_to_le32(ca->journal.cur_idx); in bch2_journal_pos_from_member_info_set()
31 m->last_journal_bucket_offset = cpu_to_le32(ca->mi.bucket_size - ca->journal.sectors_free); in bch2_journal_pos_from_member_info_set()
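/*
 * Hedged worked example (not from the file): with ca->mi.bucket_size ==
 * 512 sectors and ca->journal.sectors_free == 100, the saved offset is
 * 512 - 100 == 412; the resume path below then recovers sectors_free =
 * 512 - 412 == 100.
 */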
37 mutex_lock(&c->sb_lock); in bch2_journal_pos_from_member_info_resume()
39 struct bch_member m = bch2_sb_member_get(c->disk_sb.sb, ca->dev_idx); in bch2_journal_pos_from_member_info_resume()
42 if (idx < ca->journal.nr) in bch2_journal_pos_from_member_info_resume()
43 ca->journal.cur_idx = idx; in bch2_journal_pos_from_member_info_resume()
45 if (offset <= ca->mi.bucket_size) in bch2_journal_pos_from_member_info_resume()
46 ca->journal.sectors_free = ca->mi.bucket_size - offset; in bch2_journal_pos_from_member_info_resume()
48 mutex_unlock(&c->sb_lock); in bch2_journal_pos_from_member_info_resume()
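/*
 * Hedged note on the bounds checks above: the saved bucket index and
 * offset come from superblock member info and may be stale or from an
 * older layout, so they are only adopted when they still fit the current
 * journal (idx < ca->journal.nr, offset <= ca->mi.bucket_size).
 */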
52 struct journal_replay *j) in bch2_journal_ptrs_to_text() argument
54 darray_for_each(j->ptrs, i) { in bch2_journal_ptrs_to_text()
55 if (i != j->ptrs.data) in bch2_journal_ptrs_to_text()
58 i->dev, i->bucket, i->bucket_offset, i->sector); in bch2_journal_ptrs_to_text()
63 struct journal_replay *j) in bch2_journal_replay_to_text() argument
65 prt_printf(out, "seq %llu ", le64_to_cpu(j->j.seq)); in bch2_journal_replay_to_text()
67 bch2_journal_ptrs_to_text(out, c, j); in bch2_journal_replay_to_text()
69 for_each_jset_entry_type(entry, &j->j, BCH_JSET_ENTRY_datetime) { in bch2_journal_replay_to_text()
72 bch2_prt_datetime(out, le64_to_cpu(datetime->seconds)); in bch2_journal_replay_to_text()
81 [1] = ((__le32 *) &jset->seq)[0], in journal_nonce()
82 [2] = ((__le32 *) &jset->seq)[1], in journal_nonce()
87 static bool jset_csum_good(struct bch_fs *c, struct jset *j, struct bch_csum *csum) in jset_csum_good() argument
89 if (!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(j))) { in jset_csum_good()
94 *csum = csum_vstruct(c, JSET_CSUM_TYPE(j), journal_nonce(j), j); in jset_csum_good()
95 return !bch2_crc_cmp(j->csum, *csum); in jset_csum_good()
100 return (seq - c->journal_entries_base_seq) & (~0U >> 1); in journal_entry_radix_idx()
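/*
 * Hedged standalone sketch of the index math above: genradixes are
 * indexed by ulong rather than u64, so seqs are mapped into a 31-bit
 * window (~0U >> 1 == 0x7fffffff) relative to a base seq:
 */
static inline size_t journal_radix_idx_sketch(u64 seq, u64 base_seq)
{
	/* e.g. base_seq 1000, seq 1234 -> index 234 */
	return (seq - base_seq) & (~0U >> 1);
}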
107 genradix_ptr(&c->journal_entries, in __journal_replay_free()
108 journal_entry_radix_idx(c, le64_to_cpu(i->j.seq))); in __journal_replay_free()
118 i->ignore_blacklisted = true; in journal_replay_free()
120 i->ignore_not_dirty = true; in journal_replay_free()
122 if (!c->opts.read_entire_journal) in journal_replay_free()
137 * Given a journal entry we just read, add it to the list of journal entries to
142 struct journal_list *jlist, struct jset *j) in journal_entry_add() argument
146 size_t bytes = vstruct_bytes(j); in journal_entry_add()
147 u64 last_seq = !JSET_NO_FLUSH(j) ? le64_to_cpu(j->last_seq) : 0; in journal_entry_add()
151 if (!c->journal.oldest_seq_found_ondisk || in journal_entry_add()
152 le64_to_cpu(j->seq) < c->journal.oldest_seq_found_ondisk) in journal_entry_add()
153 c->journal.oldest_seq_found_ondisk = le64_to_cpu(j->seq); in journal_entry_add()
156 if (!c->opts.read_entire_journal && in journal_entry_add()
157 le64_to_cpu(j->seq) < jlist->last_seq) in journal_entry_add()
163 * within the range of +-2 billion of the first one we find. in journal_entry_add()
165 if (!c->journal_entries_base_seq) in journal_entry_add()
166 c->journal_entries_base_seq = max_t(s64, 1, le64_to_cpu(j->seq) - S32_MAX); in journal_entry_add()
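/*
 * Hedged worked example: if the first entry found has seq == 6e9, the
 * base becomes 6e9 - S32_MAX, so that first seq maps to radix index
 * S32_MAX; the max_t() with 1 keeps the base at least 1 so it can't
 * collide with the "unset" value 0 tested just above.
 */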
169 if (last_seq > jlist->last_seq && !c->opts.read_entire_journal) { in journal_entry_add()
170 genradix_for_each_from(&c->journal_entries, iter, _i, in journal_entry_add()
171 journal_entry_radix_idx(c, jlist->last_seq)) { in journal_entry_add()
177 if (le64_to_cpu(i->j.seq) >= last_seq) in journal_entry_add()
184 jlist->last_seq = max(jlist->last_seq, last_seq); in journal_entry_add()
186 _i = genradix_ptr_alloc(&c->journal_entries, in journal_entry_add()
187 journal_entry_radix_idx(c, le64_to_cpu(j->seq)), in journal_entry_add()
190 return -BCH_ERR_ENOMEM_journal_entry_add; in journal_entry_add()
198 bool identical = bytes == vstruct_bytes(&dup->j) && in journal_entry_add()
199 !memcmp(j, &dup->j, bytes); in journal_entry_add()
202 dup->csum_good; in journal_entry_add()
205 darray_for_each(dup->ptrs, ptr) in journal_entry_add()
206 if (ptr->dev == ca->dev_idx) in journal_entry_add()
209 ret = darray_push(&dup->ptrs, entry_ptr); in journal_entry_add()
231 i = kvmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL); in journal_entry_add()
233 return -BCH_ERR_ENOMEM_journal_entry_add; in journal_entry_add()
235 darray_init(&i->ptrs); in journal_entry_add()
236 i->csum_good = entry_ptr.csum_good; in journal_entry_add()
237 i->ignore_blacklisted = false; in journal_entry_add()
238 i->ignore_not_dirty = false; in journal_entry_add()
239 unsafe_memcpy(&i->j, j, bytes, "embedded variable length struct"); in journal_entry_add()
243 darray_for_each(dup->ptrs, ptr) in journal_entry_add()
244 darray_push(&i->ptrs, *ptr); in journal_entry_add()
247 darray_push(&i->ptrs, entry_ptr); in journal_entry_add()
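/*
 * Hedged sketch of the duplicate handling above: when the same seq is
 * found on more than one device, a single jset copy is kept and each
 * location is recorded, at most one ptr per device:
 */
static int journal_dup_ptr_sketch(struct journal_replay *dup,
				  struct journal_ptr entry_ptr)
{
	darray_for_each(dup->ptrs, ptr)
		if (ptr->dev == entry_ptr.dev)
			return 0;	/* this device already has a ptr */
	return darray_push(&dup->ptrs, entry_ptr);
}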
280 bch2_prt_jset_entry_type(out, entry->type); in journal_entry_err_msg()
287 prt_printf(out, " seq=%llu", le64_to_cpu(jset->seq)); in journal_entry_err_msg()
291 (u64 *) entry - jset->_data, in journal_entry_err_msg()
292 le32_to_cpu(jset->u64s)); in journal_entry_err_msg()
313 ret = -BCH_ERR_fsck_errors_not_fixed; \
331 struct bkey_i *k, in journal_validate_key() argument
340 if (journal_entry_err_on(!k->k.u64s, in journal_validate_key()
343 "k->u64s 0")) { in journal_validate_key()
344 entry->u64s = cpu_to_le16((u64 *) k - entry->_data); in journal_validate_key()
349 if (journal_entry_err_on((void *) bkey_next(k) > in journal_validate_key()
354 entry->u64s = cpu_to_le16((u64 *) k - entry->_data); in journal_validate_key()
359 if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT, in journal_validate_key()
362 "bad format %u", k->k.format)) { in journal_validate_key()
363 le16_add_cpu(&entry->u64s, -((u16) k->k.u64s)); in journal_validate_key()
364 memmove(k, bkey_next(k), next - (void *) bkey_next(k)); in journal_validate_key()
371 write, NULL, bkey_to_packed(k)); in journal_validate_key()
373 ret = bch2_bkey_validate(c, bkey_i_to_s_c(k), from); in journal_validate_key()
374 if (ret == -BCH_ERR_fsck_delete_bkey) { in journal_validate_key()
375 le16_add_cpu(&entry->u64s, -((u16) k->k.u64s)); in journal_validate_key()
376 memmove(k, bkey_next(k), next - (void *) bkey_next(k)); in journal_validate_key()
385 write, NULL, bkey_to_packed(k)); in journal_validate_key()
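/*
 * Hedged sketch of the "drop bad key" pattern used twice above: shrink
 * the entry by the key's size in u64s, then close the gap; 'next' is
 * assumed to point just past the end of the entry's keys:
 */
static void journal_drop_key_sketch(struct jset_entry *entry,
				    struct bkey_i *k, void *next)
{
	le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
	memmove(k, bkey_next(k), next - (void *) bkey_next(k));
}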
396 struct bkey_i *k = entry->start; in journal_entry_btree_keys_validate() local
398 from.level = entry->level; in journal_entry_btree_keys_validate()
399 from.btree = entry->btree_id; in journal_entry_btree_keys_validate()
401 while (k != vstruct_last(entry)) { in journal_entry_btree_keys_validate()
402 int ret = journal_validate_key(c, jset, entry, k, from, version, big_endian); in journal_entry_btree_keys_validate()
408 k = bkey_next(k); in journal_entry_btree_keys_validate()
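/*
 * Hedged note: bkey_next() is assumed to step by the current key's size,
 * roughly (struct bkey_i *) ((u64 *) k + k->k.u64s), which is what lets
 * the loop above walk the entry's packed keys until vstruct_last().
 */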
419 jset_entry_for_each_key(entry, k) { in journal_entry_btree_keys_to_text()
422 bch2_prt_jset_entry_type(out, entry->type); in journal_entry_btree_keys_to_text()
425 bch2_btree_id_level_to_text(out, entry->btree_id, entry->level); in journal_entry_btree_keys_to_text()
427 bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k)); in journal_entry_btree_keys_to_text()
438 struct bkey_i *k = entry->start; in journal_entry_btree_root_validate() local
442 from.level = entry->level + 1; in journal_entry_btree_root_validate()
443 from.btree = entry->btree_id; in journal_entry_btree_root_validate()
445 if (journal_entry_err_on(!entry->u64s || in journal_entry_btree_root_validate()
446 le16_to_cpu(entry->u64s) != k->k.u64s, in journal_entry_btree_root_validate()
452 * we don't want to null out this jset_entry, in journal_entry_btree_root_validate()
454 * we were _supposed_ to have a btree root in journal_entry_btree_root_validate()
456 entry->u64s = 0; in journal_entry_btree_root_validate()
461 ret = journal_validate_key(c, jset, entry, k, from, version, big_endian); in journal_entry_btree_root_validate()
497 if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1, in journal_entry_blacklist_validate()
513 prt_printf(out, "seq=%llu", le64_to_cpu(bl->seq)); in journal_entry_blacklist_to_text()
525 if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2, in journal_entry_blacklist_v2_validate()
535 if (journal_entry_err_on(le64_to_cpu(bl_entry->start) > in journal_entry_blacklist_v2_validate()
536 le64_to_cpu(bl_entry->end), in journal_entry_blacklist_v2_validate()
554 le64_to_cpu(bl->start), in journal_entry_blacklist_v2_to_text()
555 le64_to_cpu(bl->end)); in journal_entry_blacklist_v2_to_text()
566 unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64); in journal_entry_usage_validate()
588 bch2_prt_fs_usage_type(out, u->entry.btree_id); in journal_entry_usage_to_text()
589 prt_printf(out, " v=%llu", le64_to_cpu(u->v)); in journal_entry_usage_to_text()
600 unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64); in journal_entry_data_usage_validate()
605 bytes < sizeof(*u) + u->r.nr_devs, in journal_entry_data_usage_validate()
613 if (journal_entry_err_on(bch2_replicas_entry_validate(&u->r, c, &err), in journal_entry_data_usage_validate()
632 bch2_replicas_entry_to_text(out, &u->r); in journal_entry_data_usage_to_text()
633 prt_printf(out, "=%llu", le64_to_cpu(u->v)); in journal_entry_data_usage_to_text()
644 unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64); in journal_entry_clock_validate()
655 if (journal_entry_err_on(clock->rw > 1, in journal_entry_clock_validate()
673 prt_printf(out, "%s=%llu", str_write_read(clock->rw), le64_to_cpu(clock->time)); in journal_entry_clock_to_text()
684 unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64); in journal_entry_dev_usage_validate()
697 if (journal_entry_err_on(u->pad, in journal_entry_dev_usage_validate()
719 prt_printf(out, "dev=%u", le32_to_cpu(u->dev)); in journal_entry_dev_usage_to_text()
726 le64_to_cpu(u->d[i].buckets), in journal_entry_dev_usage_to_text()
727 le64_to_cpu(u->d[i].sectors), in journal_entry_dev_usage_to_text()
728 le64_to_cpu(u->d[i].fragmented)); in journal_entry_dev_usage_to_text()
747 prt_printf(out, "%.*s", jset_entry_log_msg_bytes(l), l->d); in journal_entry_log_to_text()
811 bch2_prt_datetime(out, le64_to_cpu(datetime->seconds)); in journal_entry_datetime_to_text()
837 return entry->type < BCH_JSET_ENTRY_NR in bch2_journal_entry_validate()
838 ? bch2_jset_entry_ops[entry->type].validate(c, jset, entry, in bch2_journal_entry_validate()
846 bch2_prt_jset_entry_type(out, entry->type); in bch2_journal_entry_to_text()
848 if (entry->type < BCH_JSET_ENTRY_NR) { in bch2_journal_entry_to_text()
850 bch2_jset_entry_ops[entry->type].to_text(out, c, entry); in bch2_journal_entry_to_text()
860 .journal_seq = le64_to_cpu(jset->seq), in jset_validate_entries()
863 unsigned version = le32_to_cpu(jset->version); in jset_validate_entries()
867 from.journal_offset = (u64 *) entry - jset->_data; in jset_validate_entries()
873 jset->u64s = cpu_to_le32((u64 *) entry - jset->_data); in jset_validate_entries()
894 .journal_seq = le64_to_cpu(jset->seq), in jset_validate()
898 if (le64_to_cpu(jset->magic) != jset_magic(c)) in jset_validate()
901 unsigned version = le32_to_cpu(jset->version); in jset_validate()
906 ca ? ca->name : c->name, in jset_validate()
907 sector, le64_to_cpu(jset->seq), in jset_validate()
910 /* don't try to continue: */ in jset_validate()
911 return -EINVAL; in jset_validate()
918 ca ? ca->name : c->name, in jset_validate()
919 sector, le64_to_cpu(jset->seq), in jset_validate()
925 le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq), in jset_validate()
929 le64_to_cpu(jset->last_seq), in jset_validate()
930 le64_to_cpu(jset->seq))) { in jset_validate()
931 jset->last_seq = jset->seq; in jset_validate()
948 .journal_seq = le64_to_cpu(jset->seq), in jset_validate_early()
952 if (le64_to_cpu(jset->magic) != jset_magic(c)) in jset_validate_early()
955 unsigned version = le32_to_cpu(jset->version); in jset_validate_early()
960 ca ? ca->name : c->name, in jset_validate_early()
961 sector, le64_to_cpu(jset->seq), in jset_validate_early()
964 /* don't try to continue: */ in jset_validate_early()
965 return -EINVAL; in jset_validate_early()
977 ca ? ca->name : c->name, in jset_validate_early()
978 sector, le64_to_cpu(jset->seq), bytes)) in jset_validate_early()
979 le32_add_cpu(&jset->u64s, in jset_validate_early()
980 -((bytes - (bucket_sectors_left << 9)) / 8)); in jset_validate_early()
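/*
 * Hedged worked example of the shrink above (512-byte sectors): if the
 * jset claims 73728 bytes but only 128 sectors (65536 bytes) remain in
 * the bucket, u64s is reduced by (73728 - 65536) / 8 == 1024 u64s.
 */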
997 return -BCH_ERR_ENOMEM_journal_read_buf_realloc; in journal_read_buf_realloc()
1002 return -BCH_ERR_ENOMEM_journal_read_buf_realloc; in journal_read_buf_realloc()
1004 kvfree(b->data); in journal_read_buf_realloc()
1005 b->data = n; in journal_read_buf_realloc()
1006 b->size = new_size; in journal_read_buf_realloc()
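/*
 * Hedged note: callers are assumed to retry after the buffer grows -
 * e.g. journal_read_bucket() below reallocates when vstruct_bytes(j)
 * exceeds buf->size and then re-reads from the same offset.
 */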
1015 struct bch_fs *c = ca->fs; in journal_read_bucket()
1016 struct journal_device *ja = &ca->journal; in journal_read_bucket()
1017 struct jset *j = NULL; in journal_read_bucket() local
1019 u64 offset = bucket_to_sector(ca, ja->buckets[bucket]), in journal_read_bucket()
1020 end = offset + ca->mi.bucket_size; in journal_read_bucket()
1033 end - offset, buf->size >> 9); in journal_read_bucket()
1034 nr_bvecs = buf_pages(buf->data, sectors_read << 9); in journal_read_bucket()
1038 return -BCH_ERR_ENOMEM_journal_read_bucket; in journal_read_bucket()
1039 bio_init(bio, ca->disk_sb.bdev, bio->bi_inline_vecs, nr_bvecs, REQ_OP_READ); in journal_read_bucket()
1041 bio->bi_iter.bi_sector = offset; in journal_read_bucket()
1042 bch2_bio_map(bio, buf->data, sectors_read << 9); in journal_read_bucket()
1060 j = buf->data; in journal_read_bucket()
1063 ret = jset_validate_early(c, ca, j, offset, in journal_read_bucket()
1064 end - offset, sectors_read); in journal_read_bucket()
1067 sectors = vstruct_sectors(j, c->block_bits); in journal_read_bucket()
1070 if (vstruct_bytes(j) > buf->size) { in journal_read_bucket()
1072 vstruct_bytes(j)); in journal_read_bucket()
1091 if (le64_to_cpu(j->seq) > ja->highest_seq_found) { in journal_read_bucket()
1092 ja->highest_seq_found = le64_to_cpu(j->seq); in journal_read_bucket()
1093 ja->cur_idx = bucket; in journal_read_bucket()
1094 ja->sectors_free = ca->mi.bucket_size - in journal_read_bucket()
1095 bucket_remainder(ca, offset) - sectors; in journal_read_bucket()
1099 * This happens sometimes if we don't have discards on - in journal_read_bucket()
1104 if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket]) in journal_read_bucket()
1107 ja->bucket_seq[bucket] = le64_to_cpu(j->seq); in journal_read_bucket()
1109 enum bch_csum_type csum_type = JSET_CSUM_TYPE(j); in journal_read_bucket()
1111 csum_good = jset_csum_good(c, j, &csum); in journal_read_bucket()
1117 bch2_csum_err_msg(&err, csum_type, j->csum, csum), in journal_read_bucket()
1121 ret = bch2_encrypt(c, JSET_CSUM_TYPE(j), journal_nonce(j), in journal_read_bucket()
1122 j->encrypted_start, in journal_read_bucket()
1123 vstruct_end(j) - (void *) j->encrypted_start); in journal_read_bucket()
1126 mutex_lock(&jlist->lock); in journal_read_bucket()
1129 .dev = ca->dev_idx, in journal_read_bucket()
1131 .bucket_offset = offset - in journal_read_bucket()
1132 bucket_to_sector(ca, ja->buckets[bucket]), in journal_read_bucket()
1134 }, jlist, j); in journal_read_bucket()
1135 mutex_unlock(&jlist->lock); in journal_read_bucket()
1148 sectors_read -= sectors; in journal_read_bucket()
1149 j = ((void *) j) + (sectors << 9); in journal_read_bucket()
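/*
 * Hedged sketch of the in-bucket scan: jsets are packed back to back, so
 * the code above advances entry by entry until the read data runs out:
 *
 *	while (sectors_read) {
 *		sectors = vstruct_sectors(j, c->block_bits);
 *		...validate, checksum, journal_entry_add()...
 *		sectors_read -= sectors;
 *		j = ((void *) j) + (sectors << 9);
 *	}
 */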
1163 struct bch_fs *c = ca->fs; in CLOSURE_CALLBACK()
1165 container_of(cl->parent, struct journal_list, cl); in CLOSURE_CALLBACK()
1170 if (!ja->nr) in CLOSURE_CALLBACK()
1177 pr_debug("%u journal buckets", ja->nr); in CLOSURE_CALLBACK()
1179 for (i = 0; i < ja->nr; i++) { in CLOSURE_CALLBACK()
1186 * Set dirty_idx to indicate the entire journal is full and needs to be in CLOSURE_CALLBACK()
1187 * reclaimed - journal reclaim will immediately reclaim whatever isn't in CLOSURE_CALLBACK()
1190 ja->discard_idx = ja->dirty_idx_ondisk = in CLOSURE_CALLBACK()
1191 ja->dirty_idx = (ja->cur_idx + 1) % ja->nr; in CLOSURE_CALLBACK()
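/*
 * Hedged example of the ring arithmetic above: with ja->nr == 8 and
 * cur_idx == 7, the next bucket is (7 + 1) % 8 == 0; starting the
 * discard/dirty indexes there marks the whole ring dirty until journal
 * reclaim proves otherwise.
 */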
1193 bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret); in CLOSURE_CALLBACK()
1195 percpu_ref_put(&ca->io_ref); in CLOSURE_CALLBACK()
1199 mutex_lock(&jlist->lock); in CLOSURE_CALLBACK()
1200 jlist->ret = ret; in CLOSURE_CALLBACK()
1201 mutex_unlock(&jlist->lock); in CLOSURE_CALLBACK()
1224 if (!c->opts.fsck && in bch2_journal_read()
1228 if ((ca->mi.state == BCH_MEMBER_STATE_rw || in bch2_journal_read()
1229 ca->mi.state == BCH_MEMBER_STATE_ro) && in bch2_journal_read()
1230 percpu_ref_tryget(&ca->io_ref)) in bch2_journal_read()
1231 closure_call(&ca->journal.read, in bch2_journal_read()
1249 * Find the most recent flush entry, and ignore newer non-flush entries - in bch2_journal_read()
1252 genradix_for_each_reverse(&c->journal_entries, radix_iter, _i) { in bch2_journal_read()
1259 *blacklist_seq = *start_seq = le64_to_cpu(i->j.seq) + 1; in bch2_journal_read()
1261 if (JSET_NO_FLUSH(&i->j)) { in bch2_journal_read()
1262 i->ignore_blacklisted = true; in bch2_journal_read()
1266 if (!last_write_torn && !i->csum_good) { in bch2_journal_read()
1268 i->ignore_blacklisted = true; in bch2_journal_read()
1274 .journal_seq = le64_to_cpu(i->j.seq), in bch2_journal_read()
1276 if (journal_entry_err_on(le64_to_cpu(i->j.last_seq) > le64_to_cpu(i->j.seq), in bch2_journal_read()
1277 c, le32_to_cpu(i->j.version), &i->j, NULL, in bch2_journal_read()
1280 le64_to_cpu(i->j.last_seq), in bch2_journal_read()
1281 le64_to_cpu(i->j.seq))) in bch2_journal_read()
1282 i->j.last_seq = i->j.seq; in bch2_journal_read()
1284 *last_seq = le64_to_cpu(i->j.last_seq); in bch2_journal_read()
1285 *blacklist_seq = le64_to_cpu(i->j.seq) + 1; in bch2_journal_read()
1296 "journal read done, but no entries found after dropping non-flushes"); in bch2_journal_read()
1300 bch_info(c, "journal read done, replaying entries %llu-%llu", in bch2_journal_read()
1301 *last_seq, *blacklist_seq - 1); in bch2_journal_read()
1304 bch_info(c, "dropped unflushed entries %llu-%llu", in bch2_journal_read()
1305 *blacklist_seq, *start_seq - 1); in bch2_journal_read()
1308 genradix_for_each(&c->journal_entries, radix_iter, _i) { in bch2_journal_read()
1314 seq = le64_to_cpu(i->j.seq); in bch2_journal_read()
1321 fsck_err_on(!JSET_NO_FLUSH(&i->j), c, in bch2_journal_read()
1324 i->ignore_blacklisted = true; in bch2_journal_read()
1330 genradix_for_each(&c->journal_entries, radix_iter, _i) { in bch2_journal_read()
1336 BUG_ON(seq > le64_to_cpu(i->j.seq)); in bch2_journal_read()
1338 while (seq < le64_to_cpu(i->j.seq)) { in bch2_journal_read()
1342 while (seq < le64_to_cpu(i->j.seq) && in bch2_journal_read()
1346 if (seq == le64_to_cpu(i->j.seq)) in bch2_journal_read()
1351 while (seq < le64_to_cpu(i->j.seq) && in bch2_journal_read()
1357 prt_printf(&buf1, " size %zu", vstruct_sectors(&prev->j, c->block_bits)); in bch2_journal_read()
1362 missing_end = seq - 1; in bch2_journal_read()
1364 "journal entries %llu-%llu missing! (replaying %llu-%llu)\n" in bch2_journal_read()
1368 *last_seq, *blacklist_seq - 1, in bch2_journal_read()
1379 genradix_for_each(&c->journal_entries, radix_iter, _i) { in bch2_journal_read()
1390 darray_for_each(i->ptrs, ptr) { in bch2_journal_read()
1391 struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev); in bch2_journal_read()
1393 if (!ptr->csum_good) in bch2_journal_read()
1394 bch_err_dev_offset(ca, ptr->sector, in bch2_journal_read()
1396 le64_to_cpu(i->j.seq), in bch2_journal_read()
1397 i->csum_good ? " (had good copy on another device)" : ""); in bch2_journal_read()
1401 bch2_dev_have_ref(c, i->ptrs.data[0].dev), in bch2_journal_read()
1402 &i->j, in bch2_journal_read()
1403 i->ptrs.data[0].sector, in bch2_journal_read()
1408 darray_for_each(i->ptrs, ptr) in bch2_journal_read()
1409 replicas_entry_add_dev(&replicas.e, ptr->dev); in bch2_journal_read()
1418 (le64_to_cpu(i->j.seq) == *last_seq || in bch2_journal_read()
1421 le64_to_cpu(i->j.seq), buf.buf))) { in bch2_journal_read()
1435 static void journal_advance_devs_to_next_bucket(struct journal *j, in journal_advance_devs_to_next_bucket() argument
1439 struct bch_fs *c = container_of(j, struct bch_fs, journal); in journal_advance_devs_to_next_bucket()
1442 struct bch_dev *ca = rcu_dereference(c->devs[*i]); in journal_advance_devs_to_next_bucket()
1446 struct journal_device *ja = &ca->journal; in journal_advance_devs_to_next_bucket()
1448 if (sectors > ja->sectors_free && in journal_advance_devs_to_next_bucket()
1449 sectors <= ca->mi.bucket_size && in journal_advance_devs_to_next_bucket()
1450 bch2_journal_dev_buckets_available(j, ja, in journal_advance_devs_to_next_bucket()
1452 ja->cur_idx = (ja->cur_idx + 1) % ja->nr; in journal_advance_devs_to_next_bucket()
1453 ja->sectors_free = ca->mi.bucket_size; in journal_advance_devs_to_next_bucket()
1456 * ja->bucket_seq[ja->cur_idx] must always have in journal_advance_devs_to_next_bucket()
1459 ja->bucket_seq[ja->cur_idx] = le64_to_cpu(seq); in journal_advance_devs_to_next_bucket()
1464 static void __journal_write_alloc(struct journal *j, in __journal_write_alloc() argument
1471 struct bch_fs *c = container_of(j, struct bch_fs, journal); in __journal_write_alloc()
1474 struct bch_dev *ca = rcu_dereference(c->devs[*i]); in __journal_write_alloc()
1478 struct journal_device *ja = &ca->journal; in __journal_write_alloc()
1484 if (!ca->mi.durability || in __journal_write_alloc()
1485 ca->mi.state != BCH_MEMBER_STATE_rw || in __journal_write_alloc()
1486 !ja->nr || in __journal_write_alloc()
1487 bch2_bkey_has_device_c(bkey_i_to_s_c(&w->key), ca->dev_idx) || in __journal_write_alloc()
1488 sectors > ja->sectors_free) in __journal_write_alloc()
1491 bch2_dev_stripe_increment(ca, &j->wp.stripe); in __journal_write_alloc()
1493 bch2_bkey_append_ptr(&w->key, in __journal_write_alloc()
1496 ja->buckets[ja->cur_idx]) + in __journal_write_alloc()
1497 ca->mi.bucket_size - in __journal_write_alloc()
1498 ja->sectors_free, in __journal_write_alloc()
1499 .dev = ca->dev_idx, in __journal_write_alloc()
1502 ja->sectors_free -= sectors; in __journal_write_alloc()
1503 ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq); in __journal_write_alloc()
1505 *replicas += ca->mi.durability; in __journal_write_alloc()
1513 * journal_write_alloc - decide where to write next journal entry
1515 * @j: journal object
1516 * @w: journal buf (entry to be written)
1518 * Returns: 0 on success, or -BCH_ERR_insufficient_journal_devices on failure in journal_write_alloc()
1520 static int journal_write_alloc(struct journal *j, struct journal_buf *w) in journal_write_alloc() argument
1522 struct bch_fs *c = container_of(j, struct bch_fs, journal); in journal_write_alloc()
1525 unsigned sectors = vstruct_sectors(w->data, c->block_bits); in journal_write_alloc()
1526 unsigned target = c->opts.metadata_target ?: in journal_write_alloc()
1527 c->opts.foreground_target; in journal_write_alloc()
1529 READ_ONCE(c->opts.metadata_replicas); in journal_write_alloc()
1531 READ_ONCE(c->opts.metadata_replicas_required)); in journal_write_alloc()
1536 /* We might run more than once if we have to stop and do discards: */ in journal_write_alloc()
1537 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(&w->key)); in journal_write_alloc()
1539 struct bch_dev *ca = bch2_dev_rcu_noerror(c, p->dev); in journal_write_alloc()
1541 replicas += ca->mi.durability; in journal_write_alloc()
1546 devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs); in journal_write_alloc()
1548 __journal_write_alloc(j, w, &devs_sorted, sectors, &replicas, replicas_want); in journal_write_alloc()
1554 journal_advance_devs_to_next_bucket(j, &devs_sorted, sectors, w->data->seq); in journal_write_alloc()
1568 BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX); in journal_write_alloc()
1570 return replicas >= replicas_need ? 0 : -BCH_ERR_insufficient_journal_devices; in journal_write_alloc()
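/*
 * Hedged recap of the accounting above: durability already provided by
 * ptrs in w->key counts toward 'replicas'; each device added in
 * __journal_write_alloc() contributes ca->mi.durability more, and only
 * when replicas still falls short of replicas_need does the function
 * fail with -BCH_ERR_insufficient_journal_devices.
 */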
1573 static void journal_buf_realloc(struct journal *j, struct journal_buf *buf) in journal_buf_realloc() argument
1575 struct bch_fs *c = container_of(j, struct bch_fs, journal); in journal_buf_realloc()
1577 /* we aren't holding j->lock: */ in journal_buf_realloc()
1578 unsigned new_size = READ_ONCE(j->buf_size_want); in journal_buf_realloc()
1581 if (buf->buf_size >= new_size) in journal_buf_realloc()
1593 memcpy(new_buf, buf->data, buf->buf_size); in journal_buf_realloc()
1595 spin_lock(&j->lock); in journal_buf_realloc()
1596 swap(buf->data, new_buf); in journal_buf_realloc()
1597 swap(buf->buf_size, new_size); in journal_buf_realloc()
1598 spin_unlock(&j->lock); in journal_buf_realloc()
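/*
 * Hedged sketch of the swap-under-lock pattern above (allocation flags
 * assumed): allocate and copy outside j->lock, swap pointers inside it,
 * free the old buffer afterwards - readers only ever see a complete
 * buffer:
 *
 *	new_buf = kvmalloc(new_size, GFP_NOFS|__GFP_NOWARN);
 *	if (!new_buf)
 *		return;
 *	memcpy(new_buf, buf->data, buf->buf_size);
 *	spin_lock(&j->lock);
 *	swap(buf->data, new_buf);
 *	swap(buf->buf_size, new_size);
 *	spin_unlock(&j->lock);
 *	kvfree(new_buf);	// now points at the old allocation
 */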
1603 static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j) in journal_last_unwritten_buf() argument
1605 return j->buf + (journal_last_unwritten_seq(j) & JOURNAL_BUF_MASK); in journal_last_unwritten_buf()
1611 struct journal *j = container_of(w, struct journal, buf[w->idx]); in CLOSURE_CALLBACK() local
1612 struct bch_fs *c = container_of(j, struct bch_fs, journal); in CLOSURE_CALLBACK()
1615 u64 seq = le64_to_cpu(w->data->seq); in CLOSURE_CALLBACK()
1618 bch2_time_stats_update(!JSET_NO_FLUSH(w->data) in CLOSURE_CALLBACK()
1619 ? j->flush_write_time in CLOSURE_CALLBACK()
1620 : j->noflush_write_time, j->write_start_time); in CLOSURE_CALLBACK()
1622 if (!w->devs_written.nr) { in CLOSURE_CALLBACK()
1623 bch_err(c, "unable to write journal to sufficient devices"); in CLOSURE_CALLBACK()
1624 err = -EIO; in CLOSURE_CALLBACK()
1627 w->devs_written); in CLOSURE_CALLBACK()
1629 err = -EIO; in CLOSURE_CALLBACK()
1637 spin_lock(&j->lock); in CLOSURE_CALLBACK()
1638 if (seq >= j->pin.front) in CLOSURE_CALLBACK()
1639 journal_seq_pin(j, seq)->devs = w->devs_written; in CLOSURE_CALLBACK()
1640 if (err && (!j->err_seq || seq < j->err_seq)) in CLOSURE_CALLBACK()
1641 j->err_seq = seq; in CLOSURE_CALLBACK()
1642 w->write_done = true; in CLOSURE_CALLBACK()
1646 for (seq = journal_last_unwritten_seq(j); in CLOSURE_CALLBACK()
1647 seq <= journal_cur_seq(j); in CLOSURE_CALLBACK()
1649 w = j->buf + (seq & JOURNAL_BUF_MASK); in CLOSURE_CALLBACK()
1650 if (!w->write_done) in CLOSURE_CALLBACK()
1653 if (!j->err_seq && !JSET_NO_FLUSH(w->data)) { in CLOSURE_CALLBACK()
1654 j->flushed_seq_ondisk = seq; in CLOSURE_CALLBACK()
1655 j->last_seq_ondisk = w->last_seq; in CLOSURE_CALLBACK()
1658 closure_wake_up(&c->freelist_wait); in CLOSURE_CALLBACK()
1662 j->seq_ondisk = seq; in CLOSURE_CALLBACK()
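/*
 * Hedged note on the loop above: journal buffers can complete out of
 * order, so each completion rescans from the last unwritten seq and only
 * advances seq_ondisk/flushed_seq_ondisk across a contiguous prefix of
 * fully written buffers.
 */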
1671 if (j->watermark != BCH_WATERMARK_stripe) in CLOSURE_CALLBACK()
1672 journal_reclaim_kick(&c->journal); in CLOSURE_CALLBACK()
1674 old.v = atomic64_read(&j->reservations.counter); in CLOSURE_CALLBACK()
1681 } while (!atomic64_try_cmpxchg(&j->reservations.counter, in CLOSURE_CALLBACK()
1684 closure_wake_up(&w->wait); in CLOSURE_CALLBACK()
1689 bch2_journal_reclaim_fast(j); in CLOSURE_CALLBACK()
1690 bch2_journal_space_available(j); in CLOSURE_CALLBACK()
1692 track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight], false); in CLOSURE_CALLBACK()
1694 journal_wake(j); in CLOSURE_CALLBACK()
1697 if (journal_last_unwritten_seq(j) == journal_cur_seq(j) && in CLOSURE_CALLBACK()
1699 struct journal_buf *buf = journal_cur_buf(j); in CLOSURE_CALLBACK()
1700 long delta = buf->expires - jiffies; in CLOSURE_CALLBACK()
1703 * We don't close a journal entry to write it while there are in CLOSURE_CALLBACK()
1704 * previous entries still in flight - the current journal entry in CLOSURE_CALLBACK()
1705 * might want to be written now: in CLOSURE_CALLBACK()
1707 mod_delayed_work(j->wq, &j->write_work, max(0L, delta)); in CLOSURE_CALLBACK()
1711 * We don't typically trigger journal writes from here - the next journal in CLOSURE_CALLBACK()
1713 * allocated, in bch2_journal_write() - but the journal write error path in CLOSURE_CALLBACK()
1716 bch2_journal_do_writes(j); in CLOSURE_CALLBACK()
1717 spin_unlock(&j->lock); in CLOSURE_CALLBACK()
1723 struct bch_dev *ca = jbio->ca; in journal_write_endio()
1724 struct journal *j = &ca->fs->journal; in journal_write_endio() local
1725 struct journal_buf *w = j->buf + jbio->buf_idx; in journal_write_endio()
1727 if (bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write, in journal_write_endio()
1729 le64_to_cpu(w->data->seq), in journal_write_endio()
1730 bch2_blk_status_to_str(bio->bi_status)) || in journal_write_endio()
1734 spin_lock_irqsave(&j->err_lock, flags); in journal_write_endio()
1735 bch2_dev_list_drop_dev(&w->devs_written, ca->dev_idx); in journal_write_endio()
1736 spin_unlock_irqrestore(&j->err_lock, flags); in journal_write_endio()
1739 closure_put(&w->io); in journal_write_endio()
1740 percpu_ref_put(&ca->io_ref); in journal_write_endio()
1746 struct journal *j = container_of(w, struct journal, buf[w->idx]); in CLOSURE_CALLBACK() local
1747 struct bch_fs *c = container_of(j, struct bch_fs, journal); in CLOSURE_CALLBACK()
1748 unsigned sectors = vstruct_sectors(w->data, c->block_bits); in CLOSURE_CALLBACK()
1750 extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) { in CLOSURE_CALLBACK()
1751 struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, WRITE); in CLOSURE_CALLBACK()
1758 this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal], in CLOSURE_CALLBACK()
1761 struct journal_device *ja = &ca->journal; in CLOSURE_CALLBACK()
1762 struct bio *bio = &ja->bio[w->idx]->bio; in CLOSURE_CALLBACK()
1763 bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META); in CLOSURE_CALLBACK()
1764 bio->bi_iter.bi_sector = ptr->offset; in CLOSURE_CALLBACK()
1765 bio->bi_end_io = journal_write_endio; in CLOSURE_CALLBACK()
1766 bio->bi_private = ca; in CLOSURE_CALLBACK()
1767 bio->bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 0); in CLOSURE_CALLBACK()
1769 BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector); in CLOSURE_CALLBACK()
1770 ca->prev_journal_sector = bio->bi_iter.bi_sector; in CLOSURE_CALLBACK()
1772 if (!JSET_NO_FLUSH(w->data)) in CLOSURE_CALLBACK()
1773 bio->bi_opf |= REQ_FUA; in CLOSURE_CALLBACK()
1774 if (!JSET_NO_FLUSH(w->data) && !w->separate_flush) in CLOSURE_CALLBACK()
1775 bio->bi_opf |= REQ_PREFLUSH; in CLOSURE_CALLBACK()
1777 bch2_bio_map(bio, w->data, sectors << 9); in CLOSURE_CALLBACK()
1782 ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq); in CLOSURE_CALLBACK()
1785 continue_at(cl, journal_write_done, j->wq); in CLOSURE_CALLBACK()
1791 struct journal *j = container_of(w, struct journal, buf[w->idx]); in CLOSURE_CALLBACK() local
1792 struct bch_fs *c = container_of(j, struct bch_fs, journal); in CLOSURE_CALLBACK()
1794 if (j->seq_ondisk + 1 != le64_to_cpu(w->data->seq)) { in CLOSURE_CALLBACK()
1795 spin_lock(&j->lock); in CLOSURE_CALLBACK()
1796 if (j->seq_ondisk + 1 != le64_to_cpu(w->data->seq)) { in CLOSURE_CALLBACK()
1797 closure_wait(&j->async_wait, cl); in CLOSURE_CALLBACK()
1798 spin_unlock(&j->lock); in CLOSURE_CALLBACK()
1799 continue_at(cl, journal_write_preflush, j->wq); in CLOSURE_CALLBACK()
1802 spin_unlock(&j->lock); in CLOSURE_CALLBACK()
1805 if (w->separate_flush) { in CLOSURE_CALLBACK()
1807 percpu_ref_get(&ca->io_ref); in CLOSURE_CALLBACK()
1809 struct journal_device *ja = &ca->journal; in CLOSURE_CALLBACK()
1810 struct bio *bio = &ja->bio[w->idx]->bio; in CLOSURE_CALLBACK()
1811 bio_reset(bio, ca->disk_sb.bdev, in CLOSURE_CALLBACK()
1813 bio->bi_end_io = journal_write_endio; in CLOSURE_CALLBACK()
1814 bio->bi_private = ca; in CLOSURE_CALLBACK()
1818 continue_at(cl, journal_write_submit, j->wq); in CLOSURE_CALLBACK()
1821 * no need to punt to another work item if we're not waiting on in CLOSURE_CALLBACK()
1824 journal_write_submit(&cl->work); in CLOSURE_CALLBACK()
1828 static int bch2_journal_write_prep(struct journal *j, struct journal_buf *w) in bch2_journal_write_prep() argument
1830 struct bch_fs *c = container_of(j, struct bch_fs, journal); in bch2_journal_write_prep()
1832 struct jset *jset = w->data; in bch2_journal_write_prep()
1837 u64 seq = le64_to_cpu(jset->seq); in bch2_journal_write_prep()
1845 * If we wanted to be really fancy here, we could sort all the keys in in bch2_journal_write_prep()
1846 * the jset and drop keys that were overwritten - probably not worth it: in bch2_journal_write_prep()
1849 unsigned u64s = le16_to_cpu(i->u64s); in bch2_journal_write_prep()
1857 * entry gets written we have to propagate them to in bch2_journal_write_prep()
1858 * c->btree_roots in bch2_journal_write_prep()
1860 * But, every journal entry we write has to contain all the in bch2_journal_write_prep()
1862 * to c->btree_roots we have to get any missing btree roots and in bch2_journal_write_prep()
1863 * add them to this journal entry: in bch2_journal_write_prep()
1865 switch (i->type) { in bch2_journal_write_prep()
1868 __set_bit(i->btree_id, &btree_roots_have); in bch2_journal_write_prep()
1871 EBUG_ON(!w->need_flush_to_write_buffer); in bch2_journal_write_prep()
1876 jset_entry_for_each_key(i, k) { in bch2_journal_write_prep()
1877 ret = bch2_journal_key_to_wb(c, &wb, i->btree_id, k); in bch2_journal_write_prep()
1879 bch2_fs_fatal_error(c, "flushing journal keys to btree write buffer: %s", in bch2_journal_write_prep()
1885 i->type = BCH_JSET_ENTRY_btree_keys; in bch2_journal_write_prep()
1893 bch2_fs_fatal_error(c, "error flushing journal keys to btree write buffer: %s", in bch2_journal_write_prep()
1899 spin_lock(&c->journal.lock); in bch2_journal_write_prep()
1900 w->need_flush_to_write_buffer = false; in bch2_journal_write_prep()
1901 spin_unlock(&c->journal.lock); in bch2_journal_write_prep()
1909 d->entry.type = BCH_JSET_ENTRY_datetime; in bch2_journal_write_prep()
1910 d->seconds = cpu_to_le64(ktime_get_real_seconds()); in bch2_journal_write_prep()
1913 u64s = (u64 *) end - (u64 *) start; in bch2_journal_write_prep()
1915 WARN_ON(u64s > j->entry_u64s_reserved); in bch2_journal_write_prep()
1917 le32_add_cpu(&jset->u64s, u64s); in bch2_journal_write_prep()
1919 sectors = vstruct_sectors(jset, c->block_bits); in bch2_journal_write_prep()
1922 if (sectors > w->sectors) { in bch2_journal_write_prep()
1924 vstruct_bytes(jset), w->sectors << 9, in bch2_journal_write_prep()
1925 u64s, w->u64s_reserved, j->entry_u64s_reserved); in bch2_journal_write_prep()
1926 return -EINVAL; in bch2_journal_write_prep()
1929 jset->magic = cpu_to_le64(jset_magic(c)); in bch2_journal_write_prep()
1930 jset->version = cpu_to_le32(c->sb.version); in bch2_journal_write_prep()
1936 j->last_empty_seq = seq; in bch2_journal_write_prep()
1941 if (le32_to_cpu(jset->version) < bcachefs_metadata_version_current) in bch2_journal_write_prep()
1949 jset->encrypted_start, in bch2_journal_write_prep()
1950 vstruct_end(jset) - (void *) jset->encrypted_start); in bch2_journal_write_prep()
1954 jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset), in bch2_journal_write_prep()
1961 memset((void *) jset + bytes, 0, (sectors << 9) - bytes); in bch2_journal_write_prep()
1965 static int bch2_journal_write_pick_flush(struct journal *j, struct journal_buf *w) in bch2_journal_write_pick_flush() argument
1967 struct bch_fs *c = container_of(j, struct bch_fs, journal); in bch2_journal_write_pick_flush()
1968 int error = bch2_journal_error(j); in bch2_journal_write_pick_flush()
1971 * If the journal is in an error state - we did an emergency shutdown - in bch2_journal_write_pick_flush()
1972 * we prefer to continue doing journal writes. We just mark them as in bch2_journal_write_pick_flush()
1974 * list_journal tool - this helps in debugging. in bch2_journal_write_pick_flush()
1980 * previously - we can't leave the journal without any flush writes in in bch2_journal_write_pick_flush()
1986 if (error && test_bit(JOURNAL_need_flush_write, &j->flags)) in bch2_journal_write_pick_flush()
1987 return -EIO; in bch2_journal_write_pick_flush()
1990 w->noflush || in bch2_journal_write_pick_flush()
1991 (!w->must_flush && in bch2_journal_write_pick_flush()
1992 time_before(jiffies, j->last_flush_write + in bch2_journal_write_pick_flush()
1993 msecs_to_jiffies(c->opts.journal_flush_delay)) && in bch2_journal_write_pick_flush()
1994 test_bit(JOURNAL_may_skip_flush, &j->flags))) { in bch2_journal_write_pick_flush()
1995 w->noflush = true; in bch2_journal_write_pick_flush()
1996 SET_JSET_NO_FLUSH(w->data, true); in bch2_journal_write_pick_flush()
1997 w->data->last_seq = 0; in bch2_journal_write_pick_flush()
1998 w->last_seq = 0; in bch2_journal_write_pick_flush()
2000 j->nr_noflush_writes++; in bch2_journal_write_pick_flush()
2002 w->must_flush = true; in bch2_journal_write_pick_flush()
2003 j->last_flush_write = jiffies; in bch2_journal_write_pick_flush()
2004 j->nr_flush_writes++; in bch2_journal_write_pick_flush()
2005 clear_bit(JOURNAL_need_flush_write, &j->flags); in bch2_journal_write_pick_flush()
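/*
 * Hedged summary of the branch above: the write is demoted to noflush
 * (JSET_NO_FLUSH set, last_seq zeroed) only when it isn't must_flush,
 * the previous flush was within journal_flush_delay, and
 * JOURNAL_may_skip_flush is set; otherwise it becomes a flush write and
 * last_flush_write/nr_flush_writes are updated.
 */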
2014 struct journal *j = container_of(w, struct journal, buf[w->idx]); in CLOSURE_CALLBACK() local
2015 struct bch_fs *c = container_of(j, struct bch_fs, journal); in CLOSURE_CALLBACK()
2023 BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb)); in CLOSURE_CALLBACK()
2024 BUG_ON(!w->write_started); in CLOSURE_CALLBACK()
2025 BUG_ON(w->write_allocated); in CLOSURE_CALLBACK()
2026 BUG_ON(w->write_done); in CLOSURE_CALLBACK()
2028 j->write_start_time = local_clock(); in CLOSURE_CALLBACK()
2030 spin_lock(&j->lock); in CLOSURE_CALLBACK()
2032 w->separate_flush = true; in CLOSURE_CALLBACK()
2034 ret = bch2_journal_write_pick_flush(j, w); in CLOSURE_CALLBACK()
2035 spin_unlock(&j->lock); in CLOSURE_CALLBACK()
2039 mutex_lock(&j->buf_lock); in CLOSURE_CALLBACK()
2040 journal_buf_realloc(j, w); in CLOSURE_CALLBACK()
2042 ret = bch2_journal_write_prep(j, w); in CLOSURE_CALLBACK()
2043 mutex_unlock(&j->buf_lock); in CLOSURE_CALLBACK()
2047 j->entry_bytes_written += vstruct_bytes(w->data); in CLOSURE_CALLBACK()
2050 spin_lock(&j->lock); in CLOSURE_CALLBACK()
2051 ret = journal_write_alloc(j, w); in CLOSURE_CALLBACK()
2052 if (!ret || !j->can_discard) in CLOSURE_CALLBACK()
2055 spin_unlock(&j->lock); in CLOSURE_CALLBACK()
2056 bch2_journal_do_discards(j); in CLOSURE_CALLBACK()
2059 if (ret && !bch2_journal_error(j)) { in CLOSURE_CALLBACK()
2063 prt_printf(&buf, bch2_fmt(c, "Unable to allocate journal write at seq %llu for %zu sectors: %s"), in CLOSURE_CALLBACK()
2064 le64_to_cpu(w->data->seq), in CLOSURE_CALLBACK()
2065 vstruct_sectors(w->data, c->block_bits), in CLOSURE_CALLBACK()
2067 __bch2_journal_debug_to_text(&buf, j); in CLOSURE_CALLBACK()
2068 spin_unlock(&j->lock); in CLOSURE_CALLBACK()
2076 * write is allocated, no longer need to account for it in in CLOSURE_CALLBACK()
2079 w->sectors = 0; in CLOSURE_CALLBACK()
2080 w->write_allocated = true; in CLOSURE_CALLBACK()
2086 bch2_journal_space_available(j); in CLOSURE_CALLBACK()
2087 bch2_journal_do_writes(j); in CLOSURE_CALLBACK()
2088 spin_unlock(&j->lock); in CLOSURE_CALLBACK()
2090 w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key)); in CLOSURE_CALLBACK()
2092 if (c->opts.nochanges) in CLOSURE_CALLBACK()
2096 * Mark journal replicas before we submit the write to guarantee in CLOSURE_CALLBACK()
2100 w->devs_written); in CLOSURE_CALLBACK()
2105 if (!JSET_NO_FLUSH(w->data)) in CLOSURE_CALLBACK()
2106 continue_at(cl, journal_write_preflush, j->wq); in CLOSURE_CALLBACK()
2108 continue_at(cl, journal_write_submit, j->wq); in CLOSURE_CALLBACK()
2111 continue_at(cl, journal_write_done, j->wq); in CLOSURE_CALLBACK()
2115 continue_at(cl, journal_write_done, j->wq); in CLOSURE_CALLBACK()
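/*
 * Hedged recap of the dispatch above: flush writes are routed through
 * journal_write_preflush() so cache flushes complete first, noflush
 * writes go straight to journal_write_submit(), and the error paths
 * short-circuit to journal_write_done().
 */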