Matches for "era" in the device-mapper era target (drivers/md/dm-era-target.c), grouped by function:
	#define DM_MSG_PREFIX "era"
Comment above writeset_free():

	/*
	 * This does not free off the on disk bitset as this will normally
	 * be done after digesting into the era array.
	 */
In check_metadata_version():

	DMERR("Era metadata version %u found, but only versions between %u and %u supported.",
	      ...);
Comment in struct era_metadata:

	/*
	 * We preallocate 2 writesets.  When an era rolls over we
	 * switch between them.  This means the allocation is done at
	 * preresume time, rather than on the io path.
	 */
In create_fresh_metadata():

	DMERR("couldn't create era array");
Writesets get 'digested' into the main era array.  The digest state
carries the era being digested:

	struct digest {
		uint32_t era;
		...
	};

In metadata_digest_remove_writeset():

	uint64_t key = d->era;

In metadata_digest_lookup_writeset():

	d->era = key;
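Digesting moves the per-block "written this era" bits out of a writeset
bitmap and records the era number in the flat era array. A minimal
userspace sketch of that idea, with made-up names (writeset_bits,
era_array) standing in for the kernel's structures:

	#include <stdint.h>
	#include <stdio.h>

	#define NR_BLOCKS 16

	static unsigned long writeset_bits;    /* bit b set => block b written this era */
	static uint32_t era_array[NR_BLOCKS];  /* era in which each block was last written */

	static void digest_writeset(uint32_t era)
	{
		for (unsigned int b = 0; b < NR_BLOCKS; b++)
			if (writeset_bits & (1UL << b))
				era_array[b] = era;
		writeset_bits = 0;             /* bitset can be recycled for the next era */
	}

	int main(void)
	{
		writeset_bits = (1UL << 3) | (1UL << 7);
		digest_writeset(42);
		printf("block 3 -> era %u, block 7 -> era %u\n",
		       era_array[3], era_array[7]);
		return 0;
	}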
In metadata_era_rollover():

	DMERR("%s: new era failed", __func__);
Metadata snapshots allow userland to access era data.

In metadata_take_snap():

	DMERR("%s: era rollover failed", __func__);
	...
	DMERR("%s: couldn't inc era tree root", __func__);

In metadata_drop_snap():

	DMERR("%s: error deleting era array clone", __func__);
The stats reported to userland include the current era:

	struct metadata_stats {
		...
		uint32_t era;
	};

In metadata_get_stats():

	s->era = md->current_era;
The target's per-device context, reconstructed here from the fields used
throughout the listing below:

	struct era {
		struct dm_target *ti;
		struct dm_dev *metadata_dev;
		struct dm_dev *origin_dev;

		dm_block_t nr_blocks;
		uint32_t sectors_per_block;
		int sectors_per_block_shift;
		struct era_metadata *md;

		struct workqueue_struct *wq;
		struct work_struct worker;

		spinlock_t deferred_lock;
		struct bio_list deferred_bios;

		spinlock_t rpc_lock;
		struct list_head rpc_calls;

		struct digest digest;
		atomic_t suspended;
	};
	static bool block_size_is_power_of_two(struct era *era)
	{
		return era->sectors_per_block_shift >= 0;
	}

	static dm_block_t get_block(struct era *era, struct bio *bio)
	{
		dm_block_t block_nr = bio->bi_iter.bi_sector;

		if (!block_size_is_power_of_two(era))
			(void) sector_div(block_nr, era->sectors_per_block);
		else
			block_nr >>= era->sectors_per_block_shift;

		return block_nr;
	}
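get_block() turns a bio's start sector into a block number, shifting when
the block size is a power of two and falling back to sector_div()
otherwise. A standalone check that the two paths agree for power-of-two
sizes (illustrative userspace C, not kernel code):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t sectors_per_block = 128;  /* power of two: 1 << 7 */
		int shift = 7;                     /* what __ffs(128) yields */
		uint64_t sector = 999999;

		/* For power-of-two block sizes, shifting equals dividing. */
		assert((sector >> shift) == (sector / sectors_per_block));
		return 0;
	}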
	static void remap_to_origin(struct era *era, struct bio *bio)
	{
		bio_set_dev(bio, era->origin_dev->bdev);
	}
	static void wake_worker(struct era *era)
	{
		if (!atomic_read(&era->suspended))
			queue_work(era->wq, &era->worker);
	}
	static void process_old_eras(struct era *era)
	{
		int r;

		if (!era->digest.step)
			return;

		/* Digestion is incremental: do one step, requeue if more remain. */
		r = era->digest.step(era->md, &era->digest);
		if (r < 0) {
			...
			era->digest.step = NULL;

		} else if (era->digest.step)
			wake_worker(era);
	}
In process_deferred_bios():

	struct writeset *ws = era->md->current_writeset;
	...
	/* Take the whole deferred list under the lock, process it outside. */
	spin_lock(&era->deferred_lock);
	bio_list_merge_init(&deferred_bios, &era->deferred_bios);
	spin_unlock(&era->deferred_lock);
	...
	r = writeset_test_and_set(&era->md->bitset_info, ws,
				  get_block(era, bio));
	...
	r = metadata_commit(era->md);
	...
	/* Only update the in core writeset once the on-disk one is committed. */
	set_bit(get_block(era, bio), ws->bits);
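The deferred-bio queue follows a common pattern: splice the whole list
into a local one while holding the lock, then process it with the lock
dropped. A hedged pthreads sketch of the same pattern (list and node
names invented for the example):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct node { struct node *next; int payload; };

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static struct node *pending;  /* guarded by lock, like era->deferred_bios */

	static void defer(int payload)
	{
		struct node *n = malloc(sizeof(*n));
		if (!n)
			return;
		n->payload = payload;
		pthread_mutex_lock(&lock);
		n->next = pending;
		pending = n;
		pthread_mutex_unlock(&lock);
	}

	static void process_deferred(void)
	{
		/* Take the whole list in O(1) under the lock... */
		pthread_mutex_lock(&lock);
		struct node *local = pending;
		pending = NULL;
		pthread_mutex_unlock(&lock);

		/* ...then do the slow work without holding it. */
		while (local) {
			struct node *n = local;
			local = n->next;
			printf("processing %d\n", n->payload);
			free(n);
		}
	}

	int main(void)
	{
		defer(1);
		defer(2);
		process_deferred();
		return 0;
	}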
In process_rpc_calls():

	/* Same splice-then-process pattern for queued RPCs. */
	spin_lock(&era->rpc_lock);
	list_splice_init(&era->rpc_calls, &calls);
	spin_unlock(&era->rpc_lock);
	...
	rpc->result = rpc->fn0 ? rpc->fn0(era->md) : rpc->fn1(era->md, rpc->arg);
	...
	r = metadata_commit(era->md);
	static void kick_off_digest(struct era *era)
	{
		if (era->md->archived_writesets) {
			era->md->archived_writesets = false;
			metadata_digest_start(era->md, &era->digest);
		}
	}
	static void do_work(struct work_struct *ws)
	{
		struct era *era = container_of(ws, struct era, worker);

		kick_off_digest(era);
		process_old_eras(era);
		process_deferred_bios(era);
		process_rpc_calls(era);
	}
	static void defer_bio(struct era *era, struct bio *bio)
	{
		spin_lock(&era->deferred_lock);
		bio_list_add(&era->deferred_bios, bio);
		spin_unlock(&era->deferred_lock);

		wake_worker(era);
	}
	static int perform_rpc(struct era *era, struct rpc *rpc)
	{
		rpc->result = 0;
		init_completion(&rpc->complete);

		spin_lock(&era->rpc_lock);
		list_add(&rpc->list, &era->rpc_calls);
		spin_unlock(&era->rpc_lock);

		wake_worker(era);
		wait_for_completion(&rpc->complete);

		return rpc->result;
	}
	static int in_worker0(struct era *era, int (*fn)(struct era_metadata *md))
	{
		struct rpc rpc;

		rpc.fn0 = fn;
		rpc.fn1 = NULL;

		return perform_rpc(era, &rpc);
	}

	static int in_worker1(struct era *era,
			      int (*fn)(struct era_metadata *md, void *ref), void *arg)
	{
		...
		return perform_rpc(era, &rpc);
	}
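in_worker0()/in_worker1() wrap a synchronous call into the worker thread:
queue an rpc, kick the worker, block until it completes. A hedged
pthreads sketch of the same rendezvous (struct and function names are
invented for the example; the kernel uses a completion rather than a
condition variable):

	#include <pthread.h>
	#include <stdio.h>

	/* Modelled loosely on the target's struct rpc: a function pointer
	 * plus a "completion" the caller blocks on. */
	struct rpc {
		int (*fn)(void *arg);
		void *arg;
		int result;
		int done;
		pthread_mutex_t mu;
		pthread_cond_t cv;
	};

	static void *worker(void *p)
	{
		struct rpc *rpc = p;  /* stand-in for popping era->rpc_calls */
		int r = rpc->fn(rpc->arg);

		pthread_mutex_lock(&rpc->mu);
		rpc->result = r;
		rpc->done = 1;
		pthread_cond_signal(&rpc->cv);
		pthread_mutex_unlock(&rpc->mu);
		return NULL;
	}

	static int say_hello(void *arg)
	{
		printf("worker runs: %s\n", (const char *)arg);
		return 0;
	}

	int main(void)
	{
		struct rpc rpc = {
			.fn = say_hello, .arg = "checkpoint",
			.mu = PTHREAD_MUTEX_INITIALIZER,
			.cv = PTHREAD_COND_INITIALIZER,
		};
		pthread_t t;

		pthread_create(&t, NULL, worker, &rpc);

		/* Like perform_rpc(): queue the call, then wait for completion. */
		pthread_mutex_lock(&rpc.mu);
		while (!rpc.done)
			pthread_cond_wait(&rpc.cv, &rpc.mu);
		pthread_mutex_unlock(&rpc.mu);

		pthread_join(t, NULL);
		return rpc.result;
	}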
	static void start_worker(struct era *era)
	{
		atomic_set(&era->suspended, 0);
	}

	static void stop_worker(struct era *era)
	{
		atomic_set(&era->suspended, 1);
		drain_workqueue(era->wq);
	}
	static void era_destroy(struct era *era)
	{
		if (era->md)
			metadata_close(era->md);

		if (era->wq)
			destroy_workqueue(era->wq);

		if (era->origin_dev)
			dm_put_device(era->ti, era->origin_dev);

		if (era->metadata_dev)
			dm_put_device(era->ti, era->metadata_dev);

		kfree(era);
	}
	static dm_block_t calc_nr_blocks(struct era *era)
	{
		return dm_sector_div_up(era->ti->len, era->sectors_per_block);
	}
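dm_sector_div_up() rounds the target length up to a whole number of
blocks; the usual round-up-division identity, restated in plain C:

	#include <assert.h>
	#include <stdint.h>

	/* How many blocks are needed to cover len sectors.  This mirrors
	 * what dm_sector_div_up() computes for calc_nr_blocks(). */
	static uint64_t div_up(uint64_t len, uint64_t block)
	{
		return (len + block - 1) / block;
	}

	int main(void)
	{
		assert(div_up(1024, 128) == 8);  /* exact multiple */
		assert(div_up(1025, 128) == 9);  /* partial tail block still counts */
		return 0;
	}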
era_ctr() takes <metadata dev> <data dev> <data block size (sectors)>:

	static int era_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	{
		int r;
		char dummy;
		struct era *era;
		struct era_metadata *md;
		...
		era = kzalloc(sizeof(*era), GFP_KERNEL);
		if (!era) {
			ti->error = "Error allocating era structure";
			return -ENOMEM;
		}

		era->ti = ti;

		r = dm_get_device(ti, argv[0], ..., &era->metadata_dev);
		if (r) {
			...
			era_destroy(era);
			...
		}

		r = dm_get_device(ti, argv[1], ..., &era->origin_dev);
		if (r) {
			...
			era_destroy(era);
			...
		}

		r = sscanf(argv[2], "%u%c", &era->sectors_per_block, &dummy);
		if (r != 1) {
			...
			era_destroy(era);
			...
		}

		r = dm_set_target_max_io_len(ti, era->sectors_per_block);
		if (r) {
			...
			era_destroy(era);
			...
		}

		if (!valid_block_size(era->sectors_per_block)) {
			...
			era_destroy(era);
			...
		}

		/* A shift of -1 routes get_block() to the division fallback. */
		if (era->sectors_per_block & (era->sectors_per_block - 1))
			era->sectors_per_block_shift = -1;
		else
			era->sectors_per_block_shift = __ffs(era->sectors_per_block);

		md = metadata_open(era->metadata_dev->bdev, era->sectors_per_block, true);
		if (IS_ERR(md)) {
			...
			era_destroy(era);
			return PTR_ERR(md);
		}
		era->md = md;

		era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
		if (!era->wq) {
			...
			era_destroy(era);
			return -ENOMEM;
		}
		INIT_WORK(&era->worker, do_work);

		spin_lock_init(&era->deferred_lock);
		bio_list_init(&era->deferred_bios);

		spin_lock_init(&era->rpc_lock);
		INIT_LIST_HEAD(&era->rpc_calls);

		ti->private = era;
		...
		return 0;
	}
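The constructor's power-of-two test relies on x & (x - 1) clearing the
lowest set bit, so the expression is zero exactly for powers of two;
__ffs() then yields the shift. A standalone restatement, using
GCC/Clang's __builtin_ctz() in place of the kernel's __ffs():

	#include <assert.h>
	#include <stdint.h>

	static int shift_for(uint32_t sectors_per_block)
	{
		/* Non-power-of-two sizes get -1, selecting the division path. */
		if (sectors_per_block & (sectors_per_block - 1))
			return -1;
		return __builtin_ctz(sectors_per_block);  /* kernel: __ffs() */
	}

	int main(void)
	{
		assert(shift_for(128) == 7);   /* 128 == 1 << 7 */
		assert(shift_for(100) == -1);  /* not a power of two */
		return 0;
	}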
	static int era_map(struct dm_target *ti, struct bio *bio)
	{
		struct era *era = ti->private;
		dm_block_t block = get_block(era, bio);

		/*
		 * All bios get remapped to the origin device.  We do this
		 * now, but it may not get issued until later, depending on
		 * whether the block is marked in this era.
		 */
		remap_to_origin(era, bio);

		/* Unmarked writes are deferred so the worker can mark them first. */
		if (... &&
		    !metadata_current_marked(era->md, block)) {
			defer_bio(era, bio);
			return DM_MAPIO_SUBMITTED;
		}

		return DM_MAPIO_REMAPPED;
	}
	static void era_postsuspend(struct dm_target *ti)
	{
		int r;
		struct era *era = ti->private;

		r = in_worker0(era, metadata_era_archive);
		if (r)
			DMERR("%s: couldn't archive current era", __func__);
		...
		stop_worker(era);

		r = metadata_commit(era->md);
		...
	}
	static int era_preresume(struct dm_target *ti)
	{
		int r;
		struct era *era = ti->private;
		dm_block_t new_size = calc_nr_blocks(era);

		/* Grow the era array before restarting the worker. */
		if (era->nr_blocks != new_size) {
			r = metadata_resize(era->md, &new_size);
			if (r)
				...

			r = metadata_commit(era->md);
			if (r)
				...

			era->nr_blocks = new_size;
		}

		start_worker(era);

		/* Each resume starts a fresh era. */
		r = in_worker0(era, metadata_era_rollover);
		...
	}
Status format:

	<metadata block size> <#used metadata blocks>/<#total metadata blocks>
	<current era> <held metadata root | '-'>

In era_status():

	r = in_worker1(era, metadata_get_stats, &stats);
	...
	DMEMIT(..., (unsigned int) stats.era);
	...
	format_dev_t(buf, era->metadata_dev->bdev->bd_dev);
	DMEMIT("%s ", buf);
	format_dev_t(buf, era->origin_dev->bdev->bd_dev);
	DMEMIT("%s %u", buf, era->sectors_per_block);
era_message() accepts three single-word commands:

	static int era_message(struct dm_target *ti, unsigned int argc, char **argv,
			       char *result, unsigned int maxlen)
	{
		struct era *era = ti->private;
		...
		if (!strcasecmp(argv[0], "checkpoint"))
			return in_worker0(era, metadata_checkpoint);

		if (!strcasecmp(argv[0], "take_metadata_snap"))
			return in_worker0(era, metadata_take_snap);

		if (!strcasecmp(argv[0], "drop_metadata_snap"))
			return in_worker0(era, metadata_drop_snap);
		...
	}
	static int era_iterate_devices(struct dm_target *ti,
				       iterate_devices_callout_fn fn, void *data)
	{
		struct era *era = ti->private;

		return fn(ti, era->origin_dev, 0, get_dev_size(era->origin_dev), data);
	}
	static void era_io_hints(struct dm_target *ti, struct queue_limits *limits)
	{
		struct era *era = ti->private;
		uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

		/*
		 * If the system-determined stacked limits are compatible with the
		 * era device's blocksize (io_opt is a factor) do not override them.
		 */
		if (io_opt_sectors < era->sectors_per_block ||
		    do_div(io_opt_sectors, era->sectors_per_block)) {
			...
			limits->io_opt = era->sectors_per_block << SECTOR_SHIFT;
		}
	}
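The hint logic keeps a stacked io_opt only when it is a whole multiple of
the era block size; do_div() returns the remainder, so the condition
reads "too small, or not a multiple". A plain-C restatement (function
name invented for the example):

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* Override io_opt unless it's at least one block and a whole
	 * multiple of it; '%' stands in for do_div()'s remainder. */
	static bool override_io_opt(uint64_t io_opt_sectors, uint32_t sectors_per_block)
	{
		return io_opt_sectors < sectors_per_block ||
		       io_opt_sectors % sectors_per_block;
	}

	int main(void)
	{
		assert(!override_io_opt(256, 128)); /* 2 whole blocks: keep it */
		assert(override_io_opt(192, 128));  /* 1.5 blocks: override */
		assert(override_io_opt(64, 128));   /* smaller than a block: override */
		return 0;
	}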
	static struct target_type era_target = {
		.name = "era",
		...
	};

	module_dm(era);

	MODULE_DESCRIPTION(DM_NAME " era target");