// SPDX-License-Identifier: GPL-2.0
#include "qcow2.h"

Qcow2Image::Qcow2Image(const char *path): fpath(path) {
	fd = open(path, O_RDWR);
	if (fd < 0)
		ublk_err("%s: backing file %s can't be opened %d\n",
				__func__, path, fd);
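	//switch the already-opened fd to direct IO against the backing image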
	fcntl(fd, F_SETFL, O_DIRECT);
}

Qcow2Image::~Qcow2Image() {
	if (fd >= 0)
		close(fd);
}

Qcow2State::Qcow2State(const char *path, const struct ublksrv_dev *d):
	dev_info(ublksrv_ctrl_get_dev_info(ublksrv_get_ctrl_dev(d))),
	min_bs_bits(9), dev(d), img(path), header(*this), l1_table(*this),
	refcount_table(*this), cluster_allocator(*this),
	cluster_map(*this),
	meta_io_map(dev_info->nr_hw_queues),
	meta_flushing(*this)
{
	u64 l1_bytes = get_l1_table_max_size();
	u64 ref_table_bytes = get_refcount_table_act_size();

	l1_table.load(*this, 0, l1_bytes, true);
	//l1_table.dump();

	refcount_table.load(*this, 0, ref_table_bytes, true);
	//refcount_table.dump();

	cluster_allocator.setup();
}

Qcow2State::~Qcow2State() {
}

u32 Qcow2State::get_l1_table_max_size()
{
	u32 l2_entry_size;
	u64 l2_size, res;

	l2_entry_size = header.is_extended_l2_entries() ? 16 : 8;

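	/*
	 * Each 8-byte L1 entry points to one L2 table (one cluster), and
	 * one L2 table maps (cluster_size / l2_entry_size) clusters, so a
	 * single L1 entry covers l2_size bytes of virtual space.
	 */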
	l2_size = ((1 << header.cluster_bits) / l2_entry_size) <<
		header.cluster_bits;
	res = (header.get_size() + l2_size - 1) / l2_size;
	res *= 8;

	//qcow2_log("%s: cls bit %d, l2 entry size %d, l2_size %d, l1 tbl size %d\n",
	//		__func__, header.cluster_bits, l2_entry_size, l2_size, res);
	if (res < QCOW_MAX_L1_SIZE)
		return round_up(res, 1UL << min_bs_bits);
	return QCOW_MAX_L1_SIZE;
}

u32 Qcow2State::get_refcount_table_max_size()
{
	u64 blk_size, res;

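	/*
	 * One refcount block is a single cluster holding
	 * (cluster_size * 8) >> refcount_order entries, one entry per
	 * cluster, so each refcount table entry covers
	 * 1 << (2 * cluster_bits + 3 - refcount_order) bytes of image.
	 */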
	blk_size = 1ULL << (2 * header.cluster_bits + 3 - header.refcount_order);
	res = (header.get_size() + blk_size - 1) / blk_size;
	res *= 8;

	//qcow2_log("%s: cls bit %d, refcount_order %d, blk_size %llu, ref tbl size %d\n",
	//		__func__, header.cluster_bits, header.refcount_order, blk_size, res);
	if (res < QCOW_MAX_REFTABLE_SIZE)
		return round_up(res, 1UL << min_bs_bits);
	return QCOW_MAX_REFTABLE_SIZE;
}

u32 Qcow2State::get_refcount_table_act_size()
{
	u64 ref_table_bytes = header.get_refcount_table_clusters() <<
		header.cluster_bits;

	if (ref_table_bytes > get_refcount_table_max_size())
		ref_table_bytes = get_refcount_table_max_size();

	return round_up(ref_table_bytes, 1UL << min_bs_bits);
}

u64 Qcow2State::get_l1_table_offset()
{
	return header.get_l1_table_offset();
}

u64 Qcow2State::get_refcount_table_offset()
{
	return header.get_refcount_table_offset();
}

u32 Qcow2State::get_l2_slices_count()
{
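	/*
	 * Mapping one cluster takes one 8-byte L2 entry, so covering the
	 * whole device needs (dev_size >> cluster_bits) * 8 bytes of L2
	 * tables.
	 */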
	u32 mapping_bytes = get_dev_size() >> (header.cluster_bits - 3);

	//align with qemu, at most 32MB
	if (mapping_bytes > (32U << 20))
		mapping_bytes = 32U << 20;

	return mapping_bytes >> QCOW2_PARA::L2_TABLE_SLICE_BITS;
}

u32 Qcow2State::add_meta_io(u32 qid, Qcow2MappingMeta *m)
{
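	//reuse the first free slot if any, otherwise grow the per-queue
	//vector; the returned index identifies this meta IO in the map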
	struct meta_mapping *map = &meta_io_map[qid];
	std::vector<Qcow2MappingMeta *> &v = map->meta;
	int i;

	for (i = 0; i < v.size(); i++)
		if (v[i] == nullptr)
			break;
	if (i < v.size()) {
		v[i] = m;
	} else {
		v.push_back(m);
		i = v.size() - 1;
	}

	map->nr += 1;

	return i;
}

bool Qcow2State::has_dirty_slice()
{
	return cluster_map.cache.has_dirty_slice(*this) ||
		cluster_allocator.cache.has_dirty_slice(*this);
}

void Qcow2State::reclaim_slice(Qcow2SliceMeta *m)
{
	if (m->is_mapping_meta()) {
		Qcow2L2Table *t =
			static_cast<Qcow2L2Table *>(m);

		cluster_map.cache.add_slice_to_reclaim_list(t);
	} else {
		Qcow2RefcountBlock *t =
			static_cast<Qcow2RefcountBlock *>(m);

		cluster_allocator.cache.add_slice_to_reclaim_list(t);
	}
}

void Qcow2State::remove_slice_from_evicted_list(Qcow2SliceMeta *m)
{
	if (m->is_mapping_meta()) {
		Qcow2L2Table *t =
			static_cast<Qcow2L2Table *>(m);

		cluster_map.cache.remove_slice_from_evicted_list(t);
	} else {
		Qcow2RefcountBlock *t =
			static_cast<Qcow2RefcountBlock *>(m);

		cluster_allocator.cache.remove_slice_from_evicted_list(t);
	}
}

void Qcow2State::dump_meta()
{
	cluster_allocator.dump_meta();
	cluster_map.dump_meta();
	meta_flushing.dump();
}

//todo: allocate slices from reclaim_slices
void Qcow2State::kill_slices(const struct ublksrv_queue *q)
{
	std::vector<Qcow2SliceMeta *> tmp(move(freed_slices));

	if (tmp.empty())
		return;

	qcow2_assert(!tmp.empty() && freed_slices.empty());

	//slices newly added from ->wakeup_all() can't be freed here
	for (auto it = tmp.cbegin(); it != tmp.cend(); ++it) {
		auto m = *it;

		m->put_ref();
	}
}

void Qcow2State::shrink_cache()
{
	cluster_map.cache.shrink(*this);
	cluster_allocator.cache.shrink(*this);
}

#ifdef DEBUG_QCOW2_META_VALIDATE
void Qcow2State::validate_cluster_use(u64 host_off, u64 virt_off, u32 use) {
	auto it = cluster_use.find(host_off);

	if (it == cluster_use.end())
		cluster_use[host_off] = ((u64)use << 56) | virt_off;
	else {
		qcow2_log("%s: duplicated cluster assignment host off "
				"%llx, virt_off %llx use %d, old entry %llx\n",
				__func__, host_off, virt_off, use,
				it->second);
		qcow2_assert(0);
	}
}

// call it for each entry before flushing the slice
bool Qcow2State::validate_cluster_map(u64 host_off, u64 virt_off) {
	auto it = cluster_validate_map.find(host_off);

	if (it == cluster_validate_map.end()) {
		cluster_validate_map[host_off] = virt_off;
		return true;
	}

	if (virt_off == it->second)
		return true;

	qcow2_log("%s: duplicated cluster assignment host off "
			"%llx, virt_off %llx old virt_offset %llx\n",
			__func__, host_off, virt_off, it->second);
	return false;
}
#endif

/* Make any kind of Qcow2State; so far only the plain one is supported */
Qcow2State *make_qcow2state(const char *file, struct ublksrv_dev *dev)
{
	return new Qcow2StatePlain(file, dev);
}

template <class T>
slice_cache<T>::slice_cache(u8 slice_bits, u8 cluster_bits, u8 slice_virt_bits,
		u32 max_size):
	slice_size_bits(slice_bits),
	cluster_size_bits(cluster_bits),
	slice_virt_size_bits(slice_virt_bits),
	slices(max_size >> slice_bits),
	evicted_slices({})
{
}

template <class T>
T *slice_cache<T>::__find_slice(u64 key, bool use_evicted_cache) {
	T *t = slices.__get(key);

	if (t)
		return t;

	if (use_evicted_cache) {
		auto it = evicted_slices.find(key);

		if (it != evicted_slices.end())
			return it->second;
	}
	return nullptr;
}

template <class T>
T *slice_cache<T>::alloc_slice(Qcow2State &state, const qcow2_io_ctx_t &ioc,
		u64 virt_offset, u64 host_offset, u32 parent_idx)
{
	T *t;
	u32 flags;
	bool zero_buf;

	qcow2_assert(__find_slice(virt_offset, true) == nullptr);
	qcow2_assert(!(virt_offset & ((1ULL << cluster_size_bits) - 1)));

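	//if the backing cluster isn't known to be zeroed on disk, create
	//the slice as updated & dirty and zero its in-memory buffer, so
	//the zeros will be flushed out; otherwise it can be used as-is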
	if (!state.cluster_allocator.alloc_cluster_is_zeroed(host_offset &
				~((1ULL << cluster_size_bits) - 1))) {
		flags = QCOW2_META_UPDATE | QCOW2_META_DIRTY;
		zero_buf = true;
	} else {
		flags = 0;
		zero_buf = false;
	}

	t = pick_slice_from_reclaim_list();
	if (t == nullptr)
		t = new T(state, host_offset, parent_idx, flags);
	else
		t->reset(state, host_offset, parent_idx, flags);

	if (t->get_dirty(-1))
		state.meta_flushing.inc_dirtied_slice(t->is_mapping_meta());

	if (zero_buf)
		t->zero_buf();

	T *old = slices.put(virt_offset, t);
	if (old) {
#ifdef DEBUG_QCOW2_META_OBJ
		qcow2_assert(__find_slice(old->virt_offset(), true)
				== nullptr);
#endif
		//loading or flushing may be in progress, which is allowed,
		//and we guarantee that the slice isn't released until
		//the loading or flushing is done
		old->set_evicted();
		add_slice_to_evicted_list(old->virt_offset(), old);

		//a dirty slice can't be freed, but a clean slice can't
		//be dirtied after it is evicted, so it is safe to move
		//clean slices into the free list for release
		if (!old->get_dirty(-1))
			state.add_slice_to_free_list(old);
		old->put_ref();

#ifdef QCOW2_DEBUG
		ublk_dbg(UBLK_DBG_QCOW2_META, "%s: %s evicted from tag %d, obj %p flags %x offset %lx ref %d\n",
				__func__, old->get_id(), ioc.get_tag(), old,
				old->get_flags(), old->get_offset(),
				old->read_ref());
#endif
	}

	if (virt_offset != t->virt_offset()) {
		ublk_err("%s %d: %s %" PRIx64 "/%" PRIx64 " parent_idx %d host_off %" PRIx64 " flags %x\n",
				__func__, __LINE__, typeid(*t).name(),
				virt_offset, t->virt_offset(), parent_idx,
				host_offset, flags);
		qcow2_assert(virt_offset == t->virt_offset());
	}

	return t;
}

template <class T>
void slice_cache<T>::add_slice_to_evicted_list(u64 virt_offset, T *t)
{
	auto it = evicted_slices.find(virt_offset);

	qcow2_assert(virt_offset == t->virt_offset());

	if (it == evicted_slices.end())
		evicted_slices[virt_offset] = t;
	else {
#if 1
		auto m = it->second;
		qcow2_log("%s: add duplicated cache virt_offset %" PRIx64 ", remove old entry(%p %lx/%lx %x %d)\n",
				__func__, virt_offset, m, m->virt_offset(),
				m->get_offset(), m->get_flags(), m->read_ref());
		it->second->show(__func__, __LINE__);
		qcow2_assert(0);
#endif

		//this slice has already been handled in prep_flushing,
		//so it is fine to remove it from the freed list now
		evicted_slices.erase(it);
		evicted_slices[virt_offset] = t;
	}
}

template <class T>
void slice_cache<T>::dump(Qcow2State &qs) {
	auto lru_list = slices.get_lru_list_ro();

	ublk_log("cache size %zu, dirty cache size %zu\n",
			slices.size(), evicted_slices.size());

	//todo: use lrucache iterator to cut the loop time
	for (auto it = lru_list.cbegin(); it != lru_list.cend(); ++it) {
		T *t = it->second;

		if (t)
			t->dump();
	}
}

template <class T>
int slice_cache<T>::figure_group_from_dirty_list(Qcow2State &qs) {
	std::unordered_map<u32, int> cnt;
	int val = -1;
	int idx = -1;

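	//group evicted slices by the 512-byte block of the parent table
	//that holds their entries (8 bytes each), then pick the most
	//loaded block so one flush covers as many dirty slices as possible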
	for (auto it = evicted_slices.cbegin(); it != evicted_slices.cend(); ++it) {
		u32 key = (it->second->parent_idx * 8) / 512;
		auto it1 = cnt.find(key);

		if (it1 == cnt.end())
			cnt[key] = 0;
		else
			cnt[key] += 1;
	}

	for (auto it = cnt.cbegin(); it != cnt.cend(); ++it) {
		if (it->second > val) {
			idx = it->first;
			val = it->second;
		}
	}

	flush_log("%s: dirty list: idx %d cnt %u\n", __func__, idx, val);

	qcow2_assert(idx != -1);
	return idx;
}

template <class T>
int slice_cache<T>::__figure_group_for_flush(Qcow2State &qs)
{
	std::unordered_map<u32, int> cnt;
	int val = -1;
	int idx = -1;
	auto lru_list = slices.get_lru_list_ro();

	//todo: use lrucache iterator to cut the loop time
	for (auto it = lru_list.cbegin(); it != lru_list.cend(); ++it) {
		T *t = it->second;

		if (t != nullptr && t->get_dirty(-1) && !t->is_flushing()) {
			u32 key = (t->parent_idx * 8) / 512;
			auto it1 = cnt.find(key);

			if (it1 == cnt.end())
				cnt[key] = 0;
			else
				cnt[key] += 1;
		}
	}

	if (cnt.size() == 0)
		return -1;

	for (auto it = cnt.cbegin(); it != cnt.cend(); ++it) {
		if (it->second > val) {
			idx = it->first;
			val = it->second;
		}
	}
	qcow2_assert(idx != -1);
	flush_log("%s: lru list: idx %d cnt %u\n", __func__, idx, val);
	return idx;
}

template <class T>
int slice_cache<T>::figure_group_for_flush(Qcow2State &qs)
{
	if (evicted_slices.size() > 0)
		return figure_group_from_dirty_list(qs);

	return __figure_group_for_flush(qs);
}

template <class T>
bool slice_cache<T>::has_dirty_slice(Qcow2State &qs)
{
	auto lru_list = slices.get_lru_list_ro();

	//todo: use lrucache iterator to cut the loop time
	for (auto it = lru_list.cbegin(); it != lru_list.cend(); ++it) {
		T *t = it->second;

		if (t != nullptr && t->get_dirty(-1) && !t->is_flushing())
			return true;
	}

	return has_evicted_dirty_slices();
}

template <class T>
void slice_cache<T>::shrink(Qcow2State &qs)
{
	u32 cnt = qs.get_l2_slices_count();

	for (auto it = reclaimed_slices.cbegin();
			it != reclaimed_slices.cend(); ++it) {
		delete *it;
	}

	reclaimed_slices.clear();

	cnt >>= 3;

	//shrink the cache until only 1/8 of the slices are kept
	while (slices.size() > cnt) {
		auto t = slices.remove_last();

		delete t;
	}
}

// refcount table shouldn't be so big
Qcow2ClusterAllocator::Qcow2ClusterAllocator(Qcow2State &qs): state(qs),
	cache(REFCOUNT_BLK_SLICE_BITS, qs.header.cluster_bits,
		qs.header.cluster_bits + 3 - qs.header.refcount_order +
		QCOW2_PARA::REFCOUNT_BLK_SLICE_BITS,
		QCOW2_PARA::REFCOUNT_BLK_MAX_CACHE_BYTES),
	alloc_state({})
{
	max_alloc_states = 0;
}

Qcow2RefcountBlock* Qcow2ClusterAllocator::__find_slice(u64 key)
{
	return cache.__find_slice(key, true);
}

int Qcow2ClusterAllocator::figure_group_from_refcount_table()
{
	int ret = cache.figure_group_for_flush(state);

	if (ret == -1)
		return state.refcount_table.get_1st_dirty_blk();
	return ret;
}

void Qcow2ClusterAllocator::alloc_cluster_started(const qcow2_io_ctx_t &ioc,
		u64 cluster_offset, u8 purpose)
{
	auto it = alloc_state.find(cluster_offset);
	u32 sz;

	qcow2_assert(it == alloc_state.end());

	alloc_state[cluster_offset] = new Qcow2ClusterState(
			QCOW2_ALLOC_STARTED, purpose);

	sz = alloc_state.size();

	if (sz > max_alloc_states)
		max_alloc_states = sz;

	alloc_log("%s: offset %lx state %d purpose %d\n",
			__func__, cluster_offset,
			QCOW2_ALLOC_STARTED, purpose);
}

void Qcow2ClusterAllocator::alloc_cluster_zeroing(const qcow2_io_ctx_t &ioc,
		u64 cluster_offset)
{
	auto it = alloc_state.find(cluster_offset);

	qcow2_assert(it != alloc_state.end());

	it->second->set_state(QCOW2_ALLOC_ZEROING);

	alloc_log("%s: offset %lx state %d purpose %d\n", __func__,
			cluster_offset, it->second->get_state(),
			it->second->get_purpose());
}

void Qcow2ClusterAllocator::alloc_cluster_zeroed(const struct ublksrv_queue *q,
		int tag, u64 cluster_offset)
{
	auto it = alloc_state.find(cluster_offset);

	if (it == alloc_state.end())
		ublk_err("%s: offset %lx\n", __func__, cluster_offset);
	qcow2_assert(it != alloc_state.end());

	it->second->set_state(QCOW2_ALLOC_ZEROED);
	alloc_log("%s: offset %lx state %d purpose %d\n", __func__,
			cluster_offset, it->second->get_state(),
			it->second->get_purpose());

	it->second->wakeup_all(q, tag);

	/* safe to remove it now */
	delete it->second;
	alloc_state.erase(it);
}

//called after the mapping is set up for this cluster
void Qcow2ClusterAllocator::alloc_cluster_done(const qcow2_io_ctx_t &ioc,
		u64 cluster_offset)
{
	auto it = alloc_state.find(cluster_offset);

	qcow2_assert(it != alloc_state.end());

	delete it->second;

	alloc_state.erase(it);
}

void Qcow2ClusterAllocator::dump_meta() {
	qcow2_log("cluster allocator %s: total allocated %" PRIu64 " clusters, bytes %" PRIu64 "KB, max states %u/%lu\n",
			__func__, alloc_cnt, (alloc_cnt <<
			state.header.cluster_bits) >> 10,
			max_alloc_states, alloc_state.size());
	state.refcount_table.dump();
	cache.dump(state);
}

void Qcow2ClusterAllocator::setup() {
	long i = 0;

	for (i = (state.refcount_table.get_data_len() / 8) - 1; i >= 0; i--)
		if (state.refcount_table.get_entry(i) != 0)
			break;
	/*
	 * most of the time this entry still has free slots, otherwise
	 * allocate_cluster() will move to the next refcount block cache
	 */
	state.refcount_table.set_next_free_idx(i);

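	/*
	 * log2 of the bytes of virtual space covered by one refcount
	 * table entry, the same calculation as in
	 * get_refcount_table_max_size()
	 */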
	table_entry_virt_size_bits = 2 * state.header.cluster_bits + 3 -
		state.header.refcount_order;
	slice_idx = 0;
	alloc_cnt = 0;

	//just an estimate, for runtime checks only
	max_physical_size = ((u64)(i + 1)) << table_entry_virt_size_bits;
}

void Qcow2ClusterAllocator::allocate_refcount_blk(const qcow2_io_ctx_t &ioc,
		s32 idx)
{
	Qcow2RefcountBlock *rb;
	u64 virt_offset = (u64)idx << table_entry_virt_size_bits;
	u64 host_offset = virt_offset;
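	//the new refcount block is placed at the start of the very range
	//it covers; that cluster is free since this table entry was still
	//empty, and rb->set_entry(0, 1) below accounts for the block itself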

	if (state.refcount_table.is_flushing(idx)) {
		state.refcount_table.add_waiter(ioc.get_tag());
		throw MetaUpdateException();
	}

	max_physical_size = ((u64)(idx + 1)) << table_entry_virt_size_bits;
	state.refcount_table.set_next_free_idx(idx);
	qcow2_assert(!state.refcount_table.get_entry(idx));
	state.refcount_table.set_entry(idx, host_offset);

	//track the newly allocated cluster
	alloc_cluster_started(ioc, host_offset,
			QCOW2_CLUSTER_USE::REFCOUNT_BLK);
	state.validate_cluster_use(host_offset, virt_offset,
			QCOW2_CLUSTER_USE::REFCOUNT_BLK);

	rb = cache.alloc_slice(state, ioc, virt_offset, host_offset, idx);
	qcow2_assert(rb != nullptr);
	qcow2_assert(rb->get_update() && !rb->get_evicted() &&
			!rb->is_flushing());

	//the first cluster is for this refcount block
	rb->set_entry(0, 1);
	rb->set_next_free_idx(1);
}

u64 Qcow2ClusterAllocator::allocate_cluster(const qcow2_io_ctx_t &ioc)
{
	Qcow2RefcountBlock *rb;
	s32 free_idx;
	u64 virt_offset, host_offset;

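	//scan the refcount blocks slice by slice: virt_offset addresses
	//the current refcount-block slice within the virtual space covered
	//by refcount table entry free_idx, and a free cluster is found by
	//scanning that slice's entries for a zero refcount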
again:
	free_idx = state.refcount_table.get_next_free_idx();
	virt_offset = ((u64)free_idx << table_entry_virt_size_bits) +
		((u64)slice_idx << cache.get_slice_virt_size_bits());
	rb = cache.find_slice(virt_offset, true);
	if (rb == nullptr)
		goto alloc_refcount_blk;
	qcow2_assert(rb->read_ref() > 0);

check_new:
	/* the cache has been allocated & is being loaded */
	if (!rb->get_update()) {
		rb->add_waiter(ioc.get_tag());
		throw MetaUpdateException();
	}

	//if we are being flushed, we can't touch the in-ram table,
	//so wait until the flushing is done
	if (rb->is_flushing() || rb->get_evicted()) {
		rb->add_waiter(ioc.get_tag());
		throw MetaUpdateException();
	}

#ifdef QCOW2_CACHE_DEBUG
	qcow2_log("%s: hit: next free %d entries %d virt_off %llx slice_idx %d\n",
			__func__, rb->get_next_free_idx(), rb->get_nr_entries(),
			virt_offset, slice_idx);
#endif
	//todo: cache the last free entry
	for (int i = rb->get_next_free_idx(); i < rb->get_nr_entries(); i++) {
		if (i < 0)
			continue;
		//qcow2_log("\t entry[%d]=%llx\n", i, rb->get_entry(i));
		if (rb->get_entry_fast(i) == 0) {
			u64 res = virt_offset + (((u64)i) <<
					state.header.cluster_bits);

			if (!rb->get_dirty(-1))
				state.meta_flushing.inc_dirtied_slice(false);
			qcow2_assert(rb->get_update() && !rb->is_flushing() &&
					!rb->get_evicted());
			rb->set_entry(i, 1);
			rb->set_next_free_idx(i + 1);

			alloc_cnt++;
			return res;
		}
	}

	if (++slice_idx < cache.get_nr_slices())
		goto again;

	// the current cache is full, so move to the next one.
	//
	// Here it differs from the l2 table cache, which is sliced; the
	// refcount blk cache size always equals one cluster
	qcow2_assert(free_idx < state.refcount_table.get_nr_entries());
	allocate_refcount_blk(ioc, free_idx + 1);
	slice_idx = 0;
	goto again;

alloc_refcount_blk:
	//start is the host offset of the refcount block object
	host_offset = state.refcount_table.get_entry(free_idx) +
		(u64(slice_idx) << cache.get_slice_size_bits());

	rb = cache.alloc_slice(state, ioc, virt_offset, host_offset, free_idx);

	/* the cluster may be allocated just in ram, no need to load */
	if (rb->get_update())
		goto check_new;

	rb->load(state, ioc, QCOW2_PARA::REFCOUNT_BLK_SLICE_BYTES, false);

	//add our tag into io_waiters, so once we get updated,
	//the current io context will be resumed when handling the cqe
	//
	//we have to call it explicitly here for both io contexts
	//which start to load meta and wait for in-flight meta
	rb->add_waiter(ioc.get_tag());

	//->handle_io_async() has to handle this exception
	throw MetaIoException();

	return 0;
}

// l1 table shouldn't be so big
Qcow2ClusterMapping::Qcow2ClusterMapping(Qcow2State &qs): state(qs),
	cache(QCOW2_PARA::L2_TABLE_SLICE_BITS,
		qs.header.cluster_bits,
		qs.header.cluster_bits + L2_TABLE_SLICE_BITS - 3,
		qs.get_l2_slices_count() * QCOW2_PARA::L2_TABLE_SLICE_BYTES),
	cluster_bits(state.header.cluster_bits),
	l2_entries_order(state.header.cluster_bits - 3),
	max_alloc_entries(0)
{
}

Qcow2L2Table* Qcow2ClusterMapping::__find_slice(u64 key, bool use_dirty)
{
	return cache.__find_slice(key, use_dirty);
}

int Qcow2ClusterMapping::figure_group_from_l1_table()
{
	int ret = cache.figure_group_for_flush(state);

	if (ret == -1)
		return state.l1_table.get_1st_dirty_blk();
	return ret;
}

Qcow2L2Table *Qcow2ClusterMapping::create_and_add_l2(const qcow2_io_ctx_t &ioc,
		u64 offset)
{
	const unsigned idx = l1_idx(offset);
	u64 l1_entry = state.l1_table.get_entry(idx);
	u64 l2_cluster = -1;
	const struct ublksrv_queue *q = ublksrv_get_queue(state.dev, ioc.get_qid());
	Qcow2L2Table *l2 = nullptr;

	qcow2_assert(!state.l1_table.entry_allocated(l1_entry));

	//in case of being flushed, we can't update the in-ram meta, so
	//exit and wait for flush completion
	if (state.l1_table.is_flushing(idx)) {
		state.l1_table.add_waiter(ioc.get_tag());
		throw MetaUpdateException();
	}

	//if someone else is allocating a cluster for this entry, wait
	//until the entry becomes valid or the allocation fails
	if (entry_is_allocating(offset, true)) {
		u32 owner = entry_get_alloc_owner(offset, true);

		if (owner != ioc.get_tag()) {
			state.l1_table.add_waiter_idx(ioc.get_tag(), idx);
			throw MetaUpdateException();
		}
	} else {
		//store the owner into the entry to mark that we are
		//allocating, so no one else can allocate for this entry,
		//and others just need to wait until the allocation is done
		entry_mark_allocating(offset, ioc.get_tag(), true);
	}

	l2_cluster = state.cluster_allocator.allocate_cluster(ioc);
	if (l2_cluster == -1) {
		state.l1_table.set_entry(idx, 0);
	} else {
		unsigned long s_idx = cache.get_slice_idx(l2_slice_key(offset));
		u64 host_offset = l2_cluster +
			(s_idx << cache.get_slice_size_bits());

		state.cluster_allocator.alloc_cluster_started(ioc,
				l2_cluster, QCOW2_CLUSTER_USE::L2_TABLE);
		state.validate_cluster_use(l2_cluster, l2_slice_key(offset),
				QCOW2_CLUSTER_USE::L2_TABLE);
		//allocate l2 cache
		l2 = cache.alloc_slice(state, ioc, l2_slice_key(offset),
				host_offset, idx);
		l2->get_ref();
		qcow2_assert(l2->get_update());

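		//bit 63 is the qcow2 COPIED flag: the refcount is exactly
		//one, so the cluster may be written in place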
		l2_cluster |= 1ULL << 63;
		state.l1_table.set_entry(idx, l2_cluster);
	}

	entry_mark_allocated(offset, true);
	state.l1_table.wakeup_all_idx(q, ioc.get_tag(), idx);

	return l2;
}

Qcow2L2Table *Qcow2ClusterMapping::load_l2_slice(const qcow2_io_ctx_t &ioc, u64 offset,
		u64 l1_entry)
{
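	//l2_idx(offset) << 3 is the byte offset of this entry within the
	//whole L2 table; align it down to the slice size, and mask bit 63
	//(the COPIED flag) off the l1 entry to get the table's host offset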
	const u64 slice_offset = (l2_idx(offset) << 3) &
		~(QCOW2_PARA::L2_TABLE_SLICE_BYTES - 1);
	u64 start = (l1_entry & ((1ULL << 63) - 1)) + slice_offset;
	Qcow2L2Table *l2;

	l2 = cache.alloc_slice(state, ioc, l2_slice_key(offset), start,
			l1_idx(offset));
	//start may point to one newly allocated cluster
	if (l2->get_update()) {
		l2->get_ref();
		return l2;
	}

	ublk_dbg(UBLK_DBG_QCOW2_META_L2, "cache: alloc: key %" PRIx64 " val %p, update %d\n",
			start, l2, l2->get_update());
	l2->load(state, ioc, QCOW2_PARA::L2_TABLE_SLICE_BYTES, false);
	l2->add_waiter(ioc.get_tag());
	throw MetaIoException();

	return l2;
}

//return the l2 slice object with one extra reference held
Qcow2L2Table *Qcow2ClusterMapping::create_l2_map(const qcow2_io_ctx_t &ioc,
		u64 offset, bool create_l2)
{
	u64 l1_entry = state.l1_table.get_entry_fast(l1_idx(offset));
	Qcow2L2Table *l2 = nullptr;

	if (state.l1_table.entry_allocated(l1_entry))
		return load_l2_slice(ioc, offset, l1_entry);

	if (create_l2) {
		// the l2 table isn't allocated yet, so create one and add it here
		l2 = create_and_add_l2(ioc, offset);
		if (!l2)
			ublk_err("%s: tag %d: allocate l2 failed for %" PRIx64 "\n",
					__func__, ioc.get_tag(), offset);
	}
	return l2;
}

//virt_offset's l2 table doesn't include this entry yet, so allocate
//one cluster and install the mapping
int Qcow2ClusterMapping::build_mapping(const qcow2_io_ctx_t &ioc,
		u64 virt_offset, Qcow2L2Table *l2, u32 idx_in_slice,
		u64 *l2_entry)
{
	const struct ublksrv_queue *q = ublksrv_get_queue(state.dev, ioc.get_qid());
	u64 data_cluster = -1;
	int ret;

	qcow2_assert(l2->get_update());

	//in case of being flushed, we can't update the in-ram meta, so
	//exit and wait for flush completion
	//
	//If this slice is marked as PREP_FLUSH, the dependent refcount
	//block tables are being flushed, so delay this slice update
	//until our flushing is done
	if (l2->is_flushing() || l2->get_evicted() || l2->get_prep_flush()) {
		l2->add_waiter(ioc.get_tag());
		throw MetaUpdateException();
	}

	qcow2_assert(l2->read_ref() > 0);

	if (entry_is_allocating(virt_offset, false)) {
		u32 owner = entry_get_alloc_owner(virt_offset, false);

		if (owner != ioc.get_tag()) {
			l2->add_waiter_idx(ioc.get_tag(), idx_in_slice);
			throw MetaUpdateException();
		}
	} else {
		entry_mark_allocating(virt_offset, ioc.get_tag(), false);
	}

	data_cluster = state.cluster_allocator.allocate_cluster(ioc);
	qcow2_assert(l2->get_update() && !l2->is_flushing() &&
			!l2->get_evicted());
	if (data_cluster == -1) {
		l2->set_entry(idx_in_slice, 0);
		ret = -ENOSPC;
	} else {
		state.cluster_allocator.alloc_cluster_started(ioc,
				data_cluster, QCOW2_CLUSTER_USE::DATA);
		state.validate_cluster_use(data_cluster, virt_offset,
				QCOW2_CLUSTER_USE::DATA);
		data_cluster |= 1ULL << 63;
		*l2_entry = data_cluster;
		if (!l2->get_dirty(-1))
			state.meta_flushing.inc_dirtied_slice(true);
		l2->set_entry(idx_in_slice, data_cluster);
		ret = 0;
	}

	l2->check(state, __func__, __LINE__);

	entry_mark_allocated(virt_offset, false);
	l2->wakeup_all_idx(q, ioc.get_tag(), idx_in_slice);
	return ret;
}

//one extra reference on l2 is held when this function is called
u64 Qcow2ClusterMapping::__map_cluster(const qcow2_io_ctx_t &ioc,
		Qcow2L2Table *l2, u64 offset, bool create_l2)
{
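	//idx_in_slice: take the entry's byte offset in the whole L2 table,
	//keep only the offset inside this slice, then convert it back to
	//an 8-byte entry index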
	const u32 idx_in_slice = ((l2_idx(offset) << 3) &
			(QCOW2_PARA::L2_TABLE_SLICE_BYTES - 1)) >> 3;
	u64 l2_entry;
	int ret;

	qcow2_assert(l2->read_ref() > 0);
	l2->check(state, __func__, __LINE__);

	/* the cache is being loaded */
	if (!l2->get_update()) {
		l2->add_waiter(ioc.get_tag());
		throw MetaUpdateException();
	}

	l2_entry = l2->get_entry_fast(idx_in_slice);
	if (l2->entry_allocated(l2_entry))
		goto exit;

	if (!create_l2)
		return 0;

	ret = build_mapping(ioc, offset, l2, idx_in_slice, &l2_entry);
	if (ret) {
		qcow2_log("%s %d: tag %d build l2 mapping failed %d\n",
				__func__, __LINE__, ioc.get_tag(), ret);
		return 0;
	}
exit:
	qcow2_assert(l2->entry_allocated(l2_entry));
	return l2_entry & ((1ULL << 63) - 1);
}


//any caller has to catch both MetaIoException and MetaUpdateException
u64 Qcow2ClusterMapping::map_cluster(const qcow2_io_ctx_t &ioc, u64 offset,
		bool create_l2)
{
	Qcow2L2Table *l2 = cache.find_slice(l2_slice_key(offset), true);
	u64 off_in_cls = offset & ((1ULL << cluster_bits) - 1);
	u64 host_off = 0;

	offset = offset & ~((1ULL << cluster_bits) - 1);

	// l2 could be freed when wakeup() is called, so the refcount
	// has to be grabbed
	if (l2) {
		l2->get_ref();
	} else {
		try {
			l2 = create_l2_map(ioc, offset, create_l2);
		} catch (MetaIoException &meta_error) {
			throw MetaIoException();
		} catch (MetaUpdateException &meta_update_error) {
			throw MetaUpdateException();
		}
	}

	if (l2 == nullptr)
		return 0;

	try {
		host_off = __map_cluster(ioc, l2, offset, create_l2);
	} catch (MetaIoException &meta_error) {
		l2->put_ref();
		throw MetaIoException();
	} catch (MetaUpdateException &meta_update_error) {
		l2->put_ref();
		throw MetaUpdateException();
	}

	l2->put_ref();

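	//compressed clusters aren't supported here, so report the mapping
	//as failed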
	if (host_off & QCOW_OFLAG_COMPRESSED)
		return (u64)-1;

	return host_off + off_in_cls;
}

void Qcow2ClusterMapping::dump_meta()
{
	qcow2_log("cluster mapping %s: max_alloc_entries %u/%lu\n", __func__,
			max_alloc_entries, entry_alloc.size());
	state.l1_table.dump();
	cache.dump(state);
}