// SPDX-License-Identifier: GPL-2.0
#ifndef UBLK_QCOW2_META_H_
#define UBLK_QCOW2_META_H_

#include "qcow2_common.h"

class Qcow2State;
class Qcow2Header;

/*
 * Class design:
 * 1) copy constructor / assignment operator overloading
 *
 * 2) one friend function for dumping object
 *
 * Loading meta:
 *
 * Flushing meta:
 */
class Qcow2Meta {
protected:
#ifdef DEBUG_QCOW2_META_OBJ
	const char *id;
#endif
	Qcow2Header &header;
	void *addr;	//buffer address
	u64 offset;	//offset in host image
	u32 buf_sz;	//buffer size
	u32 data_len;	//current data length in the buffer; valid iff
			//QCOW2_META_UPDATE is set

#define QCOW2_META_DIRTY	(1U << 0)
#define QCOW2_META_UPDATE	(1U << 1)

//L1 & refcount table are top meta; set in the constructor, and should only
//be used when flushing meta
#define QCOW2_META_TOP		(1U << 2)

//the meta slice is being flushed to the image
#define QCOW2_META_FLUSHING	(1U << 3)

#define QCOW2_META_PREP_FLUSH	(1U << 4)

//set for L1/L2 meta
#define QCOW2_META_MAPPING	(1U << 5)

//only used by .reset()
#define QCOW2_META_DONT_ALLOC_BUF	(1U << 6)

//evicted from the LRU cache; the slice may still be loading or flushing,
//and will be freed once that IO completes.
//
//It can't be re-dirtied any more, so a slice marked as EVICTED is read-only.
#define QCOW2_META_EVICTED	(1U << 7)
	u32 flags;

	int refcnt;
public:
	virtual int load(Qcow2State &qs, const qcow2_io_ctx_t &ioc, u32 len,
			bool sync);
	virtual int flush(Qcow2State &qs, const qcow2_io_ctx_t &ioc, u64 off,
			u32 len);
	Qcow2Meta(Qcow2Header &h, u64 off, u32 buf_sz, const char *, u32 f);
	virtual ~Qcow2Meta();
	void zero_buf();
	virtual void show(const char *f = "", int line = 0);

#ifdef DEBUG_QCOW2_META_OBJ
	const char *get_id() {
		return id;
	}
#endif
	void set_evicted() {
		flags |= QCOW2_META_EVICTED;
	}

	bool get_evicted() {
		return flags & QCOW2_META_EVICTED;
	}

	void set_dirty(unsigned int idx, bool val) {
		if (val)
			flags |= QCOW2_META_DIRTY;
		else
			flags &= ~QCOW2_META_DIRTY;
	}

	bool get_dirty(unsigned int idx) const {
		return flags & QCOW2_META_DIRTY;
	}

	u64 get_offset() const {
		return offset;
	}

	u64 get_buf_size() const {
		return buf_sz;
	}

	u32 get_data_len() const {
		return data_len;
	}

	bool get_update() const {
		return !!(flags & QCOW2_META_UPDATE);
	}

	void set_update(bool val) {
		if (val)
			flags |= QCOW2_META_UPDATE;
		else
			flags &= ~QCOW2_META_UPDATE;
	}

	bool is_top_meta() const
	{
		return !!(flags & QCOW2_META_TOP);
	}

	bool is_mapping_meta() const
	{
		return !!(flags & QCOW2_META_MAPPING);
	}

	bool is_flushing() const {
		return !!(flags & QCOW2_META_FLUSHING);
	}

	unsigned get_flags() const {
		return flags;
	}

	int read_ref() const {
		return refcnt;
	}

	bool get_prep_flush() const {
		return !!(flags & QCOW2_META_PREP_FLUSH);
	}

	void set_prep_flush(bool val)
	{
		if (val)
			flags |= QCOW2_META_PREP_FLUSH;
		else
			flags &= ~QCOW2_META_PREP_FLUSH;
	}
};
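/*
 * Illustrative sketch (hypothetical helper, not used by the driver): the
 * QCOW2_META_* values above are plain bits in Qcow2Meta::flags, so state
 * tests are simple mask checks.  This only encodes what the comments above
 * state, i.e. that an EVICTED slice may still be under IO but can never be
 * re-dirtied.
 */
inline bool qcow2_meta_flags_readonly(u32 flags)
{
	/* an evicted slice is read-only even while its last IO is in flight */
	return !!(flags & QCOW2_META_EVICTED);
}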
#define QCOW2_EXT_MAGIC_END		0
#define QCOW2_EXT_MAGIC_BACKING_FORMAT	0xe2792aca
#define QCOW2_EXT_MAGIC_FEATURE_TABLE	0x6803f857
#define QCOW2_EXT_MAGIC_CRYPTO_HEADER	0x0537be77
#define QCOW2_EXT_MAGIC_BITMAPS		0x23852875
#define QCOW2_EXT_MAGIC_DATA_FILE	0x44415441

class Qcow2HeaderExt {
private:
	u64 offset;
public:
	u32 type;
	u32 len;

	Qcow2HeaderExt(char *addr, u64 off): offset(off)
	{
		u32 *buf = (u32 *)(addr + offset);
		type = be32_to_cpu(buf[0]);

		buf = (u32 *)(addr + offset + 4);
		len = be32_to_cpu(buf[0]);
	}

	virtual ~Qcow2HeaderExt() {}

	virtual void dump() const
	{
		qcow2_log("%s: type %x len %d\n",
				typeid(*this).name(), type, len);
	}
};

class Qcow2HeaderExtString : public Qcow2HeaderExt {
public:
	std::string str;

	//the string payload starts right after the 8-byte extension header
	Qcow2HeaderExtString(char *addr, u64 offset):
		Qcow2HeaderExt(addr, offset), str(addr + offset + 8, len)
	{
	}

	virtual void dump() const
	{
		qcow2_log("%s: type %x len %d string %s\n",
				typeid(*this).name(), type, len, str.c_str());
	}
};

class Qcow2HeaderExtFeatureNameTable : public Qcow2HeaderExt {
public:
	struct feature_entry {
		char feature_type;
		char bit_num;
		char feature_name[46];
	};
	typedef std::valarray<feature_entry> ArrayFeature;
	ArrayFeature __a;

	Qcow2HeaderExtFeatureNameTable(char *addr, u64 offset);
	~Qcow2HeaderExtFeatureNameTable() {}
	void dump() const;
};

class Qcow2HeaderExtBitmaps : public Qcow2HeaderExt {
public:
	u32 nr_bitmap;
	u64 bitmap_directory_size;
	u64 bitmap_directory_offset;

	Qcow2HeaderExtBitmaps(char *addr, u64 offset):
		Qcow2HeaderExt(addr, offset)
	{
		nr_bitmap = be32_to_cpu(*(u32 *)(addr + offset + 8));
		bitmap_directory_size = be64_to_cpu(*(u64 *)(addr +
					offset + 12));
		bitmap_directory_offset = be64_to_cpu(*(u64 *)(addr +
					offset + 20));
	}

	virtual void dump() const
	{
		qcow2_log("%s: type %x len %d nr_bitmap %d bitmap_dir(offset %" PRIx64 " sz %" PRIu64 ")\n",
				typeid(*this).name(), type, len,
				nr_bitmap, bitmap_directory_offset,
				bitmap_directory_size);
	}
};

class Qcow2HeaderExtEncHeader : public Qcow2HeaderExt {
public:
	u64 enc_offset;
	u64 enc_len;

	Qcow2HeaderExtEncHeader(char *addr, u64 offset):
		Qcow2HeaderExt(addr, offset)
	{
		enc_offset = be64_to_cpu(*(u64 *)(addr +
					offset + 8));
		enc_len = be64_to_cpu(*(u64 *)(addr +
					offset + 16));
	}

	virtual void dump() const
	{
		qcow2_log("%s: type %x len %d enc(offset %" PRIx64 " sz %" PRIu64 ")\n",
				typeid(*this).name(), type, len,
				enc_offset, enc_len);
	}
};
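/*
 * Illustrative sketch (hypothetical helper, not the driver's parser): how
 * the extension classes above are meant to be instantiated.  Per the qcow2
 * spec, header extensions start right after the file header, each laid out
 * as {be32 type, be32 len, len bytes of data}, with the next extension at
 * the following 8-byte boundary; a type of QCOW2_EXT_MAGIC_END terminates
 * the list.
 */
inline void qcow2_walk_exts_sketch(char *addr, u64 ext_start, u64 buf_end)
{
	u64 off = ext_start;

	while (off + 8 <= buf_end) {
		Qcow2HeaderExt ext(addr, off);

		if (ext.type == QCOW2_EXT_MAGIC_END)
			break;
		ext.dump();
		/* round the payload up to the next 8-byte boundary */
		off += 8 + ((ext.len + 7) & ~7ULL);
	}
}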
#define __INLINE_SET_GET(type, prop, v2_val) \
type get_##prop() const \
{ \
	if (offsetof(QCowHeader, prop) >= 72 && version == 2) \
		return v2_val; \
	switch (sizeof(type)) { \
	case 8: \
		return be64_to_cpu(((QCowHeader *)addr)->prop); \
	case 4: \
		return be32_to_cpu(((QCowHeader *)addr)->prop); \
	case 2: \
		return be16_to_cpu(((QCowHeader *)addr)->prop); \
	case 1: \
		return ((QCowHeader *)addr)->prop; \
	} \
} \
void set_##prop(type v) \
{ \
	QCowHeader *h = (QCowHeader *)addr; \
	if (offsetof(QCowHeader, prop) >= 72 && version == 2) \
		return; \
	switch (sizeof(type)) { \
	case 8: \
		h->prop = cpu_to_be64(v); \
		break; \
	case 4: \
		h->prop = cpu_to_be32(v); \
		break; \
	case 2: \
		h->prop = cpu_to_be16(v); \
		break; \
	case 1: \
		h->prop = v; \
		break; \
	} \
	Qcow2Meta::set_dirty(-1, true); \
}

#define INLINE_SET_GET(type, prop) __INLINE_SET_GET(type, prop, 0)

class Qcow2Header: public Qcow2Meta {
private:
	int populate();
	Qcow2HeaderExtString *backingfile_format_name;
	Qcow2HeaderExtString *ext_data_file_name;
	Qcow2HeaderExtFeatureNameTable *feature_name_table;
	Qcow2HeaderExtBitmaps *bitmaps;
	Qcow2HeaderExtEncHeader *enc_header_pointer;
public:
	const u32 magic;
	const u32 version;
	const u32 cluster_bits;
	const u32 refcount_order;

	//this looks ugly, but it is just for retrieving qs in the destructor
	//of Qcow2SliceMeta
	Qcow2State &qs;

	Qcow2Header(Qcow2State &qs);
	virtual ~Qcow2Header();
	virtual int load(Qcow2State &qs, const qcow2_io_ctx_t &ioc, u32 len,
			bool sync);
	virtual int flush(Qcow2State &qs, const qcow2_io_ctx_t &ioc, u64 off,
			u32 len);
	void dump_ext() const;

	INLINE_SET_GET(u32, magic);
	INLINE_SET_GET(u32, version);
	INLINE_SET_GET(u64, backing_file_offset);
	INLINE_SET_GET(u32, backing_file_size);
	INLINE_SET_GET(u32, cluster_bits);
	INLINE_SET_GET(u64, size);
	INLINE_SET_GET(u32, crypt_method);
	INLINE_SET_GET(u32, l1_size);
	INLINE_SET_GET(u64, l1_table_offset);
	INLINE_SET_GET(u64, refcount_table_offset);
	INLINE_SET_GET(u32, refcount_table_clusters);
	INLINE_SET_GET(u32, nb_snapshots);
	INLINE_SET_GET(u64, snapshots_offset);
	__INLINE_SET_GET(u64, incompatible_features, 0);
	__INLINE_SET_GET(u64, compatible_features, 0);
	__INLINE_SET_GET(u64, autoclear_features, 0);
	__INLINE_SET_GET(u32, refcount_order, 4);
	__INLINE_SET_GET(u32, header_length, 72);
	__INLINE_SET_GET(u8, compression_type, 0);

	friend std::ostream &operator<<(std::ostream &os, const Qcow2Header &h);

	bool is_extended_l2_entries() {
		return get_incompatible_features() & 0x8;
	}
};
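/*
 * Note on the accessors generated above: header fields at offset >= 72 only
 * exist in the version-3 header, so on a version-2 image the generated
 * getter returns the macro's v2_val default instead of reading the buffer
 * (e.g. get_refcount_order() yields 4 and get_header_length() yields 72),
 * and the matching setter becomes a no-op.  A hypothetical caller holding a
 * loaded header h:
 *
 *	u64 l1_off = h.get_l1_table_offset();	// big-endian load from buffer
 *	u32 order  = h.get_refcount_order();	// always 4 on a v2 image
 */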
class Qcow2MappingMeta: public Qcow2Meta {
private:
	IOWaiters io_waiters;
protected:
	u32 entry_bits_order;
	s32 next_free_idx;	//cache the next free idx

	//deprecated now
	bool entry_val_is_dirty(u64 val) {
		qcow2_assert(false);
		return true;
	}

	int __flush(Qcow2State &qs, const qcow2_io_ctx_t &ioc,
			u64 off, u32 len, bool run_fsync = false);
	int clear_dirty_entries(Qcow2State &qs,
			const qcow2_io_ctx_t &ioc, u64 off, u32 len);
public:
	Qcow2MappingMeta(Qcow2State &qs, u64 off, u32 buf_sz,
			const char *cls_name, u32 f);

	s32 get_nr_entries() {
		return (buf_sz << 3) >> entry_bits_order;
	}

	s32 get_next_free_idx() {
		return next_free_idx;
	}

	void set_next_free_idx(s32 idx) {
		if (idx < get_nr_entries())
			next_free_idx = idx;
	}

	void add_waiter(unsigned tag) {
		io_waiters.add_waiter(tag);
	}

	void add_waiter_idx(unsigned tag, unsigned entry_idx) {
		io_waiters.add_waiter_idx(tag, entry_idx);
	}

	void wakeup_all(const struct ublksrv_queue *q, unsigned my_tag) {
		io_waiters.wakeup_all(q, my_tag);
	}

	void wakeup_all_idx(const struct ublksrv_queue *q,
			unsigned my_tag, unsigned entry_idx) {
		io_waiters.wakeup_all_idx(q, my_tag, entry_idx);
	}

	virtual u64 get_entry(u32 idx) = 0;
	virtual void set_entry(u32 idx, u64 val) = 0;
	virtual int flush(Qcow2State &qs, const qcow2_io_ctx_t &ioc, u64 off,
			u32 len) = 0;

	//both load() and flush() should be async, and io_done() needs to be
	//called after both load() and flush() meta IO are done.
	virtual void io_done(Qcow2State &qs, const struct ublksrv_queue *q,
			const struct io_uring_cqe *cqe);
};

class Qcow2TopTable: public Qcow2MappingMeta {
private:
	u32 flush_blk_idx;

protected:
	u32 min_bs_bits;
	std::vector<bool> dirty;
public:
	Qcow2TopTable(Qcow2State &qs, u64 off, u32 buf_sz,
			const char *cls_name, u32 f);

	bool is_flushing(u32 idx) {
		if (Qcow2Meta::is_flushing() && idx == flush_blk_idx)
			return true;
		return false;
	}

	bool get_blk_dirty(u32 idx)
	{
		return dirty[idx];
	}

	void set_blk_dirty(u32 idx, bool val)
	{
		dirty[idx] = val;
	}

	u32 dirty_blks() {
		u32 total = 0;

		for (unsigned i = 0; i < dirty.size(); i++)
			if (dirty[i])
				total += 1;
		return total;
	}

	u32 dirty_blk_size() {
		return dirty.size();
	}

	int get_1st_dirty_blk() {
		for (unsigned i = 0; i < dirty.size(); i++)
			if (dirty[i])
				return i;
		return -1;
	}

	void set_flush_blk_idx(u32 idx)
	{
		flush_blk_idx = idx;
	}

	u32 get_flush_blk_idx()
	{
		return flush_blk_idx;
	}

	u64 single_entry_order() const
	{
		if (is_mapping_meta())
			return (2 * header.cluster_bits - 3);
		return 2 * header.cluster_bits + 3 - header.refcount_order;
	}

	bool prep_flush(const qcow2_io_ctx_t &ioc, u32 blk_idx);
	void unprep_flush(u32 blk_idx);

	virtual void io_done(Qcow2State &qs, const struct ublksrv_queue *q,
			const struct io_uring_cqe *);
	virtual int flush(Qcow2State &qs, const qcow2_io_ctx_t &ioc, u64 off,
			u32 len);
	bool has_dirty_slices(Qcow2State &qs, int idx);
};
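/*
 * Worked example for single_entry_order() above, assuming a 64KB-cluster
 * image (cluster_bits = 16) with the default refcount_order = 4:
 *
 * - mapping meta: an L2 table holds 2^(16 - 3) = 8192 entries, each mapping
 *   one 64KB cluster, so a single L1 entry covers 2^(2 * 16 - 3) = 512MB of
 *   virtual space;
 *
 * - refcount meta: a refcount block holds 2^(16 + 3 - 4) = 32768 16-bit
 *   refcounts, so a single refcount-table entry covers
 *   2^(2 * 16 + 3 - 4) = 2GB worth of host clusters.
 */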
//allocating detection needs review!!!
class Qcow2L1Table: public Qcow2TopTable {
public:
	u32 offset_to_idx(u64 virt_offset) {
		u32 cluster_bits = header.cluster_bits;
		bool has_extended_l2_entries = header.is_extended_l2_entries();
		u32 idx = (virt_offset >> cluster_bits) >>
			(cluster_bits - 3 - !!has_extended_l2_entries);

		return idx;
	}

	u64 get_entry_fast(u32 idx) {
		u64 val = be64_to_cpu(((const u64 *)addr)[idx]);

		return val;
	}

	void set_entry_fast(u32 idx, u64 val) {
		unsigned i = idx >> (min_bs_bits - 3);

		((u64 *)addr)[idx] = cpu_to_be64(val);
		set_dirty(idx, true);

		qcow2_assert(i < dirty.size());
		dirty[i] = true;
	}

	bool entry_allocated(u64 entry) {
		return entry != 0;
	}

	bool entry_is_dirty(u32 idx) {
		return entry_val_is_dirty(get_entry(idx));
	}

	Qcow2L1Table(Qcow2State &qs);
	virtual int load(Qcow2State &qs, const qcow2_io_ctx_t &ioc, u32 len,
			bool sync);
	virtual u64 get_entry(u32 idx);
	virtual void set_entry(u32 idx, u64 val);
	void dump();
};

class Qcow2RefcountTable: public Qcow2TopTable {
public:
	u32 offset_to_idx(u64 virt_offset) {
		u32 cluster_bits = header.cluster_bits;
		u32 idx = (virt_offset >> cluster_bits) >>
			(cluster_bits + 3 - header.refcount_order);

		return idx;
	}

	void set_entry_fast(u32 idx, u64 val) {
		unsigned i = idx >> (min_bs_bits - 3);

		((u64 *)addr)[idx] = cpu_to_be64(val);
		set_dirty(idx, true);

		qcow2_assert(i < dirty.size());
		dirty[i] = true;
	}

	u64 get_entry_fast(u32 idx) {
		return be64_to_cpu(((u64 *)addr)[idx]);
	}

	bool entry_is_dirty(u32 idx) {
		return entry_val_is_dirty(get_entry(idx));
	}

	Qcow2RefcountTable(Qcow2State &qs);
	virtual int load(Qcow2State &qs, const qcow2_io_ctx_t &ioc, u32 len,
			bool sync);
	virtual u64 get_entry(u32 idx);
	virtual void set_entry(u32 idx, u64 val);
	void dump();
};
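/*
 * Worked example for offset_to_idx() in the two classes above, again with
 * cluster_bits = 16, refcount_order = 4 and no extended L2 entries: for
 * virt_offset = 1GB, Qcow2L1Table::offset_to_idx() yields
 * (2^30 >> 16) >> (16 - 3) = 2, consistent with each L1 entry covering
 * 512MB, while Qcow2RefcountTable::offset_to_idx() yields
 * (2^30 >> 16) >> (16 + 3 - 4) = 0 because one refcount-table entry
 * covers 2GB.
 */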
class Qcow2SliceMeta: public Qcow2MappingMeta {
protected:
	bool prep_flush(const qcow2_io_ctx_t &ioc);
	void unprep_flush();
	virtual void wait_clusters(Qcow2State &qs, const qcow2_io_ctx_t &ioc);
#ifdef DEBUG_QCOW2_META_VALIDATE
	void *validate_addr;
#endif
public:
	unsigned int parent_idx;	//index of the parent-table entry that
					//points to us

	Qcow2SliceMeta(Qcow2State &qs, u64 off, u32 buf_sz,
			const char *cls_name, u32 p_idx, u32 f);
	virtual int load(Qcow2State &qs, const qcow2_io_ctx_t &ioc, u32 len,
			bool sync);
	virtual int flush(Qcow2State &qs, const qcow2_io_ctx_t &ioc, u64 off,
			u32 len) = 0;
	virtual void dump() = 0;
	virtual ~Qcow2SliceMeta();
	virtual void get_dirty_range(u64 *start, u64 *end) = 0;

	//both load() and flush() should be async, and io_done() needs to be
	//called after both load() and flush() meta IO are done.
	virtual void io_done(Qcow2State &qs, const struct ublksrv_queue *q,
			const struct io_uring_cqe *cqe);
	int zero_my_cluster(Qcow2State &qs, const qcow2_io_ctx_t &ioc);

	void reclaim_me();

	u64 get_offset() const {
		return offset;
	}

	void get_ref() {
		qcow2_assert(refcnt > 0);
		refcnt += 1;
	}

	void put_ref() {
		qcow2_assert(refcnt > 0);
		if (--refcnt == 0)
			reclaim_me();
	}

	//In theory virt_offset() should be a virtual function; however it is
	//a fast-path helper, so keep it in this parent class and use the
	//MAPPING flag to compute the proper return value.
	u64 virt_offset() {
		if (is_mapping_meta()) {
			u64 base = ((u64)parent_idx) << (header.cluster_bits -
					3 + header.cluster_bits);
			u64 clusters = (get_offset() &
					((1ULL << header.cluster_bits) - 1)) >> 3;

			return base + (clusters << header.cluster_bits);
		}

		const u64 single_entry_order = 2 * header.cluster_bits +
			3 - header.refcount_order;
		u32 slice_idx = (get_offset() & ((1U << header.cluster_bits) - 1)) >>
			QCOW2_PARA::REFCOUNT_BLK_SLICE_BITS;
		u32 slice_virt_bits = header.cluster_bits + 3 -
			header.refcount_order + QCOW2_PARA::REFCOUNT_BLK_SLICE_BITS;

		return ((u64)parent_idx << single_entry_order) +
			((u64)slice_idx << slice_virt_bits);
	}
#ifdef DEBUG_QCOW2_META_VALIDATE
	void io_done_validate(Qcow2State &qs, const struct ublksrv_queue *q,
			const struct io_uring_cqe *cqe);
#else
	void io_done_validate(Qcow2State &qs, const struct ublksrv_queue *q,
			const struct io_uring_cqe *cqe) {}
#endif
};
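/*
 * Worked example for Qcow2SliceMeta::virt_offset() above (mapping slice,
 * cluster_bits = 16): a slice of the L2 table referenced by L1 entry
 * parent_idx = 2, stored 0x1000 bytes into its L2 cluster, starts at
 * virtual offset (2 << 29) + ((0x1000 / 8) << 16) = 1GB + 32MB; 2 << 29
 * is the space covered by the first two L1 entries, and 0x1000 / 8 = 512
 * is the number of mapping entries (each covering 64KB) stored before the
 * slice in that L2 table.
 */

/*
 * Note on Qcow2RefcountBlock::{get,set}_entry_fast() below: a refcount
 * block packs 2^refcount_order-bit counters, so orders 0-2 store several
 * counters per byte while orders 3-6 use plain u8/u16/u32/u64 big-endian
 * arrays.  For example, with refcount_order = 1, entry 5 lives in byte
 * 5 / 4 = 1 at bit offset 2 * (5 % 4) = 2 and is extracted with mask 0x3.
 */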
class Qcow2RefcountBlock: public Qcow2SliceMeta {
public:
	//first entry dirtied since the last flush; (unsigned)-1 means none
	unsigned dirty_start_idx;

	u64 get_entry_fast(u32 idx) {
		switch (header.refcount_order) {
		case 0:
			return (((const u8 *)addr)[idx / 8] >> (idx % 8)) & 0x1;
		case 1:
			return (((const u8 *)addr)[idx / 4] >> (2 * (idx % 4))) & 0x3;
		case 2:
			return (((const u8 *)addr)[idx / 2] >> (4 * (idx % 2))) & 0xf;
		case 3:
			return ((const u8 *)addr)[idx];
		case 4:
			return be16_to_cpu(((const u16 *)addr)[idx]);
		case 5:
			return be32_to_cpu(((const u32 *)addr)[idx]);
		case 6:
			return be64_to_cpu(((const u64 *)addr)[idx]);
		}
		return 0;
	}

	void set_entry_fast(u32 idx, u64 val) {
		switch (header.refcount_order) {
		case 0:
			qcow2_assert(!(val >> 1));
			((u8 *)addr)[idx / 8] &= ~(0x1 << (idx % 8));
			((u8 *)addr)[idx / 8] |= val << (idx % 8);
			break;
		case 1:
			qcow2_assert(!(val >> 2));
			((u8 *)addr)[idx / 4] &= ~(0x3 << (2 * (idx % 4)));
			((u8 *)addr)[idx / 4] |= val << (2 * (idx % 4));
			break;
		case 2:
			qcow2_assert(!(val >> 4));
			((u8 *)addr)[idx / 2] &= ~(0xf << (4 * (idx % 2)));
			((u8 *)addr)[idx / 2] |= val << (4 * (idx % 2));
			break;
		case 3:
			qcow2_assert(!(val >> 8));
			((u8 *)addr)[idx] = val;
			break;
		case 4:
			qcow2_assert(!(val >> 16));
			((u16 *)addr)[idx] = cpu_to_be16(val);
			break;
		case 5:
			qcow2_assert(!(val >> 32));
			((u32 *)addr)[idx] = cpu_to_be32(val);
			break;
		case 6:
			((u64 *)addr)[idx] = cpu_to_be64(val);
			break;
		}
		set_dirty(idx, true);
		if (dirty_start_idx == ((unsigned)-1))
			dirty_start_idx = idx;
	}

	bool entry_is_dirty(u32 idx) {
		return idx >= dirty_start_idx;
	}

	Qcow2RefcountBlock(Qcow2State &qs, u64 off, u32 p_idx, u32 f);
	void reset(Qcow2State &qs, u64 off, u32 p_idx, u32 f);
	virtual ~Qcow2RefcountBlock();
	virtual u64 get_entry(u32 idx);
	virtual void set_entry(u32 idx, u64 val);
	virtual int flush(Qcow2State &qs, const qcow2_io_ctx_t &ioc, u64 off,
			u32 len);
	virtual void dump();
	virtual void get_dirty_range(u64 *start, u64 *end);
};

//allocating detection needs review!!!
class Qcow2L2Table: public Qcow2SliceMeta {
private:
	//the two are valid only if this slice is dirty
	u64 dirty_start, dirty_end;
public:
	u64 get_entry_fast(u32 idx) {
		u64 val = be64_to_cpu(((const u64 *)addr)[idx]);

		return val;
	}

	u64 get_extended_entry(u32 idx) {
		return 0;
	}

	void set_entry_fast(u32 idx, u64 val) {
		((u64 *)addr)[idx] = cpu_to_be64(val);
		set_dirty(idx, true);
	}

	bool entry_allocated(u64 entry) {
		return entry != 0;
	}

	bool entry_is_dirty(u32 idx) {
		return entry_val_is_dirty(get_entry(idx));
	}

	Qcow2L2Table(Qcow2State &qs, u64 off, u32 p_idx, u32 f);
	void reset(Qcow2State &qs, u64 off, u32 p_idx, u32 f);
	virtual ~Qcow2L2Table();
	virtual u64 get_entry(u32 idx);
	virtual void set_entry(u32 idx, u64 val);
	virtual int flush(Qcow2State &qs, const qcow2_io_ctx_t &ioc, u64 off,
			u32 len);
	virtual void dump();
	virtual void get_dirty_range(u64 *start, u64 *end);
	//virtual int flush(Qcow2State &qs, qcow2_io_ctx_t ioc, bool auto_free = false);
	virtual void io_done(Qcow2State &qs, const struct ublksrv_queue *q,
			const struct io_uring_cqe *cqe);
#ifdef DEBUG_QCOW2_META_VALIDATE
	void check(Qcow2State &qs, const char *func, int line);
	void check_duplicated_clusters(Qcow2State &qs, int tag,
			const char *func, int line);
#else
	void check(Qcow2State &qs, const char *func, int line) {}
	void check_duplicated_clusters(Qcow2State &qs, int tag,
			const char *func, int line) {}
#endif
};

#endif