xref: /aosp_15_r20/system/core/fs_mgr/libsnapshot/snapuserd/user-space-merge/read_worker.cpp (revision 00c7fec1bb09f3284aad6a6f96d2f63dfc3650ad)
/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <android-base/properties.h>

#include <libsnapshot/cow_format.h>
#include <pthread.h>

#include "read_worker.h"
#include "snapuserd_core.h"
#include "user-space-merge/worker.h"
#include "utility.h"

namespace android {
namespace snapshot {

using namespace android;
using namespace android::dm;
using android::base::unique_fd;
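
// ReadWorker services read I/O on a snapshot block device. Every requested
// 4k block is either routed to the base device (the block is untouched by
// the OTA, or its merge already completed) or reconstructed from the COW:
// REPLACE ops are read from the COW file and decompressed if needed, ZERO
// ops are zero-filled, and COPY/XOR ops read from the backing source device
// in coordination with the merge thread, since those ops are merged in order.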

void ReadWorker::CloseFds() {
    block_server_ = {};
    backing_store_fd_ = {};
    backing_store_direct_fd_ = {};
    Worker::CloseFds();
}

ReadWorker::ReadWorker(const std::string& cow_device, const std::string& backing_device,
                       const std::string& misc_name, const std::string& base_path_merge,
                       std::shared_ptr<SnapshotHandler> snapuserd,
                       std::shared_ptr<IBlockServerOpener> opener, bool direct_read)
    : Worker(cow_device, misc_name, base_path_merge, snapuserd),
      backing_store_device_(backing_device),
      direct_read_(direct_read),
      block_server_opener_(opener),
      aligned_buffer_(std::unique_ptr<void, decltype(&::free)>(nullptr, &::free)) {}

// Process a replace operation: read the data for this op from the internal
// COW format, decompressing it if the block is compressed.
bool ReadWorker::ProcessReplaceOp(const CowOperation* cow_op, void* buffer, size_t buffer_size) {
    if (!reader_->ReadData(cow_op, buffer, buffer_size)) {
        SNAP_LOG(ERROR) << "ProcessReplaceOp failed for block " << cow_op->new_block
                        << " buffer_size: " << buffer_size;
        return false;
    }
    return true;
}
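
// Read one block from the backing (source) device at the offset encoded in
// |cow_op|. When O_DIRECT reads are enabled and the offset is block-aligned,
// the read goes through the O_DIRECT fd into the page-aligned bounce buffer
// (O_DIRECT requires aligned buffers and offsets) and is then copied out;
// otherwise a buffered read lands directly in |buffer|.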
bool ReadWorker::ReadFromSourceDevice(const CowOperation* cow_op, void* buffer) {
    uint64_t offset;
    if (!reader_->GetSourceOffset(cow_op, &offset)) {
        SNAP_LOG(ERROR) << "ReadFromSourceDevice: Failed to get source offset";
        return false;
    }
    SNAP_LOG(DEBUG) << "ReadFromSourceDevice: new-block: " << cow_op->new_block
                    << " Op: " << *cow_op;

    if (direct_read_ && IsBlockAligned(offset)) {
        if (!android::base::ReadFullyAtOffset(backing_store_direct_fd_, aligned_buffer_.get(),
                                              BLOCK_SZ, offset)) {
            SNAP_PLOG(ERROR) << "O_DIRECT read failed at offset: " << offset;
            return false;
        }
        std::memcpy(buffer, aligned_buffer_.get(), BLOCK_SZ);
        return true;
    }

    if (!android::base::ReadFullyAtOffset(backing_store_fd_, buffer, BLOCK_SZ, offset)) {
        std::string op;
        if (cow_op->type() == kCowCopyOp) {
            op = "Copy-op";
        } else {
            op = "Xor-op";
        }
        SNAP_PLOG(ERROR) << op << " failed. Read from backing store: " << backing_store_device_
                         << " at block: " << offset / BLOCK_SZ << " offset: " << offset % BLOCK_SZ;
        return false;
    }

    return true;
}

// Process a copy operation: the data to return is the current content of the
// backing block device at the source location encoded in the op.
bool ReadWorker::ProcessCopyOp(const CowOperation* cow_op, void* buffer) {
    return ReadFromSourceDevice(cow_op, buffer);
}
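
// An XOR op stores the delta (new_data ^ old_data) in the COW. To serve a
// read, fetch old_data from the source device into |buffer|, read the delta
// into xor_buffer_, then combine them word-by-word so |buffer| ends up
// holding the new data:
//
//   buffer[i] = old_data[i] ^ delta[i]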
bool ReadWorker::ProcessXorOp(const CowOperation* cow_op, void* buffer) {
    using WordType = std::conditional_t<sizeof(void*) == sizeof(uint64_t), uint64_t, uint32_t>;

    if (!ReadFromSourceDevice(cow_op, buffer)) {
        return false;
    }

    if (xor_buffer_.empty()) {
        xor_buffer_.resize(BLOCK_SZ);
    }
    CHECK(xor_buffer_.size() == BLOCK_SZ);

    ssize_t size = reader_->ReadData(cow_op, xor_buffer_.data(), xor_buffer_.size());
    if (size != BLOCK_SZ) {
        SNAP_LOG(ERROR) << "ProcessXorOp failed for block " << cow_op->new_block
                        << ", return value: " << size;
        return false;
    }

    auto xor_in = reinterpret_cast<const WordType*>(xor_buffer_.data());
    auto xor_out = reinterpret_cast<WordType*>(buffer);
    auto num_words = BLOCK_SZ / sizeof(WordType);

    for (size_t i = 0; i < num_words; i++) {
        xor_out[i] ^= xor_in[i];
    }
    return true;
}

bool ReadWorker::ProcessZeroOp(void* buffer) {
    memset(buffer, 0, BLOCK_SZ);
    return true;
}
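
// COPY and XOR ops are "ordered": their source blocks are consumed by the
// merge thread, so a read must be coordinated with the merge state of the
// block's group:
//   - GROUP_MERGE_COMPLETED: the data now lives in the base device.
//   - GROUP_MERGE_PENDING: reconstruct the block from the COW, then drop the
//     refcount so the merge of this block can proceed.
//   - GROUP_MERGE_RA_READY / GROUP_MERGE_IN_PROGRESS: ProcessMergingBlock()
//     has already filled |buffer| from the read-ahead cache.
//   - Any other state fails the I/O.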
bool ReadWorker::ProcessOrderedOp(const CowOperation* cow_op, void* buffer) {
    MERGE_GROUP_STATE state = snapuserd_->ProcessMergingBlock(cow_op->new_block, buffer);

    switch (state) {
        case MERGE_GROUP_STATE::GROUP_MERGE_COMPLETED: {
            // Merge is completed for this COW op; just read directly from
            // the base device.
            SNAP_LOG(DEBUG) << "Merge-completed: Reading from base device sector: "
                            << ChunkToSector(cow_op->new_block)
                            << " Block-number: " << cow_op->new_block;
            if (!ReadDataFromBaseDevice(ChunkToSector(cow_op->new_block), buffer, BLOCK_SZ)) {
                SNAP_LOG(ERROR) << "ReadDataFromBaseDevice failed at sector: "
                                << ChunkToSector(cow_op->new_block) << " after merge-complete.";
                return false;
            }
            return true;
        }
        case MERGE_GROUP_STATE::GROUP_MERGE_PENDING: {
            bool ret;
            if (cow_op->type() == kCowCopyOp) {
                ret = ProcessCopyOp(cow_op, buffer);
            } else {
                ret = ProcessXorOp(cow_op, buffer);
            }

            // I/O is complete - decrement the refcount irrespective of the return
            // status.
            snapuserd_->NotifyIOCompletion(cow_op->new_block);
            return ret;
        }
        // We already have the data in the buffer retrieved from the RA thread.
        // Nothing further to process.
        case MERGE_GROUP_STATE::GROUP_MERGE_RA_READY: {
            [[fallthrough]];
        }
        case MERGE_GROUP_STATE::GROUP_MERGE_IN_PROGRESS: {
            return true;
        }
        default: {
            // All other states (GROUP_MERGE_FAILED and GROUP_INVALID): fail the I/O.
            return false;
        }
    }

    return false;
}
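
// Dispatch a single COW op, producing exactly BLOCK_SZ bytes in |buffer|.
// Note that a REPLACE op may cover a compressed span larger than one block
// (multi-block compression); the whole span is decompressed into a scratch
// buffer and only the leading block is copied out here.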
bool ReadWorker::ProcessCowOp(const CowOperation* cow_op, void* buffer) {
    if (cow_op == nullptr) {
        SNAP_LOG(ERROR) << "ProcessCowOp: Invalid cow_op";
        return false;
    }

    switch (cow_op->type()) {
        case kCowReplaceOp: {
            size_t buffer_size = CowOpCompressionSize(cow_op, BLOCK_SZ);
            uint8_t chunk[buffer_size];
            if (!ProcessReplaceOp(cow_op, chunk, buffer_size)) {
                return false;
            }
            std::memcpy(buffer, chunk, BLOCK_SZ);
            return true;
        }

        case kCowZeroOp: {
            return ProcessZeroOp(buffer);
        }

        case kCowCopyOp:
            [[fallthrough]];
        case kCowXorOp: {
            return ProcessOrderedOp(cow_op, buffer);
        }

        default: {
            // Cast to int so the op type is logged as a number, not a char.
            SNAP_LOG(ERROR) << "Unknown operation-type found: "
                            << static_cast<int>(cow_op->type());
        }
    }
    return false;
}
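
// A minimal sketch of how a handler might drive this worker (hypothetical
// caller; the real wiring lives in SnapshotHandler):
//
//   auto worker = std::make_shared<ReadWorker>(cow_device, backing_device,
//                                              misc_name, base_device,
//                                              handler, opener,
//                                              /*direct_read=*/true);
//   if (worker->Init()) {
//       worker->Run();  // Blocks, serving I/O until the block server exits.
//   }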

bool ReadWorker::Init() {
    if (!Worker::Init()) {
        return false;
    }

    const size_t compression_factor = reader_->GetMaxCompressionSize();
    if (!compression_factor) {
        SNAP_LOG(ERROR) << "Compression factor is set to 0 which is invalid.";
        return false;
    }
    decompressed_buffer_ = std::make_unique<uint8_t[]>(compression_factor);

    backing_store_fd_.reset(open(backing_store_device_.c_str(), O_RDONLY));
    if (backing_store_fd_ < 0) {
        SNAP_PLOG(ERROR) << "Open Failed: " << backing_store_device_;
        return false;
    }

    if (direct_read_) {
        backing_store_direct_fd_.reset(open(backing_store_device_.c_str(), O_RDONLY | O_DIRECT));
        if (backing_store_direct_fd_ < 0) {
            SNAP_PLOG(ERROR) << "Open Failed with O_DIRECT: " << backing_store_device_;
            direct_read_ = false;
        } else {
            void* aligned_addr;
            ssize_t page_size = getpagesize();
            // posix_memalign() returns 0 on success and an error number (not
            // -1) on failure.
            if (posix_memalign(&aligned_addr, page_size, page_size) != 0) {
                direct_read_ = false;
                SNAP_PLOG(ERROR) << "posix_memalign failed. page_size: " << page_size
                                 << " read_sz: " << page_size;
            } else {
                aligned_buffer_.reset(aligned_addr);
            }
        }
    }

    block_server_ = block_server_opener_->Open(this, PAYLOAD_BUFFER_SZ);
    if (!block_server_) {
        SNAP_PLOG(ERROR) << "Unable to open block server";
        return false;
    }
    return true;
}

bool ReadWorker::Run() {
    SNAP_LOG(INFO) << "Processing snapshot I/O requests...";

    pthread_setname_np(pthread_self(), "ReadWorker");
    auto worker_thread_priority = android::base::GetUintProperty<uint32_t>(
            "ro.virtual_ab.worker_thread_priority", ANDROID_PRIORITY_NORMAL);

    if (!SetThreadPriority(worker_thread_priority)) {
        SNAP_PLOG(ERROR) << "Failed to set thread priority";
    }

    // Start serving I/O.
    while (true) {
        if (!block_server_->ProcessRequests()) {
            break;
        }
    }

    CloseFds();
    reader_->CloseCowFd();

    return true;
}
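
// Read |read_size| bytes (at most one block) from the base merge device at
// the byte offset for |sector| (512-byte sectors, so the offset is
// sector << SECTOR_SHIFT).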
bool ReadWorker::ReadDataFromBaseDevice(sector_t sector, void* buffer, size_t read_size) {
    CHECK(read_size <= BLOCK_SZ);

    loff_t offset = sector << SECTOR_SHIFT;
    if (!android::base::ReadFullyAtOffset(base_path_merge_fd_, buffer, read_size, offset)) {
        SNAP_PLOG(ERROR) << "ReadDataFromBaseDevice failed. fd: " << base_path_merge_fd_
                         << " at sector: " << sector << " size: " << read_size;
        return false;
    }

    return true;
}

bool ReadWorker::GetCowOpBlockOffset(const CowOperation* cow_op, uint64_t io_block,
                                     off_t* block_offset) {
    // If this is a replace op, get the block offset of this I/O
    // block. Multi-block compression is supported only for
    // Replace ops.
    //
    // Note: This can be extended when we support COPY and XOR ops down the
    // line, as the blocks are mostly contiguous.
    if (cow_op && cow_op->type() == kCowReplaceOp) {
        return GetBlockOffset(cow_op, io_block, BLOCK_SZ, block_offset);
    }
    return false;
}
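
// Serve a block-aligned read of |sz| bytes starting at |sector|, processing
// at most PAYLOAD_BUFFER_SZ (1 MiB) per iteration. For each 4k block, the
// sector is looked up in the sorted chunk_vec:
//   - Exact match: reconstruct the block from its COW op.
//   - No match: either the block lives inside a multi-block compressed
//     REPLACE op (step back one entry and compute the offset into the
//     decompressed span), or the block is untouched by the OTA and is read
//     straight from the base device.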
bool ReadWorker::ReadAlignedSector(sector_t sector, size_t sz) {
    size_t remaining_size = sz;
    std::vector<std::pair<sector_t, const CowOperation*>>& chunk_vec = snapuserd_->GetChunkVec();
    int ret = 0;

    do {
        // Process 1MB payload at a time
        size_t read_size = std::min(PAYLOAD_BUFFER_SZ, remaining_size);

        size_t total_bytes_read = 0;
        const CowOperation* prev_op = nullptr;
        while (read_size) {
            // We need to check every 4k block to verify if it is
            // present in the mapping.
            size_t size = std::min(BLOCK_SZ, read_size);

            auto it = std::lower_bound(chunk_vec.begin(), chunk_vec.end(),
                                       std::make_pair(sector, nullptr), SnapshotHandler::compare);
            const bool sector_not_found = (it == chunk_vec.end() || it->first != sector);

            void* buffer = block_server_->GetResponseBuffer(BLOCK_SZ, size);
            if (!buffer) {
                SNAP_LOG(ERROR) << "AcquireBuffer failed in ReadAlignedSector";
                return false;
            }

            if (sector_not_found) {
                // Find the 4k block
                uint64_t io_block = SectorToChunk(sector);
                // Get the previous iterator. Since the vector is sorted, the
                // lookup of this sector can fall in a range of blocks if
                // the CowOperation has compressed multiple blocks.
                if (it != chunk_vec.begin()) {
                    std::advance(it, -1);
                }

                bool is_mapping_present = true;

                // The vector itself is empty. This can happen if the block was
                // not changed by the OTA, or if the merge was already complete
                // but the snapshot table was not yet collapsed.
                if (it == chunk_vec.end()) {
                    is_mapping_present = false;
                }

                const CowOperation* cow_op = nullptr;
                // Relative offset within the compressed multiple blocks
                off_t block_offset = 0;
                if (is_mapping_present) {
                    // Get the nearest operation found in the vector
                    cow_op = it->second;
                    is_mapping_present = GetCowOpBlockOffset(cow_op, io_block, &block_offset);
                }

                // Thus, we have a case wherein the sector was not found in the
                // sorted vector; however, we do have a mapping of this sector
                // embedded in one of the CowOperations spanning multiple
                // blocks.
                if (is_mapping_present) {
                    // block_offset = 0 would mean that the CowOperation should
                    // already be in the sorted vector. Hence, lookup should
                    // have already found it. If not, this is a bug.
                    if (block_offset == 0) {
                        SNAP_LOG(ERROR)
                                << "GetBlockOffset returned offset 0 for io_block: " << io_block;
                        return false;
                    }

                    // Get the CowOperation's actual compression size
                    size_t compression_size = CowOpCompressionSize(cow_op, BLOCK_SZ);
                    // The offset cannot be greater than the compression size
                    if (block_offset > compression_size) {
                        SNAP_LOG(ERROR) << "Invalid I/O block found. io_block: " << io_block
                                        << " CowOperation-new-block: " << cow_op->new_block
                                        << " compression-size: " << compression_size;
                        return false;
                    }

                    // Cached copy from the previous iteration. Just retrieve
                    // the data
                    if (prev_op && prev_op->new_block == cow_op->new_block) {
                        std::memcpy(buffer, (char*)decompressed_buffer_.get() + block_offset, size);
                    } else {
                        // Get the data from the disk based on the compression
                        // size
                        if (!ProcessReplaceOp(cow_op, decompressed_buffer_.get(),
                                              compression_size)) {
                            return false;
                        }
                        // Copy the data from the decompressed buffer relative
                        // to the I/O block offset.
                        std::memcpy(buffer, (char*)decompressed_buffer_.get() + block_offset, size);
                        // Cache this CowOperation pointer for successive I/O
                        // operations. Since the request is sequential and the
                        // block is already decompressed, subsequent I/O blocks
                        // can fetch the data directly from this decompressed
                        // buffer.
                        prev_op = cow_op;
                    }
                } else {
                    // Block not found in the map - which means this block was
                    // not changed by the OTA. Just route the I/O to the base
                    // device.
                    if (!ReadDataFromBaseDevice(sector, buffer, size)) {
                        SNAP_LOG(ERROR) << "ReadDataFromBaseDevice failed";
                        return false;
                    }
                }
                ret = size;
            } else {
                // We found the sector in the mapping. Check the type of COW op
                // and process it.
                if (!ProcessCowOp(it->second, buffer)) {
                    SNAP_LOG(ERROR)
                            << "ProcessCowOp failed, sector = " << sector << ", size = " << sz;
                    return false;
                }

                ret = std::min(BLOCK_SZ, read_size);
            }

            read_size -= ret;
            total_bytes_read += ret;
            sector += (ret >> SECTOR_SHIFT);
        }

        if (!SendBufferedIo()) {
            return false;
        }

        SNAP_LOG(DEBUG) << "SendBufferedIo success total_bytes_read: " << total_bytes_read
                        << " remaining_size: " << remaining_size;
        remaining_size -= total_bytes_read;
    } while (remaining_size > 0);

    return true;
}
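
// Whether |requested_offset| falls inside the (possibly multi-block)
// compressed span mapped by |cow_op|, which starts at |cow_op_offset|. Only
// REPLACE ops can span more than one block.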
bool ReadWorker::IsMappingPresent(const CowOperation* cow_op, loff_t requested_offset,
                                  loff_t cow_op_offset) {
    const bool replace_op = (cow_op->type() == kCowReplaceOp);
    if (replace_op) {
        size_t max_compressed_size = CowOpCompressionSize(cow_op, BLOCK_SZ);
        if ((requested_offset >= cow_op_offset) &&
            (requested_offset < (cow_op_offset + max_compressed_size))) {
            return true;
        }
    }
    return false;
}
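
// Serve the unaligned head of a request: |sector| lies within the block (or
// multi-block compressed span) mapped by |it|. Returns the number of bytes
// produced, or -1 on failure.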
int ReadWorker::ReadUnalignedSector(
        sector_t sector, size_t size,
        std::vector<std::pair<sector_t, const CowOperation*>>::iterator& it) {
    SNAP_LOG(DEBUG) << "ReadUnalignedSector: sector " << sector << " size: " << size
                    << " Aligned sector: " << it->first;

    loff_t requested_offset = sector << SECTOR_SHIFT;
    loff_t final_offset = (it->first) << SECTOR_SHIFT;

    const CowOperation* cow_op = it->second;
    if (IsMappingPresent(cow_op, requested_offset, final_offset)) {
        size_t buffer_size = CowOpCompressionSize(cow_op, BLOCK_SZ);
        uint8_t chunk[buffer_size];
        // Read the entire decompressed buffer based on the block-size
        if (!ProcessReplaceOp(cow_op, chunk, buffer_size)) {
            return -1;
        }
        size_t skip_offset = (requested_offset - final_offset);
        size_t write_sz = std::min(size, buffer_size - skip_offset);

        auto buffer =
                reinterpret_cast<uint8_t*>(block_server_->GetResponseBuffer(BLOCK_SZ, write_sz));
        if (!buffer) {
            SNAP_LOG(ERROR) << "ReadUnalignedSector failed to allocate buffer";
            return -1;
        }

        std::memcpy(buffer, (char*)chunk + skip_offset, write_sz);
        return write_sz;
    }

    int num_sectors_skip = sector - it->first;
    size_t skip_size = num_sectors_skip << SECTOR_SHIFT;
    size_t write_size = std::min(size, BLOCK_SZ - skip_size);
    auto buffer =
            reinterpret_cast<uint8_t*>(block_server_->GetResponseBuffer(BLOCK_SZ, write_size));
    if (!buffer) {
        SNAP_LOG(ERROR) << "ReadUnalignedSector failed to allocate buffer";
        return -1;
    }

    if (!ProcessCowOp(it->second, buffer)) {
        SNAP_LOG(ERROR) << "ReadUnalignedSector failed for sector: " << sector
                        << " size: " << size << " Aligned sector: " << it->first;
        return -1;
    }

    if (skip_size) {
        if (skip_size == BLOCK_SZ) {
            SNAP_LOG(ERROR) << "Invalid un-aligned IO request at sector: " << sector
                            << " Base-sector: " << it->first;
            return -1;
        }
        memmove(buffer, buffer + skip_size, write_size);
    }
    return write_size;
}
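
// Handle a request whose start is not 4k-aligned: serve the unaligned head
// (from a COW op if one maps it, else from the base device), then hand any
// remainder, which is now block-aligned, to ReadAlignedSector().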
bool ReadWorker::ReadUnalignedSector(sector_t sector, size_t size) {
    std::vector<std::pair<sector_t, const CowOperation*>>& chunk_vec = snapuserd_->GetChunkVec();

    auto it = std::lower_bound(chunk_vec.begin(), chunk_vec.end(), std::make_pair(sector, nullptr),
                               SnapshotHandler::compare);

    // |-------|-------|-------|
    // 0       1       2       3
    //
    // Block 0 - op 1
    // Block 1 - op 2
    // Block 2 - op 3
    //
    // chunk_vec will have blocks 0, 1, 2 which map to the relevant COW ops.
    //
    // Each block is 4k bytes. Thus, the last block will span 8 sectors
    // ranging till block 3 (however, block 3 won't be in chunk_vec as
    // it doesn't have any mapping to COW ops). Now, if we get an I/O request
    // for a sector spanning between block 2 and block 3, we need to step back
    // and get hold of the last element.
    //
    // Additionally, we need to make sure that the requested sector is
    // indeed within the range of the final sector. It is perfectly valid
    // to get an I/O request for block 3 and beyond which are not mapped
    // to any COW ops. In that case, we just need to read from the base
    // device.
    bool merge_complete = false;
    if (it == chunk_vec.end()) {
        if (chunk_vec.size() > 0) {
            // I/O request beyond the last mapped sector
            it = std::prev(chunk_vec.end());
        } else {
            // This can happen when a partition merge is complete but the
            // snapshot state in /metadata is not yet deleted; if the device is
            // rebooted during this window, the subsequent attempt will mount
            // the snapshot. However, since the merge was completed we wouldn't
            // have any mapping to COW ops, so chunk_vec will be empty. In that
            // case, mark this as merge_complete and route the I/O to the base
            // device.
            merge_complete = true;
        }
    } else if (it->first != sector) {
        if (it != chunk_vec.begin()) {
            --it;
        }
    } else {
        return ReadAlignedSector(sector, size);
    }

    loff_t requested_offset = sector << SECTOR_SHIFT;

    loff_t final_offset = 0;
    if (!merge_complete) {
        final_offset = it->first << SECTOR_SHIFT;
    }

    // Since a COW op spans a 4k block, we need to make sure that the requested
    // offset is within the 4k region. Consider the following case:
    //
    // |-------|-------|-------|
    // 0       1       2       3
    //
    // Block 0 - op 1
    // Block 1 - op 2
    //
    // We have an I/O request for a sector between block 2 and block 3. However,
    // we have mapping to COW ops only for block 0 and block 1. Thus, the
    // requested offset in this case is beyond the last mapped COW op size (which
    // is block 1 in this case).

    size_t remaining_size = size;
    int ret = 0;

    const CowOperation* cow_op = it->second;
    if (!merge_complete && (requested_offset >= final_offset) &&
        (((requested_offset - final_offset) < BLOCK_SZ) ||
         IsMappingPresent(cow_op, requested_offset, final_offset))) {
        // Read the partial un-aligned data
        ret = ReadUnalignedSector(sector, remaining_size, it);
        if (ret < 0) {
            SNAP_LOG(ERROR) << "ReadUnalignedSector failed for sector: " << sector
                            << " size: " << size << " it->sector: " << it->first;
            return false;
        }

        remaining_size -= ret;
        sector += (ret >> SECTOR_SHIFT);

        // Send the data back
        if (!SendBufferedIo()) {
            return false;
        }

        // If we still have pending data to be processed, this will be aligned I/O
        if (remaining_size) {
            return ReadAlignedSector(sector, remaining_size);
        }
    } else {
        // This is all about handling an I/O request to be routed to the base
        // device, as the I/O is not mapped to any of the COW ops.
        loff_t aligned_offset = requested_offset;
        // Align to the nearest 4k
        aligned_offset += BLOCK_SZ - 1;
        aligned_offset &= ~(BLOCK_SZ - 1);
        // Find the diff of the aligned offset
        size_t diff_size = aligned_offset - requested_offset;
        CHECK(diff_size <= BLOCK_SZ);

        size_t read_size = std::min(remaining_size, diff_size);
        void* buffer = block_server_->GetResponseBuffer(BLOCK_SZ, read_size);
        if (!buffer) {
            SNAP_LOG(ERROR) << "AcquireBuffer failed in ReadUnalignedSector";
            return false;
        }
        if (!ReadDataFromBaseDevice(sector, buffer, read_size)) {
            return false;
        }
        if (!SendBufferedIo()) {
            return false;
        }

        if (remaining_size >= diff_size) {
            remaining_size -= diff_size;
            size_t num_sectors_read = (diff_size >> SECTOR_SHIFT);
            sector += num_sectors_read;
            CHECK(IsBlockAligned(sector << SECTOR_SHIFT));

            // If we still have pending data to be processed, this will be aligned I/O
            return ReadAlignedSector(sector, remaining_size);
        }
    }

    return true;
}
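
// Entry point invoked by the block server for every read request on the
// snapshot device.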
bool ReadWorker::RequestSectors(uint64_t sector, uint64_t len) {
    // Unaligned I/O request
    if (!IsBlockAligned(sector << SECTOR_SHIFT)) {
        return ReadUnalignedSector(sector, len);
    }

    return ReadAlignedSector(sector, len);
}

bool ReadWorker::SendBufferedIo() {
    return block_server_->SendBufferedIo();
}

}  // namespace snapshot
}  // namespace android