// Copyright 2019 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <cinttypes>
#include <cstdlib>
#include <iostream>
#include <map>
#include <memory>
#include <string>

#include "base/at_exit.h"
#include "base/command_line.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/files/scoped_temp_dir.h"
#include "base/functional/callback.h"
#include "base/logging.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/raw_ptr_exclusion.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_refptr.h"
#include "base/numerics/checked_math.h"
#include "base/strings/string_number_conversions.h"
#include "base/test/task_environment.h"
#include "base/test/test_timeouts.h"
#include "base/time/time.h"
#include "net/base/cache_type.h"
#include "net/base/interval.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/base/test_completion_callback.h"
#include "net/disk_cache/backend_cleanup_tracker.h"
#include "net/disk_cache/blockfile/backend_impl.h"
#include "net/disk_cache/disk_cache.h"
#include "net/disk_cache/disk_cache_fuzzer.pb.h"
#include "net/disk_cache/disk_cache_test_util.h"
#include "net/disk_cache/memory/mem_backend_impl.h"
#include "net/disk_cache/simple/simple_backend_impl.h"
#include "net/disk_cache/simple/simple_file_tracker.h"
#include "net/disk_cache/simple/simple_index.h"
#include "testing/libfuzzer/proto/lpm_interface.h"

// To get a good idea of what a test case is doing, run the libFuzzer target
// with LPM_DUMP_NATIVE_INPUT=1 set in the environment. This triggers all the
// prints below and conveys exactly what the test case is doing: use this
// instead of trying to print the protobuf as text.

// For code coverage:
// python ./tools/code_coverage/coverage.py disk_cache_lpm_fuzzer -b
// out/coverage -o out/report -c 'out/coverage/disk_cache_lpm_fuzzer
// -runs=0 -workers=24 corpus_disk_cache_simple' -f net/disk_cache

void IOCallback(std::string io_type, int rv);

namespace {
const uint32_t kMaxSizeKB = 128;  // 128KB maximum.
const uint32_t kMaxSize = kMaxSizeKB * 1024;
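// Note: offsets and sizes in the fuzz commands below are reduced modulo
// kMaxEntrySize, so individual reads and writes may exceed the configured
// maximum cache size.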
const uint32_t kMaxEntrySize = kMaxSize * 2;
const uint32_t kNumStreams = 3;  // All caches seem to have 3 streams. TODO: do
                                 // other specialized caches have this?
const uint64_t kFirstSavedTime =
    5;  // Totally random number chosen by dice roll. ;)
const uint32_t kMaxNumMillisToWait = 2019;
const int kMaxFdsSimpleCache = 10;

// Known colliding key values taken from SimpleCacheCreateCollision unittest.
const std::string kCollidingKey1 =
    "\xfb\x4e\x9c\x1d\x66\x71\xf7\x54\xa3\x11\xa0\x7e\x16\xa5\x68\xf6";
const std::string kCollidingKey2 =
    "\xbc\x60\x64\x92\xbc\xa0\x5c\x15\x17\x93\x29\x2d\xe4\x21\xbd\x03";

#define IOTYPES_APPLY(F) \
  F(WriteData)           \
  F(ReadData)            \
  F(WriteSparseData)     \
  F(ReadSparseData)      \
  F(DoomAllEntries)      \
  F(DoomEntriesSince)    \
  F(DoomEntriesBetween)  \
  F(GetAvailableRange)   \
  F(DoomKey)

enum class IOType {
#define ENUM_ENTRY(IO_TYPE) IO_TYPE,
  IOTYPES_APPLY(ENUM_ENTRY)
#undef ENUM_ENTRY
};

struct InitGlobals {
  InitGlobals() {
    base::CommandLine::Init(0, nullptr);

    print_comms_ = ::getenv("LPM_DUMP_NATIVE_INPUT");

    // TaskEnvironment requires TestTimeouts initialization to watch for
    // problematic long-running tasks.
    TestTimeouts::Initialize();

    // Mark this thread as an IO_THREAD with MOCK_TIME, and ensure that Now()
    // is driven from the same mock clock.
    task_environment_ = std::make_unique<base::test::TaskEnvironment>(
        base::test::TaskEnvironment::MainThreadType::IO,
        base::test::TaskEnvironment::TimeSource::MOCK_TIME);

    // Disable noisy logging as per "libFuzzer in Chrome" documentation:
    // testing/libfuzzer/getting_started.md#Disable-noisy-error-message-logging.
    logging::SetMinLogLevel(logging::LOGGING_FATAL);

    // Re-using this buffer for write operations may technically be against
    // IOBuffer rules but it shouldn't cause any actual problems.
    buffer_ = base::MakeRefCounted<net::IOBufferWithSize>(
        static_cast<size_t>(kMaxEntrySize));
    CacheTestFillBuffer(buffer_->data(), kMaxEntrySize, false);

#define CREATE_IO_CALLBACK(IO_TYPE) \
  io_callbacks_.push_back(base::BindRepeating(&IOCallback, #IO_TYPE));
    IOTYPES_APPLY(CREATE_IO_CALLBACK)
#undef CREATE_IO_CALLBACK
  }

  // This allows us to mock time for all threads.
  std::unique_ptr<base::test::TaskEnvironment> task_environment_;

  // Used as a pre-filled buffer for all writes.
  scoped_refptr<net::IOBuffer> buffer_;

  // Should we print debugging info?
  bool print_comms_;

  // List of IO callbacks. They do nothing (except maybe print) but are used by
  // all async entry operations.
  std::vector<base::RepeatingCallback<void(int)>> io_callbacks_;
};

InitGlobals* init_globals = new InitGlobals();
}  // namespace

class DiskCacheLPMFuzzer {
 public:
  DiskCacheLPMFuzzer() {
    CHECK(temp_dir_.CreateUniqueTempDir());
    cache_path_ = temp_dir_.GetPath();
  }

  ~DiskCacheLPMFuzzer();

  void RunCommands(const disk_cache_fuzzer::FuzzCommands& commands);

 private:
  struct EntryInfo {
    EntryInfo() = default;

    EntryInfo(const EntryInfo&) = delete;
    EntryInfo& operator=(const EntryInfo&) = delete;

    // This field is not a raw_ptr<> because it was filtered by the rewriter
    // for: #addr-of, #constexpr-ctor-field-initializer
    RAW_PTR_EXCLUSION disk_cache::Entry* entry_ptr = nullptr;
    std::unique_ptr<TestEntryResultCompletionCallback> tcb;
  };
  void RunTaskForTest(base::OnceClosure closure);

  // Waits for an entry to be ready. Should only be called if there is a
  // pending callback for this entry, i.e. ei->tcb != nullptr.
  // Also takes the rv that the cache entry creation functions return, and does
  // not wait if rv.net_error != net::ERR_IO_PENDING (in which case the
  // callback would never have been called).
  disk_cache::EntryResult WaitOnEntry(
      EntryInfo* ei,
      disk_cache::EntryResult result =
          disk_cache::EntryResult::MakeError(net::ERR_IO_PENDING));

  // Used as a callback for entry-opening backend calls. Will record the entry
  // in the map as usable and will release any entry-specific calls waiting for
  // the entry to be ready.
  void OpenCacheEntryCallback(uint64_t entry_id,
                              bool async,
                              bool set_is_sparse,
                              disk_cache::EntryResult result);

  // Waits for the entry to finish opening, in the async case. Then, if the
  // entry was opened successfully (the callback returned net::OK, or it was
  // already open), checks whether entry_ptr == nullptr. If so, the entry has
  // been closed.
  bool IsValidEntry(EntryInfo* ei);

  // Closes any non-nullptr entries in open_cache_entries_.
  void CloseAllRemainingEntries();

  void HandleSetMaxSize(const disk_cache_fuzzer::SetMaxSize&);
  void CreateBackend(
      disk_cache_fuzzer::FuzzCommands::CacheBackend cache_backend,
      uint32_t mask,
      net::CacheType type,
      bool simple_cache_wait_for_index);

  // Places to keep our cache files.
  base::FilePath cache_path_;
  base::ScopedTempDir temp_dir_;

  // Pointers to our backend. Only one of block_impl_, simple_cache_impl_, and
  // mem_cache_ is active at a time.
  std::unique_ptr<disk_cache::Backend> cache_;
  raw_ptr<disk_cache::BackendImpl> block_impl_ = nullptr;
  std::unique_ptr<disk_cache::SimpleFileTracker> simple_file_tracker_;
  raw_ptr<disk_cache::SimpleBackendImpl> simple_cache_impl_ = nullptr;
  raw_ptr<disk_cache::MemBackendImpl> mem_cache_ = nullptr;

  // Maximum size of the cache that we have currently set.
  uint32_t max_size_ = kMaxSize;

  // This "consistent hash table" keeps track of the keys we've added to the
  // backend so far. This should always be indexed by a "key_id" from a
  // protobuf.
  std::map<uint64_t, std::string> created_cache_entries_;
  // This "consistent hash table" keeps track of all opened entries we have
  // from the backend, and also contains some nullptrs where entries were
  // already closed. This should always be indexed by an "entry_id" from a
  // protobuf. When destructed, we close all entries that are still open in
  // order to avoid memory leaks.
  std::map<uint64_t, EntryInfo> open_cache_entries_;
  // This "consistent hash table" keeps track of all times we have saved, so
  // that we can call backend methods like DoomEntriesSince or
  // DoomEntriesBetween with sane timestamps. This should always be indexed by
  // a "time_id" from a protobuf.
  std::map<uint64_t, base::Time> saved_times_;
  // This "consistent hash table" keeps track of all the iterators we have open
  // from the backend. This should always be indexed by an "it_id" from a
  // protobuf.
  std::map<uint64_t, std::unique_ptr<disk_cache::Backend::Iterator>>
      open_iterators_;

  // This map keeps track of the sparsity of each entry, using their pointers.
  // TODO(mpdenton) remove if CreateEntry("Key0"); WriteData("Key0", index = 2,
  // ...); WriteSparseData("Key0", ...); is supposed to be valid.
  // Then we can just use CouldBeSparse before the WriteData.
  std::map<disk_cache::Entry*, bool> sparse_entry_tracker_;
};

#define MAYBE_PRINT               \
  if (init_globals->print_comms_) \
  std::cout

inline base::RepeatingCallback<void(int)> GetIOCallback(IOType iot) {
  return init_globals->io_callbacks_[static_cast<int>(iot)];
}

std::string ToKey(uint64_t key_num) {
  // Use one of the two colliding key values in 1% of executions.
  if (key_num % 100 == 99)
    return kCollidingKey1;
  if (key_num % 100 == 98)
    return kCollidingKey2;

  // Otherwise, use a value based on the key id and fuzzy padding.
  std::string padding(key_num & 0xFFFF, 'A');
  return "Key" + padding + base::NumberToString(key_num);
}

net::RequestPriority GetRequestPriority(
    disk_cache_fuzzer::RequestPriority lpm_pri) {
  CHECK(net::MINIMUM_PRIORITY <= static_cast<int>(lpm_pri) &&
        static_cast<int>(lpm_pri) <= net::MAXIMUM_PRIORITY);
  return static_cast<net::RequestPriority>(lpm_pri);
}

net::CacheType GetCacheTypeAndPrint(
    disk_cache_fuzzer::FuzzCommands::CacheType type,
    disk_cache_fuzzer::FuzzCommands::CacheBackend backend) {
  switch (type) {
    case disk_cache_fuzzer::FuzzCommands::APP_CACHE:
      MAYBE_PRINT << "Cache type = APP_CACHE." << std::endl;
      return net::CacheType::APP_CACHE;
    case disk_cache_fuzzer::FuzzCommands::REMOVED_MEDIA_CACHE:
      // Media cache no longer in use; handle as HTTP_CACHE.
      MAYBE_PRINT << "Cache type = REMOVED_MEDIA_CACHE." << std::endl;
      return net::CacheType::DISK_CACHE;
    case disk_cache_fuzzer::FuzzCommands::SHADER_CACHE:
      MAYBE_PRINT << "Cache type = SHADER_CACHE." << std::endl;
      return net::CacheType::SHADER_CACHE;
    case disk_cache_fuzzer::FuzzCommands::PNACL_CACHE:
      // Simple cache won't handle PNACL_CACHE.
      if (backend == disk_cache_fuzzer::FuzzCommands::SIMPLE) {
        MAYBE_PRINT << "Cache type = DISK_CACHE." << std::endl;
        return net::CacheType::DISK_CACHE;
      }
      MAYBE_PRINT << "Cache type = PNACL_CACHE." << std::endl;
      return net::CacheType::PNACL_CACHE;
    case disk_cache_fuzzer::FuzzCommands::GENERATED_BYTE_CODE_CACHE:
      MAYBE_PRINT << "Cache type = GENERATED_BYTE_CODE_CACHE." << std::endl;
      return net::CacheType::GENERATED_BYTE_CODE_CACHE;
    case disk_cache_fuzzer::FuzzCommands::GENERATED_NATIVE_CODE_CACHE:
      MAYBE_PRINT << "Cache type = GENERATED_NATIVE_CODE_CACHE." << std::endl;
      return net::CacheType::GENERATED_NATIVE_CODE_CACHE;
    case disk_cache_fuzzer::FuzzCommands::DISK_CACHE:
      MAYBE_PRINT << "Cache type = DISK_CACHE." << std::endl;
      return net::CacheType::DISK_CACHE;
  }
}

void IOCallback(std::string io_type, int rv) {
  MAYBE_PRINT << " [Async IO (" << io_type << ") = " << rv << "]" << std::endl;
}


/*
 * Consistent hashing inspired map for fuzzer state.
 * If we stored open cache entries in a hash table mapping cache_entry_id ->
 * disk_cache::Entry*, then it would be highly unlikely that any subsequent
 * "CloseEntry" or "WriteData" etc. command would come up with an ID that would
 * correspond to a valid entry in the hash table. The optimal solution is for
 * libfuzzer to generate CloseEntry commands with an ID that matches the ID of
 * a previous OpenEntry command. But libfuzzer is stateless and should stay
 * that way.
 *
 * On the other hand, if we stored entries in a vector, and on a CloseEntry
 * command we took the entry at CloseEntry.id % (size of entries vector), we
 * would always generate correct CloseEntries. This is good, but all
 * dumb/general minimization techniques stop working, because deleting a single
 * OpenEntry command changes the indexes of every entry in the vector from then
 * on.
 *
 * So, we use something that's more stable for minimization: consistent
 * hashing. Basically, when we see a CloseEntry.id, we take the entry in the
 * table that has the next highest id (wrapping when there is no higher entry).
 *
 * This makes us resilient to deleting irrelevant OpenEntry commands. But, if
 * we delete from the table on CloseEntry commands, we still screw up all the
 * indexes during minimization. We'll get around this by not deleting entries
 * after CloseEntry commands, but that will result in a slightly less efficient
 * fuzzer, as if there are many closed entries in the table, many of the *Entry
 * commands will be useless. It seems like a decent balance between generating
 * useful fuzz commands and effective minimization.
 */
template <typename T>
typename std::map<uint64_t, T>::iterator GetNextValue(
    typename std::map<uint64_t, T>* entries,
    uint64_t val) {
  auto iter = entries->lower_bound(val);
  if (iter != entries->end())
    return iter;
  // Wrap to 0.
  iter = entries->lower_bound(0);
  if (iter != entries->end())
    return iter;

  return entries->end();
}

void DiskCacheLPMFuzzer::RunTaskForTest(base::OnceClosure closure) {
  if (!block_impl_) {
    std::move(closure).Run();
    return;
  }

  net::TestCompletionCallback cb;
  int rv = block_impl_->RunTaskForTest(std::move(closure), cb.callback());
  CHECK_EQ(cb.GetResult(rv), net::OK);
}

// Resets the cb in the map so that WriteData and other calls that work on an
// entry don't wait for its result.
void DiskCacheLPMFuzzer::OpenCacheEntryCallback(
    uint64_t entry_id,
    bool async,
    bool set_is_sparse,
    disk_cache::EntryResult result) {
  // TODO(mpdenton) if this fails should we delete the entry entirely?
  // Would need to mark it for deletion and delete it later, as
  // IsValidEntry might be waiting for it.
  EntryInfo* ei = &open_cache_entries_[entry_id];

  if (async) {
    int rv = result.net_error();
    ei->entry_ptr = result.ReleaseEntry();
    // We are responsible for setting things up.
    if (set_is_sparse && ei->entry_ptr) {
      sparse_entry_tracker_[ei->entry_ptr] = true;
    }
    if (ei->entry_ptr) {
      MAYBE_PRINT << " [Async opening of cache entry for \""
                  << ei->entry_ptr->GetKey() << "\" callback (rv = " << rv
                  << ")]" << std::endl;
    }
    // Unblock any subsequent ops waiting for this --- they don't care about
    // the actual return value, but use something distinctive for debugging.
    ei->tcb->callback().Run(
        disk_cache::EntryResult::MakeError(net::ERR_FILE_VIRUS_INFECTED));
  } else {
    // The operation code will pull the result out of the completion callback,
    // so hand it to it.
    ei->tcb->callback().Run(std::move(result));
  }
}

disk_cache::EntryResult DiskCacheLPMFuzzer::WaitOnEntry(
    EntryInfo* ei,
    disk_cache::EntryResult result) {
  CHECK(ei->tcb);
  result = ei->tcb->GetResult(std::move(result));

  // Reset the callback so nobody accidentally waits on a callback that never
  // comes.
  ei->tcb.reset();
  return result;
}

bool DiskCacheLPMFuzzer::IsValidEntry(EntryInfo* ei) {
  if (ei->tcb) {
    // If we have a callback, we are the first to access this async-created
    // entry. Wait for it, and then delete it so nobody waits on it again.
    WaitOnEntry(ei);
  }
  // entry_ptr will be nullptr if the entry has been closed.
  return ei->entry_ptr != nullptr;
}

/*
 * Async implementation:
 1. RunUntilIdle at the top of the loop to handle any callbacks that have been
 posted to us from the backend thread.
 2. Only the entry creation functions have important callbacks. The good thing
 is that backend destruction will cancel these operations. The entry creation
 functions simply need to keep the entry_ptr* alive until the callback is
 posted, and then need to make sure the entry_ptr is added to the map in order
 to Close it in the destructor.
 As for iterators, it's unclear whether closing an iterator will cancel
 callbacks.

 Problem: WriteData (and similar) calls will fail on the entry_id until the
 callback happens. So, I should probably delay these calls, or test cases will
 be very unreliable. These are the options:
 1. Queue up WriteData (etc.) calls in some map, such that when the OpenEntry
 callback runs, the WriteData calls will all run.
 2. Just sit there and wait for the entry to be ready.

 #2 is probably best as it doesn't prevent any interesting cases and is much
 simpler.
 */

void DiskCacheLPMFuzzer::RunCommands(
    const disk_cache_fuzzer::FuzzCommands& commands) {
  // Skip too long command sequences, they are counterproductive for fuzzing.
  // The number was chosen empirically using the existing fuzzing corpus.
  if (commands.fuzz_commands_size() > 129)
    return;

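  // Note: the mask below is only used for the blockfile backend (see
  // CreateBackend()); presumably it seeds the index table size, so 0x1 and
  // 0xf exercise two different configurations.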
  uint32_t mask =
      commands.has_set_mask() ? (commands.set_mask() ? 0x1 : 0xf) : 0;
  net::CacheType type =
      GetCacheTypeAndPrint(commands.cache_type(), commands.cache_backend());
  CreateBackend(commands.cache_backend(), mask, type,
                commands.simple_cache_wait_for_index());
  MAYBE_PRINT << "CreateBackend()" << std::endl;

  if (commands.has_set_max_size()) {
    HandleSetMaxSize(commands.set_max_size());
  }

  {
    base::Time curr_time = base::Time::Now();
    saved_times_[kFirstSavedTime] = curr_time;
    // MAYBE_PRINT << "Saved initial time " << curr_time << std::endl;
  }

  for (const disk_cache_fuzzer::FuzzCommand& command :
       commands.fuzz_commands()) {
    // Handle any callbacks that other threads may have posted to us in the
    // meantime, so any successful async OpenEntry's (etc.) add their
    // entry_ptr's to the map.
    init_globals->task_environment_->RunUntilIdle();

    switch (command.fuzz_command_oneof_case()) {
      case disk_cache_fuzzer::FuzzCommand::kSetMaxSize: {
        HandleSetMaxSize(command.set_max_size());
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kCreateEntry: {
        if (!cache_)
          continue;

        const disk_cache_fuzzer::CreateEntry& ce = command.create_entry();
        uint64_t key_id = ce.key_id();
        uint64_t entry_id = ce.entry_id();
        net::RequestPriority pri = GetRequestPriority(ce.pri());
        bool async = ce.async();
        bool is_sparse = ce.is_sparse();

        if (open_cache_entries_.find(entry_id) != open_cache_entries_.end())
          continue;  // Don't overwrite a currently open cache entry.

        std::string key_str = ToKey(key_id);
        created_cache_entries_[key_id] = key_str;

        EntryInfo* entry_info = &open_cache_entries_[entry_id];

        entry_info->tcb = std::make_unique<TestEntryResultCompletionCallback>();
        disk_cache::EntryResultCallback cb =
            base::BindOnce(&DiskCacheLPMFuzzer::OpenCacheEntryCallback,
                           base::Unretained(this), entry_id, async, is_sparse);

        MAYBE_PRINT << "CreateEntry(\"" << key_str
                    << "\", set_is_sparse = " << is_sparse
                    << ") = " << std::flush;
        disk_cache::EntryResult result =
            cache_->CreateEntry(key_str, pri, std::move(cb));
        if (!async || result.net_error() != net::ERR_IO_PENDING) {
          result = WaitOnEntry(entry_info, std::move(result));
          int rv = result.net_error();

          // Ensure we mark sparsity and save the entry, even if the callback
          // never ran.
          if (rv == net::OK) {
            entry_info->entry_ptr = result.ReleaseEntry();
            sparse_entry_tracker_[entry_info->entry_ptr] = is_sparse;
          }
          MAYBE_PRINT << rv << std::endl;
        } else {
          MAYBE_PRINT << "net::ERR_IO_PENDING (async)" << std::endl;
        }
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kOpenEntry: {
        if (!cache_)
          continue;

        const disk_cache_fuzzer::OpenEntry& oe = command.open_entry();
        uint64_t key_id = oe.key_id();
        uint64_t entry_id = oe.entry_id();
        net::RequestPriority pri = GetRequestPriority(oe.pri());
        bool async = oe.async();

        if (created_cache_entries_.empty())
          continue;

        if (open_cache_entries_.find(entry_id) != open_cache_entries_.end())
          continue;  // Don't overwrite a currently open cache entry.

        EntryInfo* entry_info = &open_cache_entries_[entry_id];

        entry_info->tcb = std::make_unique<TestEntryResultCompletionCallback>();
        disk_cache::EntryResultCallback cb =
            base::BindOnce(&DiskCacheLPMFuzzer::OpenCacheEntryCallback,
                           base::Unretained(this), entry_id, async, false);

        auto key_it = GetNextValue(&created_cache_entries_, key_id);
        MAYBE_PRINT << "OpenEntry(\"" << key_it->second
                    << "\") = " << std::flush;
        disk_cache::EntryResult result =
            cache_->OpenEntry(key_it->second, pri, std::move(cb));
        if (!async || result.net_error() != net::ERR_IO_PENDING) {
          result = WaitOnEntry(entry_info, std::move(result));
          int rv = result.net_error();
          if (rv == net::OK)
            entry_info->entry_ptr = result.ReleaseEntry();
          MAYBE_PRINT << rv << std::endl;
        } else {
          MAYBE_PRINT << "net::ERR_IO_PENDING (async)" << std::endl;
        }
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kOpenOrCreateEntry: {
        if (!cache_)
          continue;

        const disk_cache_fuzzer::OpenOrCreateEntry& ooce =
            command.open_or_create_entry();
        uint64_t key_id = ooce.key_id();
        uint64_t entry_id = ooce.entry_id();
        net::RequestPriority pri = GetRequestPriority(ooce.pri());
        bool async = ooce.async();
        bool is_sparse = ooce.is_sparse();

        if (open_cache_entries_.find(entry_id) != open_cache_entries_.end())
          continue;  // Don't overwrite a currently open cache entry.

        std::string key_str;
        // If our proto tells us to create a new entry, create a new entry,
        // just with OpenOrCreateEntry.
        if (ooce.create_new()) {
          // Use a possibly new key.
          key_str = ToKey(key_id);
          created_cache_entries_[key_id] = key_str;
        } else {
          if (created_cache_entries_.empty())
            continue;
          auto key_it = GetNextValue(&created_cache_entries_, key_id);
          key_str = key_it->second;
        }

        // Setup for callbacks.

        EntryInfo* entry_info = &open_cache_entries_[entry_id];

        entry_info->tcb = std::make_unique<TestEntryResultCompletionCallback>();
        disk_cache::EntryResultCallback cb =
            base::BindOnce(&DiskCacheLPMFuzzer::OpenCacheEntryCallback,
                           base::Unretained(this), entry_id, async, is_sparse);

        // Will only be set as sparse if it is created and not opened.
        MAYBE_PRINT << "OpenOrCreateEntry(\"" << key_str
                    << "\", set_is_sparse = " << is_sparse
                    << ") = " << std::flush;
        disk_cache::EntryResult result =
            cache_->OpenOrCreateEntry(key_str, pri, std::move(cb));
        if (!async || result.net_error() != net::ERR_IO_PENDING) {
          result = WaitOnEntry(entry_info, std::move(result));
          int rv = result.net_error();
          bool opened = result.opened();
          entry_info->entry_ptr = result.ReleaseEntry();
          // Ensure we mark sparsity, even if the callback never ran.
          if (rv == net::OK && !opened)
            sparse_entry_tracker_[entry_info->entry_ptr] = is_sparse;
          MAYBE_PRINT << rv << ", opened = " << opened << std::endl;
        } else {
          MAYBE_PRINT << "net::ERR_IO_PENDING (async)" << std::endl;
        }
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kCloseEntry: {
        if (open_cache_entries_.empty())
          continue;

        auto entry_it = GetNextValue(&open_cache_entries_,
                                     command.close_entry().entry_id());
        if (!IsValidEntry(&entry_it->second))
          continue;

        MAYBE_PRINT << "CloseEntry(\"" << entry_it->second.entry_ptr->GetKey()
                    << "\")" << std::endl;
        entry_it->second.entry_ptr->Close();

        // Set the entry_ptr to nullptr to ensure no one uses it anymore.
        entry_it->second.entry_ptr = nullptr;
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kDoomEntry: {
        if (open_cache_entries_.empty())
          continue;

        auto entry_it =
            GetNextValue(&open_cache_entries_, command.doom_entry().entry_id());
        if (!IsValidEntry(&entry_it->second))
          continue;

        MAYBE_PRINT << "DoomEntry(\"" << entry_it->second.entry_ptr->GetKey()
                    << "\")" << std::endl;
        entry_it->second.entry_ptr->Doom();
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kWriteData: {
        if (open_cache_entries_.empty())
          continue;

        const disk_cache_fuzzer::WriteData& wd = command.write_data();
        auto entry_it = GetNextValue(&open_cache_entries_, wd.entry_id());
        if (!IsValidEntry(&entry_it->second))
          continue;

        int index = 0;  // if it's sparse, these non-sparse aware streams must
                        // read from stream 0 according to the spec.
                        // Implementations might have weaker constraints.
        if (!sparse_entry_tracker_[entry_it->second.entry_ptr])
          index = wd.index() % kNumStreams;
        uint32_t offset = wd.offset() % kMaxEntrySize;
        size_t size = wd.size() % kMaxEntrySize;
        bool async = wd.async();

        net::TestCompletionCallback tcb;
        net::CompletionOnceCallback cb =
            !async ? tcb.callback() : GetIOCallback(IOType::WriteData);

        MAYBE_PRINT << "WriteData(\"" << entry_it->second.entry_ptr->GetKey()
                    << "\", index = " << index << ", offset = " << offset
                    << ", size = " << size << ", truncate = " << wd.truncate()
                    << ")" << std::flush;
        int rv = entry_it->second.entry_ptr->WriteData(
            index, offset, init_globals->buffer_.get(), size, std::move(cb),
            wd.truncate());
        if (!async)
          rv = tcb.GetResult(rv);
        MAYBE_PRINT << " = " << rv << std::endl;
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kReadData: {
        if (open_cache_entries_.empty())
          continue;

        const disk_cache_fuzzer::ReadData& wd = command.read_data();
        auto entry_it = GetNextValue(&open_cache_entries_, wd.entry_id());
        if (!IsValidEntry(&entry_it->second))
          continue;

        int index = 0;  // if it's sparse, these non-sparse aware streams must
                        // read from stream 0 according to the spec.
                        // Implementations might have weaker constraints.
        if (!sparse_entry_tracker_[entry_it->second.entry_ptr])
          index = wd.index() % kNumStreams;
        uint32_t offset = wd.offset() % kMaxEntrySize;
        size_t size = wd.size() % kMaxEntrySize;
        bool async = wd.async();
        auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(size);

        net::TestCompletionCallback tcb;
        net::CompletionOnceCallback cb =
            !async ? tcb.callback() : GetIOCallback(IOType::ReadData);

        MAYBE_PRINT << "ReadData(\"" << entry_it->second.entry_ptr->GetKey()
                    << "\", index = " << index << ", offset = " << offset
                    << ", size = " << size << ")" << std::flush;
        int rv = entry_it->second.entry_ptr->ReadData(
            index, offset, buffer.get(), size, std::move(cb));
        if (!async)
          rv = tcb.GetResult(rv);
        MAYBE_PRINT << " = " << rv << std::endl;
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kWriteSparseData: {
        if (open_cache_entries_.empty())
          continue;

        const disk_cache_fuzzer::WriteSparseData& wsd =
            command.write_sparse_data();
        auto entry_it = GetNextValue(&open_cache_entries_, wsd.entry_id());
        if (!IsValidEntry(&entry_it->second) ||
            !sparse_entry_tracker_[entry_it->second.entry_ptr])
          continue;

        uint64_t offset = wsd.offset();
        if (wsd.cap_offset())
          offset %= kMaxEntrySize;
        size_t size = wsd.size() % kMaxEntrySize;
        bool async = wsd.async();

        net::TestCompletionCallback tcb;
        net::CompletionOnceCallback cb =
            !async ? tcb.callback() : GetIOCallback(IOType::WriteSparseData);
        MAYBE_PRINT << "WriteSparseData(\""
                    << entry_it->second.entry_ptr->GetKey()
                    << "\", offset = " << offset << ", size = " << size << ")"
                    << std::flush;
        int rv = entry_it->second.entry_ptr->WriteSparseData(
            offset, init_globals->buffer_.get(), size, std::move(cb));
        if (!async)
          rv = tcb.GetResult(rv);
        MAYBE_PRINT << " = " << rv << std::endl;
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kReadSparseData: {
        if (open_cache_entries_.empty())
          continue;

        const disk_cache_fuzzer::ReadSparseData& rsd =
            command.read_sparse_data();
        auto entry_it = GetNextValue(&open_cache_entries_, rsd.entry_id());
        if (!IsValidEntry(&entry_it->second) ||
            !sparse_entry_tracker_[entry_it->second.entry_ptr])
          continue;

        uint64_t offset = rsd.offset();
        if (rsd.cap_offset())
          offset %= kMaxEntrySize;
        size_t size = rsd.size() % kMaxEntrySize;
        bool async = rsd.async();
        auto buffer = base::MakeRefCounted<net::IOBufferWithSize>(size);

        net::TestCompletionCallback tcb;
        net::CompletionOnceCallback cb =
            !async ? tcb.callback() : GetIOCallback(IOType::ReadSparseData);

        MAYBE_PRINT << "ReadSparseData(\""
                    << entry_it->second.entry_ptr->GetKey()
                    << "\", offset = " << offset << ", size = " << size << ")"
                    << std::flush;
        int rv = entry_it->second.entry_ptr->ReadSparseData(
            offset, buffer.get(), size, std::move(cb));
        if (!async)
          rv = tcb.GetResult(rv);
        MAYBE_PRINT << " = " << rv << std::endl;
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kDoomAllEntries: {
        if (!cache_)
          continue;
        bool async = command.doom_all_entries().async();

        net::TestCompletionCallback tcb;
        net::CompletionOnceCallback cb =
            !async ? tcb.callback() : GetIOCallback(IOType::DoomAllEntries);
        MAYBE_PRINT << "DoomAllEntries()" << std::flush;
        int rv = cache_->DoomAllEntries(std::move(cb));
        if (!async)
          rv = tcb.GetResult(rv);
        MAYBE_PRINT << " = " << rv << std::endl;
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kFlushQueueForTest: {
        // Blockfile-cache specific method.
        if (!block_impl_)
          return;

        net::TestCompletionCallback cb;
        MAYBE_PRINT << "FlushQueueForTest()" << std::endl;
        int rv = block_impl_->FlushQueueForTest(cb.callback());
        CHECK_EQ(cb.GetResult(rv), net::OK);
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kCreateIterator: {
        if (!cache_)
          continue;
        uint64_t it_id = command.create_iterator().it_id();
        MAYBE_PRINT << "CreateIterator(), id = " << it_id << std::endl;
        open_iterators_[it_id] = cache_->CreateIterator();
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kIteratorOpenNextEntry: {
        const disk_cache_fuzzer::IteratorOpenNextEntry& ione =
            command.iterator_open_next_entry();

        uint64_t it_id = ione.it_id();
        uint64_t entry_id = ione.entry_id();
        bool async = ione.async();

        if (open_iterators_.empty())
          continue;

        if (open_cache_entries_.find(entry_id) != open_cache_entries_.end())
          continue;  // Don't overwrite a currently open cache entry.

        auto iterator_it = GetNextValue(&open_iterators_, it_id);

        EntryInfo* entry_info = &open_cache_entries_[entry_id];

        entry_info->tcb = std::make_unique<TestEntryResultCompletionCallback>();
        disk_cache::EntryResultCallback cb =
            base::BindOnce(&DiskCacheLPMFuzzer::OpenCacheEntryCallback,
                           base::Unretained(this), entry_id, async, false);

        MAYBE_PRINT << "Iterator(" << ione.it_id()
                    << ").OpenNextEntry() = " << std::flush;
        disk_cache::EntryResult result =
            iterator_it->second->OpenNextEntry(std::move(cb));
        if (!async || result.net_error() != net::ERR_IO_PENDING) {
          result = WaitOnEntry(entry_info, std::move(result));
          int rv = result.net_error();
          entry_info->entry_ptr = result.ReleaseEntry();
          // Print return value, and key if applicable.
          if (!entry_info->entry_ptr) {
            MAYBE_PRINT << rv << std::endl;
          } else {
            MAYBE_PRINT << rv << ", key = " << entry_info->entry_ptr->GetKey()
                        << std::endl;
          }
        } else {
          MAYBE_PRINT << "net::ERR_IO_PENDING (async)" << std::endl;
        }
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kFastForwardBy: {
        base::TimeDelta to_wait =
            base::Milliseconds(command.fast_forward_by().capped_num_millis() %
                               kMaxNumMillisToWait);
        MAYBE_PRINT << "FastForwardBy(" << to_wait << ")" << std::endl;
        init_globals->task_environment_->FastForwardBy(to_wait);

        base::Time curr_time = base::Time::Now();
        saved_times_[command.fast_forward_by().time_id()] = curr_time;
        // MAYBE_PRINT << "Saved time " << curr_time << std::endl;
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kDoomEntriesSince: {
        if (!cache_)
          continue;
        // App cache does not keep track of LRU timestamps so this method
        // cannot be used.
        if (type == net::APP_CACHE)
          continue;
        if (saved_times_.empty())
          continue;

        const disk_cache_fuzzer::DoomEntriesSince& des =
            command.doom_entries_since();
        auto time_it = GetNextValue(&saved_times_, des.time_id());
        bool async = des.async();

        net::TestCompletionCallback tcb;
        net::CompletionOnceCallback cb =
            !async ? tcb.callback() : GetIOCallback(IOType::DoomEntriesSince);

        MAYBE_PRINT << "DoomEntriesSince(" << time_it->second << ")"
                    << std::flush;
        int rv = cache_->DoomEntriesSince(time_it->second, std::move(cb));
        if (!async)
          rv = tcb.GetResult(rv);
        MAYBE_PRINT << " = " << rv << std::endl;
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kDoomEntriesBetween: {
        if (!cache_)
          continue;
        // App cache does not keep track of LRU timestamps so this method
        // cannot be used.
        if (type == net::APP_CACHE)
          continue;
        if (saved_times_.empty())
          continue;

        const disk_cache_fuzzer::DoomEntriesBetween& deb =
            command.doom_entries_between();
        auto time_it1 = GetNextValue(&saved_times_, deb.time_id1());
        auto time_it2 = GetNextValue(&saved_times_, deb.time_id2());
        base::Time time1 = time_it1->second;
        base::Time time2 = time_it2->second;
        if (time1 > time2)
          std::swap(time1, time2);
        bool async = deb.async();

        net::TestCompletionCallback tcb;
        net::CompletionOnceCallback cb =
            !async ? tcb.callback() : GetIOCallback(IOType::DoomEntriesBetween);

        MAYBE_PRINT << "DoomEntriesBetween(" << time1 << ", " << time2 << ")"
                    << std::flush;
        int rv = cache_->DoomEntriesBetween(time1, time2, std::move(cb));
        if (!async)
          rv = tcb.GetResult(rv);
        MAYBE_PRINT << " = " << rv << std::endl;
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kOnExternalCacheHit: {
        if (!cache_)
          continue;
        if (created_cache_entries_.empty())
          continue;

        uint64_t key_id = command.on_external_cache_hit().key_id();

        auto key_it = GetNextValue(&created_cache_entries_, key_id);
        MAYBE_PRINT << "OnExternalCacheHit(\"" << key_it->second << "\")"
                    << std::endl;
        cache_->OnExternalCacheHit(key_it->second);
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kTrimForTest: {
        // Blockfile-cache specific method.
        if (!block_impl_ || type != net::DISK_CACHE)
          return;

        MAYBE_PRINT << "TrimForTest()" << std::endl;

        RunTaskForTest(base::BindOnce(&disk_cache::BackendImpl::TrimForTest,
                                      base::Unretained(block_impl_),
                                      command.trim_for_test().empty()));
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kTrimDeletedListForTest: {
        // Blockfile-cache specific method.
        if (!block_impl_ || type != net::DISK_CACHE)
          return;

        MAYBE_PRINT << "TrimDeletedListForTest()" << std::endl;

        RunTaskForTest(
            base::BindOnce(&disk_cache::BackendImpl::TrimDeletedListForTest,
                           base::Unretained(block_impl_),
                           command.trim_deleted_list_for_test().empty()));
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kGetAvailableRange: {
        if (open_cache_entries_.empty())
          continue;

        const disk_cache_fuzzer::GetAvailableRange& gar =
            command.get_available_range();
        auto entry_it = GetNextValue(&open_cache_entries_, gar.entry_id());
        if (!IsValidEntry(&entry_it->second) ||
            !sparse_entry_tracker_[entry_it->second.entry_ptr])
          continue;

        disk_cache::Entry* entry = entry_it->second.entry_ptr;
        uint32_t offset = gar.offset() % kMaxEntrySize;
        uint32_t len = gar.len() % kMaxEntrySize;
        bool async = gar.async();

        auto result_checker = base::BindRepeating(
            [](net::CompletionOnceCallback callback, uint32_t offset,
               uint32_t len, const disk_cache::RangeResult& result) {
              std::move(callback).Run(result.net_error);

              if (result.net_error <= 0)
                return;

              // Make sure that the result is contained in what was
              // requested. It doesn't have to be the same even if there was
              // an exact corresponding write, since representation of ranges
              // may be imprecise, and here we don't know that there was.

              // No overflow thanks to % kMaxEntrySize.
              net::Interval<uint32_t> requested(offset, offset + len);

              uint32_t range_start, range_end;
              base::CheckedNumeric<uint64_t> range_start64(result.start);
              CHECK(range_start64.AssignIfValid(&range_start));
              base::CheckedNumeric<uint64_t> range_end64 =
                  range_start + result.available_len;
              CHECK(range_end64.AssignIfValid(&range_end));
              net::Interval<uint32_t> gotten(range_start, range_end);

              CHECK(requested.Contains(gotten));
            },
            GetIOCallback(IOType::GetAvailableRange), offset, len);

        TestRangeResultCompletionCallback tcb;
        disk_cache::RangeResultCallback cb =
            !async ? tcb.callback() : result_checker;

        MAYBE_PRINT << "GetAvailableRange(\"" << entry->GetKey() << "\", "
                    << offset << ", " << len << ")" << std::flush;
        disk_cache::RangeResult result =
            entry->GetAvailableRange(offset, len, std::move(cb));

        if (result.net_error != net::ERR_IO_PENDING) {
          // Run the checker callback ourselves.
          result_checker.Run(result);
        } else if (!async) {
          // In this case the callback will be run by the backend, so we don't
          // need to do it manually.
          result = tcb.GetResult(result);
        }

        // Finally, take care of printing.
        if (async && result.net_error == net::ERR_IO_PENDING) {
          MAYBE_PRINT << " = net::ERR_IO_PENDING (async)" << std::endl;
        } else {
          MAYBE_PRINT << " = " << result.net_error
                      << ", start = " << result.start
                      << ", available_len = " << result.available_len;
          if (result.net_error < 0) {
            MAYBE_PRINT << ", error to string: "
                        << net::ErrorToShortString(result.net_error)
                        << std::endl;
          } else {
            MAYBE_PRINT << std::endl;
          }
        }
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kCancelSparseIo: {
        if (open_cache_entries_.empty())
          continue;

        const disk_cache_fuzzer::CancelSparseIO& csio =
            command.cancel_sparse_io();
        auto entry_it = GetNextValue(&open_cache_entries_, csio.entry_id());
        if (!IsValidEntry(&entry_it->second))
          continue;

        MAYBE_PRINT << "CancelSparseIO(\""
                    << entry_it->second.entry_ptr->GetKey() << "\")"
                    << std::endl;
        entry_it->second.entry_ptr->CancelSparseIO();
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kDoomKey: {
        if (!cache_)
          continue;
        if (created_cache_entries_.empty())
          continue;

        const disk_cache_fuzzer::DoomKey& dk = command.doom_key();
        uint64_t key_id = dk.key_id();
        net::RequestPriority pri = GetRequestPriority(dk.pri());
        bool async = dk.async();

        auto key_it = GetNextValue(&created_cache_entries_, key_id);

        net::TestCompletionCallback tcb;
        net::CompletionOnceCallback cb =
            !async ? tcb.callback() : GetIOCallback(IOType::DoomKey);

        MAYBE_PRINT << "DoomKey(\"" << key_it->second << "\")" << std::flush;
        int rv = cache_->DoomEntry(key_it->second, pri, std::move(cb));
        if (!async)
          rv = tcb.GetResult(rv);
        MAYBE_PRINT << " = " << rv << std::endl;

        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kDestructBackend: {
        // Block_impl_ will leak if we destruct the backend without closing
        // previous entries.
        // TODO(mpdenton) consider creating a separate fuzz target that allows
        // closing the |block_impl_| and ignore leaks.
        if (block_impl_ || !cache_)
          continue;

        const disk_cache_fuzzer::DestructBackend& db =
            command.destruct_backend();
        // Only sometimes actually destruct the backend.
        if (!db.actually_destruct1() || !db.actually_destruct2())
          continue;

        MAYBE_PRINT << "~Backend(). Backend destruction." << std::endl;
        cache_.reset();
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::kAddRealDelay: {
        if (!command.add_real_delay().actually_delay())
          continue;

        MAYBE_PRINT << "AddRealDelay(1ms)" << std::endl;
        base::PlatformThread::Sleep(base::Milliseconds(1));
        break;
      }
      case disk_cache_fuzzer::FuzzCommand::FUZZ_COMMAND_ONEOF_NOT_SET: {
        continue;
      }
    }
  }
}

void DiskCacheLPMFuzzer::HandleSetMaxSize(
    const disk_cache_fuzzer::SetMaxSize& sms) {
  if (!cache_)
    return;

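  // Cap the proto-supplied size to kMaxSizeKB and convert it to bytes before
  // handing it to the backend.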
  max_size_ = sms.size();
  max_size_ %= kMaxSizeKB;
  max_size_ *= 1024;
  MAYBE_PRINT << "SetMaxSize(" << max_size_ << ")" << std::endl;
  if (simple_cache_impl_)
    CHECK_EQ(true, simple_cache_impl_->SetMaxSize(max_size_));

  if (block_impl_)
    CHECK_EQ(true, block_impl_->SetMaxSize(max_size_));

  if (mem_cache_)
    CHECK_EQ(true, mem_cache_->SetMaxSize(max_size_));
}

void DiskCacheLPMFuzzer::CreateBackend(
    disk_cache_fuzzer::FuzzCommands::CacheBackend cache_backend,
    uint32_t mask,
    net::CacheType type,
    bool simple_cache_wait_for_index) {
  if (cache_backend == disk_cache_fuzzer::FuzzCommands::IN_MEMORY) {
    MAYBE_PRINT << "Using in-memory cache." << std::endl;
    auto cache = std::make_unique<disk_cache::MemBackendImpl>(nullptr);
    mem_cache_ = cache.get();
    cache_ = std::move(cache);
    CHECK(cache_);
  } else if (cache_backend == disk_cache_fuzzer::FuzzCommands::SIMPLE) {
    MAYBE_PRINT << "Using simple cache." << std::endl;
    net::TestCompletionCallback cb;
    // We limit ourselves to kMaxFdsSimpleCache fds, since OS X by default only
    // gives us 256. (Chrome raises the limit on startup, but the fuzzer
    // doesn't.)
    if (!simple_file_tracker_)
      simple_file_tracker_ =
          std::make_unique<disk_cache::SimpleFileTracker>(kMaxFdsSimpleCache);
    auto simple_backend = std::make_unique<disk_cache::SimpleBackendImpl>(
        /*file_operations=*/nullptr, cache_path_,
        /*cleanup_tracker=*/nullptr, simple_file_tracker_.get(), max_size_,
        type, /*net_log=*/nullptr);
    simple_backend->Init(cb.callback());
    CHECK_EQ(cb.WaitForResult(), net::OK);
    simple_cache_impl_ = simple_backend.get();
    cache_ = std::move(simple_backend);

    if (simple_cache_wait_for_index) {
      MAYBE_PRINT << "Waiting for simple cache index to be ready..."
                  << std::endl;
      net::TestCompletionCallback wait_for_index_cb;
      simple_cache_impl_->index()->ExecuteWhenReady(
          wait_for_index_cb.callback());
      int rv = wait_for_index_cb.WaitForResult();
      CHECK_EQ(rv, net::OK);
    }
  } else {
    MAYBE_PRINT << "Using blockfile cache";
    std::unique_ptr<disk_cache::BackendImpl> cache;
    if (mask) {
      MAYBE_PRINT << ", mask = " << mask << std::endl;
      cache = std::make_unique<disk_cache::BackendImpl>(
          cache_path_, mask,
          /* runner = */ nullptr, type,
          /* net_log = */ nullptr);
    } else {
      MAYBE_PRINT << "." << std::endl;
      cache = std::make_unique<disk_cache::BackendImpl>(
          cache_path_,
          /* cleanup_tracker = */ nullptr,
          /* runner = */ nullptr, type,
          /* net_log = */ nullptr);
    }
    block_impl_ = cache.get();
    cache_ = std::move(cache);
    CHECK(cache_);
    // TODO(mpdenton) kNoRandom or not? It does a lot of waiting for IO. May be
    // good for avoiding leaks but tests a less realistic cache.
    // block_impl_->SetFlags(disk_cache::kNoRandom);

    // TODO(mpdenton) should I always wait here?
    net::TestCompletionCallback cb;
    block_impl_->Init(cb.callback());
    CHECK_EQ(cb.WaitForResult(), net::OK);
  }
}

void DiskCacheLPMFuzzer::CloseAllRemainingEntries() {
  for (auto& entry_info : open_cache_entries_) {
    disk_cache::Entry** entry_ptr = &entry_info.second.entry_ptr;
    if (!*entry_ptr)
      continue;
    MAYBE_PRINT << "Destructor CloseEntry(\"" << (*entry_ptr)->GetKey() << "\")"
                << std::endl;
    (*entry_ptr)->Close();
    *entry_ptr = nullptr;
  }
}

DiskCacheLPMFuzzer::~DiskCacheLPMFuzzer() {
  // |block_impl_| leaks a lot more if we don't close entries before
  // destructing the backend.
  if (block_impl_) {
    // TODO(mpdenton) Consider creating a fuzz target that does not wait for
    // blockfile, and also does not detect leaks.

    // Because the blockfile backend will leak any entries closed after its
    // destruction, we need to wait for any remaining backend callbacks to
    // finish. Otherwise, there will always be a race between handling
    // callbacks with RunUntilIdle() and actually closing all of the remaining
    // entries. And closing entries after destructing the backend will not
    // work and will cause leaks.
    for (auto& entry_it : open_cache_entries_) {
      if (entry_it.second.tcb) {
        WaitOnEntry(&entry_it.second);
      }
    }

    // Destroy any open iterators before destructing the backend so we don't
    // cause leaks. TODO(mpdenton) should maybe be documented?
    // Also *must* happen after waiting for all OpenNextEntry callbacks to
    // finish, because destructing the iterators may cause those callbacks to
    // be cancelled, which will cause WaitOnEntry() to spin forever waiting.
    // TODO(mpdenton) should also be documented?
    open_iterators_.clear();
    // Just in case, finish any callbacks.
    init_globals->task_environment_->RunUntilIdle();
    // Close all entries that haven't been closed yet.
    CloseAllRemainingEntries();
    // Destroy the backend.
    cache_.reset();
  } else {
    // Here we won't bother with waiting for our OpenEntry* callbacks.
    cache_.reset();
    // Finish any callbacks that came in before backend destruction.
    init_globals->task_environment_->RunUntilIdle();
    // Close all entries that haven't been closed yet.
    CloseAllRemainingEntries();
  }

  // Make sure any tasks triggered by the CloseEntry's have run.
  init_globals->task_environment_->RunUntilIdle();
  if (simple_cache_impl_)
    CHECK(simple_file_tracker_->IsEmptyForTesting());
  base::RunLoop().RunUntilIdle();

  DeleteCache(cache_path_);
}

DEFINE_BINARY_PROTO_FUZZER(const disk_cache_fuzzer::FuzzCommands& commands) {
  {
    DiskCacheLPMFuzzer disk_cache_fuzzer_instance;
    disk_cache_fuzzer_instance.RunCommands(commands);
  }
  MAYBE_PRINT << "-----------------------" << std::endl;
}