// Copyright 2013 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/simple/simple_backend_impl.h"

#include <algorithm>
#include <cstdlib>
#include <functional>
#include <limits>

#include "base/functional/callback_helpers.h"
#include "base/task/sequenced_task_runner.h"
#include "base/task/thread_pool.h"
#include "build/build_config.h"

#if BUILDFLAG(IS_POSIX)
#include <sys/resource.h>
#endif

#include "base/files/file_util.h"
#include "base/functional/bind.h"
#include "base/functional/callback.h"
#include "base/lazy_instance.h"
#include "base/location.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/field_trial_params.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "base/system/sys_info.h"
#include "base/task/thread_pool/thread_pool_instance.h"
#include "base/time/time.h"
#include "net/base/net_errors.h"
#include "net/base/prioritized_task_runner.h"
#include "net/disk_cache/backend_cleanup_tracker.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/simple/simple_entry_format.h"
#include "net/disk_cache/simple/simple_entry_impl.h"
#include "net/disk_cache/simple/simple_file_tracker.h"
#include "net/disk_cache/simple/simple_histogram_macros.h"
#include "net/disk_cache/simple/simple_index.h"
#include "net/disk_cache/simple/simple_index_file.h"
#include "net/disk_cache/simple/simple_synchronous_entry.h"
#include "net/disk_cache/simple/simple_util.h"
#include "net/disk_cache/simple/simple_version_upgrade.h"

using base::FilePath;
using base::Time;

namespace disk_cache {

namespace {

// One entry may consume at most 1/kMaxFileRatio of the total cache size.
const int kMaxFileRatio = 8;

// Native code entries can be large. Rather than increasing the overall cache
// size, allow an individual entry to occupy up to half of the cache.
const int kMaxNativeCodeFileRatio = 2;

// Lower bound for the per-entry file size limit; overrides the ratios above
// when the cache is small.
const int64_t kMinFileSizeLimit = 5 * 1024 * 1024;

// Global context of all the files we have open --- this permits some to be
// closed on demand if too many FDs are being used, to avoid running out.
base::LazyInstance<SimpleFileTracker>::Leaky g_simple_file_tracker =
    LAZY_INSTANCE_INITIALIZER;

// Detects if the files in the cache directory match the current disk cache
// backend type and version. If the directory contains no cache, creates the
// fresh structure in place.
SimpleCacheConsistencyResult FileStructureConsistent(
    BackendFileOperations* file_operations,
    const base::FilePath& path) {
  if (!file_operations->PathExists(path) &&
      !file_operations->CreateDirectory(path)) {
    LOG(ERROR) << "Failed to create directory: " << path.LossyDisplayName();
    return SimpleCacheConsistencyResult::kCreateDirectoryFailed;
  }
  return disk_cache::UpgradeSimpleCacheOnDisk(file_operations, path);
}

// A context used by a BarrierCompletionCallback to track state.
struct BarrierContext {
  explicit BarrierContext(net::CompletionOnceCallback final_callback,
                          int expected)
      : final_callback_(std::move(final_callback)), expected(expected) {}

  net::CompletionOnceCallback final_callback_;
  const int expected;
  int count = 0;
  bool had_error = false;
};

void BarrierCompletionCallbackImpl(BarrierContext* context, int result) {
  DCHECK_GT(context->expected, context->count);
  if (context->had_error)
    return;
  if (result != net::OK) {
    context->had_error = true;
    std::move(context->final_callback_).Run(result);
    return;
  }
  ++context->count;
  if (context->count == context->expected)
    std::move(context->final_callback_).Run(net::OK);
}

// A barrier completion callback is a repeatable callback that waits for
// |count| successful results before invoking |final_callback|. In the case of
// an error, the first error is passed to |final_callback| and all others
// are ignored.
base::RepeatingCallback<void(int)> MakeBarrierCompletionCallback(
    int count,
    net::CompletionOnceCallback final_callback) {
  BarrierContext* context =
      new BarrierContext(std::move(final_callback), count);
  return base::BindRepeating(&BarrierCompletionCallbackImpl,
                             base::Owned(context));
}
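
// Illustrative sketch of the barrier semantics above (comment only, not part
// of the original file): with |count| == 2, the final callback fires after
// two net::OK results, or immediately on the first error. |OnAllDone| is a
// hypothetical handler used only for this example.
//
//   net::CompletionOnceCallback done = base::BindOnce(&OnAllDone);
//   auto barrier = MakeBarrierCompletionCallback(2, std::move(done));
//   barrier.Run(net::OK);  // Waits; one of two results received.
//   barrier.Run(net::OK);  // Runs |done| with net::OK.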

// A short bindable thunk that ensures a completion callback is always called
// after running an operation asynchronously. Checks for backend liveness
// first.
void RunOperationAndCallback(
    base::WeakPtr<SimpleBackendImpl> backend,
    base::OnceCallback<net::Error(net::CompletionOnceCallback)> operation,
    net::CompletionOnceCallback operation_callback) {
  if (!backend)
    return;

  // The operation consumes one half of the split callback if it completes
  // asynchronously; otherwise the result is reported synchronously through
  // the other half below.
  auto split_callback = base::SplitOnceCallback(std::move(operation_callback));
  const int operation_result =
      std::move(operation).Run(std::move(split_callback.first));
  if (operation_result != net::ERR_IO_PENDING && split_callback.second)
    std::move(split_callback.second).Run(operation_result);
}
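
// Illustrative use of the thunk above (comment only, not part of the
// original file): call sites such as DoomEntry() and DoomEntryFromHash()
// below queue a retry behind a pending doom like so:
//
//   post_doom->emplace_back(base::BindOnce(&RunOperationAndCallback,
//                                          AsWeakPtr(), std::move(operation),
//                                          std::move(callback)));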

// Same but for things that work with EntryResult.
void RunEntryResultOperationAndCallback(
    base::WeakPtr<SimpleBackendImpl> backend,
    base::OnceCallback<EntryResult(EntryResultCallback)> operation,
    EntryResultCallback operation_callback) {
  if (!backend)
    return;

  auto split_callback = base::SplitOnceCallback(std::move(operation_callback));
  EntryResult operation_result =
      std::move(operation).Run(std::move(split_callback.first));
  if (operation_result.net_error() != net::ERR_IO_PENDING &&
      split_callback.second) {
    std::move(split_callback.second).Run(std::move(operation_result));
  }
}

void RecordIndexLoad(net::CacheType cache_type,
                     base::TimeTicks constructed_since,
                     int result) {
  const base::TimeDelta creation_to_index =
      base::TimeTicks::Now() - constructed_since;
  if (result == net::OK) {
    SIMPLE_CACHE_UMA(TIMES, "CreationToIndex", cache_type, creation_to_index);
  } else {
    SIMPLE_CACHE_UMA(TIMES,
                     "CreationToIndexFail", cache_type, creation_to_index);
  }
}

SimpleEntryImpl::OperationsMode CacheTypeToOperationsMode(net::CacheType type) {
  return (type == net::DISK_CACHE || type == net::GENERATED_BYTE_CODE_CACHE ||
          type == net::GENERATED_NATIVE_CODE_CACHE ||
          type == net::GENERATED_WEBUI_BYTE_CODE_CACHE)
             ? SimpleEntryImpl::OPTIMISTIC_OPERATIONS
             : SimpleEntryImpl::NON_OPTIMISTIC_OPERATIONS;
}

}  // namespace

class SimpleBackendImpl::ActiveEntryProxy
    : public SimpleEntryImpl::ActiveEntryProxy {
 public:
  ~ActiveEntryProxy() override {
    if (backend_) {
      DCHECK_EQ(1U, backend_->active_entries_.count(entry_hash_));
      backend_->active_entries_.erase(entry_hash_);
    }
  }

  static std::unique_ptr<SimpleEntryImpl::ActiveEntryProxy> Create(
      uint64_t entry_hash,
      SimpleBackendImpl* backend) {
    return base::WrapUnique(new ActiveEntryProxy(entry_hash, backend));
  }

 private:
  ActiveEntryProxy(uint64_t entry_hash, SimpleBackendImpl* backend)
      : entry_hash_(entry_hash), backend_(backend->AsWeakPtr()) {}

  uint64_t entry_hash_;
  base::WeakPtr<SimpleBackendImpl> backend_;
};

SimpleBackendImpl::SimpleBackendImpl(
    scoped_refptr<BackendFileOperationsFactory> file_operations_factory,
    const FilePath& path,
    scoped_refptr<BackendCleanupTracker> cleanup_tracker,
    SimpleFileTracker* file_tracker,
    int64_t max_bytes,
    net::CacheType cache_type,
    net::NetLog* net_log)
    : Backend(cache_type),
      file_operations_factory_(
          file_operations_factory
              ? std::move(file_operations_factory)
              : base::MakeRefCounted<TrivialFileOperationsFactory>()),
      cleanup_tracker_(std::move(cleanup_tracker)),
      file_tracker_(file_tracker ? file_tracker
                                 : g_simple_file_tracker.Pointer()),
      path_(path),
      orig_max_size_(max_bytes),
      entry_operations_mode_(CacheTypeToOperationsMode(cache_type)),
      post_doom_waiting_(
          base::MakeRefCounted<SimplePostOperationWaiterTable>()),
      post_open_by_hash_waiting_(
          base::MakeRefCounted<SimplePostOperationWaiterTable>()),
      net_log_(net_log) {
  // Treat negative passed-in sizes the same as SetMaxSize would here and in
  // other backends: as a request for the default size (if this is the first
  // call).
  if (orig_max_size_ < 0)
    orig_max_size_ = 0;
}

SimpleBackendImpl::~SimpleBackendImpl() {
  // Write the index out if there is a pending write from a
  // previous operation.
  if (index_->HasPendingWrite())
    index_->WriteToDisk(SimpleIndex::INDEX_WRITE_REASON_SHUTDOWN);
}

void SimpleBackendImpl::SetTaskRunnerForTesting(
    scoped_refptr<base::SequencedTaskRunner> task_runner) {
  prioritized_task_runner_ =
      base::MakeRefCounted<net::PrioritizedTaskRunner>(kWorkerPoolTaskTraits);
  prioritized_task_runner_->SetTaskRunnerForTesting(  // IN-TEST
      std::move(task_runner));
}

void SimpleBackendImpl::Init(CompletionOnceCallback completion_callback) {
  auto index_task_runner = base::ThreadPool::CreateSequencedTaskRunner(
      {base::MayBlock(), base::WithBaseSyncPrimitives(),
       base::TaskPriority::USER_BLOCKING,
       base::TaskShutdownBehavior::BLOCK_SHUTDOWN});

  prioritized_task_runner_ =
      base::MakeRefCounted<net::PrioritizedTaskRunner>(kWorkerPoolTaskTraits);

  index_ = std::make_unique<SimpleIndex>(
      base::SequencedTaskRunner::GetCurrentDefault(), cleanup_tracker_.get(),
      this, GetCacheType(),
      std::make_unique<SimpleIndexFile>(
          index_task_runner, file_operations_factory_, GetCacheType(), path_));
  index_->ExecuteWhenReady(
      base::BindOnce(&RecordIndexLoad, GetCacheType(), base::TimeTicks::Now()));

  auto file_operations = file_operations_factory_->Create(index_task_runner);
  index_task_runner->PostTaskAndReplyWithResult(
      FROM_HERE,
      base::BindOnce(&SimpleBackendImpl::InitCacheStructureOnDisk,
                     std::move(file_operations), path_, orig_max_size_,
                     GetCacheType()),
      base::BindOnce(&SimpleBackendImpl::InitializeIndex, AsWeakPtr(),
                     std::move(completion_callback)));
}

bool SimpleBackendImpl::SetMaxSize(int64_t max_bytes) {
  if (max_bytes < 0)
    return false;
  orig_max_size_ = max_bytes;
  index_->SetMaxSize(max_bytes);
  return true;
}

int64_t SimpleBackendImpl::MaxFileSize() const {
  uint64_t file_size_ratio = GetCacheType() == net::GENERATED_NATIVE_CODE_CACHE
                                 ? kMaxNativeCodeFileRatio
                                 : kMaxFileRatio;
  return std::max(
      base::saturated_cast<int64_t>(index_->max_size() / file_size_ratio),
      kMinFileSizeLimit);
}
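
// Worked example for the limit above (comment only, not part of the original
// file): a 32 MiB disk cache yields 32 MiB / kMaxFileRatio = 4 MiB, which is
// below kMinFileSizeLimit, so MaxFileSize() returns 5 MiB; a 128 MiB cache
// yields 128 MiB / 8 = 16 MiB instead.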

scoped_refptr<SimplePostOperationWaiterTable> SimpleBackendImpl::OnDoomStart(
    uint64_t entry_hash) {
  post_doom_waiting_->OnOperationStart(entry_hash);
  return post_doom_waiting_;
}

void SimpleBackendImpl::DoomEntries(std::vector<uint64_t>* entry_hashes,
                                    net::CompletionOnceCallback callback) {
  auto mass_doom_entry_hashes = std::make_unique<std::vector<uint64_t>>();
  mass_doom_entry_hashes->swap(*entry_hashes);

  std::vector<uint64_t> to_doom_individually_hashes;

  // For each of the entry hashes, there are two cases:
  // 1. There is a corresponding entry in the active set, the pending-doom
  //    set, or both, so the hash should be doomed individually to avoid
  //    flakes.
  // 2. The hash is not in active use at all, so we can call
  //    SimpleSynchronousEntry::DeleteEntrySetFiles and delete the files en
  //    masse.
  for (int i = mass_doom_entry_hashes->size() - 1; i >= 0; --i) {
    const uint64_t entry_hash = (*mass_doom_entry_hashes)[i];
    if (!active_entries_.count(entry_hash) &&
        !post_doom_waiting_->Has(entry_hash)) {
      continue;
    }

    to_doom_individually_hashes.push_back(entry_hash);

    // Remove the hash from the mass-doom list in O(1) by overwriting it with
    // the last element and shrinking the vector.
    (*mass_doom_entry_hashes)[i] = mass_doom_entry_hashes->back();
    mass_doom_entry_hashes->resize(mass_doom_entry_hashes->size() - 1);
  }

  // The extra slot in the barrier is claimed by the mass deletion posted
  // below, which reports through DoomEntriesComplete().
  base::RepeatingCallback<void(int)> barrier_callback =
      MakeBarrierCompletionCallback(to_doom_individually_hashes.size() + 1,
                                    std::move(callback));
  for (uint64_t entry_hash : to_doom_individually_hashes) {
    const int doom_result = DoomEntryFromHash(entry_hash, barrier_callback);
    DCHECK_EQ(net::ERR_IO_PENDING, doom_result);
    index_->Remove(entry_hash);
  }

  for (uint64_t entry_hash : *mass_doom_entry_hashes) {
    index_->Remove(entry_hash);
    OnDoomStart(entry_hash);
  }

  // Taking this pointer here avoids relying on argument evaluation order
  // below: if std::move(mass_doom_entry_hashes) were evaluated before
  // mass_doom_entry_hashes.get(), get() would be called on a moved-from
  // unique_ptr.
  std::vector<uint64_t>* mass_doom_entry_hashes_ptr =
      mass_doom_entry_hashes.get();

  // We don't use priorities (i.e., `prioritized_task_runner_`) here because
  // we don't actually have them (this path also serves eviction based on the
  // index).
  auto task_runner =
      base::ThreadPool::CreateSequencedTaskRunner(kWorkerPoolTaskTraits);
  task_runner->PostTaskAndReplyWithResult(
      FROM_HERE,
      base::BindOnce(&SimpleSynchronousEntry::DeleteEntrySetFiles,
                     mass_doom_entry_hashes_ptr, path_,
                     file_operations_factory_->CreateUnbound()),
      base::BindOnce(&SimpleBackendImpl::DoomEntriesComplete, AsWeakPtr(),
                     std::move(mass_doom_entry_hashes), barrier_callback));
}

int32_t SimpleBackendImpl::GetEntryCount() const {
  // TODO(pasko): Use directory file count when index is not ready.
  return index_->GetEntryCount();
}

EntryResult SimpleBackendImpl::OpenEntry(const std::string& key,
                                         net::RequestPriority request_priority,
                                         EntryResultCallback callback) {
  const uint64_t entry_hash = simple_util::GetEntryHashKey(key);

  std::vector<base::OnceClosure>* post_operation = nullptr;
  PostOperationQueue post_operation_queue = PostOperationQueue::kNone;
  scoped_refptr<SimpleEntryImpl> simple_entry = CreateOrFindActiveOrDoomedEntry(
      entry_hash, key, request_priority, post_operation, post_operation_queue);
  if (!simple_entry) {
    if (post_operation_queue == PostOperationQueue::kPostDoom &&
        post_operation->empty() &&
        entry_operations_mode_ == SimpleEntryImpl::OPTIMISTIC_OPERATIONS) {
      // The entry is doomed, and no other backend operations are queued for
      // the entry, so the open must fail and it's safe to return
      // synchronously.
      net::NetLogWithSource log_for_entry(net::NetLogWithSource::Make(
          net_log_, net::NetLogSourceType::DISK_CACHE_ENTRY));
      log_for_entry.AddEvent(
          net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_CALL);
      log_for_entry.AddEventWithNetErrorCode(
          net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_END, net::ERR_FAILED);
      return EntryResult::MakeError(net::ERR_FAILED);
    }

    base::OnceCallback<EntryResult(EntryResultCallback)> operation =
        base::BindOnce(&SimpleBackendImpl::OpenEntry, base::Unretained(this),
                       key, request_priority);
    post_operation->emplace_back(
        base::BindOnce(&RunEntryResultOperationAndCallback, AsWeakPtr(),
                       std::move(operation), std::move(callback)));
    return EntryResult::MakeError(net::ERR_IO_PENDING);
  }
  return simple_entry->OpenEntry(std::move(callback));
}

EntryResult SimpleBackendImpl::CreateEntry(
    const std::string& key,
    net::RequestPriority request_priority,
    EntryResultCallback callback) {
  DCHECK_LT(0u, key.size());
  const uint64_t entry_hash = simple_util::GetEntryHashKey(key);

  std::vector<base::OnceClosure>* post_operation = nullptr;
  PostOperationQueue post_operation_queue = PostOperationQueue::kNone;
  scoped_refptr<SimpleEntryImpl> simple_entry = CreateOrFindActiveOrDoomedEntry(
      entry_hash, key, request_priority, post_operation, post_operation_queue);

  // If we couldn't grab an entry object due to a pending doom, see if
  // circumstances are right for an optimistic create.
  if (!simple_entry && post_operation_queue == PostOperationQueue::kPostDoom) {
    simple_entry = MaybeOptimisticCreateForPostDoom(
        entry_hash, key, request_priority, post_operation);
  }

  // If that doesn't work either, retry this once doom / open by hash is done.
  if (!simple_entry) {
    base::OnceCallback<EntryResult(EntryResultCallback)> operation =
        base::BindOnce(&SimpleBackendImpl::CreateEntry, base::Unretained(this),
                       key, request_priority);
    post_operation->emplace_back(
        base::BindOnce(&RunEntryResultOperationAndCallback, AsWeakPtr(),
                       std::move(operation), std::move(callback)));
    return EntryResult::MakeError(net::ERR_IO_PENDING);
  }

  return simple_entry->CreateEntry(std::move(callback));
}

EntryResult SimpleBackendImpl::OpenOrCreateEntry(
    const std::string& key,
    net::RequestPriority request_priority,
    EntryResultCallback callback) {
  DCHECK_LT(0u, key.size());
  const uint64_t entry_hash = simple_util::GetEntryHashKey(key);

  std::vector<base::OnceClosure>* post_operation = nullptr;
  PostOperationQueue post_operation_queue = PostOperationQueue::kNone;
  scoped_refptr<SimpleEntryImpl> simple_entry = CreateOrFindActiveOrDoomedEntry(
      entry_hash, key, request_priority, post_operation, post_operation_queue);

  // If we couldn't grab an entry object due to a pending doom, see if
  // circumstances are right for an optimistic create.
  if (!simple_entry) {
    if (post_operation_queue == PostOperationQueue::kPostDoom) {
      simple_entry = MaybeOptimisticCreateForPostDoom(
          entry_hash, key, request_priority, post_operation);
    }
    if (simple_entry) {
      return simple_entry->CreateEntry(std::move(callback));
    } else {
      // If that doesn't work either, retry this once doom / open by hash is
      // done.
      base::OnceCallback<EntryResult(EntryResultCallback)> operation =
          base::BindOnce(&SimpleBackendImpl::OpenOrCreateEntry,
                         base::Unretained(this), key, request_priority);
      post_operation->emplace_back(
          base::BindOnce(&RunEntryResultOperationAndCallback, AsWeakPtr(),
                         std::move(operation), std::move(callback)));
      return EntryResult::MakeError(net::ERR_IO_PENDING);
    }
  }

  return simple_entry->OpenOrCreateEntry(std::move(callback));
}

scoped_refptr<SimpleEntryImpl>
SimpleBackendImpl::MaybeOptimisticCreateForPostDoom(
    uint64_t entry_hash,
    const std::string& key,
    net::RequestPriority request_priority,
    std::vector<base::OnceClosure>* post_doom) {
  scoped_refptr<SimpleEntryImpl> simple_entry;
  // We would like to optimistically let the create go ahead, for the benefit
  // of the HTTP cache. This can only be sanely done if we are the only
  // operation serialized after the doom's completion.
  if (post_doom->empty() &&
      entry_operations_mode_ == SimpleEntryImpl::OPTIMISTIC_OPERATIONS) {
    simple_entry = base::MakeRefCounted<SimpleEntryImpl>(
        GetCacheType(), path_, cleanup_tracker_.get(), entry_hash,
        entry_operations_mode_, this, file_tracker_, file_operations_factory_,
        net_log_, GetNewEntryPriority(request_priority));
    simple_entry->SetKey(key);
    simple_entry->SetActiveEntryProxy(
        ActiveEntryProxy::Create(entry_hash, this));
    simple_entry->SetCreatePendingDoom();
    std::pair<EntryMap::iterator, bool> insert_result = active_entries_.insert(
        EntryMap::value_type(entry_hash, simple_entry.get()));
    post_doom->emplace_back(base::BindOnce(
        &SimpleEntryImpl::NotifyDoomBeforeCreateComplete, simple_entry));
    DCHECK(insert_result.second);
  }

  return simple_entry;
}
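
// Illustrative timeline for the optimistic create above (comment only, not
// part of the original file):
//   1. Doom("key") starts; a waiter queue for the hash is registered in
//      post_doom_waiting_.
//   2. Create("key") arrives while the doom is pending; the queue is empty,
//      so a new entry is handed out immediately in create-pending-doom state.
//   3. When the doom finishes, NotifyDoomBeforeCreateComplete() lets the
//      entry proceed with the actual creation.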

net::Error SimpleBackendImpl::DoomEntry(const std::string& key,
                                        net::RequestPriority priority,
                                        CompletionOnceCallback callback) {
  const uint64_t entry_hash = simple_util::GetEntryHashKey(key);

  std::vector<base::OnceClosure>* post_operation = nullptr;
  PostOperationQueue post_operation_queue = PostOperationQueue::kNone;
  scoped_refptr<SimpleEntryImpl> simple_entry = CreateOrFindActiveOrDoomedEntry(
      entry_hash, key, priority, post_operation, post_operation_queue);
  if (!simple_entry) {
    // At first glance it appears exceedingly silly to queue up a doom when we
    // get here with `post_operation_queue == PostOperationQueue::kPostDoom`,
    // i.e. with a doom already pending; but it's possible that the sequence
    // of operations is Doom/Create/Doom, in which case the second Doom is not
    // at all redundant.
    base::OnceCallback<net::Error(CompletionOnceCallback)> operation =
        base::BindOnce(&SimpleBackendImpl::DoomEntry, base::Unretained(this),
                       key, priority);
    post_operation->emplace_back(
        base::BindOnce(&RunOperationAndCallback, AsWeakPtr(),
                       std::move(operation), std::move(callback)));
    return net::ERR_IO_PENDING;
  }

  return simple_entry->DoomEntry(std::move(callback));
}

net::Error SimpleBackendImpl::DoomAllEntries(CompletionOnceCallback callback) {
  return DoomEntriesBetween(Time(), Time(), std::move(callback));
}

net::Error SimpleBackendImpl::DoomEntriesBetween(
    const Time initial_time,
    const Time end_time,
    CompletionOnceCallback callback) {
  index_->ExecuteWhenReady(base::BindOnce(&SimpleBackendImpl::IndexReadyForDoom,
                                          AsWeakPtr(), initial_time, end_time,
                                          std::move(callback)));
  return net::ERR_IO_PENDING;
}

net::Error SimpleBackendImpl::DoomEntriesSince(
    const Time initial_time,
    CompletionOnceCallback callback) {
  return DoomEntriesBetween(initial_time, Time(), std::move(callback));
}

int64_t SimpleBackendImpl::CalculateSizeOfAllEntries(
    Int64CompletionOnceCallback callback) {
  index_->ExecuteWhenReady(
      base::BindOnce(&SimpleBackendImpl::IndexReadyForSizeCalculation,
                     AsWeakPtr(), std::move(callback)));
  return net::ERR_IO_PENDING;
}

int64_t SimpleBackendImpl::CalculateSizeOfEntriesBetween(
    base::Time initial_time,
    base::Time end_time,
    Int64CompletionOnceCallback callback) {
  index_->ExecuteWhenReady(
      base::BindOnce(&SimpleBackendImpl::IndexReadyForSizeBetweenCalculation,
                     AsWeakPtr(), initial_time, end_time, std::move(callback)));
  return net::ERR_IO_PENDING;
}

class SimpleBackendImpl::SimpleIterator final : public Iterator {
 public:
  explicit SimpleIterator(base::WeakPtr<SimpleBackendImpl> backend)
      : backend_(backend) {}

  // From Backend::Iterator:
  EntryResult OpenNextEntry(EntryResultCallback callback) override {
    if (!backend_)
      return EntryResult::MakeError(net::ERR_FAILED);
    CompletionOnceCallback open_next_entry_impl =
        base::BindOnce(&SimpleIterator::OpenNextEntryImpl,
                       weak_factory_.GetWeakPtr(), std::move(callback));
    backend_->index_->ExecuteWhenReady(std::move(open_next_entry_impl));
    return EntryResult::MakeError(net::ERR_IO_PENDING);
  }

  void OpenNextEntryImpl(EntryResultCallback callback,
                         int index_initialization_error_code) {
    if (!backend_) {
      std::move(callback).Run(EntryResult::MakeError(net::ERR_FAILED));
      return;
    }
    if (index_initialization_error_code != net::OK) {
      std::move(callback).Run(EntryResult::MakeError(
          static_cast<net::Error>(index_initialization_error_code)));
      return;
    }
    if (!hashes_to_enumerate_)
      hashes_to_enumerate_ = backend_->index()->GetAllHashes();

    while (!hashes_to_enumerate_->empty()) {
      uint64_t entry_hash = hashes_to_enumerate_->back();
      hashes_to_enumerate_->pop_back();
      if (backend_->index()->Has(entry_hash)) {
        auto split_callback = base::SplitOnceCallback(std::move(callback));
        callback = std::move(split_callback.first);
        EntryResultCallback continue_iteration = base::BindOnce(
            &SimpleIterator::CheckIterationReturnValue,
            weak_factory_.GetWeakPtr(), std::move(split_callback.second));
        EntryResult open_result = backend_->OpenEntryFromHash(
            entry_hash, std::move(continue_iteration));
        if (open_result.net_error() == net::ERR_IO_PENDING)
          return;
        if (open_result.net_error() != net::ERR_FAILED) {
          std::move(callback).Run(std::move(open_result));
          return;
        }
      }
    }
    std::move(callback).Run(EntryResult::MakeError(net::ERR_FAILED));
  }

  void CheckIterationReturnValue(EntryResultCallback callback,
                                 EntryResult result) {
    if (result.net_error() == net::ERR_FAILED) {
      OpenNextEntry(std::move(callback));
      return;
    }
    std::move(callback).Run(std::move(result));
  }

 private:
  base::WeakPtr<SimpleBackendImpl> backend_;
  std::unique_ptr<std::vector<uint64_t>> hashes_to_enumerate_;
  base::WeakPtrFactory<SimpleIterator> weak_factory_{this};
};

std::unique_ptr<Backend::Iterator> SimpleBackendImpl::CreateIterator() {
  return std::make_unique<SimpleIterator>(AsWeakPtr());
}
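
// Illustrative iteration over the backend (comment only, not part of the
// original file); |OnNextEntry| is a hypothetical EntryResultCallback target:
//
//   std::unique_ptr<Backend::Iterator> iter = backend->CreateIterator();
//   iter->OpenNextEntry(base::BindOnce(&OnNextEntry));
//
// Each OpenNextEntry() call reports one entry through the callback once the
// index is ready; a call made after the last entry reports net::ERR_FAILED.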

void SimpleBackendImpl::GetStats(base::StringPairs* stats) {
  std::pair<std::string, std::string> item;
  item.first = "Cache type";
  item.second = "Simple Cache";
  stats->push_back(item);
}

void SimpleBackendImpl::OnExternalCacheHit(const std::string& key) {
  index_->UseIfExists(simple_util::GetEntryHashKey(key));
}

uint8_t SimpleBackendImpl::GetEntryInMemoryData(const std::string& key) {
  const uint64_t entry_hash = simple_util::GetEntryHashKey(key);
  return index_->GetEntryInMemoryData(entry_hash);
}

void SimpleBackendImpl::SetEntryInMemoryData(const std::string& key,
                                             uint8_t data) {
  const uint64_t entry_hash = simple_util::GetEntryHashKey(key);
  index_->SetEntryInMemoryData(entry_hash, data);
}

void SimpleBackendImpl::InitializeIndex(CompletionOnceCallback callback,
                                        const DiskStatResult& result) {
  if (result.net_error == net::OK) {
    index_->SetMaxSize(result.max_size);
#if BUILDFLAG(IS_ANDROID)
    if (app_status_listener_getter_) {
      index_->set_app_status_listener_getter(
          std::move(app_status_listener_getter_));
    }
#endif
    index_->Initialize(result.cache_dir_mtime);
  }
  std::move(callback).Run(result.net_error);
}

void SimpleBackendImpl::IndexReadyForDoom(Time initial_time,
                                          Time end_time,
                                          CompletionOnceCallback callback,
                                          int result) {
  if (result != net::OK) {
    std::move(callback).Run(result);
    return;
  }
  std::unique_ptr<std::vector<uint64_t>> removed_key_hashes(
      index_->GetEntriesBetween(initial_time, end_time).release());
  DoomEntries(removed_key_hashes.get(), std::move(callback));
}

void SimpleBackendImpl::IndexReadyForSizeCalculation(
    Int64CompletionOnceCallback callback,
    int result) {
  int64_t rv = result == net::OK ? index_->GetCacheSize() : result;
  std::move(callback).Run(rv);
}

void SimpleBackendImpl::IndexReadyForSizeBetweenCalculation(
    base::Time initial_time,
    base::Time end_time,
    Int64CompletionOnceCallback callback,
    int result) {
  int64_t rv = result == net::OK
                   ? index_->GetCacheSizeBetween(initial_time, end_time)
                   : result;
  std::move(callback).Run(rv);
}

// static
SimpleBackendImpl::DiskStatResult SimpleBackendImpl::InitCacheStructureOnDisk(
    std::unique_ptr<BackendFileOperations> file_operations,
    const base::FilePath& path,
    uint64_t suggested_max_size,
    net::CacheType cache_type) {
  DiskStatResult result;
  result.max_size = suggested_max_size;
  result.net_error = net::OK;
  SimpleCacheConsistencyResult consistency =
      FileStructureConsistent(file_operations.get(), path);
  SIMPLE_CACHE_UMA(ENUMERATION, "ConsistencyResult", cache_type, consistency);

  // If the cache structure is inconsistent, make a single attempt at
  // recovering it. Previously there were bugs that could cause a partially
  // written fake index file to be left in an otherwise empty cache. In
  // that case we can delete the index files and start over. Also, some
  // consistency failures may leave an empty directory outright, and we can
  // retry those cases as well.
  if (consistency != SimpleCacheConsistencyResult::kOK) {
    bool deleted_files = disk_cache::DeleteIndexFilesIfCacheIsEmpty(path);
    SIMPLE_CACHE_UMA(BOOLEAN, "DidDeleteIndexFilesAfterFailedConsistency",
                     cache_type, deleted_files);
    if (base::IsDirectoryEmpty(path)) {
      SimpleCacheConsistencyResult orig_consistency = consistency;
      consistency = FileStructureConsistent(file_operations.get(), path);
      SIMPLE_CACHE_UMA(ENUMERATION, "RetryConsistencyResult", cache_type,
                       consistency);
      if (consistency == SimpleCacheConsistencyResult::kOK) {
        SIMPLE_CACHE_UMA(ENUMERATION,
                         "OriginalConsistencyResultBeforeSuccessfulRetry",
                         cache_type, orig_consistency);
      }
    }
    if (deleted_files) {
      SIMPLE_CACHE_UMA(ENUMERATION, "ConsistencyResultAfterIndexFilesDeleted",
                       cache_type, consistency);
    }
  }

  if (consistency != SimpleCacheConsistencyResult::kOK) {
    LOG(ERROR) << "Simple Cache Backend: wrong file structure on disk: "
               << static_cast<int>(consistency)
               << " path: " << path.LossyDisplayName();
    result.net_error = net::ERR_FAILED;
  } else {
    std::optional<base::File::Info> file_info =
        file_operations->GetFileInfo(path);
    if (!file_info.has_value()) {
      // Something deleted the directory between when we set it up and the
      // stat; this is not uncommon on some test fixtures which erase their
      // tempdir while some worker threads may still be running.
      LOG(ERROR) << "Simple Cache Backend: cache directory inaccessible right "
                    "after creation; path: "
                 << path.LossyDisplayName();
      result.net_error = net::ERR_FAILED;
    } else {
      result.cache_dir_mtime = file_info->last_modified;
      if (!result.max_size) {
        int64_t available = base::SysInfo::AmountOfFreeDiskSpace(path);
        result.max_size = disk_cache::PreferredCacheSize(available, cache_type);
        DCHECK(result.max_size);
      }
    }
  }
  return result;
}

// Looks up or creates the SimpleEntryImpl for |entry_hash|. Returns null and
// points |post_operation| at the appropriate waiter queue when the operation
// must be serialized behind a pending doom or a pending open-by-hash.
scoped_refptr<SimpleEntryImpl>
SimpleBackendImpl::CreateOrFindActiveOrDoomedEntry(
    const uint64_t entry_hash,
    const std::string& key,
    net::RequestPriority request_priority,
    std::vector<base::OnceClosure>*& post_operation,
    PostOperationQueue& post_operation_queue) {
  DCHECK_EQ(entry_hash, simple_util::GetEntryHashKey(key));

  // If there is a doom pending, we would want to serialize after it.
  std::vector<base::OnceClosure>* post_doom =
      post_doom_waiting_->Find(entry_hash);
  if (post_doom) {
    post_operation = post_doom;
    post_operation_queue = PostOperationQueue::kPostDoom;
    return nullptr;
  }

  std::pair<EntryMap::iterator, bool> insert_result =
      active_entries_.insert(EntryMap::value_type(entry_hash, nullptr));
  EntryMap::iterator& it = insert_result.first;
  const bool did_insert = insert_result.second;
  if (did_insert) {
    SimpleEntryImpl* entry = it->second = new SimpleEntryImpl(
        GetCacheType(), path_, cleanup_tracker_.get(), entry_hash,
        entry_operations_mode_, this, file_tracker_, file_operations_factory_,
        net_log_, GetNewEntryPriority(request_priority));
    entry->SetKey(key);
    entry->SetActiveEntryProxy(ActiveEntryProxy::Create(entry_hash, this));
  }
  // TODO(jkarlin): In case of recycling a half-closed entry, we might want to
  // update its priority.
  DCHECK(it->second);
  // It's possible, but unlikely, that we have an entry hash collision with a
  // currently active entry, or that we don't yet know the key of the active
  // entry because it's being opened by hash.
  if (key != it->second->key()) {
    DCHECK(!did_insert);
    if (it->second->key().has_value()) {
      // Collision case.
      it->second->Doom();
      DCHECK_EQ(0U, active_entries_.count(entry_hash));
      DCHECK(post_doom_waiting_->Has(entry_hash));
      // Re-run ourselves to handle the now-pending doom.
      return CreateOrFindActiveOrDoomedEntry(entry_hash, key, request_priority,
                                             post_operation,
                                             post_operation_queue);
    } else {
      // Open by hash case.
      post_operation = post_open_by_hash_waiting_->Find(entry_hash);
      CHECK(post_operation);
      post_operation_queue = PostOperationQueue::kPostOpenByHash;
      return nullptr;
    }
  }
  return base::WrapRefCounted(it->second);
}

EntryResult SimpleBackendImpl::OpenEntryFromHash(uint64_t entry_hash,
                                                 EntryResultCallback callback) {
  std::vector<base::OnceClosure>* post_doom =
      post_doom_waiting_->Find(entry_hash);
  if (post_doom) {
    base::OnceCallback<EntryResult(EntryResultCallback)> operation =
        base::BindOnce(&SimpleBackendImpl::OpenEntryFromHash,
                       base::Unretained(this), entry_hash);
    // TODO(https://crbug.com/1019682) The cancellation behavior looks wrong.
    post_doom->emplace_back(base::BindOnce(&RunEntryResultOperationAndCallback,
                                           AsWeakPtr(), std::move(operation),
                                           std::move(callback)));
    return EntryResult::MakeError(net::ERR_IO_PENDING);
  }

  std::pair<EntryMap::iterator, bool> insert_result =
      active_entries_.insert(EntryMap::value_type(entry_hash, nullptr));
  EntryMap::iterator& it = insert_result.first;
  const bool did_insert = insert_result.second;

  // This needs to be here to keep the new entry alive until ->OpenEntry.
  scoped_refptr<SimpleEntryImpl> simple_entry;
  if (did_insert) {
    simple_entry = base::MakeRefCounted<SimpleEntryImpl>(
        GetCacheType(), path_, cleanup_tracker_.get(), entry_hash,
        entry_operations_mode_, this, file_tracker_, file_operations_factory_,
        net_log_, GetNewEntryPriority(net::HIGHEST));
    it->second = simple_entry.get();
    simple_entry->SetActiveEntryProxy(
        ActiveEntryProxy::Create(entry_hash, this));
    post_open_by_hash_waiting_->OnOperationStart(entry_hash);
    callback = base::BindOnce(&SimpleBackendImpl::OnEntryOpenedFromHash,
                              AsWeakPtr(), entry_hash, std::move(callback));
  }

  // Note: the !did_insert case includes when another OpenEntryFromHash is
  // pending; we don't care since that one will take care of the queue and we
  // don't need to check for key collisions.
  return it->second->OpenEntry(std::move(callback));
}

net::Error SimpleBackendImpl::DoomEntryFromHash(
    uint64_t entry_hash,
    CompletionOnceCallback callback) {
  std::vector<base::OnceClosure>* post_doom =
      post_doom_waiting_->Find(entry_hash);
  if (post_doom) {
    base::OnceCallback<net::Error(CompletionOnceCallback)> operation =
        base::BindOnce(&SimpleBackendImpl::DoomEntryFromHash,
                       base::Unretained(this), entry_hash);
    post_doom->emplace_back(base::BindOnce(&RunOperationAndCallback,
                                           AsWeakPtr(), std::move(operation),
                                           std::move(callback)));
    return net::ERR_IO_PENDING;
  }

  auto active_it = active_entries_.find(entry_hash);
  if (active_it != active_entries_.end())
    return active_it->second->DoomEntry(std::move(callback));

  // There are no pending dooms and no open entry, so we can make a trivial
  // call to DoomEntries() to delete this entry.
  std::vector<uint64_t> entry_hash_vector;
  entry_hash_vector.push_back(entry_hash);
  DoomEntries(&entry_hash_vector, std::move(callback));
  return net::ERR_IO_PENDING;
}

void SimpleBackendImpl::OnEntryOpenedFromHash(uint64_t hash,
                                              EntryResultCallback callback,
                                              EntryResult result) {
  post_open_by_hash_waiting_->OnOperationComplete(hash);
  std::move(callback).Run(std::move(result));
}

void SimpleBackendImpl::DoomEntriesComplete(
    std::unique_ptr<std::vector<uint64_t>> entry_hashes,
    CompletionOnceCallback callback,
    int result) {
  for (const uint64_t& entry_hash : *entry_hashes)
    post_doom_waiting_->OnOperationComplete(entry_hash);
  std::move(callback).Run(result);
}

uint32_t SimpleBackendImpl::GetNewEntryPriority(
    net::RequestPriority request_priority) {
  // Lower priority is better, so give high network priority the least bump.
  return ((net::RequestPriority::MAXIMUM_PRIORITY - request_priority) *
          10000) +
         entry_count_++;
}
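
// Worked example for the mapping above (comment only, not part of the
// original file): a request at MAXIMUM_PRIORITY receives only the
// entry_count_ tie-breaker, while each priority level below it adds another
// 10000, so lower network priorities always sort behind higher ones on the
// prioritized task runner (smaller values run first).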

}  // namespace disk_cache