1 // Copyright 2013 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "net/disk_cache/simple/simple_entry_impl.h"
6
7 #include <algorithm>
8 #include <cstring>
9 #include <limits>
10 #include <optional>
11 #include <utility>
12 #include <vector>
13
14 #include "base/check_op.h"
15 #include "base/functional/bind.h"
16 #include "base/functional/callback.h"
17 #include "base/functional/callback_helpers.h"
18 #include "base/location.h"
19 #include "base/memory/raw_ptr.h"
20 #include "base/notreached.h"
21 #include "base/task/sequenced_task_runner.h"
22 #include "base/task/task_runner.h"
23 #include "base/time/time.h"
24 #include "base/trace_event/memory_usage_estimator.h"
25 #include "net/base/io_buffer.h"
26 #include "net/base/net_errors.h"
27 #include "net/base/prioritized_task_runner.h"
28 #include "net/disk_cache/backend_cleanup_tracker.h"
29 #include "net/disk_cache/net_log_parameters.h"
30 #include "net/disk_cache/simple/simple_backend_impl.h"
31 #include "net/disk_cache/simple/simple_histogram_enums.h"
32 #include "net/disk_cache/simple/simple_histogram_macros.h"
33 #include "net/disk_cache/simple/simple_index.h"
34 #include "net/disk_cache/simple/simple_net_log_parameters.h"
35 #include "net/disk_cache/simple/simple_synchronous_entry.h"
36 #include "net/disk_cache/simple/simple_util.h"
37 #include "net/log/net_log.h"
38 #include "net/log/net_log_source_type.h"
39 #include "third_party/zlib/zlib.h"
40
41 namespace disk_cache {
42 namespace {
43
44 // An entry can store sparse data taking up to 1 / kMaxSparseDataSizeDivisor of
45 // the cache.
46 const int64_t kMaxSparseDataSizeDivisor = 10;
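// For example, with a 20 MiB cache a single entry may hold at most 2 MiB of
// sparse data; see WriteSparseDataInternal(), which derives the limit from the
// index's max_size().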
47
OpenEntryIndexEnum ComputeIndexState(SimpleBackendImpl* backend,
49 uint64_t entry_hash) {
50 if (!backend->index()->initialized())
51 return INDEX_NOEXIST;
52 if (backend->index()->Has(entry_hash))
53 return INDEX_HIT;
54 return INDEX_MISS;
55 }
56
void RecordOpenEntryIndexState(net::CacheType cache_type,
58 OpenEntryIndexEnum state) {
59 SIMPLE_CACHE_UMA(ENUMERATION, "OpenEntryIndexState", cache_type, state,
60 INDEX_MAX);
61 }
62
void RecordHeaderSize(net::CacheType cache_type, int size) {
64 SIMPLE_CACHE_UMA(COUNTS_10000, "HeaderSize", cache_type, size);
65 }
66
void InvokeCallbackIfBackendIsAlive(
68 const base::WeakPtr<SimpleBackendImpl>& backend,
69 net::CompletionOnceCallback completion_callback,
70 int result) {
71 DCHECK(!completion_callback.is_null());
72 if (!backend.get())
73 return;
74 std::move(completion_callback).Run(result);
75 }
76
void InvokeEntryResultCallbackIfBackendIsAlive(
78 const base::WeakPtr<SimpleBackendImpl>& backend,
79 EntryResultCallback completion_callback,
80 EntryResult result) {
81 DCHECK(!completion_callback.is_null());
82 if (!backend.get())
83 return;
84 std::move(completion_callback).Run(std::move(result));
85 }
86
// If |sync_possible| is false, and callback is available, posts rv to it and
// returns net::ERR_IO_PENDING; otherwise just passes through rv.
int PostToCallbackIfNeeded(bool sync_possible,
90 net::CompletionOnceCallback callback,
91 int rv) {
92 if (!sync_possible && !callback.is_null()) {
93 base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
94 FROM_HERE, base::BindOnce(std::move(callback), rv));
95 return net::ERR_IO_PENDING;
96 } else {
97 return rv;
98 }
99 }
100
101 } // namespace
102
103 using base::OnceClosure;
104 using base::FilePath;
105 using base::Time;
106 using base::TaskRunner;
107
// A helper class to ensure that RunNextOperationIfNeeded() is called when
// exiting the current stack frame.
110 class SimpleEntryImpl::ScopedOperationRunner {
111 public:
  explicit ScopedOperationRunner(SimpleEntryImpl* entry) : entry_(entry) {
113 }
114
  ~ScopedOperationRunner() {
116 entry_->RunNextOperationIfNeeded();
117 }
118
119 private:
120 const raw_ptr<SimpleEntryImpl> entry_;
121 };
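
// Most of the methods below place a ScopedOperationRunner on the stack so
// that, regardless of which early-return path is taken, any operations queued
// in |pending_operations_| still get a chance to run.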
122
123 SimpleEntryImpl::ActiveEntryProxy::~ActiveEntryProxy() = default;
124
SimpleEntryImpl::SimpleEntryImpl(
126 net::CacheType cache_type,
127 const FilePath& path,
128 scoped_refptr<BackendCleanupTracker> cleanup_tracker,
129 const uint64_t entry_hash,
130 OperationsMode operations_mode,
131 SimpleBackendImpl* backend,
132 SimpleFileTracker* file_tracker,
133 scoped_refptr<BackendFileOperationsFactory> file_operations_factory,
134 net::NetLog* net_log,
135 uint32_t entry_priority)
136 : cleanup_tracker_(std::move(cleanup_tracker)),
137 backend_(backend->AsWeakPtr()),
138 file_tracker_(file_tracker),
139 file_operations_factory_(std::move(file_operations_factory)),
140 cache_type_(cache_type),
141 path_(path),
142 entry_hash_(entry_hash),
143 use_optimistic_operations_(operations_mode == OPTIMISTIC_OPERATIONS),
144 last_used_(Time::Now()),
145 last_modified_(last_used_),
146 prioritized_task_runner_(backend_->prioritized_task_runner()),
147 net_log_(
148 net::NetLogWithSource::Make(net_log,
149 net::NetLogSourceType::DISK_CACHE_ENTRY)),
150 stream_0_data_(base::MakeRefCounted<net::GrowableIOBuffer>()),
151 entry_priority_(entry_priority) {
152 static_assert(std::extent<decltype(data_size_)>() ==
153 std::extent<decltype(crc32s_end_offset_)>(),
154 "arrays should be the same size");
155 static_assert(
156 std::extent<decltype(data_size_)>() == std::extent<decltype(crc32s_)>(),
157 "arrays should be the same size");
158 static_assert(std::extent<decltype(data_size_)>() ==
159 std::extent<decltype(have_written_)>(),
160 "arrays should be the same size");
161 ResetEntry();
162 NetLogSimpleEntryConstruction(net_log_,
163 net::NetLogEventType::SIMPLE_CACHE_ENTRY,
164 net::NetLogEventPhase::BEGIN, this);
165 }
166
void SimpleEntryImpl::SetActiveEntryProxy(
168 std::unique_ptr<ActiveEntryProxy> active_entry_proxy) {
169 DCHECK(!active_entry_proxy_);
170 active_entry_proxy_ = std::move(active_entry_proxy);
171 }
172
EntryResult SimpleEntryImpl::OpenEntry(EntryResultCallback callback) {
174 DCHECK(backend_.get());
175
176 net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_CALL);
177
178 OpenEntryIndexEnum index_state =
179 ComputeIndexState(backend_.get(), entry_hash_);
180 RecordOpenEntryIndexState(cache_type_, index_state);
181
182 // If entry is not known to the index, initiate fast failover to the network.
183 if (index_state == INDEX_MISS) {
184 net_log_.AddEventWithNetErrorCode(
185 net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_END, net::ERR_FAILED);
186 return EntryResult::MakeError(net::ERR_FAILED);
187 }
188
189 pending_operations_.push(SimpleEntryOperation::OpenOperation(
190 this, SimpleEntryOperation::ENTRY_NEEDS_CALLBACK, std::move(callback)));
191 RunNextOperationIfNeeded();
192 return EntryResult::MakeError(net::ERR_IO_PENDING);
193 }
194
EntryResult SimpleEntryImpl::CreateEntry(EntryResultCallback callback) {
196 DCHECK(backend_.get());
197 DCHECK_EQ(entry_hash_, simple_util::GetEntryHashKey(*key_));
198
199 net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_CREATE_CALL);
200
201 EntryResult result = EntryResult::MakeError(net::ERR_IO_PENDING);
202 if (use_optimistic_operations_ &&
203 state_ == STATE_UNINITIALIZED && pending_operations_.size() == 0) {
204 net_log_.AddEvent(
205 net::NetLogEventType::SIMPLE_CACHE_ENTRY_CREATE_OPTIMISTIC);
206
207 ReturnEntryToCaller();
208 result = EntryResult::MakeCreated(this);
209 pending_operations_.push(SimpleEntryOperation::CreateOperation(
210 this, SimpleEntryOperation::ENTRY_ALREADY_RETURNED,
211 EntryResultCallback()));
212
    // If we are optimistically returning before a preceding doom, we need to
    // wait for that IO, about which we will be notified externally.
215 if (optimistic_create_pending_doom_state_ != CREATE_NORMAL) {
216 CHECK_EQ(CREATE_OPTIMISTIC_PENDING_DOOM,
217 optimistic_create_pending_doom_state_);
218 state_ = STATE_IO_PENDING;
219 }
220 } else {
221 pending_operations_.push(SimpleEntryOperation::CreateOperation(
222 this, SimpleEntryOperation::ENTRY_NEEDS_CALLBACK, std::move(callback)));
223 }
224
  // We insert the entry in the index before creating the entry files in the
  // SimpleSynchronousEntry, because this way the worst-case scenario is having
  // the entry in the index without the created files yet, so we never leak
  // files. CreationOperationComplete will remove the entry from the index if
  // the creation fails.
230 backend_->index()->Insert(entry_hash_);
231
232 RunNextOperationIfNeeded();
233 return result;
234 }
235
EntryResult SimpleEntryImpl::OpenOrCreateEntry(EntryResultCallback callback) {
237 DCHECK(backend_.get());
238 DCHECK_EQ(entry_hash_, simple_util::GetEntryHashKey(*key_));
239
240 net_log_.AddEvent(
241 net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_OR_CREATE_CALL);
242
243 OpenEntryIndexEnum index_state =
244 ComputeIndexState(backend_.get(), entry_hash_);
245 RecordOpenEntryIndexState(cache_type_, index_state);
246
247 EntryResult result = EntryResult::MakeError(net::ERR_IO_PENDING);
248 if (index_state == INDEX_MISS && use_optimistic_operations_ &&
249 state_ == STATE_UNINITIALIZED && pending_operations_.size() == 0) {
250 net_log_.AddEvent(
251 net::NetLogEventType::SIMPLE_CACHE_ENTRY_CREATE_OPTIMISTIC);
252
253 ReturnEntryToCaller();
254 result = EntryResult::MakeCreated(this);
255 pending_operations_.push(SimpleEntryOperation::OpenOrCreateOperation(
256 this, index_state, SimpleEntryOperation::ENTRY_ALREADY_RETURNED,
257 EntryResultCallback()));
258
259 // The post-doom stuff should go through CreateEntry, not here.
260 CHECK_EQ(CREATE_NORMAL, optimistic_create_pending_doom_state_);
261 } else {
262 pending_operations_.push(SimpleEntryOperation::OpenOrCreateOperation(
263 this, index_state, SimpleEntryOperation::ENTRY_NEEDS_CALLBACK,
264 std::move(callback)));
265 }
266
  // We insert the entry in the index before creating the entry files in the
  // SimpleSynchronousEntry, because this way the worst-case scenario is having
  // the entry in the index without the created files yet, so we never leak
  // files. CreationOperationComplete will remove the entry from the index if
  // the creation fails.
272 backend_->index()->Insert(entry_hash_);
273
274 RunNextOperationIfNeeded();
275 return result;
276 }
277
net::Error SimpleEntryImpl::DoomEntry(net::CompletionOnceCallback callback) {
279 if (doom_state_ != DOOM_NONE)
280 return net::OK;
281 net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_DOOM_CALL);
282 net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_DOOM_BEGIN);
283
284 MarkAsDoomed(DOOM_QUEUED);
285 if (backend_.get()) {
286 if (optimistic_create_pending_doom_state_ == CREATE_NORMAL) {
287 post_doom_waiting_ = backend_->OnDoomStart(entry_hash_);
288 } else {
289 CHECK_EQ(STATE_IO_PENDING, state_);
290 CHECK_EQ(CREATE_OPTIMISTIC_PENDING_DOOM,
291 optimistic_create_pending_doom_state_);
292 // If we are in this state, we went ahead with making the entry even
293 // though the backend was already keeping track of a doom, so it can't
294 // keep track of ours. So we delay notifying it until
295 // NotifyDoomBeforeCreateComplete is called. Since this path is invoked
296 // only when the queue of post-doom callbacks was previously empty, while
297 // the CompletionOnceCallback for the op is posted,
298 // NotifyDoomBeforeCreateComplete() will be the first thing running after
299 // the previous doom completes, so at that point we can immediately grab
300 // a spot in entries_pending_doom_.
301 optimistic_create_pending_doom_state_ =
302 CREATE_OPTIMISTIC_PENDING_DOOM_FOLLOWED_BY_DOOM;
303 }
304 }
305 pending_operations_.push(
306 SimpleEntryOperation::DoomOperation(this, std::move(callback)));
307 RunNextOperationIfNeeded();
308 return net::ERR_IO_PENDING;
309 }
310
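// SetCreatePendingDoom() and NotifyDoomBeforeCreateComplete() implement the
// "notified externally" handshake mentioned in CreateEntry(): the former
// records that a create is being requested while a doom of the same key is
// still in flight, and the latter runs once that doom finishes so the queued
// create can proceed.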
void SimpleEntryImpl::SetCreatePendingDoom() {
312 CHECK_EQ(CREATE_NORMAL, optimistic_create_pending_doom_state_);
313 optimistic_create_pending_doom_state_ = CREATE_OPTIMISTIC_PENDING_DOOM;
314 }
315
void SimpleEntryImpl::NotifyDoomBeforeCreateComplete() {
317 CHECK_EQ(STATE_IO_PENDING, state_);
318 CHECK_NE(CREATE_NORMAL, optimistic_create_pending_doom_state_);
319 if (backend_.get() && optimistic_create_pending_doom_state_ ==
320 CREATE_OPTIMISTIC_PENDING_DOOM_FOLLOWED_BY_DOOM)
321 post_doom_waiting_ = backend_->OnDoomStart(entry_hash_);
322
323 state_ = STATE_UNINITIALIZED;
324 optimistic_create_pending_doom_state_ = CREATE_NORMAL;
325 RunNextOperationIfNeeded();
326 }
327
void SimpleEntryImpl::SetKey(const std::string& key) {
329 key_ = key;
330 net_log_.AddEventWithStringParams(
331 net::NetLogEventType::SIMPLE_CACHE_ENTRY_SET_KEY, "key", key);
332 }
333
void SimpleEntryImpl::Doom() {
335 DoomEntry(CompletionOnceCallback());
336 }
337
void SimpleEntryImpl::Close() {
339 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
340 CHECK_LT(0, open_count_);
341
342 net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_CLOSE_CALL);
343
344 if (--open_count_ > 0) {
345 DCHECK(!HasOneRef());
346 Release(); // Balanced in ReturnEntryToCaller().
347 return;
348 }
349
350 pending_operations_.push(SimpleEntryOperation::CloseOperation(this));
351 DCHECK(!HasOneRef());
352 Release(); // Balanced in ReturnEntryToCaller().
353 RunNextOperationIfNeeded();
354 }
355
std::string SimpleEntryImpl::GetKey() const {
357 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
358 return *key_;
359 }
360
Time SimpleEntryImpl::GetLastUsed() const {
362 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
363 DCHECK(cache_type_ != net::APP_CACHE);
364 return last_used_;
365 }
366
Time SimpleEntryImpl::GetLastModified() const {
368 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
369 return last_modified_;
370 }
371
int32_t SimpleEntryImpl::GetDataSize(int stream_index) const {
373 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
374 DCHECK_LE(0, data_size_[stream_index]);
375 return data_size_[stream_index];
376 }
377
int SimpleEntryImpl::ReadData(int stream_index,
379 int offset,
380 net::IOBuffer* buf,
381 int buf_len,
382 CompletionOnceCallback callback) {
383 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
384
385 if (net_log_.IsCapturing()) {
386 NetLogReadWriteData(
387 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_READ_CALL,
388 net::NetLogEventPhase::NONE, stream_index, offset, buf_len, false);
389 }
390
391 if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount ||
392 buf_len < 0) {
393 if (net_log_.IsCapturing()) {
394 NetLogReadWriteComplete(
395 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_READ_END,
396 net::NetLogEventPhase::NONE, net::ERR_INVALID_ARGUMENT);
397 }
398
399 return net::ERR_INVALID_ARGUMENT;
400 }
401
402 // If this is the only operation, bypass the queue, and also see if there is
403 // in-memory data to handle it synchronously. In principle, multiple reads can
404 // be parallelized, but past studies have shown that parallelizable ones
405 // happen <1% of the time, so it's probably not worth the effort.
406 bool alone_in_queue =
407 pending_operations_.size() == 0 && state_ == STATE_READY;
408
409 if (alone_in_queue) {
410 return ReadDataInternal(/*sync_possible = */ true, stream_index, offset,
411 buf, buf_len, std::move(callback));
412 }
413
414 pending_operations_.push(SimpleEntryOperation::ReadOperation(
415 this, stream_index, offset, buf_len, buf, std::move(callback)));
416 RunNextOperationIfNeeded();
417 return net::ERR_IO_PENDING;
418 }
419
int SimpleEntryImpl::WriteData(int stream_index,
421 int offset,
422 net::IOBuffer* buf,
423 int buf_len,
424 CompletionOnceCallback callback,
425 bool truncate) {
426 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
427
428 if (net_log_.IsCapturing()) {
429 NetLogReadWriteData(
430 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_CALL,
431 net::NetLogEventPhase::NONE, stream_index, offset, buf_len, truncate);
432 }
433
434 if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount ||
435 offset < 0 || buf_len < 0) {
436 if (net_log_.IsCapturing()) {
437 NetLogReadWriteComplete(
438 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_END,
439 net::NetLogEventPhase::NONE, net::ERR_INVALID_ARGUMENT);
440 }
441 return net::ERR_INVALID_ARGUMENT;
442 }
443 int end_offset;
444 if (!base::CheckAdd(offset, buf_len).AssignIfValid(&end_offset) ||
445 (backend_.get() && end_offset > backend_->MaxFileSize())) {
446 if (net_log_.IsCapturing()) {
447 NetLogReadWriteComplete(
448 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_END,
449 net::NetLogEventPhase::NONE, net::ERR_FAILED);
450 }
451 return net::ERR_FAILED;
452 }
453 ScopedOperationRunner operation_runner(this);
454
  // Stream 0 data is kept in memory, so it can be written immediately if
  // there are no IO operations pending.
457 if (stream_index == 0 && state_ == STATE_READY &&
458 pending_operations_.size() == 0) {
459 state_ = STATE_IO_PENDING;
460 SetStream0Data(buf, offset, buf_len, truncate);
461 state_ = STATE_READY;
462 return buf_len;
463 }
464
  // We can only do an optimistic Write if there are no pending operations, so
  // that we are sure that the next call to RunNextOperationIfNeeded will
  // actually run the write operation that sets the stream size. It also
  // protects against previous possibly-conflicting writes that could be
  // stacked in |pending_operations_|. We could optimize this for when we have
  // only read operations enqueued, but past studies have shown that such
  // parallelizable cases are very rare.
472 const bool optimistic =
473 (use_optimistic_operations_ && state_ == STATE_READY &&
474 pending_operations_.size() == 0);
475 CompletionOnceCallback op_callback;
476 scoped_refptr<net::IOBuffer> op_buf;
477 int ret_value = net::ERR_FAILED;
478 if (!optimistic) {
479 op_buf = buf;
480 op_callback = std::move(callback);
481 ret_value = net::ERR_IO_PENDING;
482 } else {
483 // TODO(morlovich,pasko): For performance, don't use a copy of an IOBuffer
484 // here to avoid paying the price of the RefCountedThreadSafe atomic
485 // operations.
486 if (buf) {
487 op_buf = base::MakeRefCounted<net::IOBufferWithSize>(buf_len);
488 std::copy(buf->data(), buf->data() + buf_len, op_buf->data());
489 }
490 op_callback = CompletionOnceCallback();
491 ret_value = buf_len;
492 if (net_log_.IsCapturing()) {
493 NetLogReadWriteComplete(
494 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_OPTIMISTIC,
495 net::NetLogEventPhase::NONE, buf_len);
496 }
497 }
498
499 pending_operations_.push(SimpleEntryOperation::WriteOperation(
500 this, stream_index, offset, buf_len, op_buf.get(), truncate, optimistic,
501 std::move(op_callback)));
502 return ret_value;
503 }
504
int SimpleEntryImpl::ReadSparseData(int64_t offset,
506 net::IOBuffer* buf,
507 int buf_len,
508 CompletionOnceCallback callback) {
509 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
510
511 if (net_log_.IsCapturing()) {
512 NetLogSparseOperation(
513 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_READ_SPARSE_CALL,
514 net::NetLogEventPhase::NONE, offset, buf_len);
515 }
516
517 if (offset < 0 || buf_len < 0) {
518 if (net_log_.IsCapturing()) {
519 NetLogReadWriteComplete(
520 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_READ_SPARSE_END,
521 net::NetLogEventPhase::NONE, net::ERR_INVALID_ARGUMENT);
522 }
523 return net::ERR_INVALID_ARGUMENT;
524 }
525
526 // Truncate |buf_len| to make sure that |offset + buf_len| does not overflow.
527 // This is OK since one can't write that far anyway.
528 // The result of std::min is guaranteed to fit into int since |buf_len| did.
529 buf_len = std::min(static_cast<int64_t>(buf_len),
530 std::numeric_limits<int64_t>::max() - offset);
531
532 ScopedOperationRunner operation_runner(this);
533 pending_operations_.push(SimpleEntryOperation::ReadSparseOperation(
534 this, offset, buf_len, buf, std::move(callback)));
535 return net::ERR_IO_PENDING;
536 }
537
int SimpleEntryImpl::WriteSparseData(int64_t offset,
539 net::IOBuffer* buf,
540 int buf_len,
541 CompletionOnceCallback callback) {
542 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
543
544 if (net_log_.IsCapturing()) {
545 NetLogSparseOperation(
546 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_SPARSE_CALL,
547 net::NetLogEventPhase::NONE, offset, buf_len);
548 }
549
550 if (offset < 0 || buf_len < 0 || !base::CheckAdd(offset, buf_len).IsValid()) {
551 if (net_log_.IsCapturing()) {
552 NetLogReadWriteComplete(
553 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_SPARSE_END,
554 net::NetLogEventPhase::NONE, net::ERR_INVALID_ARGUMENT);
555 }
556 return net::ERR_INVALID_ARGUMENT;
557 }
558
559 ScopedOperationRunner operation_runner(this);
560 pending_operations_.push(SimpleEntryOperation::WriteSparseOperation(
561 this, offset, buf_len, buf, std::move(callback)));
562 return net::ERR_IO_PENDING;
563 }
564
RangeResult SimpleEntryImpl::GetAvailableRange(int64_t offset,
566 int len,
567 RangeResultCallback callback) {
568 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
569 if (offset < 0 || len < 0)
570 return RangeResult(net::ERR_INVALID_ARGUMENT);
571
572 // Truncate |len| to make sure that |offset + len| does not overflow.
573 // This is OK since one can't write that far anyway.
574 // The result of std::min is guaranteed to fit into int since |len| did.
575 len = std::min(static_cast<int64_t>(len),
576 std::numeric_limits<int64_t>::max() - offset);
577
578 ScopedOperationRunner operation_runner(this);
579 pending_operations_.push(SimpleEntryOperation::GetAvailableRangeOperation(
580 this, offset, len, std::move(callback)));
581 return RangeResult(net::ERR_IO_PENDING);
582 }
583
bool SimpleEntryImpl::CouldBeSparse() const {
585 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
586 // TODO(morlovich): Actually check.
587 return true;
588 }
589
void SimpleEntryImpl::CancelSparseIO() {
591 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
592 // The Simple Cache does not return distinct objects for the same non-doomed
593 // entry, so there's no need to coordinate which object is performing sparse
594 // I/O. Therefore, CancelSparseIO and ReadyForSparseIO succeed instantly.
595 }
596
net::Error SimpleEntryImpl::ReadyForSparseIO(CompletionOnceCallback callback) {
598 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  // The Simple Cache does not return distinct objects for the same non-doomed
600 // entry, so there's no need to coordinate which object is performing sparse
601 // I/O. Therefore, CancelSparseIO and ReadyForSparseIO succeed instantly.
602 return net::OK;
603 }
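
// A rough usage sketch, from the caller's side, of the lifecycle implemented
// by the methods above (illustrative only: error handling and the asynchronous
// callback plumbing are elided, and |backend|, |key|, |buf| and the callbacks
// are hypothetical):
//
//   EntryResult result =
//       backend->OpenOrCreateEntry(key, net::HIGHEST, std::move(callback));
//   // ... once the result is available:
//   disk_cache::Entry* entry = result.ReleaseEntry();
//   entry->WriteData(/*index=*/1, /*offset=*/0, buf.get(), buf->size(),
//                    std::move(write_callback), /*truncate=*/true);
//   entry->Close();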
604
void SimpleEntryImpl::SetLastUsedTimeForTest(base::Time time) {
606 last_used_ = time;
607 backend_->index()->SetLastUsedTimeForTest(entry_hash_, time);
608 }
609
void SimpleEntryImpl::SetPriority(uint32_t entry_priority) {
611 entry_priority_ = entry_priority;
612 }
613
SimpleEntryImpl::~SimpleEntryImpl() {
615 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
616 DCHECK_EQ(0U, pending_operations_.size());
617
  // This used to DCHECK on `state_`, but it turns out that destruction on
  // thread shutdown, when closures holding `this` get deleted, can happen in
  // circumstances not possible during normal use, such as when I/O for a Close
  // operation is keeping the entry alive in STATE_IO_PENDING, or when an entry
  // that's STATE_READY has callbacks pending to hand it over to the user right
  // as the thread is shut down (this would also have a non-null
  // `synchronous_entry_`).
625 net_log_.EndEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY);
626 }
627
void SimpleEntryImpl::PostClientCallback(net::CompletionOnceCallback callback,
629 int result) {
630 if (callback.is_null())
631 return;
632 // Note that the callback is posted rather than directly invoked to avoid
633 // reentrancy issues.
634 base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
635 FROM_HERE, base::BindOnce(&InvokeCallbackIfBackendIsAlive, backend_,
636 std::move(callback), result));
637 }
638
void SimpleEntryImpl::PostClientCallback(EntryResultCallback callback,
640 EntryResult result) {
641 if (callback.is_null())
642 return;
643 // Note that the callback is posted rather than directly invoked to avoid
644 // reentrancy issues.
645 base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
646 FROM_HERE,
647 base::BindOnce(&InvokeEntryResultCallbackIfBackendIsAlive, backend_,
648 std::move(callback), std::move(result)));
649 }
650
void SimpleEntryImpl::ResetEntry() {
  // If we're doomed, we can't really do anything else with the entry, since
  // we no longer own the name and are disconnected from the active entry
  // table. We preserve doom_state_ across this reset for the same reason.
655 state_ = doom_state_ == DOOM_COMPLETED ? STATE_FAILURE : STATE_UNINITIALIZED;
656 std::memset(crc32s_end_offset_, 0, sizeof(crc32s_end_offset_));
657 std::memset(crc32s_, 0, sizeof(crc32s_));
658 std::memset(have_written_, 0, sizeof(have_written_));
659 std::memset(data_size_, 0, sizeof(data_size_));
660 }
661
void SimpleEntryImpl::ReturnEntryToCaller() {
663 DCHECK(backend_);
664 ++open_count_;
665 AddRef(); // Balanced in Close()
666 }
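
// Each successful Open/Create hands the caller one reference (the AddRef()
// above or in FinishReturnEntryToCallerAsync()), balanced by the Release() in
// Close(); |open_count_| tracks how many such handles are outstanding.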
667
void SimpleEntryImpl::ReturnEntryToCallerAsync(bool is_open,
669 EntryResultCallback callback) {
670 DCHECK(!callback.is_null());
671
672 // |open_count_| must be incremented immediately, so that a Close on an alias
673 // doesn't try to wrap things up.
674 ++open_count_;
675
676 // Note that the callback is posted rather than directly invoked to avoid
677 // reentrancy issues.
678 base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
679 FROM_HERE,
680 base::BindOnce(&SimpleEntryImpl::FinishReturnEntryToCallerAsync, this,
681 is_open, std::move(callback)));
682 }
683
void SimpleEntryImpl::FinishReturnEntryToCallerAsync(
685 bool is_open,
686 EntryResultCallback callback) {
687 AddRef(); // Balanced in Close()
688 if (!backend_.get()) {
    // With the backend dead, Open/Create operations are responsible for
    // cleaning up the entry --- the ownership is never transferred to the
    // caller, and their callback isn't invoked.
692 Close();
693 return;
694 }
695
696 std::move(callback).Run(is_open ? EntryResult::MakeOpened(this)
697 : EntryResult::MakeCreated(this));
698 }
699
void SimpleEntryImpl::MarkAsDoomed(DoomState new_state) {
701 DCHECK_NE(DOOM_NONE, new_state);
702 doom_state_ = new_state;
703 if (!backend_.get())
704 return;
705 backend_->index()->Remove(entry_hash_);
706 active_entry_proxy_.reset();
707 }
708
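// Runs at most one queued operation per call. While an operation is in flight
// (state_ == STATE_IO_PENDING) the queue stalls; it is drained again when the
// corresponding completion handler runs, typically via its own
// ScopedOperationRunner.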
void SimpleEntryImpl::RunNextOperationIfNeeded() {
710 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
711 if (!pending_operations_.empty() && state_ != STATE_IO_PENDING) {
712 SimpleEntryOperation operation = std::move(pending_operations_.front());
713 pending_operations_.pop();
714 switch (operation.type()) {
715 case SimpleEntryOperation::TYPE_OPEN:
716 OpenEntryInternal(operation.entry_result_state(),
717 operation.ReleaseEntryResultCallback());
718 break;
719 case SimpleEntryOperation::TYPE_CREATE:
720 CreateEntryInternal(operation.entry_result_state(),
721 operation.ReleaseEntryResultCallback());
722 break;
723 case SimpleEntryOperation::TYPE_OPEN_OR_CREATE:
724 OpenOrCreateEntryInternal(operation.index_state(),
725 operation.entry_result_state(),
726 operation.ReleaseEntryResultCallback());
727 break;
728 case SimpleEntryOperation::TYPE_CLOSE:
729 CloseInternal();
730 break;
731 case SimpleEntryOperation::TYPE_READ:
732 ReadDataInternal(/* sync_possible= */ false, operation.index(),
733 operation.offset(), operation.buf(),
734 operation.length(), operation.ReleaseCallback());
735 break;
736 case SimpleEntryOperation::TYPE_WRITE:
737 WriteDataInternal(operation.index(), operation.offset(),
738 operation.buf(), operation.length(),
739 operation.ReleaseCallback(), operation.truncate());
740 break;
741 case SimpleEntryOperation::TYPE_READ_SPARSE:
742 ReadSparseDataInternal(operation.sparse_offset(), operation.buf(),
743 operation.length(), operation.ReleaseCallback());
744 break;
745 case SimpleEntryOperation::TYPE_WRITE_SPARSE:
746 WriteSparseDataInternal(operation.sparse_offset(), operation.buf(),
747 operation.length(),
748 operation.ReleaseCallback());
749 break;
750 case SimpleEntryOperation::TYPE_GET_AVAILABLE_RANGE:
751 GetAvailableRangeInternal(operation.sparse_offset(), operation.length(),
752 operation.ReleaseRangeResultCalback());
753 break;
754 case SimpleEntryOperation::TYPE_DOOM:
755 DoomEntryInternal(operation.ReleaseCallback());
756 break;
757 default:
758 NOTREACHED();
759 }
760 // |this| may have been deleted.
761 }
762 }
763
void SimpleEntryImpl::OpenEntryInternal(
765 SimpleEntryOperation::EntryResultState result_state,
766 EntryResultCallback callback) {
767 ScopedOperationRunner operation_runner(this);
768
769 net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_BEGIN);
770
771 // No optimistic sync return possible on open.
772 DCHECK_EQ(SimpleEntryOperation::ENTRY_NEEDS_CALLBACK, result_state);
773
774 if (state_ == STATE_READY) {
775 ReturnEntryToCallerAsync(/* is_open = */ true, std::move(callback));
776 NetLogSimpleEntryCreation(net_log_,
777 net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_END,
778 net::NetLogEventPhase::NONE, this, net::OK);
779 return;
780 }
781 if (state_ == STATE_FAILURE) {
782 PostClientCallback(std::move(callback),
783 EntryResult::MakeError(net::ERR_FAILED));
784 NetLogSimpleEntryCreation(
785 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_END,
786 net::NetLogEventPhase::NONE, this, net::ERR_FAILED);
787 return;
788 }
789
790 DCHECK_EQ(STATE_UNINITIALIZED, state_);
791 DCHECK(!synchronous_entry_);
792 state_ = STATE_IO_PENDING;
793 const base::TimeTicks start_time = base::TimeTicks::Now();
794 auto results = std::make_unique<SimpleEntryCreationResults>(SimpleEntryStat(
795 last_used_, last_modified_, data_size_, sparse_data_size_));
796
797 int32_t trailer_prefetch_size = -1;
798 base::Time last_used_time;
799 if (SimpleBackendImpl* backend = backend_.get()) {
800 if (cache_type_ == net::APP_CACHE) {
801 trailer_prefetch_size =
802 backend->index()->GetTrailerPrefetchSize(entry_hash_);
803 } else {
804 last_used_time = backend->index()->GetLastUsedTime(entry_hash_);
805 }
806 }
807
808 base::OnceClosure task = base::BindOnce(
809 &SimpleSynchronousEntry::OpenEntry, cache_type_, path_, key_, entry_hash_,
810 file_tracker_, file_operations_factory_->CreateUnbound(),
811 trailer_prefetch_size, results.get());
812
813 base::OnceClosure reply = base::BindOnce(
814 &SimpleEntryImpl::CreationOperationComplete, this, result_state,
815 std::move(callback), start_time, last_used_time, std::move(results),
816 net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_END);
817
818 prioritized_task_runner_->PostTaskAndReply(FROM_HERE, std::move(task),
819 std::move(reply), entry_priority_);
820 }
821
void SimpleEntryImpl::CreateEntryInternal(
823 SimpleEntryOperation::EntryResultState result_state,
824 EntryResultCallback callback) {
825 ScopedOperationRunner operation_runner(this);
826
827 net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_CREATE_BEGIN);
828
829 if (state_ != STATE_UNINITIALIZED) {
830 // There is already an active normal entry.
831 NetLogSimpleEntryCreation(
832 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_CREATE_END,
833 net::NetLogEventPhase::NONE, this, net::ERR_FAILED);
834 // If we have optimistically returned an entry, we would be the first entry
835 // in queue with state_ == STATE_UNINITIALIZED.
836 DCHECK_EQ(SimpleEntryOperation::ENTRY_NEEDS_CALLBACK, result_state);
837 PostClientCallback(std::move(callback),
838 EntryResult::MakeError(net::ERR_FAILED));
839 return;
840 }
841 DCHECK_EQ(STATE_UNINITIALIZED, state_);
842 DCHECK(!synchronous_entry_);
843
844 state_ = STATE_IO_PENDING;
845
846 // Since we don't know the correct values for |last_used_| and
847 // |last_modified_| yet, we make this approximation.
848 last_used_ = last_modified_ = base::Time::Now();
849
850 const base::TimeTicks start_time = base::TimeTicks::Now();
851 auto results = std::make_unique<SimpleEntryCreationResults>(SimpleEntryStat(
852 last_used_, last_modified_, data_size_, sparse_data_size_));
853
854 OnceClosure task =
855 base::BindOnce(&SimpleSynchronousEntry::CreateEntry, cache_type_, path_,
856 *key_, entry_hash_, file_tracker_,
857 file_operations_factory_->CreateUnbound(), results.get());
858 OnceClosure reply = base::BindOnce(
859 &SimpleEntryImpl::CreationOperationComplete, this, result_state,
860 std::move(callback), start_time, base::Time(), std::move(results),
861 net::NetLogEventType::SIMPLE_CACHE_ENTRY_CREATE_END);
862 prioritized_task_runner_->PostTaskAndReply(FROM_HERE, std::move(task),
863 std::move(reply), entry_priority_);
864 }
865
void SimpleEntryImpl::OpenOrCreateEntryInternal(
867 OpenEntryIndexEnum index_state,
868 SimpleEntryOperation::EntryResultState result_state,
869 EntryResultCallback callback) {
870 ScopedOperationRunner operation_runner(this);
871
872 net_log_.AddEvent(
873 net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_OR_CREATE_BEGIN);
874
875 // result_state may be ENTRY_ALREADY_RETURNED only if an optimistic create is
876 // being performed, which must be in STATE_UNINITIALIZED.
877 bool optimistic_create =
878 (result_state == SimpleEntryOperation::ENTRY_ALREADY_RETURNED);
879 DCHECK(!optimistic_create || state_ == STATE_UNINITIALIZED);
880
881 if (state_ == STATE_READY) {
882 ReturnEntryToCallerAsync(/* is_open = */ true, std::move(callback));
883 NetLogSimpleEntryCreation(
884 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_OR_CREATE_END,
885 net::NetLogEventPhase::NONE, this, net::OK);
886 return;
887 }
888 if (state_ == STATE_FAILURE) {
889 PostClientCallback(std::move(callback),
890 EntryResult::MakeError(net::ERR_FAILED));
891 NetLogSimpleEntryCreation(
892 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_OR_CREATE_END,
893 net::NetLogEventPhase::NONE, this, net::ERR_FAILED);
894 return;
895 }
896
897 DCHECK_EQ(STATE_UNINITIALIZED, state_);
898 DCHECK(!synchronous_entry_);
899 state_ = STATE_IO_PENDING;
900 const base::TimeTicks start_time = base::TimeTicks::Now();
901 auto results = std::make_unique<SimpleEntryCreationResults>(SimpleEntryStat(
902 last_used_, last_modified_, data_size_, sparse_data_size_));
903
904 int32_t trailer_prefetch_size = -1;
905 base::Time last_used_time;
906 if (SimpleBackendImpl* backend = backend_.get()) {
907 if (cache_type_ == net::APP_CACHE) {
908 trailer_prefetch_size =
909 backend->index()->GetTrailerPrefetchSize(entry_hash_);
910 } else {
911 last_used_time = backend->index()->GetLastUsedTime(entry_hash_);
912 }
913 }
914
915 base::OnceClosure task =
916 base::BindOnce(&SimpleSynchronousEntry::OpenOrCreateEntry, cache_type_,
917 path_, *key_, entry_hash_, index_state, optimistic_create,
918 file_tracker_, file_operations_factory_->CreateUnbound(),
919 trailer_prefetch_size, results.get());
920
921 base::OnceClosure reply = base::BindOnce(
922 &SimpleEntryImpl::CreationOperationComplete, this, result_state,
923 std::move(callback), start_time, last_used_time, std::move(results),
924 net::NetLogEventType::SIMPLE_CACHE_ENTRY_OPEN_OR_CREATE_END);
925
926 prioritized_task_runner_->PostTaskAndReply(FROM_HERE, std::move(task),
927 std::move(reply), entry_priority_);
928 }
929
void SimpleEntryImpl::CloseInternal() {
931 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
932
933 if (open_count_ != 0) {
934 // Entry got resurrected in between Close and CloseInternal, nothing to do
935 // for now.
936 return;
937 }
938
939 typedef SimpleSynchronousEntry::CRCRecord CRCRecord;
940 auto crc32s_to_write = std::make_unique<std::vector<CRCRecord>>();
941
942 net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_CLOSE_BEGIN);
943
944 if (state_ == STATE_READY) {
945 DCHECK(synchronous_entry_);
946 state_ = STATE_IO_PENDING;
947 for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
948 if (have_written_[i]) {
949 if (GetDataSize(i) == crc32s_end_offset_[i]) {
950 int32_t crc = GetDataSize(i) == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i];
951 crc32s_to_write->push_back(CRCRecord(i, true, crc));
952 } else {
953 crc32s_to_write->push_back(CRCRecord(i, false, 0));
954 }
955 }
956 }
957 } else {
958 DCHECK(STATE_UNINITIALIZED == state_ || STATE_FAILURE == state_);
959 }
960
961 auto results = std::make_unique<SimpleEntryCloseResults>();
962 if (synchronous_entry_) {
963 OnceClosure task = base::BindOnce(
964 &SimpleSynchronousEntry::Close, base::Unretained(synchronous_entry_),
965 SimpleEntryStat(last_used_, last_modified_, data_size_,
966 sparse_data_size_),
967 std::move(crc32s_to_write), base::RetainedRef(stream_0_data_),
968 results.get());
969 OnceClosure reply = base::BindOnce(&SimpleEntryImpl::CloseOperationComplete,
970 this, std::move(results));
971 synchronous_entry_ = nullptr;
972 prioritized_task_runner_->PostTaskAndReply(
973 FROM_HERE, std::move(task), std::move(reply), entry_priority_);
974 } else {
975 CloseOperationComplete(std::move(results));
976 }
977 }
978
int SimpleEntryImpl::ReadDataInternal(bool sync_possible,
980 int stream_index,
981 int offset,
982 net::IOBuffer* buf,
983 int buf_len,
984 net::CompletionOnceCallback callback) {
985 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
986 ScopedOperationRunner operation_runner(this);
987
988 if (net_log_.IsCapturing()) {
989 NetLogReadWriteData(
990 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_READ_BEGIN,
991 net::NetLogEventPhase::NONE, stream_index, offset, buf_len, false);
992 }
993
994 if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
995 if (net_log_.IsCapturing()) {
996 NetLogReadWriteComplete(net_log_,
997 net::NetLogEventType::SIMPLE_CACHE_ENTRY_READ_END,
998 net::NetLogEventPhase::NONE, net::ERR_FAILED);
999 }
1000 // Note that the API states that client-provided callbacks for entry-level
1001 // (i.e. non-backend) operations (e.g. read, write) are invoked even if
1002 // the backend was already destroyed.
1003 return PostToCallbackIfNeeded(sync_possible, std::move(callback),
1004 net::ERR_FAILED);
1005 }
1006 DCHECK_EQ(STATE_READY, state_);
1007 if (offset >= GetDataSize(stream_index) || offset < 0 || !buf_len) {
1008 // If there is nothing to read, we bail out before setting state_ to
1009 // STATE_IO_PENDING (so ScopedOperationRunner might start us on next op
1010 // here).
1011 return PostToCallbackIfNeeded(sync_possible, std::move(callback), 0);
1012 }
1013
1014 // Truncate read to not go past end of stream.
1015 buf_len = std::min(buf_len, GetDataSize(stream_index) - offset);
1016
1017 // Since stream 0 data is kept in memory, it is read immediately.
1018 if (stream_index == 0) {
1019 state_ = STATE_IO_PENDING;
1020 ReadFromBuffer(stream_0_data_.get(), offset, buf_len, buf);
1021 state_ = STATE_READY;
1022 return PostToCallbackIfNeeded(sync_possible, std::move(callback), buf_len);
1023 }
1024
1025 // Sometimes we can read in-ram prefetched stream 1 data immediately, too.
1026 if (stream_index == 1) {
1027 if (stream_1_prefetch_data_) {
1028 state_ = STATE_IO_PENDING;
1029 ReadFromBuffer(stream_1_prefetch_data_.get(), offset, buf_len, buf);
1030 state_ = STATE_READY;
1031 return PostToCallbackIfNeeded(sync_possible, std::move(callback),
1032 buf_len);
1033 }
1034 }
1035
1036 state_ = STATE_IO_PENDING;
1037 if (doom_state_ == DOOM_NONE && backend_.get())
1038 backend_->index()->UseIfExists(entry_hash_);
1039
1040 SimpleSynchronousEntry::ReadRequest read_req(stream_index, offset, buf_len);
1041 // Figure out if we should be computing the checksum for this read,
1042 // and whether we should be verifying it, too.
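  // The checksum is only advanced when a read starts exactly at
  // crc32s_end_offset_[stream_index] (e.g. contiguous reads from offset 0);
  // any other read leaves the checksum state untouched.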
1043 if (crc32s_end_offset_[stream_index] == offset) {
1044 read_req.request_update_crc = true;
1045 read_req.previous_crc32 =
1046 offset == 0 ? crc32(0, Z_NULL, 0) : crc32s_[stream_index];
1047
1048 // We can't verify the checksum if we already overwrote part of the file.
    // (It may still make sense to compute it if the overwritten area and the
    // about-to-read-in area are adjacent.)
1051 read_req.request_verify_crc = !have_written_[stream_index];
1052 }
1053
1054 auto result = std::make_unique<SimpleSynchronousEntry::ReadResult>();
1055 auto entry_stat = std::make_unique<SimpleEntryStat>(
1056 last_used_, last_modified_, data_size_, sparse_data_size_);
1057 OnceClosure task = base::BindOnce(
1058 &SimpleSynchronousEntry::ReadData, base::Unretained(synchronous_entry_),
1059 read_req, entry_stat.get(), base::RetainedRef(buf), result.get());
1060 OnceClosure reply = base::BindOnce(
1061 &SimpleEntryImpl::ReadOperationComplete, this, stream_index, offset,
1062 std::move(callback), std::move(entry_stat), std::move(result));
1063 prioritized_task_runner_->PostTaskAndReply(FROM_HERE, std::move(task),
1064 std::move(reply), entry_priority_);
1065 return net::ERR_IO_PENDING;
1066 }
1067
void SimpleEntryImpl::WriteDataInternal(int stream_index,
1069 int offset,
1070 net::IOBuffer* buf,
1071 int buf_len,
1072 net::CompletionOnceCallback callback,
1073 bool truncate) {
1074 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
1075 ScopedOperationRunner operation_runner(this);
1076
1077 if (net_log_.IsCapturing()) {
1078 NetLogReadWriteData(
1079 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_BEGIN,
1080 net::NetLogEventPhase::NONE, stream_index, offset, buf_len, truncate);
1081 }
1082
1083 if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
1084 if (net_log_.IsCapturing()) {
1085 NetLogReadWriteComplete(
1086 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_END,
1087 net::NetLogEventPhase::NONE, net::ERR_FAILED);
1088 }
1089 if (!callback.is_null()) {
1090 base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
1091 FROM_HERE, base::BindOnce(std::move(callback), net::ERR_FAILED));
1092 }
1093 // |this| may be destroyed after return here.
1094 return;
1095 }
1096
1097 DCHECK_EQ(STATE_READY, state_);
1098
  // Since stream 0 data is kept in memory, it will be written immediately.
1100 if (stream_index == 0) {
1101 state_ = STATE_IO_PENDING;
1102 SetStream0Data(buf, offset, buf_len, truncate);
1103 state_ = STATE_READY;
1104 if (!callback.is_null()) {
1105 base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
1106 FROM_HERE, base::BindOnce(std::move(callback), buf_len));
1107 }
1108 return;
1109 }
1110
1111 // Ignore zero-length writes that do not change the file size.
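  // (A zero-length truncating write at an offset other than the current size
  // still falls through below, since it changes the stream size.)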
1112 if (buf_len == 0) {
1113 int32_t data_size = data_size_[stream_index];
1114 if (truncate ? (offset == data_size) : (offset <= data_size)) {
1115 if (!callback.is_null()) {
1116 base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
1117 FROM_HERE, base::BindOnce(std::move(callback), 0));
1118 }
1119 return;
1120 }
1121 }
1122 state_ = STATE_IO_PENDING;
1123 if (doom_state_ == DOOM_NONE && backend_.get())
1124 backend_->index()->UseIfExists(entry_hash_);
1125
1126 // Any stream 1 write invalidates the prefetched data.
1127 if (stream_index == 1)
1128 stream_1_prefetch_data_ = nullptr;
1129
1130 bool request_update_crc = false;
1131 uint32_t initial_crc = 0;
1132
1133 if (offset < crc32s_end_offset_[stream_index]) {
    // If a range for which the crc32 was already computed is rewritten, the
    // computation of the crc32 needs to start from 0 again.
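    // For example, rewriting a range already covered by the running checksum
    // clears crc32s_end_offset_[stream_index], so checksumming can only
    // resume once a write starts again at offset 0.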
1136 crc32s_end_offset_[stream_index] = 0;
1137 }
1138
1139 if (crc32s_end_offset_[stream_index] == offset) {
1140 request_update_crc = true;
1141 initial_crc = (offset != 0) ? crc32s_[stream_index] : crc32(0, Z_NULL, 0);
1142 }
1143
1144 // |entry_stat| needs to be initialized before modifying |data_size_|.
1145 auto entry_stat = std::make_unique<SimpleEntryStat>(
1146 last_used_, last_modified_, data_size_, sparse_data_size_);
1147 if (truncate) {
1148 data_size_[stream_index] = offset + buf_len;
1149 } else {
1150 data_size_[stream_index] = std::max(offset + buf_len,
1151 GetDataSize(stream_index));
1152 }
1153
1154 auto write_result = std::make_unique<SimpleSynchronousEntry::WriteResult>();
1155
1156 // Since we don't know the correct values for |last_used_| and
1157 // |last_modified_| yet, we make this approximation.
1158 last_used_ = last_modified_ = base::Time::Now();
1159
1160 have_written_[stream_index] = true;
1161 // Writing on stream 1 affects the placement of stream 0 in the file, the EOF
1162 // record will have to be rewritten.
1163 if (stream_index == 1)
1164 have_written_[0] = true;
1165
  // Retain a reference to |buf| in |reply| instead of |task|, so that we can
  // reduce cross-thread malloc/free pairs. The cross-thread malloc/free pair
  // increases the apparent memory usage due to the thread-cached free list.
1169 // TODO(morlovich): Remove the doom_state_ argument to WriteData, since with
1170 // renaming rather than delete, creating a new stream 2 of doomed entry will
1171 // just work.
1172 OnceClosure task = base::BindOnce(
1173 &SimpleSynchronousEntry::WriteData, base::Unretained(synchronous_entry_),
1174 SimpleSynchronousEntry::WriteRequest(
1175 stream_index, offset, buf_len, initial_crc, truncate,
1176 doom_state_ != DOOM_NONE, request_update_crc),
1177 base::Unretained(buf), entry_stat.get(), write_result.get());
1178 OnceClosure reply =
1179 base::BindOnce(&SimpleEntryImpl::WriteOperationComplete, this,
1180 stream_index, std::move(callback), std::move(entry_stat),
1181 std::move(write_result), base::RetainedRef(buf));
1182 prioritized_task_runner_->PostTaskAndReply(FROM_HERE, std::move(task),
1183 std::move(reply), entry_priority_);
1184 }
1185
void SimpleEntryImpl::ReadSparseDataInternal(
1187 int64_t sparse_offset,
1188 net::IOBuffer* buf,
1189 int buf_len,
1190 net::CompletionOnceCallback callback) {
1191 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
1192 ScopedOperationRunner operation_runner(this);
1193
1194 if (net_log_.IsCapturing()) {
1195 NetLogSparseOperation(
1196 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_READ_SPARSE_BEGIN,
1197 net::NetLogEventPhase::NONE, sparse_offset, buf_len);
1198 }
1199
1200 if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
1201 if (net_log_.IsCapturing()) {
1202 NetLogReadWriteComplete(
1203 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_READ_SPARSE_END,
1204 net::NetLogEventPhase::NONE, net::ERR_FAILED);
1205 }
1206 if (!callback.is_null()) {
1207 base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
1208 FROM_HERE, base::BindOnce(std::move(callback), net::ERR_FAILED));
1209 }
1210 // |this| may be destroyed after return here.
1211 return;
1212 }
1213
1214 DCHECK_EQ(STATE_READY, state_);
1215 state_ = STATE_IO_PENDING;
1216
1217 auto result = std::make_unique<int>();
1218 auto last_used = std::make_unique<base::Time>();
1219 OnceClosure task = base::BindOnce(
1220 &SimpleSynchronousEntry::ReadSparseData,
1221 base::Unretained(synchronous_entry_),
1222 SimpleSynchronousEntry::SparseRequest(sparse_offset, buf_len),
1223 base::RetainedRef(buf), last_used.get(), result.get());
1224 OnceClosure reply = base::BindOnce(
1225 &SimpleEntryImpl::ReadSparseOperationComplete, this, std::move(callback),
1226 std::move(last_used), std::move(result));
1227 prioritized_task_runner_->PostTaskAndReply(FROM_HERE, std::move(task),
1228 std::move(reply), entry_priority_);
1229 }
1230
void SimpleEntryImpl::WriteSparseDataInternal(
1232 int64_t sparse_offset,
1233 net::IOBuffer* buf,
1234 int buf_len,
1235 net::CompletionOnceCallback callback) {
1236 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
1237 ScopedOperationRunner operation_runner(this);
1238
1239 if (net_log_.IsCapturing()) {
1240 NetLogSparseOperation(
1241 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_SPARSE_BEGIN,
1242 net::NetLogEventPhase::NONE, sparse_offset, buf_len);
1243 }
1244
1245 if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
1246 if (net_log_.IsCapturing()) {
1247 NetLogReadWriteComplete(
1248 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_SPARSE_END,
1249 net::NetLogEventPhase::NONE, net::ERR_FAILED);
1250 }
1251 if (!callback.is_null()) {
1252 base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
1253 FROM_HERE, base::BindOnce(std::move(callback), net::ERR_FAILED));
1254 }
1255 // |this| may be destroyed after return here.
1256 return;
1257 }
1258
1259 DCHECK_EQ(STATE_READY, state_);
1260 state_ = STATE_IO_PENDING;
1261
1262 uint64_t max_sparse_data_size = std::numeric_limits<int64_t>::max();
1263 if (backend_.get()) {
1264 uint64_t max_cache_size = backend_->index()->max_size();
1265 max_sparse_data_size = max_cache_size / kMaxSparseDataSizeDivisor;
1266 }
1267
1268 auto entry_stat = std::make_unique<SimpleEntryStat>(
1269 last_used_, last_modified_, data_size_, sparse_data_size_);
1270
1271 last_used_ = last_modified_ = base::Time::Now();
1272
1273 auto result = std::make_unique<int>();
1274 OnceClosure task = base::BindOnce(
1275 &SimpleSynchronousEntry::WriteSparseData,
1276 base::Unretained(synchronous_entry_),
1277 SimpleSynchronousEntry::SparseRequest(sparse_offset, buf_len),
1278 base::RetainedRef(buf), max_sparse_data_size, entry_stat.get(),
1279 result.get());
1280 OnceClosure reply = base::BindOnce(
1281 &SimpleEntryImpl::WriteSparseOperationComplete, this, std::move(callback),
1282 std::move(entry_stat), std::move(result));
1283 prioritized_task_runner_->PostTaskAndReply(FROM_HERE, std::move(task),
1284 std::move(reply), entry_priority_);
1285 }
1286
void SimpleEntryImpl::GetAvailableRangeInternal(int64_t sparse_offset,
1288 int len,
1289 RangeResultCallback callback) {
1290 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
1291 ScopedOperationRunner operation_runner(this);
1292
1293 if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
1294 if (!callback.is_null()) {
1295 base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
1296 FROM_HERE,
1297 base::BindOnce(std::move(callback), RangeResult(net::ERR_FAILED)));
1298 }
1299 // |this| may be destroyed after return here.
1300 return;
1301 }
1302
1303 DCHECK_EQ(STATE_READY, state_);
1304 state_ = STATE_IO_PENDING;
1305
1306 auto result = std::make_unique<RangeResult>();
1307 OnceClosure task = base::BindOnce(
1308 &SimpleSynchronousEntry::GetAvailableRange,
1309 base::Unretained(synchronous_entry_),
1310 SimpleSynchronousEntry::SparseRequest(sparse_offset, len), result.get());
1311 OnceClosure reply =
1312 base::BindOnce(&SimpleEntryImpl::GetAvailableRangeOperationComplete, this,
1313 std::move(callback), std::move(result));
1314 prioritized_task_runner_->PostTaskAndReply(FROM_HERE, std::move(task),
1315 std::move(reply), entry_priority_);
1316 }
1317
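// Dooming takes one of three paths: with no backend the entry files are
// truncated in place, with a live SimpleSynchronousEntry the doom goes through
// that object so it can rename the files, and otherwise the files are simply
// deleted.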
void SimpleEntryImpl::DoomEntryInternal(net::CompletionOnceCallback callback) {
1319 if (doom_state_ == DOOM_COMPLETED) {
1320 // During the time we were sitting on a queue, some operation failed
1321 // and cleaned our files up, so we don't have to do anything.
1322 DoomOperationComplete(std::move(callback), state_, net::OK);
1323 return;
1324 }
1325
1326 if (!backend_) {
1327 // If there's no backend, we want to truncate the files rather than delete
1328 // or rename them. Either op will update the entry directory's mtime, which
1329 // will likely force a full index rebuild on the next startup; this is
1330 // clearly an undesirable cost. Instead, the lesser evil is to set the entry
1331 // files to length zero, leaving the invalid entry in the index. On the next
1332 // attempt to open the entry, it will fail asynchronously (since the magic
1333 // numbers will not be found), and the files will actually be removed.
    // Since there is no backend, new entries that could conflict with us
    // can't be created either.
1336 prioritized_task_runner_->PostTaskAndReplyWithResult(
1337 FROM_HERE,
1338 base::BindOnce(&SimpleSynchronousEntry::TruncateEntryFiles, path_,
1339 entry_hash_, file_operations_factory_->CreateUnbound()),
1340 base::BindOnce(&SimpleEntryImpl::DoomOperationComplete, this,
1341 std::move(callback),
1342 // Return to STATE_FAILURE after dooming, since no
1343 // operation can succeed on the truncated entry files.
1344 STATE_FAILURE),
1345 entry_priority_);
1346 state_ = STATE_IO_PENDING;
1347 return;
1348 }
1349
1350 if (synchronous_entry_) {
    // If there is a backing object, we have to go through its instance
    // methods, so that it can rename itself and keep track of the alternative
    // name.
1353 prioritized_task_runner_->PostTaskAndReplyWithResult(
1354 FROM_HERE,
1355 base::BindOnce(&SimpleSynchronousEntry::Doom,
1356 base::Unretained(synchronous_entry_)),
1357 base::BindOnce(&SimpleEntryImpl::DoomOperationComplete, this,
1358 std::move(callback), state_),
1359 entry_priority_);
1360 } else {
1361 DCHECK_EQ(STATE_UNINITIALIZED, state_);
1362 // If nothing is open, we can just delete the files. We know they have the
1363 // base names, since if we ever renamed them our doom_state_ would be
1364 // DOOM_COMPLETED, and we would exit at function entry.
1365 prioritized_task_runner_->PostTaskAndReplyWithResult(
1366 FROM_HERE,
1367 base::BindOnce(&SimpleSynchronousEntry::DeleteEntryFiles, path_,
1368 cache_type_, entry_hash_,
1369 file_operations_factory_->CreateUnbound()),
1370 base::BindOnce(&SimpleEntryImpl::DoomOperationComplete, this,
1371 std::move(callback), state_),
1372 entry_priority_);
1373 }
1374 state_ = STATE_IO_PENDING;
1375 }
1376
void SimpleEntryImpl::CreationOperationComplete(
1378 SimpleEntryOperation::EntryResultState result_state,
1379 EntryResultCallback completion_callback,
1380 const base::TimeTicks& start_time,
1381 const base::Time index_last_used_time,
1382 std::unique_ptr<SimpleEntryCreationResults> in_results,
1383 net::NetLogEventType end_event_type) {
1384 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
1385 DCHECK_EQ(state_, STATE_IO_PENDING);
1386 DCHECK(in_results);
1387 ScopedOperationRunner operation_runner(this);
1388 if (in_results->result != net::OK) {
1389 if (in_results->result != net::ERR_FILE_EXISTS) {
1390 // Here we keep the index up-to-date, but don't remove ourselves from active
1391 // entries since we may have queued operations, and it would be
1392 // problematic to run further Creates, Opens, or Dooms if we are not
1393 // the active entry. We can only do this because OpenEntryInternal
1394 // and CreateEntryInternal have to start from STATE_UNINITIALIZED, so
1395 // nothing else is going on which may be confused.
1396 if (backend_)
1397 backend_->index()->Remove(entry_hash_);
1398 }
1399
1400 net_log_.AddEventWithNetErrorCode(end_event_type, net::ERR_FAILED);
1401 PostClientCallback(std::move(completion_callback),
1402 EntryResult::MakeError(net::ERR_FAILED));
1403 ResetEntry();
1404 return;
1405 }
1406
1407 // If this is a successful creation (rather than open), mark all streams to be
1408 // saved on close.
1409 if (in_results->created) {
1410 for (bool& have_written : have_written_)
1411 have_written = true;
1412 }
1413
1414 // Make sure to keep the index up-to-date. We likely already did this when
1415 // CreateEntry was called, but it's possible we were sitting on a queue
1416 // after an op that removed us.
1417 if (backend_ && doom_state_ == DOOM_NONE)
1418 backend_->index()->Insert(entry_hash_);
1419
1420 synchronous_entry_ = in_results->sync_entry;
1421
1422 // Copy over any pre-fetched data and its CRCs.
1423 for (int stream = 0; stream < 2; ++stream) {
1424 const SimpleStreamPrefetchData& prefetched =
1425 in_results->stream_prefetch_data[stream];
1426 if (prefetched.data.get()) {
1427 if (stream == 0)
1428 stream_0_data_ = prefetched.data;
1429 else
1430 stream_1_prefetch_data_ = prefetched.data;
1431
1432 // The crc was read in SimpleSynchronousEntry.
1433 crc32s_[stream] = prefetched.stream_crc32;
1434 crc32s_end_offset_[stream] = in_results->entry_stat.data_size(stream);
1435 }
1436 }
1437
1438 // If this entry was opened by hash, key_ could still be empty. If so, update
1439 // it with the key read from the synchronous entry.
1440 if (!key_.has_value()) {
1441 SetKey(*synchronous_entry_->key());
1442 } else {
1443 // This should only be triggered when creating an entry. In the open case
1444 // the key is either copied from the arguments to open, or checked
1445 // in the synchronous entry.
1446 DCHECK_EQ(*key_, *synchronous_entry_->key());
1447 }
1448
1449 // Prefer the index's last-used time to the on-disk one, which may be pretty inaccurate.
1450 if (!index_last_used_time.is_null())
1451 in_results->entry_stat.set_last_used(index_last_used_time);
1452
1453 UpdateDataFromEntryStat(in_results->entry_stat);
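// For app cache, record the computed trailer prefetch size in the index so
// the next open of this entry can prefetch the right amount up front.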
1454 if (cache_type_ == net::APP_CACHE && backend_.get() && backend_->index()) {
1455 backend_->index()->SetTrailerPrefetchSize(
1456 entry_hash_, in_results->computed_trailer_prefetch_size);
1457 }
1458 SIMPLE_CACHE_UMA(TIMES,
1459 "EntryCreationTime", cache_type_,
1460 (base::TimeTicks::Now() - start_time));
1461
1462 net_log_.AddEvent(end_event_type);
1463
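// Copy |created| out now, since |in_results| is released just below.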
1464 const bool created = in_results->created;
1465
1466 // We need to release `in_results` before going out of scope, because
1467 // destroying `operation_runner` might run a close operation, which would
1468 // ultimately release `in_results->sync_entry` and leave a dangling pointer
1469 // here.
1470 in_results = nullptr;
1471
1472 state_ = STATE_READY;
1473 if (result_state == SimpleEntryOperation::ENTRY_NEEDS_CALLBACK) {
1474 ReturnEntryToCallerAsync(!created, std::move(completion_callback));
1475 }
1476 }
1477
1478 void SimpleEntryImpl::UpdateStateAfterOperationComplete(
1479 const SimpleEntryStat& entry_stat,
1480 int result) {
1481 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
1482 DCHECK(synchronous_entry_);
1483 DCHECK_EQ(STATE_IO_PENDING, state_);
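// A failed operation leaves the entry in an unusable state; mark it failed
// and record the doom as already completed.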
1484 if (result < 0) {
1485 state_ = STATE_FAILURE;
1486 MarkAsDoomed(DOOM_COMPLETED);
1487 } else {
1488 UpdateDataFromEntryStat(entry_stat);
1489 state_ = STATE_READY;
1490 }
1491 }
1492
1493 void SimpleEntryImpl::EntryOperationComplete(
1494 net::CompletionOnceCallback completion_callback,
1495 const SimpleEntryStat& entry_stat,
1496 int result) {
1497 UpdateStateAfterOperationComplete(entry_stat, result);
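// Post the callback rather than running it directly, so the caller is not
// re-entered synchronously from inside the operation pipeline.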
1498 if (!completion_callback.is_null()) {
1499 base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
1500 FROM_HERE, base::BindOnce(std::move(completion_callback), result));
1501 }
1502 RunNextOperationIfNeeded();
1503 }
1504
1505 void SimpleEntryImpl::ReadOperationComplete(
1506 int stream_index,
1507 int offset,
1508 net::CompletionOnceCallback completion_callback,
1509 std::unique_ptr<SimpleEntryStat> entry_stat,
1510 std::unique_ptr<SimpleSynchronousEntry::ReadResult> read_result) {
1511 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
1512 DCHECK(synchronous_entry_);
1513 DCHECK_EQ(STATE_IO_PENDING, state_);
1514 DCHECK(read_result);
1515 int result = read_result->result;
1516
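// Bookkeeping for the incremental checksum: a successful read that continued
// the running CRC extends the checksummed prefix of the stream.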
1517 if (read_result->crc_updated) {
1518 if (result > 0) {
1519 DCHECK_EQ(crc32s_end_offset_[stream_index], offset);
1520 crc32s_end_offset_[stream_index] += result;
1521 crc32s_[stream_index] = read_result->updated_crc32;
1522 }
1523 }
1524
1525 if (result < 0) {
1526 crc32s_end_offset_[stream_index] = 0;
1527 }
1528
1529 if (net_log_.IsCapturing()) {
1530 NetLogReadWriteComplete(net_log_,
1531 net::NetLogEventType::SIMPLE_CACHE_ENTRY_READ_END,
1532 net::NetLogEventPhase::NONE, result);
1533 }
1534
1535 EntryOperationComplete(std::move(completion_callback), *entry_stat, result);
1536 }
1537
1538 void SimpleEntryImpl::WriteOperationComplete(
1539 int stream_index,
1540 net::CompletionOnceCallback completion_callback,
1541 std::unique_ptr<SimpleEntryStat> entry_stat,
1542 std::unique_ptr<SimpleSynchronousEntry::WriteResult> write_result,
1543 net::IOBuffer* buf) {
1544 int result = write_result->result;
1545 if (net_log_.IsCapturing()) {
1546 NetLogReadWriteComplete(net_log_,
1547 net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_END,
1548 net::NetLogEventPhase::NONE, result);
1549 }
1550
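// Update the incremental checksum state: a failed write invalidates it for
// this stream, while a successful checksummed write extends it.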
1551 if (result < 0)
1552 crc32s_end_offset_[stream_index] = 0;
1553
1554 if (result > 0 && write_result->crc_updated) {
1555 crc32s_end_offset_[stream_index] += result;
1556 crc32s_[stream_index] = write_result->updated_crc32;
1557 }
1558
1559 EntryOperationComplete(std::move(completion_callback), *entry_stat, result);
1560 }
1561
1562 void SimpleEntryImpl::ReadSparseOperationComplete(
1563 net::CompletionOnceCallback completion_callback,
1564 std::unique_ptr<base::Time> last_used,
1565 std::unique_ptr<int> result) {
1566 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
1567 DCHECK(synchronous_entry_);
1568 DCHECK(result);
1569
1570 if (net_log_.IsCapturing()) {
1571 NetLogReadWriteComplete(
1572 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_READ_SPARSE_END,
1573 net::NetLogEventPhase::NONE, *result);
1574 }
1575
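// A sparse read only refreshes the last-used time; the sizes are unchanged.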
1576 SimpleEntryStat entry_stat(*last_used, last_modified_, data_size_,
1577 sparse_data_size_);
1578 EntryOperationComplete(std::move(completion_callback), entry_stat, *result);
1579 }
1580
1581 void SimpleEntryImpl::WriteSparseOperationComplete(
1582 net::CompletionOnceCallback completion_callback,
1583 std::unique_ptr<SimpleEntryStat> entry_stat,
1584 std::unique_ptr<int> result) {
1585 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
1586 DCHECK(synchronous_entry_);
1587 DCHECK(result);
1588
1589 if (net_log_.IsCapturing()) {
1590 NetLogReadWriteComplete(
1591 net_log_, net::NetLogEventType::SIMPLE_CACHE_ENTRY_WRITE_SPARSE_END,
1592 net::NetLogEventPhase::NONE, *result);
1593 }
1594
1595 EntryOperationComplete(std::move(completion_callback), *entry_stat, *result);
1596 }
1597
1598 void SimpleEntryImpl::GetAvailableRangeOperationComplete(
1599 RangeResultCallback completion_callback,
1600 std::unique_ptr<RangeResult> result) {
1601 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
1602 DCHECK(synchronous_entry_);
1603 DCHECK(result);
1604
1605 SimpleEntryStat entry_stat(last_used_, last_modified_, data_size_,
1606 sparse_data_size_);
1607 UpdateStateAfterOperationComplete(entry_stat, result->net_error);
1608 if (!completion_callback.is_null()) {
1609 base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
1610 FROM_HERE, base::BindOnce(std::move(completion_callback), *result));
1611 }
1612 RunNextOperationIfNeeded();
1613 }
1614
1615 void SimpleEntryImpl::DoomOperationComplete(
1616 net::CompletionOnceCallback callback,
1617 State state_to_restore,
1618 int result) {
1619 state_ = state_to_restore;
1620 doom_state_ = DOOM_COMPLETED;
1621 net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_DOOM_END);
1622 PostClientCallback(std::move(callback), result);
1623 RunNextOperationIfNeeded();
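// Let the backend run any operations that were queued up waiting for this
// doom to finish.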
1624 if (post_doom_waiting_) {
1625 post_doom_waiting_->OnOperationComplete(entry_hash_);
1626 post_doom_waiting_ = nullptr;
1627 }
1628 }
1629
1630 void SimpleEntryImpl::CloseOperationComplete(
1631 std::unique_ptr<SimpleEntryCloseResults> in_results) {
1632 DCHECK(!synchronous_entry_);
1633 DCHECK_EQ(0, open_count_);
1634 DCHECK(STATE_IO_PENDING == state_ || STATE_FAILURE == state_ ||
1635 STATE_UNINITIALIZED == state_);
1636 net_log_.AddEvent(net::NetLogEventType::SIMPLE_CACHE_ENTRY_CLOSE_END);
1637 if (cache_type_ == net::APP_CACHE &&
1638 in_results->estimated_trailer_prefetch_size > 0 && backend_.get() &&
1639 backend_->index()) {
1640 backend_->index()->SetTrailerPrefetchSize(
1641 entry_hash_, in_results->estimated_trailer_prefetch_size);
1642 }
1643 ResetEntry();
1644 RunNextOperationIfNeeded();
1645 }
1646
1647 void SimpleEntryImpl::UpdateDataFromEntryStat(
1648 const SimpleEntryStat& entry_stat) {
1649 DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
1650 DCHECK(synchronous_entry_);
1651 // We only want to be called in STATE_IO_PENDING, so that if the call to
1652 // SimpleIndex::UpdateEntrySize() ends up triggering eviction and queuing
1653 // Dooms, it doesn't also run any queued operations.
1654 CHECK_EQ(state_, STATE_IO_PENDING);
1655
1656 last_used_ = entry_stat.last_used();
1657 last_modified_ = entry_stat.last_modified();
1658 for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
1659 data_size_[i] = entry_stat.data_size(i);
1660 }
1661 sparse_data_size_ = entry_stat.sparse_data_size();
1662
1663 SimpleBackendImpl* backend_ptr = backend_.get();
1664 if (doom_state_ == DOOM_NONE && backend_ptr) {
1665 backend_ptr->index()->UpdateEntrySize(
1666 entry_hash_, base::checked_cast<uint32_t>(GetDiskUsage()));
1667 }
1668 }
1669
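// Estimates this entry's on-disk footprint: the per-stream file sizes implied
// by the key length and data sizes, plus any sparse data.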
1670 int64_t SimpleEntryImpl::GetDiskUsage() const {
1671 int64_t file_size = 0;
1672 for (int data_size : data_size_) {
1673 file_size += simple_util::GetFileSizeFromDataSize(key_->size(), data_size);
1674 }
1675 file_size += sparse_data_size_;
1676 return file_size;
1677 }
1678
1679 void SimpleEntryImpl::ReadFromBuffer(net::GrowableIOBuffer* in_buf,
1680 int offset,
1681 int buf_len,
1682 net::IOBuffer* out_buf) {
1683 DCHECK_GE(buf_len, 0);
1684
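// Serve the read straight from the in-memory buffer, then refresh the
// last-used time.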
1685 std::copy(in_buf->data() + offset, in_buf->data() + offset + buf_len,
1686 out_buf->data());
1687 UpdateDataFromEntryStat(SimpleEntryStat(base::Time::Now(), last_modified_,
1688 data_size_, sparse_data_size_));
1689 }
1690
1691 void SimpleEntryImpl::SetStream0Data(net::IOBuffer* buf,
1692 int offset,
1693 int buf_len,
1694 bool truncate) {
1695 // Currently, stream 0 is only used for HTTP headers, and always writes them
1696 // with a single, truncating write. Detect these writes and record the size
1697 // changes of the headers. Also, support writes to stream 0 that have
1698 // different access patterns, as required by the API contract.
1699 // All other clients of the Simple Cache are encouraged to use stream 1.
1700 have_written_[0] = true;
1701 int data_size = GetDataSize(0);
1702 if (offset == 0 && truncate) {
1703 stream_0_data_->SetCapacity(buf_len);
1704 std::copy(buf->data(), buf->data() + buf_len, stream_0_data_->data());
1705 data_size_[0] = buf_len;
1706 } else {
1707 const int buffer_size =
1708 truncate ? offset + buf_len : std::max(offset + buf_len, data_size);
1709 stream_0_data_->SetCapacity(buffer_size);
1710 // If |stream_0_data_| was extended, the gap between the old end of the data
1711 // and |offset| needs to be zero-filled.
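// For example, with data_size == 10 and offset == 15, bytes [10, 15) are
// zeroed before |buf| is copied in at offset 15.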
1712 const int fill_size = offset <= data_size ? 0 : offset - data_size;
1713 if (fill_size > 0) {
1714 std::fill(stream_0_data_->data() + data_size,
1715 stream_0_data_->data() + data_size + fill_size, 0);
1716 }
1717 if (buf) {
1718 std::copy(buf->data(), buf->data() + buf_len,
1719 stream_0_data_->data() + offset);
1720 }
1721 data_size_[0] = buffer_size;
1722 }
1723 RecordHeaderSize(cache_type_, data_size_[0]);
1724 base::Time modification_time = base::Time::Now();
1725
1726 // Reset checksum; SimpleSynchronousEntry::Close will compute it for us,
1727 // and do it off the source creation sequence.
1728 crc32s_end_offset_[0] = 0;
1729
1730 UpdateDataFromEntryStat(
1731 SimpleEntryStat(modification_time, modification_time, data_size_,
1732 sparse_data_size_));
1733 }
1734
1735 } // namespace disk_cache
1736