// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Defines the public interface of the disk cache. For more details see
// http://dev.chromium.org/developers/design-documents/network-stack/disk-cache

#ifndef NET_DISK_CACHE_DISK_CACHE_H_
#define NET_DISK_CACHE_DISK_CACHE_H_

#include <stdint.h>

#include <memory>
#include <optional>
#include <string>
#include <vector>

#include "base/files/file.h"
#include "base/memory/ref_counted.h"
#include "base/strings/string_split.h"
#include "base/task/sequenced_task_runner.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "net/base/cache_type.h"
#include "net/base/completion_once_callback.h"
#include "net/base/net_errors.h"
#include "net/base/net_export.h"
#include "net/base/request_priority.h"

namespace base {
class FilePath;

namespace android {
class ApplicationStatusListener;
}  // namespace android

}  // namespace base

namespace net {
class IOBuffer;
class NetLog;
}

namespace disk_cache {

class Entry;
class Backend;
class EntryResult;
class BackendFileOperationsFactory;
struct RangeResult;
using EntryResultCallback = base::OnceCallback<void(EntryResult)>;
using RangeResultCallback = base::OnceCallback<void(const RangeResult&)>;

// How to handle resetting the back-end cache from the previous session.
// See CreateCacheBackend() for its usage.
enum class ResetHandling { kReset, kResetOnError, kNeverReset };

struct NET_EXPORT BackendResult {
  BackendResult();
  ~BackendResult();
  BackendResult(BackendResult&&);
  BackendResult& operator=(BackendResult&&);

  BackendResult(const BackendResult&) = delete;
  BackendResult& operator=(const BackendResult&) = delete;

  // `error_in` should not be net::OK for MakeError().
  static BackendResult MakeError(net::Error error_in);
  // `backend_in` should not be nullptr for Make().
  static BackendResult Make(std::unique_ptr<Backend> backend_in);

  net::Error net_error = net::ERR_FAILED;
  std::unique_ptr<Backend> backend;
};

using BackendResultCallback = base::OnceCallback<void(BackendResult)>;

// Returns an instance of a Backend of the given `type`. `file_operations`
// (nullable) is used to broker file operations in sandboxed environments.
// Currently `file_operations` is only used for the simple backend.
// `path` points to a folder where the cached data will be stored (if
// appropriate). This cache instance must be the only object that will be
// reading or writing files to that folder (if another one exists, and `type` is
// not net::DISK_CACHE, this operation will not complete until the previous
// duplicate gets destroyed and finishes all I/O). The returned object should be
86 // deleted when not needed anymore.
87 //
88 // If `reset_handling` is set to kResetOnError and there is a problem with the
89 // cache initialization, the files will be deleted and a new set will be
90 // created. If it's set to kReset, this will happen even if there isn't a
91 // problem with cache initialization. Finally, if it's set to kNeverReset, the
92 // cache creation will fail if there is a problem with cache initialization.
93 //
94 // `max_bytes` is the maximum size the cache can grow to. If zero is passed in
95 // as `max_bytes`, the cache will determine the value to use.
96 //
// `net_error` in the return value of the function is a net error code. If it
// is ERR_IO_PENDING, the `callback` will be invoked when a backend is available
// or a fatal error condition is reached. `backend` in the return value, or in
// the parameter to the callback, can be nullptr if a fatal error is found.
NET_EXPORT BackendResult
CreateCacheBackend(net::CacheType type,
                   net::BackendType backend_type,
                   scoped_refptr<BackendFileOperationsFactory> file_operations,
                   const base::FilePath& path,
                   int64_t max_bytes,
                   ResetHandling reset_handling,
                   net::NetLog* net_log,
                   BackendResultCallback callback);
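
// A minimal usage sketch (illustrative only, not part of this interface):
// requesting a backend and handling both the synchronous and asynchronous
// result paths. `OnBackendCreated` and `cache_path` are hypothetical
// caller-side names.
//
//   void OnBackendCreated(disk_cache::BackendResult result) {
//     if (result.net_error == net::OK) {
//       std::unique_ptr<disk_cache::Backend> backend =
//           std::move(result.backend);
//       // ... start using |backend| ...
//     }
//   }
//
//   disk_cache::BackendResult result = disk_cache::CreateCacheBackend(
//       net::DISK_CACHE, net::CACHE_BACKEND_DEFAULT,
//       /*file_operations=*/nullptr, cache_path, /*max_bytes=*/0,
//       disk_cache::ResetHandling::kResetOnError, /*net_log=*/nullptr,
//       base::BindOnce(&OnBackendCreated));
//   if (result.net_error != net::ERR_IO_PENDING)
//     OnBackendCreated(std::move(result));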

// Note: this is permitted to return nullptr when things are in the process of
// shutting down.
using ApplicationStatusListenerGetter =
    base::RepeatingCallback<base::android::ApplicationStatusListener*()>;

#if BUILDFLAG(IS_ANDROID)
// Similar to the function above, but takes an |app_status_listener_getter|
// which is used to listen for when the Android application status changes, so
// we can flush the cache to disk when the app goes to the background.
NET_EXPORT BackendResult
CreateCacheBackend(net::CacheType type,
                   net::BackendType backend_type,
                   scoped_refptr<BackendFileOperationsFactory> file_operations,
                   const base::FilePath& path,
                   int64_t max_bytes,
                   ResetHandling reset_handling,
                   net::NetLog* net_log,
                   BackendResultCallback callback,
                   ApplicationStatusListenerGetter app_status_listener_getter);
#endif

// Variant of the above that calls |post_cleanup_callback| once all the I/O
// that was in flight has completed post-destruction. |post_cleanup_callback|
// will get invoked even if the creation fails. The invocation will always be
// via the event loop, and never direct.
//
// This is currently unsupported for |type| == net::DISK_CACHE.
//
// Note that this will not wait for |post_cleanup_callback| of a previous
// instance for |path| to run.
NET_EXPORT BackendResult
CreateCacheBackend(net::CacheType type,
                   net::BackendType backend_type,
                   scoped_refptr<BackendFileOperationsFactory> file_operations,
                   const base::FilePath& path,
                   int64_t max_bytes,
                   ResetHandling reset_handling,
                   net::NetLog* net_log,
                   base::OnceClosure post_cleanup_callback,
                   BackendResultCallback callback);

// This will flush any internal threads used by backends created w/o an
// externally injected thread specified, so tests can be sure that all I/O
// has finished before inspecting the world.
NET_EXPORT void FlushCacheThreadForTesting();

// Async version of FlushCacheThreadForTesting. `callback` will be called on
// the calling sequence.
NET_EXPORT void FlushCacheThreadAsynchronouslyForTesting(
    base::OnceClosure callback);

// The root interface for a disk cache instance.
class NET_EXPORT Backend {
 public:
  using CompletionOnceCallback = net::CompletionOnceCallback;
  using Int64CompletionOnceCallback = net::Int64CompletionOnceCallback;
  using EntryResultCallback = disk_cache::EntryResultCallback;
  using EntryResult = disk_cache::EntryResult;

  class Iterator {
   public:
    virtual ~Iterator() = default;

    // OpenNextEntry returns a result with net_error() |net::OK| and provided
    // entry if there is an entry to enumerate which it can return immediately.
    // It returns a result with net_error() |net::ERR_FAILED| at the end of
    // enumeration. If the function returns a result with net_error()
    // |net::ERR_IO_PENDING|, then the final result will be passed to the
    // provided |callback|, otherwise |callback| will not be called. If any
    // entry in the cache is modified during iteration, the result of this
    // function is thereafter undefined.
    //
    // Calling OpenNextEntry after the backend which created it is destroyed
    // may fail with |net::ERR_FAILED|; however it should not crash.
    //
    // Some cache backends make stronger guarantees about mutation during
    // iteration, see top comment in simple_backend_impl.h for details.
    virtual EntryResult OpenNextEntry(EntryResultCallback callback) = 0;
  };
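
  // A sketch of enumerating every entry with an Iterator (illustrative only;
  // `OnNextEntry` is a hypothetical caller-side callback). Both the
  // synchronous and asynchronous completions must be handled.
  //
  //   void OnNextEntry(Backend::Iterator* iter, EntryResult result) {
  //     while (result.net_error() == net::OK) {
  //       ScopedEntryPtr entry(result.ReleaseEntry());
  //       // ... inspect |entry| ...
  //       result = iter->OpenNextEntry(base::BindOnce(&OnNextEntry, iter));
  //     }
  //     // net::ERR_FAILED ends the enumeration; net::ERR_IO_PENDING means
  //     // this callback will run again with the next result.
  //   }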

  // If the backend is destroyed when there are operations in progress (any
  // callback that has not been invoked yet), this method cancels said
  // operations so the callbacks are not invoked, possibly leaving the work
  // half way (for instance, dooming just a few entries). Note that pending IO
  // for a given Entry (as opposed to the Backend) will still generate a
  // callback.
  // Warning: there is some inconsistency in details between different backends
  // on what will succeed and what will fail.  In particular the blockfile
  // backend will leak entries closed after backend deletion, while others
  // handle it properly.
  explicit Backend(net::CacheType cache_type) : cache_type_(cache_type) {}
  virtual ~Backend() = default;

  // Returns the type of this cache.
  net::CacheType GetCacheType() const { return cache_type_; }

  // Returns the number of entries in the cache.
  virtual int32_t GetEntryCount() const = 0;

  // Atomically attempts to open an existing entry based on |key| or, if none
  // already exists, to create a new entry. Returns an EntryResult object,
  // which contains 1) a network error code; 2) if the error code is OK,
  // an owning pointer to either a preexisting or a newly created entry;
  // 3) a bool indicating whether the entry was opened or not. When the entry
  // pointer is no longer needed, its Close() method should be called. If this
  // method's return value has net_error() == ERR_IO_PENDING, the
  // |callback| will be invoked when the entry is available. The |priority| of
  // the entry determines its priority in the background worker pools.
  //
  // This method should be the preferred way to obtain an entry over using
  // OpenEntry() or CreateEntry() separately in order to simplify consumer
  // logic.
  virtual EntryResult OpenOrCreateEntry(const std::string& key,
                                        net::RequestPriority priority,
                                        EntryResultCallback callback) = 0;
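
  // A usage sketch (illustrative only; `OnEntry` is a hypothetical caller
  // callback). opened() distinguishes reuse of an existing entry from
  // creation of a new one.
  //
  //   void OnEntry(EntryResult result) {
  //     if (result.net_error() != net::OK)
  //       return;
  //     bool was_opened = result.opened();
  //     ScopedEntryPtr entry(result.ReleaseEntry());
  //     // ... read |entry| if |was_opened|, otherwise populate it ...
  //   }
  //
  //   EntryResult result = backend->OpenOrCreateEntry(
  //       key, net::DEFAULT_PRIORITY, base::BindOnce(&OnEntry));
  //   if (result.net_error() != net::ERR_IO_PENDING)
  //     OnEntry(std::move(result));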

  // Opens an existing entry, returning a status code and, if successful, an
  // entry pointer packaged up into an EntryResult. If the return value's
  // net_error() is ERR_IO_PENDING, the |callback| will be invoked when the
  // entry is available. The |priority| of the entry determines its priority in
  // the background worker pools.
  virtual EntryResult OpenEntry(const std::string& key,
                                net::RequestPriority priority,
                                EntryResultCallback callback) = 0;

  // Creates a new entry, returning a status code and, if successful, an
  // entry pointer packaged up into an EntryResult. If the return value's
  // net_error() is ERR_IO_PENDING, the |callback| will be invoked when the
  // entry is available. The |priority| of the entry determines its priority in
  // the background worker pools.
  virtual EntryResult CreateEntry(const std::string& key,
                                  net::RequestPriority priority,
                                  EntryResultCallback callback) = 0;

  // Marks the entry, specified by the given key, for deletion. The return value
  // is a net error code. If this method returns ERR_IO_PENDING, the |callback|
  // will be invoked after the entry is doomed.
  virtual net::Error DoomEntry(const std::string& key,
                               net::RequestPriority priority,
                               CompletionOnceCallback callback) = 0;

  // Marks all entries for deletion. The return value is a net error code. If
  // this method returns ERR_IO_PENDING, the |callback| will be invoked when the
  // operation completes.
  virtual net::Error DoomAllEntries(CompletionOnceCallback callback) = 0;

  // Marks a range of entries for deletion. This supports unbounded deletes in
  // either direction by using null Time values for either argument. The return
  // value is a net error code. If this method returns ERR_IO_PENDING, the
  // |callback| will be invoked when the operation completes.
  // Entries with |initial_time| <= access time < |end_time| are deleted.
  virtual net::Error DoomEntriesBetween(base::Time initial_time,
                                        base::Time end_time,
                                        CompletionOnceCallback callback) = 0;
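
  // For example (a sketch; `OnDoomed` is a hypothetical completion callback),
  // deleting everything accessed within the last hour:
  //
  //   net::Error rv = backend->DoomEntriesBetween(
  //       base::Time::Now() - base::Hours(1), base::Time::Max(),
  //       base::BindOnce(&OnDoomed));
  //   if (rv != net::ERR_IO_PENDING)
  //     OnDoomed(rv);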

  // Marks all entries accessed since |initial_time| for deletion. The return
  // value is a net error code. If this method returns ERR_IO_PENDING, the
  // |callback| will be invoked when the operation completes.
  // Entries with |initial_time| <= access time are deleted.
  virtual net::Error DoomEntriesSince(base::Time initial_time,
                                      CompletionOnceCallback callback) = 0;

  // Calculate the total size of the cache. The return value is the size in
  // bytes or a net error code. If this method returns ERR_IO_PENDING,
  // the |callback| will be invoked when the operation completes.
  virtual int64_t CalculateSizeOfAllEntries(
      Int64CompletionOnceCallback callback) = 0;

  // Calculate the size of all cache entries accessed between |initial_time| and
  // |end_time|.
  // The return value is the size in bytes or a net error code. The default
  // implementation returns ERR_NOT_IMPLEMENTED and should only be overridden
  // if there is an efficient way for the backend to determine the size for a
  // subset of the cache without reading the whole cache from disk.
  // If this method returns ERR_IO_PENDING, the |callback| will be invoked when
  // the operation completes.
  virtual int64_t CalculateSizeOfEntriesBetween(
      base::Time initial_time,
      base::Time end_time,
      Int64CompletionOnceCallback callback);

  // Returns an iterator which will enumerate all entries of the cache in an
  // undefined order.
  virtual std::unique_ptr<Iterator> CreateIterator() = 0;

  // Return a list of cache statistics.
  virtual void GetStats(base::StringPairs* stats) = 0;

  // Called whenever an external cache in the system reuses the resource
  // referred to by |key|.
  virtual void OnExternalCacheHit(const std::string& key) = 0;

  // Backends can optionally permit one to store, probabilistically, up to a
  // byte associated with a key of an existing entry in memory.

  // GetEntryInMemoryData has the following behavior:
  // - If the data is not available at this time for any reason, returns 0.
  // - Otherwise, returns a value that was with very high probability
  //   given to SetEntryInMemoryData(|key|) (and with a very low probability
  //   to a different key that collides in the in-memory index).
  //
  // Due to the probability of collisions, including those that can be induced
  // by hostile 3rd parties, this interface should not be used to make decisions
  // that affect correctness (especially security).
  virtual uint8_t GetEntryInMemoryData(const std::string& key);
  virtual void SetEntryInMemoryData(const std::string& key, uint8_t data);
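
  // A sketch of how a consumer might use this hint (illustrative only;
  // `kHintPrefetched` is a hypothetical flag value):
  //
  //   constexpr uint8_t kHintPrefetched = 1 << 0;
  //   backend->SetEntryInMemoryData(key, kHintPrefetched);
  //   ...
  //   if (backend->GetEntryInMemoryData(key) & kHintPrefetched) {
  //     // Treat this purely as an optimization hint; collisions are
  //     // possible, so never base correctness or security decisions on it.
  //   }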

  // Returns the maximum length an individual stream can have.
  virtual int64_t MaxFileSize() const = 0;

 private:
  const net::CacheType cache_type_;
};

// This interface represents an entry in the disk cache.
class NET_EXPORT Entry {
 public:
  using CompletionOnceCallback = net::CompletionOnceCallback;
  using IOBuffer = net::IOBuffer;
  using RangeResultCallback = disk_cache::RangeResultCallback;
  using RangeResult = disk_cache::RangeResult;

  // Marks this cache entry for deletion.
  virtual void Doom() = 0;

  // Releases this entry. Calling this method does not cancel pending IO
  // operations on this entry. Even after the last reference to this object has
  // been released, pending completion callbacks may be invoked.
  virtual void Close() = 0;

  // Returns the key associated with this cache entry.
  virtual std::string GetKey() const = 0;

  // Returns the time when this cache entry was last used.
  virtual base::Time GetLastUsed() const = 0;

  // Returns the time when this cache entry was last modified.
  virtual base::Time GetLastModified() const = 0;

  // Returns the size of the cache data with the given index.
  virtual int32_t GetDataSize(int index) const = 0;

  // Copies cached data into the given buffer of length |buf_len|. Returns the
  // number of bytes read or a network error code. If this function returns
  // ERR_IO_PENDING, the completion callback will be called on the current
  // thread when the operation completes, and a reference to |buf| will be
  // retained until the callback is called. Note that as long as the function
  // does not complete immediately, the callback will always be invoked, even
  // after Close has been called; in other words, the caller may close this
  // entry without having to wait for all the callbacks, and still rely on the
  // cleanup performed from the callback code.
  virtual int ReadData(int index,
                       int offset,
                       IOBuffer* buf,
                       int buf_len,
                       CompletionOnceCallback callback) = 0;

  // Copies data from the given buffer of length |buf_len| into the cache.
  // Returns the number of bytes written or a network error code. If this
  // function returns ERR_IO_PENDING, the completion callback will be called
  // on the current thread when the operation completes, and a reference to
  // |buf| will be retained until the callback is called. Note that as long as
  // the function does not complete immediately, the callback will always be
  // invoked, even after Close has been called; in other words, the caller may
  // close this entry without having to wait for all the callbacks, and still
  // rely on the cleanup performed from the callback code.
  // If truncate is true, this call will truncate the stored data at the end of
  // what we are writing here.
  virtual int WriteData(int index,
                        int offset,
                        IOBuffer* buf,
                        int buf_len,
                        CompletionOnceCallback callback,
                        bool truncate) = 0;
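
  // A sketch of writing and then reading back stream 1 of an entry
  // (illustrative only; `OnIOComplete` is a hypothetical completion
  // callback):
  //
  //   auto buf = base::MakeRefCounted<net::StringIOBuffer>("hello");
  //   int rv = entry->WriteData(/*index=*/1, /*offset=*/0, buf.get(),
  //                             buf->size(), base::BindOnce(&OnIOComplete),
  //                             /*truncate=*/true);
  //   // |rv| is the number of bytes written, a net error code, or
  //   // ERR_IO_PENDING, in which case OnIOComplete receives the final count.
  //   // ReadData() follows the same pattern with a destination buffer.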

  // Sparse entries support:
  //
  // A Backend implementation can support sparse entries, so the cache keeps
  // track of which parts of the entry have been written before. The backend
  // will never return data that was not written previously, so reading from
  // such a region will return 0 bytes read (or actually the number of bytes
  // read before reaching that region).
  //
  // There are only two streams for sparse entries: a regular control stream
  // (index 0) that must be accessed through the regular API (ReadData and
  // WriteData), and one sparse stream that must be accessed through the sparse-
  // aware API that follows. Calling a non-sparse aware method with an index
  // argument other than 0 is a mistake that results in implementation specific
  // behavior. Using a sparse-aware method with an entry that was not stored
  // using the same API, or with a backend that doesn't support sparse entries
  // will return ERR_CACHE_OPERATION_NOT_SUPPORTED.
  //
  // The storage granularity of the implementation should be at least 1 KB. In
  // other words, storing less than 1 KB may result in an implementation
  // dropping the data completely, and writing at offsets not aligned with 1 KB,
  // or with lengths not a multiple of 1 KB may result in the first or last part
  // of the data being discarded. However, two consecutive writes should not
  // result in a hole in between the two parts as long as they are sequential
  // (the second one starts where the first one ended), and there is no other
  // write between them.
  //
  // The Backend implementation is free to evict any range from the cache at any
  // moment, so in practice, the previously stated granularity of 1 KB is not
  // as bad as it sounds.
  //
  // The sparse methods don't support multiple simultaneous IO operations to the
  // same physical entry, so in practice a single object should be instantiated
  // for a given key at any given time. Once an operation has been issued, the
  // caller should wait until it completes before starting another one. This
  // requirement includes the case when an entry is closed while some operation
  // is in progress and another object is instantiated; any IO operation will
  // fail while the previous operation is still in-flight. In order to deal with
  // this requirement, the caller could either wait until the operation
  // completes before closing the entry, or call CancelSparseIO() before closing
  // the entry, and call ReadyForSparseIO() on the new entry and wait for the
  // callback before issuing new operations.

  // Behaves like ReadData() except that this method is used to access sparse
  // entries.
  virtual int ReadSparseData(int64_t offset,
                             IOBuffer* buf,
                             int buf_len,
                             CompletionOnceCallback callback) = 0;

  // Behaves like WriteData() except that this method is used to access sparse
  // entries. |truncate| is not part of this interface because a sparse entry
  // is not expected to be reused with new data. To delete the old data and
  // start again, or to reduce the total size of the stream data (which implies
  // that the content has changed), the whole entry should be doomed and
  // re-created.
  virtual int WriteSparseData(int64_t offset,
                              IOBuffer* buf,
                              int buf_len,
                              CompletionOnceCallback callback) = 0;

  // Returns information about the currently stored portion of a sparse entry.
  // |offset| and |len| describe a particular range that should be scanned to
  // find out if it is stored or not. Please see the documentation of
  // RangeResult for more details.
  virtual RangeResult GetAvailableRange(int64_t offset,
                                        int len,
                                        RangeResultCallback callback) = 0;
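
  // A sketch of sparse usage (illustrative only; `buf` is an IOBuffer as in
  // the WriteData() example above, `OnIOComplete` and `OnRange` are
  // hypothetical callbacks, and error handling is elided):
  //
  //   entry->WriteSparseData(/*offset=*/0, buf.get(), buf->size(),
  //                          base::BindOnce(&OnIOComplete));
  //   RangeResult range = entry->GetAvailableRange(
  //       /*offset=*/0, /*len=*/4096, base::BindOnce(&OnRange));
  //   if (range.net_error == net::OK) {
  //     // Bytes [range.start, range.start + range.available_len) are stored
  //     // and can be fetched with ReadSparseData().
  //   }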

  // Returns true if this entry could be a sparse entry or false otherwise. This
  // is a quick test that may return true even if the entry is not really
  // sparse. This method doesn't modify the state of this entry (it will not
  // create sparse tracking data). GetAvailableRange or ReadSparseData can be
  // used to perform a definitive test of whether an existing entry is sparse or
  // not, but that method may modify the current state of the entry (making it
  // sparse, for instance). The purpose of this method is to test an existing
  // entry, but without generating actual IO to perform a thorough check.
  virtual bool CouldBeSparse() const = 0;

  // Cancels any pending sparse IO operation (if any). The completion callback
  // of the operation in question will still be called when the operation
  // finishes, but the operation will finish sooner when this method is used.
  virtual void CancelSparseIO() = 0;

  // Returns OK if this entry can be used immediately. If that is not the
  // case, returns ERR_IO_PENDING and invokes the provided callback when this
  // entry is ready to use. This method always returns OK for non-sparse
  // entries, and returns ERR_IO_PENDING when a previous operation was cancelled
  // (by calling CancelSparseIO), but the cache is still busy with it. If there
  // is a pending operation that has not been cancelled, this method will return
  // OK although another IO operation cannot be issued at this time; in this
  // case the caller should just wait for the regular callback to be invoked
  // instead of using this method to provide another callback.
  //
  // Note that CancelSparseIO may have been called on another instance of this
  // object that refers to the same physical disk entry.
  // Note: This method is deprecated.
  virtual net::Error ReadyForSparseIO(CompletionOnceCallback callback) = 0;

  // Used in tests to set the last used time. Note that the backend might have
  // limited precision. Also note that this call may modify the last modified
  // time.
  virtual void SetLastUsedTimeForTest(base::Time time) = 0;

 protected:
  virtual ~Entry() = default;
};

struct EntryDeleter {
  void operator()(Entry* entry) {
    // Note that |entry| is ref-counted.
    entry->Close();
  }
};

// Automatically closes an entry when it goes out of scope.
// Warning: Be careful. Automatically closing may not be the desired behavior
// when writing to an entry. You may wish to doom first (e.g., in case writing
// hasn't yet completed but the browser is shutting down).
typedef std::unique_ptr<Entry, EntryDeleter> ScopedEntryPtr;
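
// For example (a sketch), taking ownership of an entry returned in an
// EntryResult and letting it close automatically:
//
//   disk_cache::ScopedEntryPtr entry(result.ReleaseEntry());
//   if (entry) {
//     // ... use |entry| ...
//   }
//   // entry->Close() runs automatically when |entry| goes out of scope,
//   // unless entry.release() is used to take the raw pointer back.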

// Represents the result of an entry open or create operation.
// This is a move-only, owning type, which will close the entry it owns unless
// the entry is released via ReleaseEntry() (or the result is moved from).
class NET_EXPORT EntryResult {
 public:
  EntryResult();
  ~EntryResult();
  EntryResult(EntryResult&&);
  EntryResult& operator=(EntryResult&&);

  EntryResult(const EntryResult&) = delete;
  EntryResult& operator=(const EntryResult&) = delete;

  // Creates an entry result representing a successfully opened (pre-existing)
  // cache entry. |new_entry| must be non-null.
  static EntryResult MakeOpened(Entry* new_entry);

  // Creates an entry result representing a successfully created (new)
  // cache entry. |new_entry| must be non-null.
  static EntryResult MakeCreated(Entry* new_entry);

  // Creates an entry result representing an error. Status must not be net::OK.
  static EntryResult MakeError(net::Error status);

  // Relinquishes ownership of the entry, and returns a pointer to it.
  // Will return nullptr if there is no such entry.
  // WARNING: clears net_error() to ERR_FAILED, opened() to false.
  Entry* ReleaseEntry();

  // ReleaseEntry() will return a non-null pointer if and only if net_error()
  // is net::OK before the call to it.
  net::Error net_error() const { return net_error_; }

  // Returns true if an existing entry was opened rather than a new one created.
  // Implies net_error() == net::OK and non-null entry.
  bool opened() const { return opened_; }

 private:
  // Invariant to keep: |entry_| != nullptr iff |net_error_| == net::OK;
  // |opened_| set only if entry is set.
  net::Error net_error_ = net::ERR_FAILED;
  bool opened_ = false;
  ScopedEntryPtr entry_;
};

// Represents a result of GetAvailableRange.
struct NET_EXPORT RangeResult {
  RangeResult() = default;
  explicit RangeResult(net::Error error) : net_error(error) {}

  RangeResult(int64_t start, int available_len)
      : net_error(net::OK), start(start), available_len(available_len) {}

  // This is net::OK if the operation succeeded, and `start` and `available_len`
  // were set appropriately (potentially with 0 for `available_len`).
  //
  // In the return value of GetAvailableRange(), net::ERR_IO_PENDING means that
  // the result will be provided asynchronously via the callback. This cannot
  // occur in the value passed to the callback itself.
  //
  // In case the operation failed, this will be the error code.
  net::Error net_error = net::ERR_FAILED;

  // First byte within the range passed to GetAvailableRange that's available
  // in the cache entry.
  //
  // Valid iff net_error is net::OK.
  int64_t start = -1;

  // Number of consecutive bytes stored within the requested range starting from
  // `start` that can be read at once. This may be zero.
  //
  // Valid iff net_error is net::OK.
  int available_len = 0;
};

// The maximum size of cache that can be created for type
// GENERATED_WEBUI_BYTE_CODE_CACHE. There are only a handful of commonly
// accessed WebUI pages, which can each cache 0.5 - 1.5 MB of code. There is no
// point in having a very large WebUI code cache, even if lots of disk space is
// available.
constexpr int kMaxWebUICodeCacheSize = 5 * 1024 * 1024;

class UnboundBackendFileOperations;

// An interface to provide file operations so that the HTTP cache works in
// a sandboxed process.
// All the paths must be absolute paths.
// A BackendFileOperations object is bound to a sequence.
class BackendFileOperations {
 public:
  struct FileEnumerationEntry {
    FileEnumerationEntry() = default;
    FileEnumerationEntry(base::FilePath path,
                         int64_t size,
                         base::Time last_accessed,
                         base::Time last_modified)
        : path(std::move(path)),
          size(size),
          last_accessed(last_accessed),
          last_modified(last_modified) {}

    base::FilePath path;
    int64_t size = 0;
    base::Time last_accessed;
    base::Time last_modified;
  };

  // An enum representing the mode for the DeleteFile function.
  enum class DeleteFileMode {
    // The default mode, meaning base::DeleteFile.
    kDefault,
    // Ensure that new files with the same name can be created immediately after
    // deletion. Note that this is the default behavior on POSIX. On Windows
    // this assumes that all the file handles for the file to be deleted are
    // opened with FLAG_WIN_SHARE_DELETE.
    kEnsureImmediateAvailability,
  };

  // An interface to enumerate files in a directory.
  // Indirect descendants are not listed, and directories are not listed.
  class FileEnumerator {
   public:
    virtual ~FileEnumerator() = default;

    // Returns the next file in the directory, if any. Returns nullopt if there
    // are no further files (including the error case). The path of the
    // returned entry should be a full path.
    virtual std::optional<FileEnumerationEntry> Next() = 0;

    // Returns true if we've found an error during traversal.
    virtual bool HasError() const = 0;
  };

  virtual ~BackendFileOperations() = default;

  // Creates a directory with the given path and returns whether that succeeded.
  virtual bool CreateDirectory(const base::FilePath& path) = 0;

  // Returns true if the given path exists on the local filesystem.
  virtual bool PathExists(const base::FilePath& path) = 0;

  // Returns true if the given path exists on the local filesystem and it's a
  // directory.
  virtual bool DirectoryExists(const base::FilePath& path) = 0;

  // Opens a file with the given path and flags. Returns the opened file.
  virtual base::File OpenFile(const base::FilePath& path, uint32_t flags) = 0;

  // Deletes a file with the given path and returns whether that succeeded.
  virtual bool DeleteFile(const base::FilePath& path,
                          DeleteFileMode mode = DeleteFileMode::kDefault) = 0;

  // Renames the file `from_path` to `to_path`. Returns whether that succeeded;
  // on failure, `error` is set to the error information.
  virtual bool ReplaceFile(const base::FilePath& from_path,
                           const base::FilePath& to_path,
                           base::File::Error* error) = 0;

  // Returns information about the given path.
  virtual std::optional<base::File::Info> GetFileInfo(
      const base::FilePath& path) = 0;

  // Creates an object that can be used to enumerate files in the specified
  // directory.
  virtual std::unique_ptr<FileEnumerator> EnumerateFiles(
      const base::FilePath& path) = 0;

  // Deletes the given directory recursively, asynchronously. `callback` will
  // be called with whether the operation succeeded.
  // This is done by:
  //  1. Renaming the directory to another directory,
  //  2. Calling `callback` with the result, and
  //  3. Deleting the directory.
  // This means the caller won't know the result of 3.
  virtual void CleanupDirectory(const base::FilePath& path,
                                base::OnceCallback<void(bool)> callback) = 0;

  // Unbinds this object from its sequence and returns an
  // UnboundBackendFileOperations which can be bound to any sequence. Once
  // this method is called, no methods (other than the destructor) may be
  // called on this object.
  virtual std::unique_ptr<UnboundBackendFileOperations> Unbind() = 0;
};
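
// A sketch of how a backend might enumerate a cache directory through this
// interface (illustrative only; `file_operations` and `cache_dir` are
// hypothetical caller-side names):
//
//   std::unique_ptr<disk_cache::BackendFileOperations::FileEnumerator> it =
//       file_operations->EnumerateFiles(cache_dir);
//   while (auto entry = it->Next()) {
//     // entry->path, entry->size, entry->last_accessed and
//     // entry->last_modified describe one file in |cache_dir|.
//   }
//   if (it->HasError()) {
//     // Traversal failed part-way through.
//   }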

// BackendFileOperations which is not yet bound to a sequence.
class UnboundBackendFileOperations {
 public:
  virtual ~UnboundBackendFileOperations() = default;

  // This can be called at most once.
  virtual std::unique_ptr<BackendFileOperations> Bind(
      scoped_refptr<base::SequencedTaskRunner> task_runner) = 0;
};

// A factory interface that creates BackendFileOperations.
class BackendFileOperationsFactory
    : public base::RefCounted<BackendFileOperationsFactory> {
 public:
  // Creates a BackendFileOperations which is bound to `task_runner`.
  virtual std::unique_ptr<BackendFileOperations> Create(
      scoped_refptr<base::SequencedTaskRunner> task_runner) = 0;

  // Creates an "unbound" BackendFileOperations.
  virtual std::unique_ptr<UnboundBackendFileOperations> CreateUnbound() = 0;

 protected:
  friend class base::RefCounted<BackendFileOperationsFactory>;
  virtual ~BackendFileOperationsFactory() = default;
};

// A trivial BackendFileOperations implementation which uses corresponding
// base functions.
class NET_EXPORT TrivialFileOperations final : public BackendFileOperations {
 public:
  TrivialFileOperations();
  ~TrivialFileOperations() override;

  // BackendFileOperations implementation:
  bool CreateDirectory(const base::FilePath& path) override;
  bool PathExists(const base::FilePath& path) override;
  bool DirectoryExists(const base::FilePath& path) override;
  base::File OpenFile(const base::FilePath& path, uint32_t flags) override;
  bool DeleteFile(const base::FilePath& path, DeleteFileMode mode) override;
  bool ReplaceFile(const base::FilePath& from_path,
                   const base::FilePath& to_path,
                   base::File::Error* error) override;
  std::optional<base::File::Info> GetFileInfo(
      const base::FilePath& path) override;
  std::unique_ptr<FileEnumerator> EnumerateFiles(
      const base::FilePath& path) override;
  void CleanupDirectory(const base::FilePath& path,
                        base::OnceCallback<void(bool)> callback) override;
  std::unique_ptr<UnboundBackendFileOperations> Unbind() override;

 private:
  SEQUENCE_CHECKER(sequence_checker_);
#if DCHECK_IS_ON()
  bool bound_ = true;
#endif
};

class NET_EXPORT TrivialFileOperationsFactory
    : public BackendFileOperationsFactory {
 public:
  TrivialFileOperationsFactory();

  // BackendFileOperationsFactory implementation:
  std::unique_ptr<BackendFileOperations> Create(
      scoped_refptr<base::SequencedTaskRunner> task_runner) override;
  std::unique_ptr<UnboundBackendFileOperations> CreateUnbound() override;

 private:
  ~TrivialFileOperationsFactory() override;

  SEQUENCE_CHECKER(sequence_checker_);
};

}  // namespace disk_cache

#endif  // NET_DISK_CACHE_DISK_CACHE_H_