// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_TASK_SEQUENCE_MANAGER_LAZILY_DEALLOCATED_DEQUE_H_
#define BASE_TASK_SEQUENCE_MANAGER_LAZILY_DEALLOCATED_DEQUE_H_

#include <algorithm>
#include <cmath>
#include <limits>
#include <memory>
#include <utility>
#include <vector>

#include "base/check.h"
#include "base/check_op.h"
#include "base/containers/span.h"
#include "base/debug/alias.h"
#include "base/gtest_prod_util.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/raw_ptr_exclusion.h"
#include "base/time/time.h"

namespace base {
namespace sequence_manager {
namespace internal {

// A LazilyDeallocatedDeque specialized for the SequenceManager's usage
// patterns. The queue generally grows while tasks are added and then removed
// until empty and the cycle repeats.
//
// The main difference between sequence_manager::LazilyDeallocatedDeque and
// other deques is memory management. For performance (memory allocation isn't
// free) we don't automatically reclaim memory when the queue becomes empty.
// Instead we rely on the surrounding code periodically calling
// MaybeShrinkQueue, ideally when the queue is empty.
//
// We keep track of the maximum recent queue size and rate limit
// MaybeShrinkQueue to avoid unnecessary churn.
//
// NB: this queue isn't thread safe by itself.
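//
// A minimal usage sketch (illustrative only; |Task| is a stand-in for the
// element type the owner stores, and the owner is assumed to call
// MaybeShrinkQueue periodically):
//
//   LazilyDeallocatedDeque<Task> queue;
//   queue.push_back(std::move(task));  // Common path: append at the tail.
//   while (!queue.empty()) {
//     RunTask(queue.front());  // Hypothetical consumer.
//     queue.pop_front();
//   }
//   queue.MaybeShrinkQueue();  // Capacity is only reclaimed here, at most
//                              // once per kMinimumShrinkIntervalInSeconds.
//
// Tests can inject a fake clock via the |now_source| template parameter.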
template <typename T, TimeTicks (*now_source)() = TimeTicks::Now>
class LazilyDeallocatedDeque {
 public:
  enum {
    // Minimum allocation for a ring. Note a ring of size 4 will only hold up to
    // 3 elements.
    kMinimumRingSize = 4,

    // Maximum "wasted" capacity allowed when considering if we should resize
    // the backing store.
    kReclaimThreshold = 16,

    // Used to rate limit how frequently MaybeShrinkQueue actually shrinks the
    // queue.
    kMinimumShrinkIntervalInSeconds = 5
  };

  LazilyDeallocatedDeque() = default;
  LazilyDeallocatedDeque(const LazilyDeallocatedDeque&) = delete;
  LazilyDeallocatedDeque& operator=(const LazilyDeallocatedDeque&) = delete;
  ~LazilyDeallocatedDeque() { clear(); }

  bool empty() const { return size_ == 0; }

  size_t max_size() const { return max_size_; }

  size_t size() const { return size_; }

  size_t capacity() const {
    size_t capacity = 0;
    for (const Ring* iter = head_.get(); iter; iter = iter->next_.get()) {
      capacity += iter->capacity();
    }
    return capacity;
  }

  void clear() {
    while (head_) {
      head_ = std::move(head_->next_);
    }

    tail_ = nullptr;
    size_ = 0;
  }

  // Assumed to be an uncommon operation.
  void push_front(T t) {
    if (!head_) {
      DCHECK(!tail_);
      head_ = std::make_unique<Ring>(kMinimumRingSize);
      tail_ = head_.get();
    }

    // Grow if needed, by the minimum amount.
    if (!head_->CanPush()) {
      // TODO(alexclarke): Remove once we've understood the OOMs.
      size_t size = size_;
      base::debug::Alias(&size);

      std::unique_ptr<Ring> new_ring = std::make_unique<Ring>(kMinimumRingSize);
      new_ring->next_ = std::move(head_);
      head_ = std::move(new_ring);
    }

    head_->push_front(std::move(t));
    max_size_ = std::max(max_size_, ++size_);
  }

  // Assumed to be a common operation.
  void push_back(T t) {
    if (!head_) {
      DCHECK(!tail_);
      head_ = std::make_unique<Ring>(kMinimumRingSize);
      tail_ = head_.get();
    }

    // Grow if needed.
    if (!tail_->CanPush()) {
      // TODO(alexclarke): Remove once we've understood the OOMs.
      size_t size = size_;
      base::debug::Alias(&size);

      // Doubling the size is a common strategy, but one which can be wasteful,
      // so we use a (somewhat) slower growth curve.
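      // With this formula the capacity grows by roughly 1.5x per step, e.g.
      // 4 -> 8 -> 14 -> 23 -> 36 -> ... starting from kMinimumRingSize.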
      tail_->next_ = std::make_unique<Ring>(2 + tail_->capacity() +
                                            (tail_->capacity() / 2));
      tail_ = tail_->next_.get();
    }

    tail_->push_back(std::move(t));
    max_size_ = std::max(max_size_, ++size_);
  }

  T& front() {
    DCHECK(head_);
    return head_->front();
  }

  const T& front() const {
    DCHECK(head_);
    return head_->front();
  }

  T& back() {
    DCHECK(tail_);
    return tail_->back();
  }

  const T& back() const {
    DCHECK(tail_);
    return tail_->back();
  }

  void pop_front() {
    DCHECK(head_);
    DCHECK(!head_->empty());
    DCHECK(tail_);
    DCHECK_GT(size_, 0u);
    head_->pop_front();

    // If the ring has become empty and we have several rings, remove the
    // head one (which we expect to have lower capacity than the remaining
    // ones).
    if (head_->empty() && head_->next_) {
      head_ = std::move(head_->next_);
    }

    --size_;
  }

  void swap(LazilyDeallocatedDeque& other) {
    std::swap(head_, other.head_);
    std::swap(tail_, other.tail_);
    std::swap(size_, other.size_);
    std::swap(max_size_, other.max_size_);
    std::swap(next_resize_time_, other.next_resize_time_);
  }

  void MaybeShrinkQueue() {
    if (!tail_)
      return;

    DCHECK_GE(max_size_, size_);

    // Rate limit how often we shrink the queue because it's somewhat expensive.
    TimeTicks current_time = now_source();
    if (current_time < next_resize_time_)
      return;

    // Due to the way the Ring works, we need one more slot than is used.
    size_t new_capacity = max_size_ + 1;
    if (new_capacity < kMinimumRingSize)
      new_capacity = kMinimumRingSize;

    // Reset |max_size_| so that, unless usage spikes again, we will consider
    // reclaiming memory next time.
    max_size_ = size_;

    // Only realloc if the current capacity is sufficiently greater than the
    // observed maximum size for the previous period.
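    // For example, with kReclaimThreshold == 16, a ring chain whose total
    // capacity is 64 is only collapsed if the recent maximum required at most
    // 47 slots (i.e. max_size_ + 1 + kReclaimThreshold < 64).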
    if (new_capacity + kReclaimThreshold >= capacity())
      return;

    SetCapacity(new_capacity);
    next_resize_time_ = current_time + Seconds(kMinimumShrinkIntervalInSeconds);
  }

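  // Moves all elements into a single, freshly allocated ring with
  // |new_capacity| slots, collapsing the chain of rings into one.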
  void SetCapacity(size_t new_capacity) {
    std::unique_ptr<Ring> new_ring = std::make_unique<Ring>(new_capacity);

    DCHECK_GE(new_capacity, size_ + 1);

    // Preserve the |size_| which counts down to zero in the while loop.
    size_t real_size = size_;

    while (!empty()) {
      DCHECK(new_ring->CanPush());
      new_ring->push_back(std::move(head_->front()));
      pop_front();
    }

    size_ = real_size;

    DCHECK_EQ(head_.get(), tail_);
    head_ = std::move(new_ring);
    tail_ = head_.get();
  }

 private:
  FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingPushFront);
  FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingPushBack);
  FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingCanPush);
  FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingPushPopPushPop);

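  // A fixed-capacity circular buffer of Ts. The storage is allocated raw so
  // that elements are only constructed and destroyed while logically present.
  // Rings are chained via |next_| when the deque needs to grow.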
  struct Ring {
    explicit Ring(size_t capacity)
        : backing_store_(std::make_unique<char[]>(sizeof(T) * capacity)),
          data_(reinterpret_cast<T*>(backing_store_.get()), capacity) {
      DCHECK_GE(capacity, kMinimumRingSize);
      CHECK_LT(capacity, std::numeric_limits<size_t>::max() / sizeof(T));
    }
    Ring(const Ring&) = delete;
    Ring& operator=(const Ring&) = delete;
    ~Ring() {
      while (!empty()) {
        pop_front();
      }
    }

    bool empty() const { return back_index_ == front_index_; }

    size_t capacity() const { return data_.size(); }

    bool CanPush() const {
      return front_index_ != CircularIncrement(back_index_);
    }

    void push_front(T&& t) {
      // Mustn't appear to become empty.
      DCHECK_NE(CircularDecrement(front_index_), back_index_);
      new (&data_[front_index_]) T(std::move(t));
      front_index_ = CircularDecrement(front_index_);
    }

    void push_back(T&& t) {
      back_index_ = CircularIncrement(back_index_);
      DCHECK(!empty());  // Mustn't appear to become empty.
      new (&data_[back_index_]) T(std::move(t));
    }

    bool CanPop() const { return front_index_ != back_index_; }

    void pop_front() {
      DCHECK(!empty());
      front_index_ = CircularIncrement(front_index_);
      data_[front_index_].~T();
    }

    T& front() {
      DCHECK(!empty());
      return data_[CircularIncrement(front_index_)];
    }

    const T& front() const {
      DCHECK(!empty());
      return data_[CircularIncrement(front_index_)];
    }

    T& back() {
      DCHECK(!empty());
      return data_[back_index_];
    }

    const T& back() const {
      DCHECK(!empty());
      return data_[back_index_];
    }

    size_t CircularDecrement(size_t index) const {
      if (index == 0)
        return capacity() - 1;
      return index - 1;
    }

    size_t CircularIncrement(size_t index) const {
      DCHECK_LT(index, capacity());
      ++index;
      if (index == capacity()) {
        return 0;
      }
      return index;
    }

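    // |front_index_| points at the slot just before the first element and
    // |back_index_| at the last element, so the ring is empty when they are
    // equal and one slot always stays unused (a ring of capacity N holds at
    // most N - 1 elements).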
    size_t front_index_ = 0;
    size_t back_index_ = 0;
    std::unique_ptr<char[]> backing_store_;
    base::span<T> data_;
    std::unique_ptr<Ring> next_ = nullptr;
  };

 public:
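  // A forward, read-only iterator that walks the elements across the chain of
  // rings. It converts to false once it has advanced past the last element.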
  class Iterator {
   public:
    using value_type = T;
    using pointer = const T*;
    using reference = const T&;

    const T* operator->() const { return &ring_->data_[index_]; }
    const T& operator*() const { return ring_->data_[index_]; }

    Iterator& operator++() {
      if (index_ == ring_->back_index_) {
        ring_ = ring_->next_.get();
        index_ = ring_ ? ring_->CircularIncrement(ring_->front_index_) : 0;
      } else {
        index_ = ring_->CircularIncrement(index_);
      }
      return *this;
    }

    operator bool() const { return !!ring_; }

   private:
    explicit Iterator(const Ring* ring) {
      if (!ring || ring->empty()) {
        ring_ = nullptr;
        index_ = 0;
        return;
      }

      ring_ = ring;
      index_ = ring_->CircularIncrement(ring->front_index_);
    }

    raw_ptr<const Ring> ring_;
    size_t index_;

    friend class LazilyDeallocatedDeque;
  };

  Iterator begin() const { return Iterator(head_.get()); }

  Iterator end() const { return Iterator(nullptr); }

 private:
  // We maintain a list of Ring buffers, to enable us to grow without copying,
  // but most of the time we aim to have only one active Ring.
  std::unique_ptr<Ring> head_;

  // `tail_` is not a raw_ptr<...> for performance reasons (based on analysis of
  // sampling profiler data and tab_search:top100:2020).
  RAW_PTR_EXCLUSION Ring* tail_ = nullptr;

  size_t size_ = 0;
  size_t max_size_ = 0;
  TimeTicks next_resize_time_;
};

}  // namespace internal
}  // namespace sequence_manager
}  // namespace base

#endif  // BASE_TASK_SEQUENCE_MANAGER_LAZILY_DEALLOCATED_DEQUE_H_