xref: /aosp_15_r20/external/angle/src/common/FixedQueue.h (revision 8975f5c5ed3d1c378011245431ada316dfb6f244)
1 //
2 // Copyright 2023 The ANGLE Project Authors. All rights reserved.
3 // Use of this source code is governed by a BSD-style license that can be
4 // found in the LICENSE file.
5 //
6 // FixedQueue.h:
7 //   An array based fifo queue class that supports concurrent push and pop.
8 //
9 
10 #ifndef COMMON_FIXEDQUEUE_H_
11 #define COMMON_FIXEDQUEUE_H_
12 
#include "common/debug.h"

#include <algorithm>
#include <array>
#include <atomic>
#include <vector>
18 
19 namespace angle
20 {
// class FixedQueue: A vector-based FIFO queue class that supports concurrent push and
// pop. The caller must ensure the queue is not empty before popping and not full before pushing.
// This class supports concurrent push and pop from different threads, but only with
// single-producer single-consumer usage. If callers want to push from two different threads, a
// proper mutex must be used to ensure the access is serialized. You can also call updateCapacity
// to adjust the storage size, but the caller must take proper mutex locks to ensure no one is
// accessing the storage. A typical usage pattern is to have two mutexes, enqueueLock and
// dequeueLock: use enqueueLock to push and dequeueLock to pop. No lock is needed for checking
// the size (i.e., calling empty/full). Take both locks in a fixed order to call updateCapacity.
// See unit test FixedQueue.ConcurrentPushPopWithResize for an example.
template <class T>
class FixedQueue final : angle::NonCopyable
{
  public:
    using Storage         = std::vector<T>;
    using value_type      = typename Storage::value_type;
    using size_type       = typename Storage::size_type;
    using reference       = typename Storage::reference;
    using const_reference = typename Storage::const_reference;

    // Allocates ring storage for exactly |capacity| elements.
    FixedQueue(size_t capacity);
    ~FixedQueue();

    // Size queries are safe to call from either the producer or the consumer
    // thread without a lock: they read only the atomic element count.
    size_type size() const;
    bool empty() const;
    bool full() const;

    size_type capacity() const;
    // Caller must ensure no one is accessing the data while update storage. This should happen
    // infrequently since all data will be copied between old storage and new storage.
    void updateCapacity(size_t newCapacity);

    // Consumer-side access to the oldest element. Caller must ensure !empty().
    reference front();
    const_reference front() const;

    // Producer-side insertion. Caller must ensure !full().
    void push(const value_type &value);
    void push(value_type &&value);

    // Access to the most recently pushed element. Caller must ensure !empty().
    reference back();
    const_reference back() const;

    // Consumer-side removal of the oldest element. Caller must ensure !empty().
    void pop();
    // Pops every element present at the time of the call (see clear() impl).
    void clear();

  private:
    Storage mData;
    // The front and back indices are virtual indices (think about queue size is infinite). They
    // will never wrap around when hit N. The wrap around occur when element is referenced. Virtual
    // index for current head
    size_type mFrontIndex;
    // Virtual index for next write.
    size_type mEndIndex;
    // Atomic so that we can support concurrent push and pop.
    std::atomic<size_type> mSize;
    size_type mMaxSize;
};
77 
78 template <class T>
FixedQueue(size_t capacity)79 FixedQueue<T>::FixedQueue(size_t capacity)
80     : mFrontIndex(0), mEndIndex(0), mSize(0), mMaxSize(capacity)
81 {
82     mData.resize(mMaxSize);
83 }
84 
template <class T>
FixedQueue<T>::~FixedQueue()
{
    // Destroy elements eagerly; the vector's own destructor would release the
    // storage anyway, so this only makes the teardown explicit.
    mData.clear();
}
90 
91 template <class T>
size()92 ANGLE_INLINE typename FixedQueue<T>::size_type FixedQueue<T>::size() const
93 {
94     ASSERT(mSize <= mMaxSize);
95     return mSize;
96 }
97 
98 template <class T>
empty()99 ANGLE_INLINE bool FixedQueue<T>::empty() const
100 {
101     return size() == 0;
102 }
103 
104 template <class T>
full()105 ANGLE_INLINE bool FixedQueue<T>::full() const
106 {
107     return size() == mMaxSize;
108 }
109 
110 template <class T>
capacity()111 ANGLE_INLINE typename FixedQueue<T>::size_type FixedQueue<T>::capacity() const
112 {
113     return mMaxSize;
114 }
115 
template <class T>
ANGLE_INLINE void FixedQueue<T>::updateCapacity(size_t newCapacity)
{
    // NOT concurrency-safe: per the class comment, the caller must hold both
    // the enqueue and dequeue locks so no push/pop runs during the remap.
    ASSERT(newCapacity >= size());
    Storage newData(newCapacity);
    // Indices are virtual (monotonically increasing), so each live element is
    // remapped from its slot under the old modulus to its slot under the new
    // one. Because mEndIndex - mFrontIndex <= newCapacity (asserted above),
    // the destination slots never collide.
    for (size_type i = mFrontIndex; i < mEndIndex; i++)
    {
        newData[i % newCapacity] = std::move(mData[i % mMaxSize]);
    }
    mData.clear();
    std::swap(newData, mData);
    // Update the modulus only after the swap; front()/back() compute slots
    // from mMaxSize and must see it paired with the new storage.
    mMaxSize = newCapacity;
    ASSERT(mData.size() == mMaxSize);
}
130 
131 template <class T>
front()132 ANGLE_INLINE typename FixedQueue<T>::reference FixedQueue<T>::front()
133 {
134     ASSERT(!empty());
135     return mData[mFrontIndex % mMaxSize];
136 }
137 
138 template <class T>
front()139 ANGLE_INLINE typename FixedQueue<T>::const_reference FixedQueue<T>::front() const
140 {
141     ASSERT(!empty());
142     return mData[mFrontIndex % mMaxSize];
143 }
144 
// Copies |value| into the next write slot. Caller must ensure !full().
template <class T>
void FixedQueue<T>::push(const value_type &value)
{
    ASSERT(!full());
    mData[mEndIndex % mMaxSize] = value;
    mEndIndex++;
    // We must increment size last, after we wrote data. That way if another thread is doing
    // `if(!dq.empty()){ s = dq.front(); }`, it will only see not empty until element is fully
    // pushed.
    mSize++;
}
156 
// Moves |value| into the next write slot. Caller must ensure !full().
template <class T>
void FixedQueue<T>::push(value_type &&value)
{
    ASSERT(!full());
    mData[mEndIndex % mMaxSize] = std::move(value);
    mEndIndex++;
    // We must increment size last, after we wrote data. That way if another thread is doing
    // `if(!dq.empty()){ s = dq.front(); }`, it will only see not empty until element is fully
    // pushed.
    mSize++;
}
168 
169 template <class T>
back()170 ANGLE_INLINE typename FixedQueue<T>::reference FixedQueue<T>::back()
171 {
172     ASSERT(!empty());
173     return mData[(mEndIndex + (mMaxSize - 1)) % mMaxSize];
174 }
175 
176 template <class T>
back()177 ANGLE_INLINE typename FixedQueue<T>::const_reference FixedQueue<T>::back() const
178 {
179     ASSERT(!empty());
180     return mData[(mEndIndex + (mMaxSize - 1)) % mMaxSize];
181 }
182 
// Removes the oldest element. Caller must ensure !empty().
template <class T>
void FixedQueue<T>::pop()
{
    ASSERT(!empty());
    // Overwrite the popped slot with a default-constructed value so any
    // resources held by the element are released immediately.
    mData[mFrontIndex % mMaxSize] = value_type();
    mFrontIndex++;
    // We must decrement size last, after we wrote data. That way if another thread is doing
    // `if(!dq.full()){ dq.push; }`, it will only see not full until element is fully popped.
    mSize--;
}
193 
194 template <class T>
clear()195 void FixedQueue<T>::clear()
196 {
197     // Size will change in the "pop()" and also by "push()" calls from other thread.
198     const size_type localSize = size();
199     for (size_type i = 0; i < localSize; i++)
200     {
201         pop();
202     }
203 }
204 }  // namespace angle
205 
206 #endif  // COMMON_FIXEDQUEUE_H_
207