// Copyright 2023 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
#pragma once

#include <optional>
#include <utility>

#include "pw_assert/assert.h"
#include "pw_bytes/span.h"
#include "pw_sync/interrupt_spin_lock.h"

namespace pw::multibuf {

class ChunkRegionTracker;
class OwnedChunk;

/// A handle to a contiguous slice of data.
///
/// A `Chunk` is similar to a `ByteSpan`, but is aware of its underlying memory
/// region, and is able to split, shrink, and grow into neighboring empty space
/// within its region.
///
/// This class is optimized to allow multiple owners to write into neighboring
/// regions of the same allocation. In order to support zero-copy DMA of
/// communications buffers, allocators can create properly-aligned `Chunk`
/// regions inside an allocation.
///
/// One example use case for this is communication drivers that want to reserve
/// space at the front or rear of a buffer for headers or footers. A driver can
/// `DiscardPrefix` in order to reserve bytes for headers, `Truncate` in order
/// to reserve bytes for footers, and then pass the `Chunk` to the user to fill
/// in. These discarded bytes are still held by the underlying region, so the
/// header and footer space can later be reclaimed using the `ClaimPrefix` and
/// `ClaimSuffix` methods. The region itself is only released once there are no
/// remaining `Chunk` s within it.
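///
/// A sketch of that flow (illustrative only; `chunk` is an `OwnedChunk`
/// spanning a fresh region, and `kHeaderSize`, `kFooterSize`, and
/// `FillPayload` are hypothetical):
///
/// @code{.cpp}
///   chunk->DiscardPrefix(kHeaderSize);  // Reserve headroom for a header.
///   chunk->Truncate(chunk->size() - kFooterSize);  // Reserve tailroom.
///   FillPayload(*chunk);  // Hand the middle of the buffer to the user.
///
///   // Reclaim the reserved bytes to write the header and footer.
///   PW_ASSERT(chunk->ClaimPrefix(kHeaderSize));
///   PW_ASSERT(chunk->ClaimSuffix(kFooterSize));
/// @endcode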
class Chunk {
 public:
  Chunk() = delete;
  // Not copyable or movable.
  Chunk(Chunk&) = delete;
  Chunk& operator=(Chunk&) = delete;
  Chunk(Chunk&&) = delete;
  Chunk& operator=(Chunk&&) = delete;

  std::byte* data() { return span_.data(); }
  const std::byte* data() const { return span_.data(); }
  size_t size() const { return span_.size(); }
  [[nodiscard]] bool empty() const { return span_.empty(); }

  std::byte& operator[](size_t index) { return span_[index]; }
  const std::byte& operator[](size_t index) const { return span_[index]; }

  // Container declarations
  using element_type = std::byte;
  using value_type = std::byte;
  using size_type = size_t;
  using difference_type = ptrdiff_t;
  using pointer = std::byte*;
  using const_pointer = const std::byte*;
  using reference = std::byte&;
  using const_reference = const std::byte&;
  using iterator = std::byte*;
  using const_iterator = const std::byte*;
  using reverse_iterator = std::byte*;
  using const_reverse_iterator = const std::byte*;

  std::byte* begin() { return span_.data(); }
  const std::byte* begin() const { return cbegin(); }
  const std::byte* cbegin() const { return span_.data(); }
  std::byte* end() { return span_.data() + span_.size(); }
  const std::byte* end() const { return cend(); }
  const std::byte* cend() const { return span_.data() + span_.size(); }

  /// Returns whether `next_chunk` can be merged into the end of this `Chunk`.
  ///
  /// This will only succeed when the two `Chunk` s are adjacent in memory and
  /// originated from the same region.
  [[nodiscard]] bool CanMerge(const Chunk& next_chunk) const;

  /// Attempts to merge `next_chunk` into the end of this `Chunk`.
  ///
  /// If the chunks are successfully merged, this `Chunk` will be extended
  /// forwards to encompass the space of `next_chunk`, and `next_chunk` will be
  /// emptied and `Release`d.
  ///
  /// This will only succeed when the two `Chunk` s are adjacent in memory and
  /// originated from the same region.
  ///
  /// If the chunks are not mergeable, neither `Chunk` will be modified.
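  ///
  /// For example (a sketch; assumes the split and merge both succeed, and
  /// that `chunk` is an `OwnedChunk`):
  ///
  /// @code{.cpp}
  ///   // Split the last 4 bytes into a separate chunk, then rejoin them.
  ///   std::optional<OwnedChunk> tail = chunk->TakeSuffix(4);
  ///   PW_ASSERT(tail.has_value());
  ///   PW_ASSERT(chunk->CanMerge(**tail));
  ///   PW_ASSERT(chunk->Merge(*tail));  // `*tail` is now empty and released.
  /// @endcode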
  bool Merge(OwnedChunk& next_chunk);

  /// Attempts to add `bytes_to_claim` to the front of this buffer by advancing
  /// its range backwards in memory. Returns `true` if the operation succeeded.
  ///
  /// This will only succeed if this `Chunk` points to a section of a region
  /// that has unreferenced bytes preceding it. For example, a `Chunk` which
  /// has been shrunk using `DiscardPrefix` can be re-expanded using
  /// `ClaimPrefix`.
  ///
  /// This method will acquire a mutex and is not IRQ safe.
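  ///
  /// For example (a sketch; `chunk` is assumed to hold at least two bytes):
  ///
  /// @code{.cpp}
  ///   chunk->DiscardPrefix(2);           // Shrink, leaving 2 bytes unclaimed.
  ///   PW_ASSERT(chunk->ClaimPrefix(2));  // Succeeds: the bytes precede this
  ///                                      // chunk and are unreferenced.
  /// @endcode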
  [[nodiscard]] bool ClaimPrefix(size_t bytes_to_claim);

  /// Attempts to add `bytes_to_claim` to the end of this buffer by advancing
  /// its range forwards in memory. Returns `true` if the operation succeeded.
  ///
  /// This will only succeed if this `Chunk` points to a section of a region
  /// that has unreferenced bytes following it. For example, a `Chunk` which has
  /// been shrunk using `Truncate` can be re-expanded using `ClaimSuffix`.
  ///
  /// This method will acquire a mutex and is not IRQ safe.
  [[nodiscard]] bool ClaimSuffix(size_t bytes_to_claim);

  /// Shrinks this handle to refer to the data beginning at offset
  /// `bytes_to_discard`.
  ///
  /// Does not modify the underlying data. The discarded memory continues to be
  /// held by the underlying region as long as any `Chunk`s exist within it.
  /// This allows the memory to be later reclaimed using `ClaimPrefix`.
  ///
  /// This method will acquire a mutex and is not IRQ safe.
  void DiscardPrefix(size_t bytes_to_discard);

  /// Shrinks this handle to refer to data in the range `begin..<end`.
  ///
  /// Does not modify the underlying data. The discarded memory continues to be
  /// held by the underlying region as long as any `Chunk`s exist within it.
  /// This allows the memory to be later reclaimed using `ClaimPrefix` or
  /// `ClaimSuffix`.
  ///
  /// This method will acquire a mutex and is not IRQ safe.
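  ///
  /// For example (a sketch; `chunk` is assumed to span at least 10 bytes):
  ///
  /// @code{.cpp}
  ///   // Keep only bytes [2, 10); the discarded bytes at either end remain
  ///   // reclaimable via `ClaimPrefix` and `ClaimSuffix`.
  ///   chunk->Slice(2, 10);
  ///   PW_ASSERT(chunk->size() == 8);
  /// @endcode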
  void Slice(size_t begin, size_t end);

  /// Shrinks this handle to refer to only the first `len` bytes.
  ///
  /// Does not modify the underlying data. The discarded memory continues to be
  /// held by the underlying region as long as any `Chunk`s exist within it.
  /// This allows the memory to be later reclaimed using `ClaimSuffix`.
  ///
  /// This method will acquire a mutex and is not IRQ safe.
  void Truncate(size_t len);

  /// Attempts to shrink this handle to refer to the data beginning at offset
  /// `bytes_to_take`, returning the first `bytes_to_take` bytes as a new
  /// `OwnedChunk`.
  ///
  /// If the inner call to `AllocateChunkClass` fails, this function will return
  /// `std::nullopt` and this handle's span will not change.
  ///
  /// This method will acquire a mutex and is not IRQ safe.
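  ///
  /// For example (a sketch; `kHeaderSize` is hypothetical, and the allocation
  /// is assumed to succeed):
  ///
  /// @code{.cpp}
  ///   // Split the first `kHeaderSize` bytes into their own chunk, leaving
  ///   // `chunk` referring to the remainder of the region.
  ///   std::optional<OwnedChunk> header = chunk->TakePrefix(kHeaderSize);
  ///   PW_ASSERT(header.has_value());
  /// @endcode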
  std::optional<OwnedChunk> TakePrefix(size_t bytes_to_take);

  /// Attempts to shrink this handle to refer to only the first `size() -
  /// bytes_to_take` bytes, returning the last `bytes_to_take` bytes as a new
  /// `OwnedChunk`.
  ///
  /// If the inner call to `AllocateChunkClass` fails, this function will return
  /// `std::nullopt` and this handle's span will not change.
  ///
  /// This method will acquire a mutex and is not IRQ safe.
  std::optional<OwnedChunk> TakeSuffix(size_t bytes_to_take);

 private:
  Chunk(ChunkRegionTracker* region_tracker, ByteSpan span)
      : region_tracker_(region_tracker),
        next_in_region_(nullptr),
        prev_in_region_(nullptr),
        next_in_buf_(nullptr),
        span_(span) {}

  // NOTE: these functions are logically
  // `PW_EXCLUSIVE_LOCKS_REQUIRED(region_tracker_->lock_)`, however this
  // annotation cannot be applied due to the forward declaration of
  // `ChunkRegionTracker`.
  void InsertAfterInRegionList(Chunk* new_chunk);
  void InsertBeforeInRegionList(Chunk* new_chunk);
  void RemoveFromRegionList();

  /// Frees this `Chunk`. Future accesses to this class after calls to `Free`
  /// are undefined behavior.
  ///
  /// Decrements the reference count on the underlying chunk of data.
  ///
  /// Does not modify the underlying data, but may cause it to be deallocated if
  /// this was the only remaining `Chunk` referring to its region.
  ///
  /// This method will acquire a mutex and is not IRQ safe.
  void Free();

  /// The `ChunkRegionTracker` for this chunk.
  ChunkRegionTracker* const region_tracker_;

  /// Pointer to the next chunk in the same `region_tracker_`.
  ///
  /// Guarded by `region_tracker_->lock_` (but not annotated as such because
  /// annotations aren't smart enough to understand that all chunks within a
  /// region share a tracker and lock).
  ///
  /// `nullptr` if this is the last `Chunk` in its `region_tracker_`.
  Chunk* next_in_region_;

  /// Pointer to the previous chunk in the same `region_tracker_`.
  ///
  /// Guarded by `region_tracker_->lock_` (but not annotated as such because
  /// annotations aren't smart enough to understand that all chunks within a
  /// region share a tracker and lock).
  ///
  /// `nullptr` if this is the first `Chunk` in its `region_tracker_`.
  Chunk* prev_in_region_;

  /// Pointer to the next chunk in a `MultiBuf`.
  ///
  /// This is `nullptr` if this chunk is not in a `MultiBuf`, or is the last
  /// element of a `MultiBuf`.
  ///
  /// Reserved for use by the `MultiBuf` class.
  Chunk* next_in_buf_;

  /// Pointer to the sub-region to which this chunk has exclusive access.
  ///
  /// This `span_` is conceptually owned by this `Chunk` object, and may be read
  /// or written to by a `Chunk` user (the normal rules of thread compatibility
  /// apply: `const` methods may be called concurrently, non-`const` methods
  /// may not).
  ///
  /// Neighboring `Chunk` objects in this region looking to expand via
  /// `ClaimPrefix` or `ClaimSuffix` may read this span's boundaries. These
  /// other `Chunk`s must acquire `region_tracker_->lock_` before reading, and
  /// this `Chunk` must acquire `region_tracker_->lock_` before changing
  /// `span_`.
  ByteSpan span_;

  friend class ChunkRegionTracker;  // For the constructor.
  friend class OwnedChunk;          // For `Free`.
  friend class MultiBuf;            // For `next_in_buf_`.
  friend class MultiBufChunks;      // For `Free` and `next_in_buf_`.
};

/// An object that manages a single allocated region which is referenced by one
/// or more `Chunk` objects.
///
/// This class is typically implemented by `MultiBufAllocator` implementations
/// in order to customize the behavior of region deallocation.
///
/// `ChunkRegionTracker` s have three responsibilities:
///
/// - Tracking the region of memory into which `Chunk` s can expand. This is
///   reported via the `Region` method. `Chunk` s in this region can refer to
///   memory within this region sparsely, but they can grow or shrink so long as
///   they stay within the bounds of the `Region`.
///
/// - Deallocating the region and the `ChunkRegionTracker` itself. This is
///   implemented via the `Destroy` method, which is called once all of the
///   `Chunk` s in a region have been released.
///
/// - Allocating and deallocating space for `Chunk` classes. This is merely
///   allocating space for the `Chunk` object itself, not the memory to which it
///   refers. This can be implemented straightforwardly by delegating to an
///   existing generic allocator such as `malloc` or a
///   `pw::allocator::Allocator` implementation, as in the sketch below.
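///
/// A minimal sketch of a concrete tracker (illustrative only;
/// `MallocChunkRegionTracker` is a hypothetical name, not part of this API).
/// It manages a caller-provided region and delegates `Chunk` storage to
/// `std::malloc`/`std::free` (requires `<cstdlib>` and `<memory>`):
///
/// @code{.cpp}
///   class MallocChunkRegionTracker : public ChunkRegionTracker {
///    public:
///     explicit MallocChunkRegionTracker(ByteSpan region) : region_(region) {}
///
///    protected:
///     // Called once every `Chunk` in the region has been released. Storage
///     // for the tracker and the region is assumed to be owned by the caller.
///     void Destroy() override { std::destroy_at(this); }
///
///     ByteSpan Region() const override { return region_; }
///
///     // `std::malloc` returns memory suitably aligned for `Chunk`.
///     void* AllocateChunkClass() override { return std::malloc(sizeof(Chunk)); }
///     void DeallocateChunkClass(void* chunk) override { std::free(chunk); }
///
///    private:
///     const ByteSpan region_;
///   };
/// @endcode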
class ChunkRegionTracker {
 public:
  /// Creates the first `Chunk` referencing a whole region of memory.
  ///
  /// This must only be called once per `ChunkRegionTracker`, when the region is
  /// first created. Multiple calls will result in undefined behavior.
  ///
  /// Returns `std::nullopt` if `AllocateChunkClass` returns `nullptr`.
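  ///
  /// For example (a sketch; `tracker` is assumed to be an instance of a
  /// concrete `ChunkRegionTracker` subclass, such as the one sketched above):
  ///
  /// @code{.cpp}
  ///   std::optional<OwnedChunk> chunk = tracker.CreateFirstChunk();
  ///   if (chunk.has_value()) {
  ///     // `*chunk` now spans the tracker's entire `Region()`.
  ///   }
  /// @endcode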
  std::optional<OwnedChunk> CreateFirstChunk();

 protected:
  ChunkRegionTracker() = default;
  virtual ~ChunkRegionTracker() = default;

  /// Destroys the `ChunkRegionTracker`.
  ///
  /// Typical implementations will call `std::destroy_at(this)` and then free
  /// the memory associated with the region and the tracker.
  virtual void Destroy() = 0;

  /// Returns the entire span of the region being managed.
  ///
  /// `Chunk` s referencing this tracker will not expand beyond this region, nor
  /// into one another's portions of the region.
  ///
  /// This region does not provide any alignment guarantees by default.
  ///
  /// This region must not change for the lifetime of this `ChunkRegionTracker`.
  virtual ByteSpan Region() const = 0;

  /// Returns a pointer to `sizeof(Chunk)` bytes with `alignas(Chunk)`. Returns
  /// `nullptr` on failure.
  virtual void* AllocateChunkClass() = 0;

  /// Deallocates a pointer returned by `AllocateChunkClass`.
  virtual void DeallocateChunkClass(void*) = 0;

 private:
  /// A lock used to manage an internal linked-list of `Chunk`s.
  ///
  /// This allows chunks to:
  ///
  /// - know whether they can expand to fill neighboring regions of memory.
  /// - know when the last chunk has been destructed, triggering `Destroy`.
  pw::sync::InterruptSpinLock lock_;
  friend Chunk;
};

/// An RAII handle to a contiguous slice of data.
///
/// Note: `OwnedChunk` may acquire the underlying region's lock during
/// destruction, and so must not be destroyed within ISR contexts.
class OwnedChunk {
 public:
  OwnedChunk() : inner_(nullptr) {}
  OwnedChunk(OwnedChunk&& other) noexcept : inner_(other.inner_) {
    other.inner_ = nullptr;
  }
  OwnedChunk& operator=(OwnedChunk&& other) noexcept {
    // Release any currently-held chunk first so that it is not leaked; this
    // may acquire a lock and so is not IRQ safe.
    Release();
    inner_ = other.inner_;
    other.inner_ = nullptr;
    return *this;
  }
  /// This method will acquire a mutex and is not IRQ safe.
  ~OwnedChunk() { Release(); }

  std::byte* data() {
    return const_cast<std::byte*>(std::as_const(*this).data());
  }
  const std::byte* data() const {
    return inner_ == nullptr ? nullptr : inner_->data();
  }

  size_t size() const { return inner_ == nullptr ? 0 : inner_->size(); }

  std::byte& operator[](size_t index) { return data()[index]; }
  std::byte operator[](size_t index) const { return data()[index]; }

  // Container declarations
  using element_type = std::byte;
  using value_type = std::byte;
  using size_type = size_t;
  using difference_type = ptrdiff_t;
  using pointer = std::byte*;
  using const_pointer = const std::byte*;
  using reference = std::byte&;
  using const_reference = const std::byte&;
  using iterator = std::byte*;
  using const_iterator = const std::byte*;
  using reverse_iterator = std::byte*;
  using const_reverse_iterator = const std::byte*;

  std::byte* begin() { return data(); }
  const std::byte* begin() const { return cbegin(); }
  const std::byte* cbegin() const { return data(); }
  std::byte* end() { return data() + size(); }
  const std::byte* end() const { return cend(); }
  const std::byte* cend() const { return data() + size(); }

  Chunk& operator*() { return *inner_; }
  const Chunk& operator*() const { return *inner_; }
  Chunk* operator->() { return inner_; }
  const Chunk* operator->() const { return inner_; }

  /// Decrements the reference count on the underlying chunk of data and empties
  /// this handle so that `data()` returns `nullptr` and `size()` returns zero.
  ///
  /// Does not modify the underlying data, but may cause it to be deallocated if
  /// this was the only remaining `Chunk` referring to its region.
  ///
  /// This method is equivalent to `{ OwnedChunk _unused = std::move(chunk_ref); }`.
  ///
  /// This method will acquire a mutex and is not IRQ safe.
  void Release();

  /// Returns the contained `Chunk*` and empties this `OwnedChunk` without
  /// releasing the underlying `Chunk`.
  Chunk* Take() && {
    Chunk* result = inner_;
    inner_ = nullptr;
    return result;
  }

 private:
  /// Constructs a new `OwnedChunk` from an existing `Chunk*`.
  OwnedChunk(Chunk* inner) : inner_(inner) {}

  /// A pointer to the owned `Chunk`.
  Chunk* inner_;

  /// Allow `Chunk`, `ChunkRegionTracker`, `MultiBuf`, and `MultiBufChunks` to
  /// create `OwnedChunk` s using the private constructor above.
  friend class Chunk;
  friend class ChunkRegionTracker;
  friend class MultiBuf;
  friend class MultiBufChunks;
};

}  // namespace pw::multibuf