// Copyright 2023 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.

#include "pw_multibuf/chunk.h"

#include <cstdint>
#include <limits>
#include <mutex>

#include "pw_assert/check.h"

namespace pw::multibuf {
namespace {
// Returns `ptr + offset`, or `nullptr` if the addition would overflow.
std::byte* CheckedAdd(std::byte* ptr, size_t offset) {
  uintptr_t ptr_int = reinterpret_cast<uintptr_t>(ptr);
  if (std::numeric_limits<uintptr_t>::max() - ptr_int < offset) {
    return nullptr;
  }
  return reinterpret_cast<std::byte*>(ptr_int + offset);
}

// Returns `ptr - offset`, or `nullptr` if the subtraction would underflow.
std::byte* CheckedSub(std::byte* ptr, size_t offset) {
  uintptr_t ptr_int = reinterpret_cast<uintptr_t>(ptr);
  if (ptr_int < offset) {
    return nullptr;
  }
  return reinterpret_cast<std::byte*>(ptr_int - offset);
}

std::byte* BeginPtr(ByteSpan span) { return span.data(); }

std::byte* EndPtr(ByteSpan span) { return span.data() + span.size(); }

}  // namespace

bool Chunk::CanMerge(const Chunk& next_chunk) const {
  return region_tracker_ == next_chunk.region_tracker_ &&
         EndPtr(span_) == BeginPtr(next_chunk.span_);
}

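// Illustrative sketch: ``Merge`` combines two contiguous chunks from the same
// region into one. ``front`` and ``back`` below are hypothetical
// ``OwnedChunk`` values assumed to be adjacent in memory:
//
//   if (front->CanMerge(*back)) {
//     front->Merge(back);  // `back` is consumed; `front` now spans both.
//   }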
bool Chunk::Merge(OwnedChunk& next_chunk_owned) {
  if (!CanMerge(*next_chunk_owned)) {
    return false;
  }
  Chunk* next_chunk = std::move(next_chunk_owned).Take();

  // Note: Both chunks have the same ``region_tracker_``.
  //
  // We lock the one from ``next_chunk`` so that the thread-safety analysis
  // can verify that ``RemoveFromRegionList`` is safe to call.
  std::lock_guard lock(next_chunk->region_tracker_->lock_);
  PW_DCHECK(next_in_region_ == next_chunk);
  span_ = ByteSpan(data(), size() + next_chunk->size());
  next_chunk->RemoveFromRegionList();
  region_tracker_->DeallocateChunkClass(next_chunk);
  return true;
}

void Chunk::InsertAfterInRegionList(Chunk* new_chunk)
    PW_EXCLUSIVE_LOCKS_REQUIRED(region_tracker_->lock_) {
  new_chunk->next_in_region_ = next_in_region_;
  new_chunk->prev_in_region_ = this;
  if (next_in_region_ != nullptr) {
    next_in_region_->prev_in_region_ = new_chunk;
  }
  next_in_region_ = new_chunk;
}

void Chunk::InsertBeforeInRegionList(Chunk* new_chunk)
    PW_EXCLUSIVE_LOCKS_REQUIRED(region_tracker_->lock_) {
  new_chunk->next_in_region_ = this;
  new_chunk->prev_in_region_ = prev_in_region_;
  if (prev_in_region_ != nullptr) {
    prev_in_region_->next_in_region_ = new_chunk;
  }
  prev_in_region_ = new_chunk;
}

void Chunk::RemoveFromRegionList()
    PW_EXCLUSIVE_LOCKS_REQUIRED(region_tracker_->lock_) {
  if (prev_in_region_ != nullptr) {
    prev_in_region_->next_in_region_ = next_in_region_;
  }
  if (next_in_region_ != nullptr) {
    next_in_region_->prev_in_region_ = prev_in_region_;
  }
  prev_in_region_ = nullptr;
  next_in_region_ = nullptr;
}

std::optional<OwnedChunk> ChunkRegionTracker::CreateFirstChunk() {
  void* memory = AllocateChunkClass();
  if (memory == nullptr) {
    return std::nullopt;
  }
  // Note: `Region()` is `const`, so no lock is required.
  Chunk* chunk = new (memory) Chunk(this, Region());
  return OwnedChunk(chunk);
}
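
// Illustrative sketch: a ``ChunkRegionTracker`` hands out its initial chunk
// via ``CreateFirstChunk``. ``tracker`` below is a hypothetical tracker
// implementation; allocation of the chunk object may fail, so the result must
// be checked:
//
//   std::optional<OwnedChunk> chunk = tracker.CreateFirstChunk();
//   if (chunk.has_value()) {
//     // `*chunk` initially spans the entire region returned by `Region()`.
//   }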

void Chunk::Free() {
  span_ = ByteSpan();
  bool region_empty;
  // Record `region_tracker_` in a local so that it remains available even
  // after `this` is freed by the call to `DeallocateChunkClass`.
  ChunkRegionTracker* region_tracker = region_tracker_;
  {
    std::lock_guard lock(region_tracker_->lock_);
    region_empty = prev_in_region_ == nullptr && next_in_region_ == nullptr;
    RemoveFromRegionList();
    // NOTE: do *not* attempt to access any fields of `this` after this point.
    //
    // The lock must be held while deallocating this, otherwise another
    // ``Chunk::Free`` in the same region could race and call
    // ``ChunkRegionTracker::Destroy``, making this call no longer valid.
    region_tracker->DeallocateChunkClass(this);
  }
  if (region_empty) {
    region_tracker->Destroy();
  }
}

void OwnedChunk::Release() {
  if (inner_ == nullptr) {
    return;
  }
  inner_->Free();
  inner_ = nullptr;
}

bool Chunk::ClaimPrefix(size_t bytes_to_claim) {
  if (bytes_to_claim == 0) {
    return true;
  }
  // In order to expand backwards by `bytes_to_claim` bytes, the current chunk
  // must start at least `bytes_to_claim` bytes after the beginning of the
  // current region.
  std::byte* new_start = CheckedSub(data(), bytes_to_claim);
  // Note: `Region()` is `const`, so no lock is required.
  if (new_start == nullptr || new_start < BeginPtr(region_tracker_->Region())) {
    return false;
  }

  // `lock` is acquired in order to traverse the linked list and mutate `span_`.
  std::lock_guard lock(region_tracker_->lock_);

  // If there are any chunks before this one, they must not end after
  // `new_start`.
  Chunk* prev = prev_in_region_;
  if (prev != nullptr && EndPtr(*prev) > new_start) {
    return false;
  }

  size_t old_size = span_.size();
  span_ = ByteSpan(new_start, old_size + bytes_to_claim);
  return true;
}
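
// Illustrative sketch: ``ClaimPrefix`` can win back bytes previously released
// from the front of a chunk, e.g. after ``DiscardPrefix``. ``chunk`` below is
// a hypothetical ``OwnedChunk`` assumed to be the only chunk in its region:
//
//   chunk->DiscardPrefix(4);          // Give up the first four bytes.
//   bool ok = chunk->ClaimPrefix(4);  // Succeeds: no other chunk claims them.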

bool Chunk::ClaimSuffix(size_t bytes_to_claim) {
  if (bytes_to_claim == 0) {
    return true;
  }
  // In order to expand forward by `bytes_to_claim` bytes, the current chunk
  // must end at least `bytes_to_claim` bytes before the end of the current
  // region.
  std::byte* new_end = CheckedAdd(EndPtr(*this), bytes_to_claim);
  // Note: `Region()` is `const`, so no lock is required.
  if (new_end == nullptr || new_end > EndPtr(region_tracker_->Region())) {
    return false;
  }

  // `lock` is acquired in order to traverse the linked list and mutate `span_`.
  std::lock_guard lock(region_tracker_->lock_);

  // If there are any chunks after this one, they must not start before
  // `new_end`.
  Chunk* next = next_in_region_;
  if (next != nullptr && BeginPtr(next->span_) < new_end) {
    return false;
  }

  size_t old_size = span_.size();
  span_ = ByteSpan(data(), old_size + bytes_to_claim);
  return true;
}

void Chunk::DiscardPrefix(size_t bytes_to_discard) {
  Slice(bytes_to_discard, size());
}

void Chunk::Slice(size_t begin, size_t end) {
  PW_DCHECK(begin <= size());
  PW_DCHECK(end <= size());
  PW_DCHECK(end >= begin);
  ByteSpan new_span(data() + begin, end - begin);
  std::lock_guard lock(region_tracker_->lock_);
  span_ = new_span;
}

void Chunk::Truncate(size_t len) { Slice(0, len); }

std::optional<OwnedChunk> Chunk::TakePrefix(size_t bytes_to_take) {
  void* new_chunk_memory = region_tracker_->AllocateChunkClass();
  if (new_chunk_memory == nullptr) {
    return std::nullopt;
  }

  PW_DCHECK(bytes_to_take <= size());
  ByteSpan first_span = ByteSpan(data(), bytes_to_take);
  ByteSpan second_span =
      ByteSpan(data() + bytes_to_take, size() - bytes_to_take);

  std::lock_guard lock(region_tracker_->lock_);
  span_ = second_span;
  Chunk* new_chunk = new (new_chunk_memory) Chunk(region_tracker_, first_span);
  InsertBeforeInRegionList(new_chunk);
  return OwnedChunk(new_chunk);
}

std::optional<OwnedChunk> Chunk::TakeSuffix(size_t bytes_to_take) {
  void* new_chunk_memory = region_tracker_->AllocateChunkClass();
  if (new_chunk_memory == nullptr) {
    return std::nullopt;
  }

  PW_DCHECK(bytes_to_take <= size());
  ByteSpan first_span = ByteSpan(data(), size() - bytes_to_take);
  ByteSpan second_span = ByteSpan(EndPtr(*this) - bytes_to_take, bytes_to_take);

  std::lock_guard lock(region_tracker_->lock_);
  span_ = first_span;
  Chunk* new_chunk = new (new_chunk_memory) Chunk(region_tracker_, second_span);
  InsertAfterInRegionList(new_chunk);
  return OwnedChunk(new_chunk);
}
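
// Illustrative sketch: ``TakePrefix`` and ``TakeSuffix`` split a chunk by
// handing part of its span to a newly allocated ``Chunk`` in the same region.
// ``chunk`` below is a hypothetical ``OwnedChunk``; either call returns
// ``std::nullopt`` if the tracker cannot allocate another chunk object:
//
//   std::optional<OwnedChunk> header = chunk->TakePrefix(8);
//   if (header.has_value()) {
//     // `*header` covers the first 8 bytes; `chunk` covers the remainder.
//   }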

}  // namespace pw::multibuf