1 // Copyright 2024 The Pigweed Authors
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License"); you may not
4 // use this file except in compliance with the License. You may obtain a copy of
5 // the License at
6 //
7 // https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12 // License for the specific language governing permissions and limitations under
13 // the License.
14
15 #include "pw_multibuf/simple_allocator.h"
16
17 #include <algorithm>
18 #include <mutex>
19
20 #include "pw_assert/check.h"
21
22 namespace pw::multibuf {
23 namespace internal {
24
// Destructor: verifies (in debug builds) the unlink-before-destroy contract.
LinkedRegionTracker::~LinkedRegionTracker() {
  // The ``LinkedRegionTracker`` *must* be removed from the parent allocator's
  // region list prior to being destroyed, as doing so requires holding a lock.
  //
  // The destructor is only called via ``Destroy()``'s invocation of
  // ``metadata_alloc_ref.Delete(this);``
  PW_DCHECK(unlisted());
}
33
// Unlinks this tracker from the parent allocator's region list, notifies the
// parent of the memory that has become available, then frees the tracker
// itself via the parent's metadata allocator.
void LinkedRegionTracker::Destroy() {
  SimpleAllocator::AvailableMemorySize available;
  {
    // N.B.: this lock *must* go out of scope before the call to
    // ``Delete(this)`` below in order to prevent referencing the ``parent_``
    // field after this tracker has been destroyed.
    std::lock_guard lock(parent_.lock_);
    unlist();
    // Snapshot availability while still holding the lock so the totals are
    // consistent with the just-updated region list.
    available = parent_.GetAvailableMemorySize();
  }
  // NOTE(review): ``MoreMemoryAvailable`` is called without holding ``lock_``
  // — presumably it may re-enter the allocator or invoke user callbacks;
  // confirm against its declaration.
  parent_.MoreMemoryAvailable(available.total, available.contiguous);
  // Runs ~LinkedRegionTracker(), which DCHECKs that we are unlisted.
  parent_.metadata_alloc_.Delete(this);
}
47
// Allocates raw storage for one ``Chunk`` object from the parent allocator's
// metadata allocator. Failure behavior follows that allocator's ``Allocate``
// contract (presumably returns nullptr on exhaustion — confirm).
void* LinkedRegionTracker::AllocateChunkClass() {
  return parent_.metadata_alloc_.Allocate(allocator::Layout::Of<Chunk>());
}
51
DeallocateChunkClass(void * ptr)52 void LinkedRegionTracker::DeallocateChunkClass(void* ptr) {
53 return parent_.metadata_alloc_.Deallocate(ptr);
54 }
55
56 } // namespace internal
57
// Allocates a MultiBuf of at least ``min_size`` and at most ``desired_size``
// bytes from this allocator's free blocks. When ``contiguity_requirement`` is
// ``kNeedsContiguous``, the result is backed by a single contiguous region;
// otherwise it may be assembled from multiple discontiguous free blocks.
// Returns ``OutOfRange`` for requests that can never fit, and
// ``ResourceExhausted`` when insufficient memory is currently free.
pw::Result<MultiBuf> SimpleAllocator::DoAllocate(
    size_t min_size,
    size_t desired_size,
    ContiguityRequirement contiguity_requirement) {
  // A request larger than the entire data area can never succeed.
  if (min_size > data_area_.size()) {
    return Status::OutOfRange();
  }
  // NB: std::lock_guard is not used here in order to release the lock
  // prior to destroying ``buf`` below.
  lock_.lock();
  auto available_memory_size = GetAvailableMemorySize();
  // Select the relevant availability metric: the largest single free block
  // for contiguous requests, or the sum of all free blocks otherwise.
  size_t available = (contiguity_requirement == kNeedsContiguous)
                         ? available_memory_size.contiguous
                         : available_memory_size.total;
  if (available < min_size) {
    lock_.unlock();
    return Status::ResourceExhausted();
  }
  // Aim for ``desired_size``, settling for what is available now (which the
  // check above guarantees is at least ``min_size``).
  size_t goal_size = std::min(desired_size, available);
  if (contiguity_requirement == kNeedsContiguous) {
    auto out = InternalAllocateContiguous(goal_size);
    lock_.unlock();
    return out;
  }

  // Discontiguous case: greedily claim free blocks (or prefixes of them)
  // until the goal is met. ``remaining_goal`` is captured by value and
  // mutated inside the (mutable) lambda.
  MultiBuf buf;
  Status status;
  size_t remaining_goal = goal_size;
  ForEachFreeBlock(
      [this, &buf, &status, remaining_goal](const FreeBlock& block)
          PW_EXCLUSIVE_LOCKS_REQUIRED(lock_) mutable {
        if (remaining_goal == 0) {
          return ControlFlow::Break;
        }
        // Take no more of this block than is still needed.
        size_t chunk_size = std::min(block.span.size(), remaining_goal);
        pw::Result<OwnedChunk> chunk = InsertRegion(
            {block.iter, ByteSpan(block.span.data(), chunk_size)});
        if (!chunk.ok()) {
          // Metadata allocation failed; record and report after unlocking.
          status = chunk.status();
          return ControlFlow::Break;
        }
        remaining_goal -= chunk->size();
        // NOTE(review): chunks are pushed at the front, so the MultiBuf's
        // chunk order is the reverse of free-block iteration order —
        // presumably callers do not depend on ordering; confirm.
        buf.PushFrontChunk(std::move(*chunk));
        return ControlFlow::Continue;
      });
  // Lock must be released prior to possibly free'ing the `buf` in the case
  // where `!status.ok()`. This is necessary so that the destructing chunks
  // can free their regions.
  lock_.unlock();
  if (!status.ok()) {
    return status;
  }
  return buf;
}
112
InternalAllocateContiguous(size_t size)113 pw::Result<MultiBuf> SimpleAllocator::InternalAllocateContiguous(size_t size) {
114 pw::Result<MultiBuf> buf = Status::ResourceExhausted();
115 ForEachFreeBlock([this, &buf, size](const FreeBlock& block)
116 PW_EXCLUSIVE_LOCKS_REQUIRED(lock_) {
117 if (block.span.size() >= size) {
118 ByteSpan buf_span(block.span.data(), size);
119 buf = InsertRegion({block.iter, buf_span})
120 .transform(MultiBuf::FromChunk);
121 return ControlFlow::Break;
122 }
123 return ControlFlow::Continue;
124 });
125 return buf;
126 }
127
InsertRegion(const FreeBlock & block)128 pw::Result<OwnedChunk> SimpleAllocator::InsertRegion(const FreeBlock& block) {
129 internal::LinkedRegionTracker* new_region =
130 metadata_alloc_.New<internal::LinkedRegionTracker>(*this, block.span);
131 if (new_region == nullptr) {
132 return Status::OutOfRange();
133 }
134 std::optional<OwnedChunk> chunk = new_region->CreateFirstChunk();
135 if (!chunk.has_value()) {
136 metadata_alloc_.Delete(new_region);
137 return Status::OutOfRange();
138 }
139 regions_.insert_after(block.iter, *new_region);
140 return std::move(*chunk);
141 }
142
GetAvailableMemorySize()143 SimpleAllocator::AvailableMemorySize SimpleAllocator::GetAvailableMemorySize() {
144 size_t total = 0;
145 size_t max_contiguous = 0;
146 ForEachFreeBlock([&total, &max_contiguous](const FreeBlock& block) {
147 total += block.span.size();
148 if (block.span.size() > max_contiguous) {
149 max_contiguous = block.span.size();
150 }
151 return ControlFlow::Continue;
152 });
153
154 AvailableMemorySize available;
155 available.total = total;
156 available.contiguous = max_contiguous;
157 return available;
158 }
159
160 } // namespace pw::multibuf
161