/*
 * Copyright (C) 2023 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/trace_processor/util/bump_allocator.h"

#include <limits>
#include <optional>

#include "perfetto/base/compiler.h"
#include "perfetto/base/logging.h"
#include "perfetto/ext/base/utils.h"

namespace perfetto {
namespace trace_processor {
namespace {

// TODO(b/266983484): consider using base::PagedMemory unless a) we are on a
// platform where that doesn't make sense (e.g. WASM) or b) we are trying to
// do heap profiling.
base::AlignedUniquePtr<uint8_t[]> Allocate(uint32_t size) {
  uint8_t* ptr = static_cast<uint8_t*>(base::AlignedAlloc(8, size));
  // Poison the region to try and catch out-of-bounds accesses.
  PERFETTO_ASAN_POISON(ptr, size);
  return base::AlignedUniquePtr<uint8_t[]>(ptr);
}

}  // namespace

BumpAllocator::BumpAllocator() = default;

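// Every allocation must have been freed before the allocator is destroyed;
// an unfreed allocation at this point indicates a leak in the calling code.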
BumpAllocator::~BumpAllocator() {
  for (const auto& chunk : chunks_) {
    PERFETTO_CHECK(chunk.unfreed_allocations == 0);
  }
}

BumpAllocator::AllocId BumpAllocator::Alloc(uint32_t size) {
  // Size is required to be a multiple of 8 to avoid needing to deal with
  // alignment. It must also be at most kChunkSize, as we do not support
  // allocations which span chunks (see the usage sketch after this function).
  PERFETTO_DCHECK(size % 8 == 0);
  PERFETTO_DCHECK(size <= kChunkSize);

  // Fast path: check if we have space to service this allocation in the
  // current chunk.
  std::optional<AllocId> alloc_id = TryAllocInLastChunk(size);
  if (alloc_id) {
    return *alloc_id;
  }

  // Slow path: we don't have enough space in the last chunk, so we create a
  // new one.
  Chunk chunk;
  chunk.allocation = Allocate(kChunkSize);
  chunks_.emplace_back(std::move(chunk));

  // Ensure that we haven't exceeded the maximum number of chunks.
  PERFETTO_CHECK(LastChunkIndex() < kMaxChunkCount);

  // This time the allocation should definitely succeed in the last chunk
  // (which we just added).
  alloc_id = TryAllocInLastChunk(size);
  PERFETTO_CHECK(alloc_id);
  return *alloc_id;
}
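
// A minimal usage sketch (illustrative only, not part of this file):
//
//   BumpAllocator allocator;
//   BumpAllocator::AllocId id = allocator.Alloc(16);  // Multiple of 8.
//   void* ptr = allocator.GetPointer(id);
//   // ... read/write up to 16 bytes at |ptr| ...
//   allocator.Free(id);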
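
// Note that freeing only decrements the owning chunk's unfreed-allocation
// counter; the backing memory is not reclaimed until EraseFrontFreeChunks()
// erases chunks whose counters have dropped to zero.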
void BumpAllocator::Free(AllocId id) {
  uint64_t queue_index = ChunkIndexToQueueIndex(id.chunk_index);
  PERFETTO_DCHECK(queue_index <= std::numeric_limits<size_t>::max());
  Chunk& chunk = chunks_.at(static_cast<size_t>(queue_index));
  PERFETTO_DCHECK(chunk.unfreed_allocations > 0);
  chunk.unfreed_allocations--;
}
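
// Translates an AllocId into a raw pointer by offsetting into the owning
// chunk's buffer.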
void* BumpAllocator::GetPointer(AllocId id) {
  uint64_t queue_index = ChunkIndexToQueueIndex(id.chunk_index);
  PERFETTO_CHECK(queue_index <= std::numeric_limits<size_t>::max());
  return chunks_.at(static_cast<size_t>(queue_index)).allocation.get() +
         id.chunk_offset;
}
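
// Chunks can only be erased from the front of the queue: erasing from the
// middle would change the queue position of every later chunk and invalidate
// the chunk indices held inside live AllocIds. The running count of erased
// chunks lets ChunkIndexToQueueIndex keep mapping stable chunk indices onto
// the shrinking queue.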
uint64_t BumpAllocator::EraseFrontFreeChunks() {
  size_t to_erase_chunks = 0;
  for (; to_erase_chunks < chunks_.size(); ++to_erase_chunks) {
    // Break on the first chunk which still has unfreed allocations.
    if (chunks_.at(to_erase_chunks).unfreed_allocations > 0) {
      break;
    }
  }
  chunks_.erase_front(to_erase_chunks);
  erased_front_chunks_count_ += to_erase_chunks;
  return to_erase_chunks;
}
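
// Sketch of the reclamation behaviour (illustrative only; assumes both
// allocations land in the same, first chunk):
//
//   BumpAllocator allocator;
//   BumpAllocator::AllocId a = allocator.Alloc(8);
//   BumpAllocator::AllocId b = allocator.Alloc(8);
//   allocator.Free(a);
//   allocator.EraseFrontFreeChunks();  // Erases nothing: |b| keeps the
//                                      // front chunk alive.
//   allocator.Free(b);
//   allocator.EraseFrontFreeChunks();  // Now erases the front chunk.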
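
// Returns an id one past the end of the last allocation made so far; callers
// can use this as an exclusive upper bound when iterating over AllocIds.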
BumpAllocator::AllocId BumpAllocator::PastTheEndId() {
  if (chunks_.empty()) {
    return AllocId{erased_front_chunks_count_, 0};
  }
  if (chunks_.back().bump_offset == kChunkSize) {
    return AllocId{LastChunkIndex() + 1, 0};
  }
  return AllocId{LastChunkIndex(), chunks_.back().bump_offset};
}

std::optional<BumpAllocator::AllocId> BumpAllocator::TryAllocInLastChunk(
    uint32_t size) {
  if (chunks_.empty()) {
    return std::nullopt;
  }

  // TODO(b/266983484): consider switching this to bump downwards instead of
  // upwards for more efficient code generation.
  Chunk& chunk = chunks_.back();

  // Verify some invariants:
  // 1) The allocation must exist.
  // 2) The bump offset must be within the bounds of the chunk.
  PERFETTO_DCHECK(chunk.allocation);
  PERFETTO_DCHECK(chunk.bump_offset <= kChunkSize);

  // If the allocation would extend past the end of this chunk, we cannot
  // service it here.
  uint32_t alloc_offset = chunk.bump_offset;
  uint32_t new_bump_offset = chunk.bump_offset + size;
  if (new_bump_offset > kChunkSize) {
    return std::nullopt;
  }

  // Set the new offset equal to the end of this allocation and increment the
  // unfreed allocation counter.
  chunk.bump_offset = new_bump_offset;
  chunk.unfreed_allocations++;

  // Unpoison the allocation range to allow access to it on ASAN builds.
  PERFETTO_ASAN_UNPOISON(chunk.allocation.get() + alloc_offset, size);

  return AllocId{LastChunkIndex(), alloc_offset};
}

}  // namespace trace_processor
}  // namespace perfetto