/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "malloc_arena_pool.h"

#include <algorithm>
#include <cstddef>
#include <iomanip>
#include <numeric>

#include <android-base/logging.h>
#include "arena_allocator-inl.h"
#include "mman.h"

namespace art {

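// An Arena backed by plain calloc()/free(). The backing allocation may be
// over-sized (see RequiredOverallocation()) so that the usable region can be
// aligned to ArenaAllocator::kArenaAlignment even when malloc() only
// guarantees alignof(std::max_align_t).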
class MallocArena final : public Arena {
 public:
  explicit MallocArena(size_t size = arena_allocator::kArenaDefaultSize);
  virtual ~MallocArena();
 private:
  // Extra bytes to request so that the usable region can be aligned to
  // kArenaAlignment when malloc() only guarantees alignof(std::max_align_t).
  static constexpr size_t RequiredOverallocation() {
    return (alignof(std::max_align_t) < ArenaAllocator::kArenaAlignment)
        ? ArenaAllocator::kArenaAlignment - alignof(std::max_align_t)
        : 0u;
  }

  // The raw pointer returned by calloc(); this is what gets passed to free().
  // The aligned pointer handed out for allocations is the inherited `memory_`.
  uint8_t* unaligned_memory_;
};

MallocArena::MallocArena(size_t size) {
  // We need to guarantee kArenaAlignment aligned allocation for the new arena.
  // TODO: Use std::aligned_alloc() when it becomes available with C++17.
  constexpr size_t overallocation = RequiredOverallocation();
  unaligned_memory_ = reinterpret_cast<uint8_t*>(calloc(1, size + overallocation));
  CHECK(unaligned_memory_ != nullptr);  // Abort on OOM.
  DCHECK_ALIGNED(unaligned_memory_, alignof(std::max_align_t));
  if (overallocation == 0u) {
    memory_ = unaligned_memory_;
  } else {
    memory_ = AlignUp(unaligned_memory_, ArenaAllocator::kArenaAlignment);
    if (kRunningOnMemoryTool) {
      size_t head = memory_ - unaligned_memory_;
      size_t tail = overallocation - head;
      MEMORY_TOOL_MAKE_NOACCESS(unaligned_memory_, head);
      MEMORY_TOOL_MAKE_NOACCESS(memory_ + size, tail);
    }
  }
  DCHECK_ALIGNED(memory_, ArenaAllocator::kArenaAlignment);
  size_ = size;
}

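// Under memory tools, mark the over-allocated head and tail as undefined again
// before handing the original calloc() pointer back to free().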
MallocArena::~MallocArena() {
  constexpr size_t overallocation = RequiredOverallocation();
  if (overallocation != 0u && kRunningOnMemoryTool) {
    size_t head = memory_ - unaligned_memory_;
    size_t tail = overallocation - head;
    MEMORY_TOOL_MAKE_UNDEFINED(unaligned_memory_, head);
    MEMORY_TOOL_MAKE_UNDEFINED(memory_ + size_, tail);
  }
  free(reinterpret_cast<void*>(unaligned_memory_));
}

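// Make the arena ready for reuse: zero the bytes that were handed out so a
// recycled arena starts out cleared, just like a freshly calloc()-ed one.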
void Arena::Reset() {
  if (bytes_allocated_ > 0) {
    memset(Begin(), 0, bytes_allocated_);
    bytes_allocated_ = 0;
  }
}

MallocArenaPool::MallocArenaPool() : free_arenas_(nullptr) {
}

MallocArenaPool::~MallocArenaPool() {
  ReclaimMemory();
}

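// Delete every arena on the free list. Does not take lock_ itself; it is
// called from the destructor and from LockReclaimMemory() below.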
void MallocArenaPool::ReclaimMemory() {
  while (free_arenas_ != nullptr) {
    Arena* arena = free_arenas_;
    free_arenas_ = free_arenas_->next_;
    delete arena;
  }
}

void MallocArenaPool::LockReclaimMemory() {
  std::lock_guard<std::mutex> lock(lock_);
  ReclaimMemory();
}

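// Hand out an arena of at least `size` bytes, preferring one from the free
// list (only the first two entries are considered, see the FIXME below) and
// falling back to allocating a fresh MallocArena.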
Arena* MallocArenaPool::AllocArena(size_t size) {
  Arena* ret = nullptr;
  {
    std::lock_guard<std::mutex> lock(lock_);
    // We used to check only the first free arena but we're now checking two.
    //
    // FIXME: This is a workaround for `oatdump` running out of memory because of an allocation
    // pattern where we would allocate a large arena (more than the default size) and then a
    // normal one (default size) and then return them to the pool together, with the normal one
    // passed as `first` to `FreeArenaChain()`, thus becoming the first in the `free_arenas_`
    // list. Since we checked only the first arena, doing this repeatedly would never reuse the
    // existing freed larger arenas and they would just accumulate in the free arena list until
    // running out of memory. This workaround allows reusing the second arena in the list, thus
    // fixing the problem for this specific allocation pattern. Similar allocation patterns
    // with three or more arenas can still result in out of memory issues.
    if (free_arenas_ != nullptr && LIKELY(free_arenas_->Size() >= size)) {
      ret = free_arenas_;
      free_arenas_ = free_arenas_->next_;
    } else if (free_arenas_ != nullptr &&
               free_arenas_->next_ != nullptr &&
               free_arenas_->next_->Size() >= size) {
      ret = free_arenas_->next_;
      free_arenas_->next_ = free_arenas_->next_->next_;
    }
  }
  if (ret == nullptr) {
    ret = new MallocArena(size);
  }
  ret->Reset();
  return ret;
}

void MallocArenaPool::TrimMaps() {
  // No-op: these arenas are malloc()-backed, so there are no memory maps to madvise().
}

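// Report the bytes allocated in arenas currently sitting on the free list.
// Arenas that are checked out to allocators are not included.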
size_t MallocArenaPool::GetBytesAllocated() const {
  size_t total = 0;
  std::lock_guard<std::mutex> lock(lock_);
  for (Arena* arena = free_arenas_; arena != nullptr; arena = arena->next_) {
    total += arena->GetBytesAllocated();
  }
  return total;
}

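// Return a chain of arenas (linked through next_) to the pool. Under memory
// tools the used bytes are marked undefined; with precise tracking enabled
// the arenas are deleted outright instead of being kept for reuse.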
void MallocArenaPool::FreeArenaChain(Arena* first) {
  if (kRunningOnMemoryTool) {
    for (Arena* arena = first; arena != nullptr; arena = arena->next_) {
      MEMORY_TOOL_MAKE_UNDEFINED(arena->memory_, arena->bytes_allocated_);
    }
  }

  if (arena_allocator::kArenaAllocatorPreciseTracking) {
    // Do not reuse arenas when tracking.
    while (first != nullptr) {
      Arena* next = first->next_;
      delete first;
      first = next;
    }
    return;
  }

  if (first != nullptr) {
    Arena* last = first;
    while (last->next_ != nullptr) {
      last = last->next_;
    }
    std::lock_guard<std::mutex> lock(lock_);
    last->next_ = free_arenas_;
    free_arenas_ = first;
  }
}

}  // namespace art