//
//
// Copyright 2017 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//

// \file Arena-based allocator.
// Allows very fast allocation of memory, but that memory cannot be freed
// until the arena as a whole is freed.
// Tracks the total memory allocated against it, so that future arenas can
// pre-allocate the right amount of memory.
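//
// Example usage (an illustrative sketch, not part of this header's API
// surface; `allocator` stands in for a MemoryAllocator obtained elsewhere,
// and `Foo` is a hypothetical type):
//
//   Arena* arena = Arena::Create(/*initial_size=*/1024, &allocator);
//   Foo* foo = arena->New<Foo>();  // no destructor will ever run for foo
//   arena->Destroy();              // releases all arena memory at once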

#ifndef GRPC_SRC_CORE_LIB_RESOURCE_QUOTA_ARENA_H
#define GRPC_SRC_CORE_LIB_RESOURCE_QUOTA_ARENA_H

#include <grpc/support/port_platform.h>

#include <stddef.h>

#include <atomic>
#include <limits>
#include <memory>
#include <new>
#include <utility>

#include "absl/meta/type_traits.h"
#include "absl/utility/utility.h"

#include <grpc/event_engine/memory_allocator.h>

#include "src/core/lib/gpr/alloc.h"
#include "src/core/lib/gprpp/construct_destruct.h"
#include "src/core/lib/promise/context.h"
#include "src/core/lib/resource_quota/memory_quota.h"

// #define GRPC_ARENA_POOLED_ALLOCATIONS_USE_MALLOC
// #define GRPC_ARENA_TRACE_POOLED_ALLOCATIONS
namespace grpc_core {

namespace arena_detail {

struct PoolAndSize {
  size_t alloc_size;
  size_t pool_index;
};

template <typename Void, size_t kIndex, size_t kObjectSize,
          size_t... kBucketSize>
struct PoolIndexForSize;

template <size_t kObjectSize, size_t kIndex, size_t kSmallestRemainingBucket,
          size_t... kBucketSizes>
struct PoolIndexForSize<
    absl::enable_if_t<kObjectSize <= kSmallestRemainingBucket>, kIndex,
    kObjectSize, kSmallestRemainingBucket, kBucketSizes...> {
  static constexpr size_t kPool = kIndex;
  static constexpr size_t kSize = kSmallestRemainingBucket;
};

template <size_t kObjectSize, size_t kIndex, size_t kSmallestRemainingBucket,
          size_t... kBucketSizes>
struct PoolIndexForSize<
    absl::enable_if_t<(kObjectSize > kSmallestRemainingBucket)>, kIndex,
    kObjectSize, kSmallestRemainingBucket, kBucketSizes...>
    : public PoolIndexForSize<void, kIndex + 1, kObjectSize, kBucketSizes...> {
};

template <size_t kObjectSize, size_t... kBucketSizes>
constexpr size_t PoolFromObjectSize(
    absl::integer_sequence<size_t, kBucketSizes...>) {
  return PoolIndexForSize<void, 0, kObjectSize, kBucketSizes...>::kPool;
}

template <size_t kObjectSize, size_t... kBucketSizes>
constexpr size_t AllocationSizeFromObjectSize(
    absl::integer_sequence<size_t, kBucketSizes...>) {
  return PoolIndexForSize<void, 0, kObjectSize, kBucketSizes...>::kSize;
}
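
// Worked example (an illustrative sketch; the sizes mirror the buckets that
// Arena selects below):
//
//   using Sizes = absl::integer_sequence<size_t, 80, 304, 528, 1024>;
//   static_assert(PoolFromObjectSize<100>(Sizes()) == 1, "");
//   static_assert(AllocationSizeFromObjectSize<100>(Sizes()) == 304, "");
//
// i.e. a 100-byte object is too big for the 80-byte bucket, so it lands in
// pool 1 and is allocated 304 bytes.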

template <size_t kIndex, size_t... kBucketSizes>
struct ChoosePoolForAllocationSizeImpl;

template <size_t kIndex, size_t kBucketSize, size_t... kBucketSizes>
struct ChoosePoolForAllocationSizeImpl<kIndex, kBucketSize, kBucketSizes...> {
  static PoolAndSize Fn(size_t n) {
    if (n <= kBucketSize) return {kBucketSize, kIndex};
    return ChoosePoolForAllocationSizeImpl<kIndex + 1, kBucketSizes...>::Fn(n);
  }
};

template <size_t kIndex>
struct ChoosePoolForAllocationSizeImpl<kIndex> {
  static PoolAndSize Fn(size_t n) {
    return PoolAndSize{n, std::numeric_limits<size_t>::max()};
  }
};

template <size_t... kBucketSizes>
PoolAndSize ChoosePoolForAllocationSize(
    size_t n, absl::integer_sequence<size_t, kBucketSizes...>) {
  return ChoosePoolForAllocationSizeImpl<0, kBucketSizes...>::Fn(n);
}
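
// Runtime counterpart to the compile-time mapping above (illustrative):
//
//   using Sizes = absl::integer_sequence<size_t, 80, 304, 528, 1024>;
//   ChoosePoolForAllocationSize(100, Sizes());   // -> {304, 1}
//   ChoosePoolForAllocationSize(4096, Sizes());  // -> {4096, SIZE_MAX},
//                                                //    i.e. no pool fits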

}  // namespace arena_detail

class Arena {
  // Selected pool sizes.
  // How to tune: see tools/codegen/core/optimize_arena_pool_sizes.py
  using PoolSizes = absl::integer_sequence<size_t, 80, 304, 528, 1024>;
  struct FreePoolNode {
    FreePoolNode* next;
  };

 public:
  // Create an arena, with \a initial_size bytes in the first allocated buffer.
  static Arena* Create(size_t initial_size, MemoryAllocator* memory_allocator);

  // Create an arena, with \a initial_size bytes in the first allocated buffer,
  // and return both a pointer to the new arena and a void* pointing at the
  // first \a alloc_size bytes allocated from it.
  static std::pair<Arena*, void*> CreateWithAlloc(
      size_t initial_size, size_t alloc_size,
      MemoryAllocator* memory_allocator);
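
  // Example (an illustrative sketch; `allocator` is a hypothetical
  // MemoryAllocator owned by the caller):
  //
  //   std::pair<Arena*, void*> p =
  //       Arena::CreateWithAlloc(/*initial_size=*/1024, /*alloc_size=*/128,
  //                              &allocator);
  //   Arena* arena = p.first;  // the new arena
  //   void* mem = p.second;    // 128 bytes already allocated from it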

  // Destroy all `ManagedNew` allocated objects.
  // Allows safe destruction of these objects even if they need context held by
  // the arena.
  // Idempotent.
  // TODO(ctiller): eliminate ManagedNew.
  void DestroyManagedNewObjects();

  // Destroy an arena.
  void Destroy();

  // Return the total number of bytes used by this arena so far.
  size_t TotalUsedBytes() const {
    return total_used_.load(std::memory_order_relaxed);
  }

  // Allocate \a size bytes from the arena.
  void* Alloc(size_t size) {
    static constexpr size_t base_size =
        GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(Arena));
    size = GPR_ROUND_UP_TO_ALIGNMENT_SIZE(size);
    size_t begin = total_used_.fetch_add(size, std::memory_order_relaxed);
    if (begin + size <= initial_zone_size_) {
      return reinterpret_cast<char*>(this) + base_size + begin;
    } else {
      return AllocZone(size);
    }
  }

  // TODO(roth): We currently assume that all callers need alignment of 16
  // bytes, which may be wrong in some cases. When we have time, we should
  // change this to instead use the alignment of the type being allocated by
  // this method.
  template <typename T, typename... Args>
  T* New(Args&&... args) {
    T* t = static_cast<T*>(Alloc(sizeof(T)));
    Construct(t, std::forward<Args>(args)...);
    return t;
  }
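
  // Example (an illustrative sketch; `Foo` is a hypothetical type):
  //
  //   Foo* foo = arena->New<Foo>("arg");
  //   // No destructor ever runs for foo: the memory is reclaimed, without
  //   // running ~Foo(), only when the arena itself is destroyed.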

  // Like New, but has the arena call p->~T() at arena destruction time.
  template <typename T, typename... Args>
  T* ManagedNew(Args&&... args) {
    auto* p = New<ManagedNewImpl<T>>(std::forward<Args>(args)...);
    p->Link(&managed_new_head_);
    return &p->t;
  }
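
  // Example (an illustrative sketch; `Watcher` is a hypothetical type whose
  // destructor must run before the arena's memory disappears):
  //
  //   Watcher* w = arena->ManagedNew<Watcher>();
  //   // ~Watcher() runs during DestroyManagedNewObjects(), which is also
  //   // invoked when the arena is destroyed.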

#ifndef GRPC_ARENA_POOLED_ALLOCATIONS_USE_MALLOC
  class PooledDeleter {
   public:
    explicit PooledDeleter(std::atomic<FreePoolNode*>* free_list)
        : free_list_(free_list) {}
    PooledDeleter() = default;
    template <typename T>
    void operator()(T* p) {
      // TODO(ctiller): the promise-based filter hijacks ownership of some
      // pointers to make them appear as PoolPtr without really transferring
      // ownership, by setting the arena to nullptr.
      // This is a transitional hack and should be removed once the
      // promise-based filter is removed.
      if (free_list_ != nullptr) {
        p->~T();
        FreePooled(p, free_list_);
      }
    }

    bool has_freelist() const { return free_list_ != nullptr; }

   private:
    std::atomic<FreePoolNode*>* free_list_;
  };

  template <typename T>
  using PoolPtr = std::unique_ptr<T, PooledDeleter>;

  // Make a unique_ptr to T that is allocated from the arena.
  // When the pointer is released, the memory may be reused for other
  // MakePooled(.*) calls.
  // CAUTION: The amount of memory allocated is rounded up to the nearest
  //          value in Arena::PoolSizes, and so this may pessimize total
  //          arena size.
  template <typename T, typename... Args>
  PoolPtr<T> MakePooled(Args&&... args) {
    auto* free_list =
        &pools_[arena_detail::PoolFromObjectSize<sizeof(T)>(PoolSizes())];
    return PoolPtr<T>(
        new (AllocPooled(
            sizeof(T),
            arena_detail::AllocationSizeFromObjectSize<sizeof(T)>(PoolSizes()),
            free_list)) T(std::forward<Args>(args)...),
        PooledDeleter(free_list));
  }
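
  // Example (an illustrative sketch; `Foo` is a hypothetical type):
  //
  //   PoolPtr<Foo> foo = arena->MakePooled<Foo>(1, 2);
  //   foo.reset();  // runs ~Foo() and puts the memory on the free list,
  //                 // ready for a later MakePooled of a similar size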

  // Make a unique_ptr to an array of T that is allocated from the arena.
  // When the pointer is released, the memory may be reused for other
  // MakePooled(.*) calls.
  // One can use MakePooledArray<char> to allocate a buffer of bytes.
  // CAUTION: The amount of memory allocated is rounded up to the nearest
  //          value in Arena::PoolSizes, and so this may pessimize total
  //          arena size.
  template <typename T>
  PoolPtr<T[]> MakePooledArray(size_t n) {
    auto where =
        arena_detail::ChoosePoolForAllocationSize(n * sizeof(T), PoolSizes());
    if (where.pool_index == std::numeric_limits<size_t>::max()) {
      return PoolPtr<T[]>(new (Alloc(where.alloc_size)) T[n],
                          PooledDeleter(nullptr));
    } else {
      return PoolPtr<T[]>(new (AllocPooled(where.alloc_size, where.alloc_size,
                                           &pools_[where.pool_index])) T[n],
                          PooledDeleter(&pools_[where.pool_index]));
    }
  }
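
  // Example (an illustrative sketch): a 64-byte scratch buffer.
  //
  //   PoolPtr<char[]> buf = arena->MakePooledArray<char>(64);
  //   // 64 <= 80, so the allocation comes from (and returns to) the
  //   // 80-byte pool; a request larger than 1024 bytes would instead be
  //   // carved from the arena and never recycled.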

  // Like MakePooled, but with manual memory management.
  // The caller is responsible for calling DeletePooled() on the returned
  // pointer, and is expected to call it with the same type T as was passed to
  // this function (else the free list returned to the arena will be
  // corrupted).
  template <typename T, typename... Args>
  T* NewPooled(Args&&... args) {
    auto* free_list =
        &pools_[arena_detail::PoolFromObjectSize<sizeof(T)>(PoolSizes())];
    return new (AllocPooled(
        sizeof(T),
        arena_detail::AllocationSizeFromObjectSize<sizeof(T)>(PoolSizes()),
        free_list)) T(std::forward<Args>(args)...);
  }

  template <typename T>
  void DeletePooled(T* p) {
    auto* free_list =
        &pools_[arena_detail::PoolFromObjectSize<sizeof(T)>(PoolSizes())];
    p->~T();
    FreePooled(p, free_list);
  }
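
  // Example (an illustrative sketch; `Foo` is a hypothetical type):
  //
  //   Foo* foo = arena->NewPooled<Foo>();
  //   // ... later, using the same type T = Foo ...
  //   arena->DeletePooled(foo);  // ~Foo(), then memory back on Foo's pool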
#else
  class PooledDeleter {
   public:
    PooledDeleter() = default;
    explicit PooledDeleter(std::nullptr_t) : delete_(false) {}
    template <typename T>
    void operator()(T* p) {
      // TODO(ctiller): the promise-based filter hijacks ownership of some
      // pointers to make them appear as PoolPtr without really transferring
      // ownership, by setting the arena to nullptr.
      // This is a transitional hack and should be removed once the
      // promise-based filter is removed.
      if (delete_) delete p;
    }

    bool has_freelist() const { return delete_; }

   private:
    bool delete_ = true;
  };

  template <typename T>
  using PoolPtr = std::unique_ptr<T, PooledDeleter>;

  // Make a unique_ptr to T that is allocated from the arena.
  // When the pointer is released, the memory may be reused for other
  // MakePooled(.*) calls.
  // CAUTION: The amount of memory allocated is rounded up to the nearest
  //          value in Arena::PoolSizes, and so this may pessimize total
  //          arena size.
  template <typename T, typename... Args>
  PoolPtr<T> MakePooled(Args&&... args) {
    return PoolPtr<T>(new T(std::forward<Args>(args)...), PooledDeleter());
  }

  // Make a unique_ptr to an array of T that is allocated from the arena.
  // When the pointer is released, the memory may be reused for other
  // MakePooled(.*) calls.
  // One can use MakePooledArray<char> to allocate a buffer of bytes.
  // CAUTION: The amount of memory allocated is rounded up to the nearest
  //          value in Arena::PoolSizes, and so this may pessimize total
  //          arena size.
  template <typename T>
  PoolPtr<T[]> MakePooledArray(size_t n) {
    return PoolPtr<T[]>(new T[n], PooledDeleter());
  }

  // Like MakePooled, but with manual memory management.
  // The caller is responsible for calling DeletePooled() on the returned
  // pointer, and is expected to call it with the same type T as was passed to
  // this function (else the free list returned to the arena will be
  // corrupted).
  template <typename T, typename... Args>
  T* NewPooled(Args&&... args) {
    return new T(std::forward<Args>(args)...);
  }

  template <typename T>
  void DeletePooled(T* p) {
    delete p;
  }
#endif

 private:
  struct Zone {
    Zone* prev;
  };

  struct ManagedNewObject {
    ManagedNewObject* next = nullptr;
    void Link(std::atomic<ManagedNewObject*>* head);
    virtual ~ManagedNewObject() = default;
  };

  template <typename T>
  struct ManagedNewImpl : public ManagedNewObject {
    T t;
    template <typename... Args>
    explicit ManagedNewImpl(Args&&... args) : t(std::forward<Args>(args)...) {}
  };

  // Initialize an arena.
  // Parameters:
  //   initial_size: The initial size of the whole arena in bytes. These bytes
  //   are contained within 'zone 0'. If the arena user ends up requiring more
  //   memory than the arena contains in zone 0, subsequent zones are allocated
  //   on demand and maintained in a tail-linked list.
  //
  //   initial_alloc: Optionally, construct the arena as though a call to
  //   Alloc() had already been made for initial_alloc bytes. This provides a
  //   quick optimization (avoiding an atomic fetch-add) for the common case
  //   where we wish to create an arena and then perform an immediate
  //   allocation.
  explicit Arena(size_t initial_size, size_t initial_alloc,
                 MemoryAllocator* memory_allocator)
      : total_used_(GPR_ROUND_UP_TO_ALIGNMENT_SIZE(initial_alloc)),
        initial_zone_size_(initial_size),
        memory_allocator_(memory_allocator) {}

  ~Arena();

  void* AllocZone(size_t size);

  void* AllocPooled(size_t obj_size, size_t alloc_size,
                    std::atomic<FreePoolNode*>* head);
  static void FreePooled(void* p, std::atomic<FreePoolNode*>* head);

  void TracePoolAlloc(size_t size, void* ptr) {
    (void)size;
    (void)ptr;
#ifdef GRPC_ARENA_TRACE_POOLED_ALLOCATIONS
    gpr_log(GPR_ERROR, "ARENA %p ALLOC %" PRIuPTR " @ %p", this, size, ptr);
#endif
  }
  static void TracePoolFree(void* ptr) {
    (void)ptr;
#ifdef GRPC_ARENA_TRACE_POOLED_ALLOCATIONS
    gpr_log(GPR_ERROR, "FREE %p", ptr);
#endif
  }

  // Keep track of the total used size. We use this in our call sizing
  // hysteresis.
  std::atomic<size_t> total_used_{0};
  std::atomic<size_t> total_allocated_{0};
  const size_t initial_zone_size_;
  // If the initial arena allocation wasn't enough, we allocate additional
  // zones in a reverse linked list. Each additional zone consists of (1) a
  // pointer to the zone added before this zone (null if this is the first
  // additional zone) and (2) the allocated memory. The arena itself maintains
  // a pointer to the last zone; the zone list is reverse-walked during arena
  // destruction only.
  std::atomic<Zone*> last_zone_{nullptr};
  std::atomic<ManagedNewObject*> managed_new_head_{nullptr};
#ifndef GRPC_ARENA_POOLED_ALLOCATIONS_USE_MALLOC
  std::atomic<FreePoolNode*> pools_[PoolSizes::size()]{};
#endif
  // The backing memory allocator.
  MemoryAllocator* const memory_allocator_;
};

// Smart pointer for arenas when the final size is not required.
struct ScopedArenaDeleter {
  void operator()(Arena* arena) { arena->Destroy(); }
};
using ScopedArenaPtr = std::unique_ptr<Arena, ScopedArenaDeleter>;
inline ScopedArenaPtr MakeScopedArena(size_t initial_size,
                                      MemoryAllocator* memory_allocator) {
  return ScopedArenaPtr(Arena::Create(initial_size, memory_allocator));
}
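
// Example (an illustrative sketch; `allocator` is a hypothetical
// MemoryAllocator):
//
//   ScopedArenaPtr arena = MakeScopedArena(1024, &allocator);
//   // ... use arena.get() ...
//   // Arena::Destroy() runs automatically when `arena` leaves scope.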

// Arenas form a context for activities
template <>
struct ContextType<Arena> {};

}  // namespace grpc_core

#endif  // GRPC_SRC_CORE_LIB_RESOURCE_QUOTA_ARENA_H