//===-- Implementation header for a block of memory -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIBC_SRC___SUPPORT_BLOCK_H
#define LLVM_LIBC_SRC___SUPPORT_BLOCK_H

#include "src/__support/CPP/algorithm.h"
#include "src/__support/CPP/cstddef.h"
#include "src/__support/CPP/limits.h"
#include "src/__support/CPP/new.h"
#include "src/__support/CPP/optional.h"
#include "src/__support/CPP/span.h"
#include "src/__support/CPP/type_traits.h"
#include "src/__support/libc_assert.h"
#include "src/__support/macros/config.h"

#include <stdint.h>

namespace LIBC_NAMESPACE_DECL {

namespace internal {
// Types of corrupted blocks, and functions to crash with an error message
// corresponding to each type.
enum class BlockStatus {
  VALID,
  MISALIGNED,
  PREV_MISMATCHED,
  NEXT_MISMATCHED,
};
} // namespace internal

/// Returns the value rounded down to the nearest multiple of alignment.
LIBC_INLINE constexpr size_t align_down(size_t value, size_t alignment) {
  // Note this shouldn't overflow since the result will always be <= value.
  return (value / alignment) * alignment;
}

/// Returns the value rounded down to the nearest multiple of alignment.
template <typename T>
LIBC_INLINE constexpr T *align_down(T *value, size_t alignment) {
  return reinterpret_cast<T *>(
      align_down(reinterpret_cast<size_t>(value), alignment));
}

/// Returns the value rounded up to the nearest multiple of alignment.
LIBC_INLINE constexpr size_t align_up(size_t value, size_t alignment) {
  __builtin_add_overflow(value, alignment - 1, &value);
  return align_down(value, alignment);
}

/// Returns the value rounded up to the nearest multiple of alignment.
template <typename T>
LIBC_INLINE constexpr T *align_up(T *value, size_t alignment) {
  return reinterpret_cast<T *>(
      align_up(reinterpret_cast<size_t>(value), alignment));
}
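
// For illustration, a few sample values (hypothetical inputs; the results
// follow directly from the definitions above):
//
//   align_down(size_t{13}, 8); // -> 8
//   align_up(size_t{13}, 8);   // -> 16
//   align_up(size_t{16}, 8);   // -> 16 (already a multiple)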

using ByteSpan = cpp::span<LIBC_NAMESPACE::cpp::byte>;
using cpp::optional;

/// Memory region with links to adjacent blocks.
///
/// The blocks store their offsets to the previous and next blocks. The latter
/// is also the block's size.
///
/// Blocks will always be aligned to an `ALIGNMENT` boundary. Block sizes will
/// always be rounded up to a multiple of `ALIGNMENT`.
///
/// As an example, the diagram below represents two contiguous `Block`s. The
/// indices indicate byte offsets:
///
/// @code{.unparsed}
/// Block 1:
/// +---------------------+--------------+
/// | Header              | Usable space |
/// +----------+----------+--------------+
/// | prev     | next     |              |
/// | 0......3 | 4......7 | 8........227 |
/// | 00000000 | 00000230 |  <app data>  |
/// +----------+----------+--------------+
/// Block 2:
/// +---------------------+--------------+
/// | Header              | Usable space |
/// +----------+----------+--------------+
/// | prev     | next     |              |
/// | 0......3 | 4......7 | 8........827 |
/// | 00000230 | 00000830 | f7f7....f7f7 |
/// +----------+----------+--------------+
/// @endcode
///
/// As a space optimization, when a block is allocated, it consumes the prev
/// field of the following block:
///
/// @code{.unparsed}
/// Block 1 (used):
/// +---------------------+--------------+
/// | Header              | Usable space |
/// +----------+----------+--------------+
/// | prev     | next     |              |
/// | 0......3 | 4......7 | 8........230 |
/// | 00000000 | 00000230 |  <app data>  |
/// +----------+----------+--------------+
/// Block 2:
/// +---------------------+--------------+
/// | B1       | Header   | Usable space |
/// +----------+----------+--------------+
/// |          | next     |              |
/// | 0......3 | 4......7 | 8........827 |
/// | xxxxxxxx | 00000830 | f7f7....f7f7 |
/// +----------+----------+--------------+
/// @endcode
///
/// The next offset of a block matches the previous offset of its next block.
/// The first block in a list is denoted by having a previous offset of `0`.
class Block {
  // Masks for the contents of the next_ field.
  static constexpr size_t PREV_FREE_MASK = 1 << 0;
  static constexpr size_t LAST_MASK = 1 << 1;
  static constexpr size_t SIZE_MASK = ~(PREV_FREE_MASK | LAST_MASK);

public:
  static constexpr size_t ALIGNMENT = cpp::max(alignof(max_align_t), size_t{4});
  static const size_t BLOCK_OVERHEAD;

  // No copy or move.
  Block(const Block &other) = delete;
  Block &operator=(const Block &other) = delete;

  /// Creates the first block for a given memory region, followed by a sentinel
  /// last block. Returns the first block.
  static optional<Block *> init(ByteSpan region);

  /// @returns  A pointer to a `Block`, given a pointer to the start of the
  ///           usable space inside the block.
  ///
  /// This is the inverse of `usable_space()`.
  ///
  /// @warning  This method does not do any checking; passing a random
  ///           pointer will return a non-null pointer.
  LIBC_INLINE static Block *from_usable_space(void *usable_space) {
    auto *bytes = reinterpret_cast<cpp::byte *>(usable_space);
    return reinterpret_cast<Block *>(bytes - BLOCK_OVERHEAD);
  }
  LIBC_INLINE static const Block *from_usable_space(const void *usable_space) {
    const auto *bytes = reinterpret_cast<const cpp::byte *>(usable_space);
    return reinterpret_cast<const Block *>(bytes - BLOCK_OVERHEAD);
  }
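
  // For illustration: `from_usable_space` inverts `usable_space`. For a
  // hypothetical block `b` handed out by this allocator,
  //
  //   void *p = b->usable_space();
  //   Block::from_usable_space(p) == b; // round-trips back to the header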

  /// @returns The total size of the block in bytes, including the header.
  LIBC_INLINE size_t outer_size() const { return next_ & SIZE_MASK; }

  LIBC_INLINE static size_t outer_size(size_t inner_size) {
    // The usable region includes the prev_ field of the next block.
    return inner_size - sizeof(prev_) + BLOCK_OVERHEAD;
  }

  /// @returns The number of usable bytes inside the block were it to be
  /// allocated.
  LIBC_INLINE size_t inner_size() const {
    if (!next())
      return 0;
    return inner_size(outer_size());
  }

  /// @returns The number of usable bytes inside a block with the given outer
  /// size were it to be allocated.
  LIBC_INLINE static size_t inner_size(size_t outer_size) {
    // The usable region includes the prev_ field of the next block.
    return inner_size_free(outer_size) + sizeof(prev_);
  }

  /// @returns The number of usable bytes inside the block if it remains free.
  LIBC_INLINE size_t inner_size_free() const {
    if (!next())
      return 0;
    return inner_size_free(outer_size());
  }

  /// @returns The number of usable bytes inside a block with the given outer
  /// size if it remains free.
  LIBC_INLINE static size_t inner_size_free(size_t outer_size) {
    return outer_size - BLOCK_OVERHEAD;
  }
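
  // A worked example of the size relationships (assuming a typical 64-bit
  // target where BLOCK_OVERHEAD == 16 and sizeof(prev_) == 8; both values
  // are target-dependent). For a block with outer_size() == 256:
  //
  //   inner_size_free(256); // -> 256 - 16     == 240
  //   inner_size(256);      // -> 240 + 8      == 248
  //   outer_size(248);      // -> 248 - 8 + 16 == 256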

  /// @returns A pointer to the usable space inside this block.
  LIBC_INLINE cpp::byte *usable_space() {
    return reinterpret_cast<cpp::byte *>(this) + BLOCK_OVERHEAD;
  }
  LIBC_INLINE const cpp::byte *usable_space() const {
    return reinterpret_cast<const cpp::byte *>(this) + BLOCK_OVERHEAD;
  }

  // @returns The region of memory the block manages, including the header.
  LIBC_INLINE ByteSpan region() {
    return {reinterpret_cast<cpp::byte *>(this), outer_size()};
  }

  /// Attempts to split this block.
  ///
  /// If successful, the block will have an inner size of at least
  /// `new_inner_size`, rounded to ensure that the split point is on an
  /// ALIGNMENT boundary. The remaining space will be returned as a new block.
  /// Note that the prev_ field of the next block counts as part of the inner
  /// size of the returned block.
  optional<Block *> split(size_t new_inner_size);

  /// Merges this block with the one that comes after it.
  bool merge_next();

  /// @returns The block immediately after this one, or a null pointer if this
  /// is the last block.
  LIBC_INLINE Block *next() const {
    if (next_ & LAST_MASK)
      return nullptr;
    return reinterpret_cast<Block *>(reinterpret_cast<uintptr_t>(this) +
                                     outer_size());
  }

  /// @returns The free block immediately before this one, otherwise nullptr.
  LIBC_INLINE Block *prev_free() const {
    if (!(next_ & PREV_FREE_MASK))
      return nullptr;
    return reinterpret_cast<Block *>(reinterpret_cast<uintptr_t>(this) - prev_);
  }

  /// @returns Whether the block is unavailable for allocation.
  LIBC_INLINE bool used() const { return !next() || !next()->prev_free(); }

  /// Marks this block as in use.
  LIBC_INLINE void mark_used() {
    LIBC_ASSERT(next() && "last block is always considered used");
    next()->next_ &= ~PREV_FREE_MASK;
  }

  /// Marks this block as free.
  LIBC_INLINE void mark_free() {
    LIBC_ASSERT(next() && "last block is always considered used");
    next()->next_ |= PREV_FREE_MASK;
    // The next block's prev_ field becomes alive, as it is no longer part of
    // this block's used space.
    *new (&next()->prev_) size_t = outer_size();
  }
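
  // An illustrative sketch: a block's used/free state lives in its
  // successor's header, so these calls toggle the neighbor's flag. For a
  // hypothetical non-last block `b`:
  //
  //   b->mark_free(); // sets PREV_FREE_MASK in b->next()->next_ and writes
  //                   // b's outer size into b->next()->prev_
  //   b->used();      // -> false
  //   b->mark_used(); // clears the flag; b->next()->prev_ is dead again
  //   b->used();      // -> true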

  /// Marks this block as the last one in the chain. Makes next() return
  /// nullptr.
  LIBC_INLINE void mark_last() { next_ |= LAST_MASK; }

  LIBC_INLINE constexpr Block(size_t outer_size) : next_(outer_size) {
    LIBC_ASSERT(outer_size % ALIGNMENT == 0 && "block sizes must be aligned");
  }

  LIBC_INLINE bool is_usable_space_aligned(size_t alignment) const {
    return reinterpret_cast<uintptr_t>(usable_space()) % alignment == 0;
  }

  /// @returns The new inner size of this block that would give the usable
  /// space of the next block the given alignment.
  LIBC_INLINE size_t padding_for_alignment(size_t alignment) const {
    if (is_usable_space_aligned(alignment))
      return 0;

    // We need to ensure we can always split this block into a "padding" block
    // and the aligned block. To do this, we need enough extra space for at
    // least one block.
    //
    // |block   |usable_space                          |
    // |........|......................................|
    //                            ^
    //                            Alignment requirement
    //
    //
    // |block   |space   |block   |usable_space        |
    // |........|........|........|....................|
    //                            ^
    //                            Alignment requirement
    //
    alignment = cpp::max(alignment, ALIGNMENT);
    uintptr_t start = reinterpret_cast<uintptr_t>(usable_space());
    uintptr_t next_usable_space = align_up(start + BLOCK_OVERHEAD, alignment);
    uintptr_t next_block = next_usable_space - BLOCK_OVERHEAD;
    return next_block - start + sizeof(prev_);
  }
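
  // A worked example (hypothetical addresses; assumes BLOCK_OVERHEAD == 16
  // and sizeof(prev_) == 8): a block at 0x1000 has usable space starting at
  // 0x1010. For alignment == 64:
  //
  //   start             == 0x1010
  //   next_usable_space == align_up(0x1010 + 16, 64) == 0x1040
  //   next_block        == 0x1040 - 16               == 0x1030
  //   returned padding  == 0x1030 - 0x1010 + 8       == 40
  //
  // Splitting at an inner size of 40 then places the next block's usable
  // space exactly on the 64-byte boundary at 0x1040.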

  // Check that we can `allocate` a block with a given alignment and size from
  // this existing block.
  bool can_allocate(size_t alignment, size_t size) const;

  // This is the return type for `allocate` which can split one block into up to
  // three blocks.
  struct BlockInfo {
    // This is the newly aligned block. It will have the alignment requested by
    // a call to `allocate` and an inner size of at least `size`.
    Block *block;

    // If the usable_space in the new block was not aligned according to the
    // `alignment` parameter, we will need to split into this block and the
    // `block` to ensure `block` is properly aligned. In this case, `prev` will
    // be a pointer to this new "padding" block. `prev` will be nullptr if no
    // new block was created or we were able to merge the block before the
    // original block with the "padding" block.
    Block *prev;

    // This is the remainder of the next block after splitting the `block`
    // according to `size`. This can happen if there's enough space after the
    // `block`.
    Block *next;
  };

  // Divide a block into up to 3 blocks according to `BlockInfo`. This should
  // only be called if `can_allocate` returns true.
  static BlockInfo allocate(Block *block, size_t alignment, size_t size);
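
  // A minimal usage sketch (hypothetical block `b`, alignment, and size;
  // freelist bookkeeping for the returned `prev` and `next` blocks is the
  // caller's responsibility):
  //
  //   if (b->can_allocate(alignment, size)) {
  //     Block::BlockInfo info = Block::allocate(b, alignment, size);
  //     info.block->mark_used();
  //     void *ptr = info.block->usable_space(); // aligned to `alignment`
  //   }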

private:
  /// Construct a block to represent a span of bytes. Overwrites only enough
  /// memory for the block header; the rest of the span is left alone.
  LIBC_INLINE static Block *as_block(ByteSpan bytes) {
    return ::new (bytes.data()) Block(bytes.size());
  }

  /// Like `split`, but assumes the caller has already checked the parameters
  /// to ensure the split will succeed.
  Block *split_impl(size_t new_inner_size);

  /// Offset from this block to the previous block. 0 if this is the first
  /// block. This field is only alive when the previous block is free;
  /// otherwise, its memory is reused as part of the previous block's usable
  /// space.
  size_t prev_ = 0;

  /// Offset from this block to the next block. Valid even if this is the last
  /// block, since it equals the size of the block.
  size_t next_ = 0;

  /// Information about the current state of the block is stored in the two low
  /// order bits of the next_ value. These are guaranteed free by a minimum
  /// alignment (and thus, alignment of the size) of 4. The lowest bit is the
  /// `prev_free` flag, and the other bit is the `last` flag.
  ///
  /// * If the `prev_free` flag is set, the block isn't the first and the
  ///   previous block is free.
  /// * If the `last` flag is set, the block is the sentinel last block. It is
  ///   summarily considered used and has no next block.
} __attribute__((packed, aligned(cpp::max(alignof(max_align_t), size_t{4}))));

inline constexpr size_t Block::BLOCK_OVERHEAD =
    align_up(sizeof(Block), ALIGNMENT);

LIBC_INLINE ByteSpan get_aligned_subspan(ByteSpan bytes, size_t alignment) {
  if (bytes.data() == nullptr)
    return ByteSpan();

  auto unaligned_start = reinterpret_cast<uintptr_t>(bytes.data());
  auto aligned_start = align_up(unaligned_start, alignment);
  auto unaligned_end = unaligned_start + bytes.size();
  auto aligned_end = align_down(unaligned_end, alignment);

  if (aligned_end <= aligned_start)
    return ByteSpan();

  return bytes.subspan(aligned_start - unaligned_start,
                       aligned_end - aligned_start);
}
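
// For illustration, with hypothetical values: a span starting at address
// 0x1004 with size 0x100 and alignment 16 yields the aligned subspan
// [0x1010, 0x1100), i.e. offset 12 from the unaligned start and length 0xf0.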

LIBC_INLINE
optional<Block *> Block::init(ByteSpan region) {
  optional<ByteSpan> result = get_aligned_subspan(region, ALIGNMENT);
  if (!result)
    return {};

  region = result.value();
  // Two blocks are allocated: a free block and a sentinel last block.
  if (region.size() < 2 * BLOCK_OVERHEAD)
    return {};

  if (cpp::numeric_limits<size_t>::max() < region.size())
    return {};

  Block *block = as_block(region.first(region.size() - BLOCK_OVERHEAD));
  Block *last = as_block(region.last(BLOCK_OVERHEAD));
  block->mark_free();
  last->mark_last();
  return block;
}
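
// A minimal usage sketch (hypothetical buffer; `init` carves the region into
// one free block followed by the sentinel last block):
//
//   alignas(max_align_t) static cpp::byte buffer[1024];
//   if (optional<Block *> first = Block::init(buffer)) {
//     // *first is a free block spanning the usable part of `buffer`.
//   }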

LIBC_INLINE
bool Block::can_allocate(size_t alignment, size_t size) const {
  if (inner_size() < size)
    return false;
  if (is_usable_space_aligned(alignment))
    return true;

  // Alignment isn't met, so a padding block is needed. Determine amount of
  // inner_size() consumed by the padding block.
  size_t padding_size = padding_for_alignment(alignment) - sizeof(prev_);

  // Check that there is room for the allocation in the following aligned block.
  size_t aligned_inner_size = inner_size() - padding_size - BLOCK_OVERHEAD;
  return size <= aligned_inner_size;
}

LIBC_INLINE
Block::BlockInfo Block::allocate(Block *block, size_t alignment, size_t size) {
  LIBC_ASSERT(
      block->can_allocate(alignment, size) &&
      "Calls to this function for a given alignment and size should only be "
      "done if `can_allocate` for these parameters returns true.");

  BlockInfo info{block, /*prev=*/nullptr, /*next=*/nullptr};

  if (!info.block->is_usable_space_aligned(alignment)) {
    Block *original = info.block;
    optional<Block *> maybe_aligned_block =
        original->split(info.block->padding_for_alignment(alignment));
    LIBC_ASSERT(maybe_aligned_block.has_value() &&
                "This split should always result in a new block. The check in "
                "`can_allocate` ensures that we have enough space here to make "
                "two blocks.");

    if (Block *prev = original->prev_free()) {
      // If there is a free block before this, we can merge the current one with
      // the newly created one.
      prev->merge_next();
    } else {
      info.prev = original;
    }

    Block *aligned_block = *maybe_aligned_block;
    LIBC_ASSERT(aligned_block->is_usable_space_aligned(alignment) &&
                "The aligned block isn't aligned somehow.");
    info.block = aligned_block;
  }

  // Now get a block for the requested size.
  if (optional<Block *> next = info.block->split(size))
    info.next = *next;

  return info;
}

LIBC_INLINE
optional<Block *> Block::split(size_t new_inner_size) {
  if (used())
    return {};
  // The prev_ field of the next block is always available, so there is a
  // minimum size to a block created through splitting.
  if (new_inner_size < sizeof(prev_))
    new_inner_size = sizeof(prev_);

  size_t old_inner_size = inner_size();
  new_inner_size =
      align_up(new_inner_size - sizeof(prev_), ALIGNMENT) + sizeof(prev_);
  if (old_inner_size < new_inner_size)
    return {};

  if (old_inner_size - new_inner_size < BLOCK_OVERHEAD)
    return {};

  return split_impl(new_inner_size);
}
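
// A minimal usage sketch (hypothetical sizes; `b` must be free and large
// enough, otherwise `split` returns an empty optional):
//
//   if (optional<Block *> rest = b->split(128)) {
//     // `b` now has an inner size of at least 128 (rounded for ALIGNMENT);
//     // `*rest` is a new free block occupying the remaining space.
//   }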

LIBC_INLINE
Block *Block::split_impl(size_t new_inner_size) {
  size_t outer_size1 = outer_size(new_inner_size);
  LIBC_ASSERT(outer_size1 % ALIGNMENT == 0 && "new size must be aligned");
  ByteSpan new_region = region().subspan(outer_size1);
  next_ &= ~SIZE_MASK;
  next_ |= outer_size1;

  Block *new_block = as_block(new_region);
  mark_free(); // Free status for this block is now stored in new_block.
  new_block->next()->prev_ = new_region.size();
  return new_block;
}

LIBC_INLINE
bool Block::merge_next() {
  if (used() || next()->used())
    return false;
  size_t new_size = outer_size() + next()->outer_size();
  next_ &= ~SIZE_MASK;
  next_ |= new_size;
  next()->prev_ = new_size;
  return true;
}
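
// A minimal usage sketch (hypothetical blocks): merging is the inverse of
// splitting, and only succeeds when both `b` and its successor are free.
//
//   if (b->merge_next()) {
//     // `b` now spans its old region plus its successor's; the successor's
//     // header is absorbed into `b`'s usable space.
//   }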

} // namespace LIBC_NAMESPACE_DECL

#endif // LLVM_LIBC_SRC___SUPPORT_BLOCK_H