/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkTArray_DEFINED
#define SkTArray_DEFINED

#include "include/private/base/SkASAN.h"  // IWYU pragma: keep
#include "include/private/base/SkAlignedStorage.h"
#include "include/private/base/SkAssert.h"
#include "include/private/base/SkAttributes.h"
#include "include/private/base/SkContainers.h"
#include "include/private/base/SkDebug.h"
#include "include/private/base/SkMalloc.h"
#include "include/private/base/SkMath.h"
#include "include/private/base/SkSpan_impl.h"
#include "include/private/base/SkTo.h"
#include "include/private/base/SkTypeTraits.h"  // IWYU pragma: keep

#include <algorithm>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <initializer_list>
#include <new>
#include <utility>

namespace skia_private {
/** TArray<T> implements a typical, mostly std::vector-like array.
    Each T will be default-initialized on allocation, and ~T will be called on destruction.

    MEM_MOVE controls the behavior when a T needs to be moved (e.g. when the array is resized)
      - true: T will be bit-copied via memcpy.
      - false: T will be moved via move-constructors.
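
    A minimal usage sketch (illustrative only; the element type and values are arbitrary, the
    calls are the TArray API declared below):

        TArray<int> values;                // empty, no elements yet
        values.push_back(3);               // copy-construct one element at the back
        values.emplace_back(4);            // construct one element in place at the back
        for (int v : values) {             // begin()/end() support ranged-for loops
            SkDebugf("%d\n", v);
        }
        values.pop_back();                 // destroys the last element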
*/
template <typename T, bool MEM_MOVE = sk_is_trivially_relocatable_v<T>> class TArray {
public:
    using value_type = T;

    /**
     * Creates an empty array with no initial storage
     */
    TArray() : fOwnMemory(true), fCapacity{0} {}

    /**
     * Creates an empty array that will preallocate space for reserveCount elements.
     */
    explicit TArray(int reserveCount) : TArray() { this->reserve_exact(reserveCount); }

    /**
     * Copies one array to another. The new array will be heap allocated.
     */
    TArray(const TArray& that) : TArray(that.fData, that.fSize) {}

    TArray(TArray&& that) {
        if (that.fOwnMemory) {
            this->setData(that);
            that.setData({});
        } else {
            this->initData(that.fSize);
            that.move(fData);
        }
        this->changeSize(that.fSize);
        that.changeSize(0);
    }

    /**
     * Creates a TArray by copying contents of a standard C array. The new
     * array will be heap allocated. Be careful not to use this constructor
     * when you really want the (void*, int) version.
     */
    TArray(const T* array, int count) {
        this->initData(count);
        this->copy(array);
    }

    /**
     * Creates a TArray by copying contents from an SkSpan. The new array will be heap allocated.
     */
    TArray(SkSpan<const T> data) : TArray(data.begin(), static_cast<int>(data.size())) {}

    /**
     * Creates a TArray by copying contents of an initializer list.
     */
    TArray(std::initializer_list<T> data) : TArray(data.begin(), data.size()) {}

    TArray& operator=(const TArray& that) {
        if (this == &that) {
            return *this;
        }
        this->clear();
        this->checkRealloc(that.size(), kExactFit);
        this->changeSize(that.fSize);
        this->copy(that.fData);
        return *this;
    }

    TArray& operator=(TArray&& that) {
        if (this != &that) {
            this->clear();
            this->unpoison();
            that.unpoison();
            if (that.fOwnMemory) {
                // The storage is on the heap, so move the data pointer.
                if (fOwnMemory) {
                    sk_free(fData);
                }

                fData = std::exchange(that.fData, nullptr);

                // Can't use exchange with bitfields.
                fCapacity = that.fCapacity;
                that.fCapacity = 0;

                fOwnMemory = true;

                this->changeSize(that.fSize);
            } else {
                // The data is stored inline in that, so move it element-by-element.
                this->checkRealloc(that.size(), kExactFit);
                this->changeSize(that.fSize);
                that.move(fData);
            }
            that.changeSize(0);
        }
        return *this;
    }

    ~TArray() {
        this->destroyAll();
        this->unpoison();
        if (fOwnMemory) {
            sk_free(fData);
        }
    }

    /**
     * Resets to size() = n newly constructed T objects and resets any reserve count.
     */
    void reset(int n) {
        SkASSERT(n >= 0);
        this->clear();
        this->checkRealloc(n, kExactFit);
        this->changeSize(n);
        for (int i = 0; i < this->size(); ++i) {
            new (fData + i) T;
        }
    }

    /**
     * Resets to a copy of a C array and resets any reserve count.
     */
    void reset(const T* array, int count) {
        SkASSERT(count >= 0);
        this->clear();
        this->checkRealloc(count, kExactFit);
        this->changeSize(count);
        this->copy(array);
    }

    /**
     * Ensures there is enough reserved space for at least n elements. This is guaranteed at least
     * until the array size grows above n and subsequently shrinks below n, any version of reset()
     * is called, or reserve() is called again.
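     *
     * A small sketch of the intended use (illustrative only; the element type and count are
     * arbitrary):
     *
     *     TArray<float> samples;
     *     samples.reserve(100);             // capacity() >= 100 while size() is still 0
     *     for (int i = 0; i < 100; ++i) {
     *         samples.push_back(0.5f * i);  // no reallocation while size() stays at or below 100
     *     }
     *
     * reserve_exact() below requests space for exactly n elements, without growth padding.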
     */
    void reserve(int n) {
        SkASSERT(n >= 0);
        if (n > this->size()) {
            this->checkRealloc(n - this->size(), kGrowing);
        }
    }

    /**
     * Ensures there is enough reserved space for exactly n elements. The same capacity guarantees
     * as above apply.
     */
    void reserve_exact(int n) {
        SkASSERT(n >= 0);
        if (n > this->size()) {
            this->checkRealloc(n - this->size(), kExactFit);
        }
    }

    void removeShuffle(int n) {
        SkASSERT(n < this->size());
        int newCount = fSize - 1;
        fData[n].~T();
        if (n != newCount) {
            this->move(n, newCount);
        }
        this->changeSize(newCount);
    }

    // Is the array empty.
    bool empty() const { return fSize == 0; }

    /**
     * Adds one new default-initialized T value and returns it by reference. Note that the reference
     * only remains valid until the next call that adds or removes elements.
     */
    T& push_back() {
        void* newT = this->push_back_raw(1);
        return *new (newT) T;
    }

    /**
     * Adds one new T value which is copy-constructed, returning it by reference. As always,
     * the reference only remains valid until the next call that adds or removes elements.
     */
    T& push_back(const T& t) {
        this->unpoison();
        T* newT;
        if (this->capacity() > fSize) SK_LIKELY {
            // Copy over the element directly.
            newT = new (fData + fSize) T(t);
        } else {
            newT = this->growAndConstructAtEnd(t);
        }

        this->changeSize(fSize + 1);
        return *newT;
    }

    /**
     * Adds one new T value which is move-constructed, returning it by reference.
     */
    T& push_back(T&& t) {
        this->unpoison();
        T* newT;
        if (this->capacity() > fSize) SK_LIKELY {
            // Move over the element directly.
            newT = new (fData + fSize) T(std::move(t));
        } else {
            newT = this->growAndConstructAtEnd(std::move(t));
        }

        this->changeSize(fSize + 1);
        return *newT;
    }

    /**
     *  Constructs a new T at the back of this array, returning it by reference.
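     *
     *  For example (a sketch only; MyPoint, a type with a two-float constructor, is hypothetical):
     *
     *      TArray<MyPoint> pts;
     *      pts.emplace_back(0.0f, 1.0f);  // constructs the MyPoint in place inside the array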
     */
    template <typename... Args> T& emplace_back(Args&&... args) {
        this->unpoison();
        T* newT;
        if (this->capacity() > fSize) SK_LIKELY {
            // Emplace the new element in directly.
            newT = new (fData + fSize) T(std::forward<Args>(args)...);
        } else {
            newT = this->growAndConstructAtEnd(std::forward<Args>(args)...);
        }

        this->changeSize(fSize + 1);
        return *newT;
    }

    /**
     * Allocates n more default-initialized T values, and returns the address of
     * the start of that new range. Note: this address is only valid until the
     * next API call made on the array that might add or remove elements.
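     *
     * A usage sketch (illustrative only; the element type and count are arbitrary):
     *
     *     TArray<int> a;
     *     int* block = a.push_back_n(4);  // a.size() grows by 4
     *     for (int i = 0; i < 4; ++i) {
     *         block[i] = i;               // fill in the newly added elements
     *     }
     *
     * The overloads below initialize the new elements from a single value, a C array, or by
     * moving out of a C array.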
     */
    T* push_back_n(int n) {
        SkASSERT(n >= 0);
        T* newTs = TCast(this->push_back_raw(n));
        for (int i = 0; i < n; ++i) {
            new (&newTs[i]) T;
        }
        return newTs;
    }

    /**
     * Version of above that uses a copy constructor to initialize all n items
     * to the same T.
     */
    T* push_back_n(int n, const T& t) {
        SkASSERT(n >= 0);
        T* newTs = TCast(this->push_back_raw(n));
        for (int i = 0; i < n; ++i) {
            new (&newTs[i]) T(t);
        }
        return static_cast<T*>(newTs);
    }

    /**
     * Version of above that uses a copy constructor to initialize the n items
     * to separate T values.
     */
    T* push_back_n(int n, const T t[]) {
        SkASSERT(n >= 0);
        this->checkRealloc(n, kGrowing);
        T* end = this->end();
        this->changeSize(fSize + n);
        for (int i = 0; i < n; ++i) {
            new (end + i) T(t[i]);
        }
        return end;
    }

    /**
     * Version of above that uses the move constructor to set n items.
     */
    T* move_back_n(int n, T* t) {
        SkASSERT(n >= 0);
        this->checkRealloc(n, kGrowing);
        T* end = this->end();
        this->changeSize(fSize + n);
        for (int i = 0; i < n; ++i) {
            new (end + i) T(std::move(t[i]));
        }
        return end;
    }

    /**
     * Removes the last element. Not safe to call when size() == 0.
     */
    void pop_back() {
        sk_collection_not_empty(this->empty());
        fData[fSize - 1].~T();
        this->changeSize(fSize - 1);
    }

    /**
     * Removes the last n elements. Not safe to call when size() < n.
     */
    void pop_back_n(int n) {
        SkASSERT(n >= 0);
        SkASSERT(this->size() >= n);
        int i = fSize;
        while (i-- > fSize - n) {
            (*this)[i].~T();
        }
        this->changeSize(fSize - n);
    }

    /**
     * Pushes or pops from the back to resize. Pushes will be default initialized.
     */
    void resize_back(int newCount) {
        SkASSERT(newCount >= 0);
        if (newCount > this->size()) {
            if (this->empty()) {
                // When the container is completely empty, grow to exactly the requested size.
                this->checkRealloc(newCount, kExactFit);
            }
            this->push_back_n(newCount - fSize);
        } else if (newCount < this->size()) {
            this->pop_back_n(fSize - newCount);
        }
    }

    /** Swaps the contents of this array with that array. Does a pointer swap if possible,
        otherwise copies the T values. */
    void swap(TArray& that) {
        using std::swap;
        if (this == &that) {
            return;
        }
        if (fOwnMemory && that.fOwnMemory) {
            swap(fData, that.fData);
            swap(fSize, that.fSize);

            // Can't use swap because fCapacity is a bit field.
            auto allocCount = fCapacity;
            fCapacity = that.fCapacity;
            that.fCapacity = allocCount;
        } else {
            // This could be more optimal...
            TArray copy(std::move(that));
            that = std::move(*this);
            *this = std::move(copy);
        }
    }

    /**
     * Moves all elements of `that` to the end of this array, leaving `that` empty.
     * This is a no-op if `that` is empty or equal to this array.
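     *
     * A sketch of the intended use (illustrative only; the Widget element type is hypothetical):
     *
     *     TArray<Widget> done, pending;
     *     // ... fill both arrays ...
     *     done.move_back(pending);   // done now ends with pending's elements; pending is empty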
     */
    void move_back(TArray& that) {
        if (that.empty() || &that == this) {
            return;
        }
        void* dst = this->push_back_raw(that.size());
        // After move() returns, the contents of `dst` will have either been in-place initialized
        // using the move constructor (per-item from `that`'s elements), or will have been
        // mem-copied into when MEM_MOVE is true. Either way, they are now valid objects.
        that.move(dst);
        // All items in `that` have either been destroyed (when MEM_MOVE is false) or should be
        // considered invalid (when MEM_MOVE is true). Reset fSize to 0 directly to skip any further
        // per-item destruction.
        that.changeSize(0);
    }

    T* begin() {
        return fData;
    }
    const T* begin() const {
        return fData;
    }

    // It's safe to use fData + fSize because if fData is nullptr then adding 0 is
    // valid and returns nullptr. See [expr.add] in the C++ standard.
    T* end() {
        if (fData == nullptr) {
            SkASSERT(fSize == 0);
        }
        return fData + fSize;
    }
    const T* end() const {
        if (fData == nullptr) {
            SkASSERT(fSize == 0);
        }
        return fData + fSize;
    }
    T* data() { return fData; }
    const T* data() const { return fData; }
    int size() const { return fSize; }
    size_t size_bytes() const { return Bytes(fSize); }
    void resize(size_t count) { this->resize_back((int)count); }

    void clear() {
        this->destroyAll();
        this->changeSize(0);
    }

    void shrink_to_fit() {
        if (!fOwnMemory || fSize == fCapacity) {
            return;
        }
        this->unpoison();
        if (fSize == 0) {
            sk_free(fData);
            fData = nullptr;
            fCapacity = 0;
        } else {
            SkSpan<std::byte> allocation = Allocate(fSize);
            this->move(TCast(allocation.data()));
            if (fOwnMemory) {
                sk_free(fData);
            }
            // Poison is applied in `setDataFromBytes`.
            this->setDataFromBytes(allocation);
        }
    }

    /**
     * Get the i^th element.
     */
    T& operator[] (int i) {
        return fData[sk_collection_check_bounds(i, this->size())];
    }

    const T& operator[] (int i) const {
        return fData[sk_collection_check_bounds(i, this->size())];
    }

    T& at(int i) { return (*this)[i]; }
    const T& at(int i) const { return (*this)[i]; }

    /**
     * equivalent to operator[](0)
     */
    T& front() {
        sk_collection_not_empty(this->empty());
        return fData[0];
    }

    const T& front() const {
        sk_collection_not_empty(this->empty());
        return fData[0];
    }

    /**
     * equivalent to operator[](size() - 1)
     */
    T& back() {
        sk_collection_not_empty(this->empty());
        return fData[fSize - 1];
    }

    const T& back() const {
        sk_collection_not_empty(this->empty());
        return fData[fSize - 1];
    }

    /**
     * equivalent to operator[](size()-1-i)
     */
    T& fromBack(int i) {
        return (*this)[fSize - i - 1];
    }

    const T& fromBack(int i) const {
        return (*this)[fSize - i - 1];
    }

    bool operator==(const TArray<T, MEM_MOVE>& right) const {
        int leftCount = this->size();
        if (leftCount != right.size()) {
            return false;
        }
        for (int index = 0; index < leftCount; ++index) {
            if (fData[index] != right.fData[index]) {
                return false;
            }
        }
        return true;
    }

    bool operator!=(const TArray<T, MEM_MOVE>& right) const {
        return !(*this == right);
    }

    int capacity() const {
        return fCapacity;
    }

protected:
    // Creates an empty array that will use the passed storage block until it is insufficiently
    // large to hold the entire array.
    template <int InitialCapacity>
    TArray(SkAlignedSTStorage<InitialCapacity, T>* storage, int size = 0) {
        static_assert(InitialCapacity >= 0);
        SkASSERT(size >= 0);
        SkASSERT(storage->get() != nullptr);
        if (size > InitialCapacity) {
            this->initData(size);
        } else {
            this->setDataFromBytes(*storage);
            this->changeSize(size);

            // setDataFromBytes always sets fOwnMemory to true, but we are actually using static
            // storage here, which shouldn't ever be freed.
            fOwnMemory = false;
        }
    }

    // Copies a C array, using the pre-allocated storage if InitialCapacity >= size. Otherwise, the
    // storage will only be used when the array shrinks to fit.
    template <int InitialCapacity>
    TArray(const T* array, int size, SkAlignedSTStorage<InitialCapacity, T>* storage)
            : TArray{storage, size} {
        this->copy(array);
    }
    template <int InitialCapacity>
    TArray(SkSpan<const T> data, SkAlignedSTStorage<InitialCapacity, T>* storage)
            : TArray{storage, static_cast<int>(data.size())} {
        this->copy(data.begin());
    }

private:
    // Growth factors for checkRealloc.
    static constexpr double kExactFit = 1.0;
    static constexpr double kGrowing = 1.5;

    static constexpr int kMinHeapAllocCount = 8;
    static_assert(SkIsPow2(kMinHeapAllocCount), "min alloc count not power of two.");

    // Note: on 32-bit machines kMaxCapacity may be limited by SIZE_MAX / sizeof(T). On 64-bit
    // machines it will simply be INT_MAX as long as sizeof(T) < 2^32.
    static constexpr int kMaxCapacity = SkToInt(std::min(SIZE_MAX / sizeof(T), (size_t)INT_MAX));

    void setDataFromBytes(SkSpan<std::byte> allocation) {
        T* data = TCast(allocation.data());
        // The allocator may hand back extra bytes beyond what was requested, so pin the computed
        // element count to kMaxCapacity. It would seem like SkContainerAllocator should handle the
        // divide, but that would require a full divide instruction. If done here, the element size
        // is known at compile time, and the divide can usually be implemented by a right shift.
        // The full divide takes ~50X longer than the shift.
        size_t size = std::min(allocation.size() / sizeof(T), SkToSizeT(kMaxCapacity));
        this->setData(SkSpan<T>(data, size));
    }

    void setData(SkSpan<T> array) {
        this->unpoison();

        fData = array.data();
        fCapacity = SkToU32(array.size());
        fOwnMemory = true;

        this->poison();
    }

    void unpoison() {
#ifdef SK_SANITIZE_ADDRESS
        if (fData && fPoisoned) {
            // SkDebugf("UNPOISONING %p : 0 -> %zu\n", fData, Bytes(fCapacity));
            sk_asan_unpoison_memory_region(this->begin(), Bytes(fCapacity));
            fPoisoned = false;
        }
#endif
    }

    void poison() {
#ifdef SK_SANITIZE_ADDRESS
        if (fData && fCapacity > fSize) {
            // SkDebugf("  POISONING %p : %zu -> %zu\n", fData, Bytes(fSize), Bytes(fCapacity));
            sk_asan_poison_memory_region(this->end(), Bytes(fCapacity - fSize));
            fPoisoned = true;
        }
#endif
    }

    void changeSize(int n) {
        this->unpoison();
        fSize = n;
        this->poison();
    }

    // We disable Control-Flow Integrity sanitization (go/cfi) when casting item-array buffers.
    // CFI flags this code as dangerous because we are casting `buffer` to a T* while the buffer's
    // contents might still be uninitialized memory. When T has a vtable, this is especially risky
    // because we could hypothetically access a virtual method on fData and jump to an
    // unpredictable location in memory. Of course, TArray won't actually use fData in this
    // way, and we don't want to construct a T before the user requests one. There's no real risk
    // here, so disable CFI when doing these casts.
    SK_NO_SANITIZE("cfi")
    static T* TCast(void* buffer) {
        return (T*)buffer;
    }

    static size_t Bytes(int n) {
        SkASSERT(n <= kMaxCapacity);
        return SkToSizeT(n) * sizeof(T);
    }

    static SkSpan<std::byte> Allocate(int capacity, double growthFactor = 1.0) {
        return SkContainerAllocator{sizeof(T), kMaxCapacity}.allocate(capacity, growthFactor);
    }

    void initData(int count) {
        this->setDataFromBytes(Allocate(count));
        this->changeSize(count);
    }

    void destroyAll() {
        if (!this->empty()) {
            T* cursor = this->begin();
            T* const end = this->end();
            do {
                cursor->~T();
                cursor++;
            } while (cursor < end);
        }
    }

    /** In the following move and copy methods, 'dst' is assumed to be uninitialized raw storage.
     *  In the following move methods, 'src' is destroyed leaving behind uninitialized raw storage.
     */
    void copy(const T* src) {
        if constexpr (std::is_trivially_copyable_v<T>) {
            if (!this->empty() && src != nullptr) {
                sk_careful_memcpy(fData, src, this->size_bytes());
            }
        } else {
            for (int i = 0; i < this->size(); ++i) {
                new (fData + i) T(src[i]);
            }
        }
    }

    void move(int dst, int src) {
        if constexpr (MEM_MOVE) {
            memcpy(static_cast<void*>(&fData[dst]),
                   static_cast<const void*>(&fData[src]),
                   sizeof(T));
        } else {
            new (&fData[dst]) T(std::move(fData[src]));
            fData[src].~T();
        }
    }

    void move(void* dst) {
        if constexpr (MEM_MOVE) {
            sk_careful_memcpy(dst, fData, Bytes(fSize));
        } else {
            for (int i = 0; i < this->size(); ++i) {
                new (static_cast<char*>(dst) + Bytes(i)) T(std::move(fData[i]));
                fData[i].~T();
            }
        }
    }

    // Helper function that makes space for n objects, adjusts the count, but does not initialize
    // the new objects.
    void* push_back_raw(int n) {
        this->checkRealloc(n, kGrowing);
        void* ptr = fData + fSize;
        this->changeSize(fSize + n);
        return ptr;
    }

    template <typename... Args>
    SK_ALWAYS_INLINE T* growAndConstructAtEnd(Args&&... args) {
        SkSpan<std::byte> buffer = this->preallocateNewData(/*delta=*/1, kGrowing);
        T* newT = new (TCast(buffer.data()) + fSize) T(std::forward<Args>(args)...);
        this->installDataAndUpdateCapacity(buffer);

        return newT;
    }

    void checkRealloc(int delta, double growthFactor) {
        SkASSERT(delta >= 0);
        SkASSERT(fSize >= 0);
        SkASSERT(fCapacity >= 0);

        // Check if there are enough remaining allocated elements to satisfy the request.
        if (this->capacity() - fSize < delta) {
            // Looks like we need to reallocate.
            this->installDataAndUpdateCapacity(this->preallocateNewData(delta, growthFactor));
        }
    }

    SkSpan<std::byte> preallocateNewData(int delta, double growthFactor) {
        SkASSERT(delta >= 0);
        SkASSERT(fSize >= 0);
        SkASSERT(fCapacity >= 0);

        // Don't overflow fSize or size_t later in the memory allocation. Overflowing the memory
        // allocation really only applies to fSize on 32-bit machines; on 64-bit machines this
        // check will probably never fire. Since kMaxCapacity is bounded above by INT_MAX,
        // this also checks the bounds of fSize.
        if (delta > kMaxCapacity - fSize) {
            sk_report_container_overflow_and_die();
        }
        const int newCount = fSize + delta;

        return Allocate(newCount, growthFactor);
    }

    void installDataAndUpdateCapacity(SkSpan<std::byte> allocation) {
        this->move(TCast(allocation.data()));
        if (fOwnMemory) {
            sk_free(fData);
        }
        this->setDataFromBytes(allocation);
        SkASSERT(fData != nullptr);
    }

    T* fData{nullptr};
    int fSize{0};
    uint32_t fOwnMemory : 1;
    uint32_t fCapacity : 31;
#ifdef SK_SANITIZE_ADDRESS
    bool fPoisoned = false;
#endif
};

template <typename T, bool M> static inline void swap(TArray<T, M>& a, TArray<T, M>& b) {
    a.swap(b);
}

// Subclass of TArray that contains a pre-allocated memory block for the array.
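//
// A usage sketch (illustrative only; the element type and inline count are arbitrary):
//
//     STArray<16, int> ints;   // space for at least 16 ints is embedded in the object itself
//     ints.push_back_n(16);    // fits in the inline block, so no heap allocation is needed
//     ints.push_back_n(200);   // once the (possibly rounded-up) inline capacity is exceeded,
//                              // the array moves to a heap allocation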
template <int Nreq, typename T, bool MEM_MOVE = sk_is_trivially_relocatable_v<T>>
class STArray : private SkAlignedSTStorage<SkContainerAllocator::RoundUp<T>(Nreq), T>,
                public TArray<T, MEM_MOVE> {
    // We round up the requested array size to the next capacity multiple.
    // This space would likely otherwise go to waste.
    static constexpr int N = SkContainerAllocator::RoundUp<T>(Nreq);
    static_assert(Nreq > 0);
    static_assert(N >= Nreq);

    using Storage = SkAlignedSTStorage<N,T>;

public:
    STArray()
        : Storage{}
        , TArray<T, MEM_MOVE>(this) {}  // Must use () to avoid confusion with initializer_list
                                        // when T=bool because pointers are convertible to bool.

    STArray(const T* array, int count)
        : Storage{}
        , TArray<T, MEM_MOVE>{array, count, this} {}

    STArray(SkSpan<const T> data)
        : Storage{}
        , TArray<T, MEM_MOVE>{data, this} {}

    STArray(std::initializer_list<T> data)
        : STArray{data.begin(), SkToInt(data.size())} {}

    explicit STArray(int reserveCount)
        : STArray() { this->reserve_exact(reserveCount); }

    STArray(const STArray& that)
        : STArray() { *this = that; }

    explicit STArray(const TArray<T, MEM_MOVE>& that)
        : STArray() { *this = that; }

    STArray(STArray&& that)
        : STArray() { *this = std::move(that); }

    explicit STArray(TArray<T, MEM_MOVE>&& that)
        : STArray() { *this = std::move(that); }

    STArray& operator=(const STArray& that) {
        TArray<T, MEM_MOVE>::operator=(that);
        return *this;
    }

    STArray& operator=(const TArray<T, MEM_MOVE>& that) {
        TArray<T, MEM_MOVE>::operator=(that);
        return *this;
    }

    STArray& operator=(STArray&& that) {
        TArray<T, MEM_MOVE>::operator=(std::move(that));
        return *this;
    }

    STArray& operator=(TArray<T, MEM_MOVE>&& that) {
        TArray<T, MEM_MOVE>::operator=(std::move(that));
        return *this;
    }

    // Force the use of TArray for data() and size().
    using TArray<T, MEM_MOVE>::data;
    using TArray<T, MEM_MOVE>::size;
};
}  // namespace skia_private
#endif  // SkTArray_DEFINED