/*
   american fuzzy lop++ - error-checking, memory-zeroing alloc routines
   --------------------------------------------------------------------

   Originally written by Michal Zalewski

   Now maintained by Marc Heuse <[email protected]>,
                     Heiko Eißfeldt <[email protected]>,
                     Andrea Fioraldi <[email protected]>,
                     Dominik Maier <[email protected]>

   Copyright 2016, 2017 Google Inc. All rights reserved.
   Copyright 2019-2024 AFLplusplus Project. All rights reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at:

     https://www.apache.org/licenses/LICENSE-2.0

   This allocator is not designed to resist malicious attackers (the canaries
   are small and predictable), but provides a robust and portable way to detect
   use-after-free, off-by-one writes, stale pointers, and so on.

 */

#ifndef _HAVE_ALLOC_INL_H
#define _HAVE_ALLOC_INL_H

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>

#include "config.h"
#include "types.h"
#include "debug.h"

/* Initial size used for afl_realloc */
#define INITIAL_GROWTH_SIZE (64)

// Be careful! _WANT_ORIGINAL_AFL_ALLOC is not compatible with custom mutators

#ifndef _WANT_ORIGINAL_AFL_ALLOC
  // AFL++ stuff without memory corruption checks - for speed

  /* User-facing macro to sprintf() to a dynamically allocated buffer. */

  #define alloc_printf(_str...)                        \
    ({                                                 \
                                                       \
      u8 *_tmp;                                        \
      s32 _len = snprintf(NULL, 0, _str);              \
      if (_len < 0) FATAL("Whoa, snprintf() fails?!"); \
      _tmp = ck_alloc(_len + 1);                       \
      snprintf((char *)_tmp, _len + 1, _str);          \
      _tmp;                                            \
                                                       \
    })
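
  /* Usage sketch (illustrative, not part of the original header; "queue_id"
     is a hypothetical variable): format into a freshly allocated buffer and
     release it with ck_free() when done.

       u8 *fn = alloc_printf("queue/id:%06u", queue_id);
       ...
       ck_free(fn);
  */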

  /* Macro to enforce allocation limits as a last-resort defense against
     integer overflows. */

  #define ALLOC_CHECK_SIZE(_s)                                          \
    do {                                                                \
                                                                        \
      if ((_s) > MAX_ALLOC) ABORT("Bad alloc request: %u bytes", (_s)); \
                                                                        \
    } while (0)

  /* Macro to check malloc() failures and the like. */

  #define ALLOC_CHECK_RESULT(_r, _s)                                    \
    do {                                                                \
                                                                        \
      if (!(_r)) ABORT("Out of memory: can't allocate %u bytes", (_s)); \
                                                                        \
    } while (0)

/* Allocate a buffer, explicitly not zeroing it. Returns NULL for zero-sized
   requests. */

static inline void *DFL_ck_alloc_nozero(u32 size) {

  void *ret;

  if (!size) { return NULL; }

  ALLOC_CHECK_SIZE(size);
  ret = malloc(size);
  ALLOC_CHECK_RESULT(ret, size);

  return (void *)ret;

}

/* Allocate a buffer, returning zeroed memory. Returns NULL for zero-sized
   requests. */

static inline void *DFL_ck_alloc(u32 size) {

  void *mem;

  if (!size) { return NULL; }
  mem = DFL_ck_alloc_nozero(size);

  return memset(mem, 0, size);

}

/* Free memory. In this speed-optimized variant this is a plain free() wrapper
   without double-free or canary checks. */

static inline void DFL_ck_free(void *mem) {

  if (!mem) { return; }

  free(mem);

}

/* Re-allocate a buffer. In this speed-optimized variant this is a checked
   realloc() wrapper; note that any newly added tail is NOT zeroed. */

static inline void *DFL_ck_realloc(void *orig, u32 size) {

  void *ret;

  if (!size) {

    DFL_ck_free(orig);
    return NULL;

  }

  ALLOC_CHECK_SIZE(size);

  ret = realloc(orig, size);

  ALLOC_CHECK_RESULT(ret, size);

  return (void *)ret;

}

/* Create a buffer with a copy of a string. Returns NULL for NULL inputs. */

static inline u8 *DFL_ck_strdup(u8 *str) {

  u8 *ret;
  u32 size;

  if (!str) { return NULL; }

  size = strlen((char *)str) + 1;

  ALLOC_CHECK_SIZE(size);
  ret = (u8 *)malloc(size);
  ALLOC_CHECK_RESULT(ret, size);

  return (u8 *)memcpy(ret, str, size);

}

  /* In non-debug mode, we just do straightforward aliasing of the above
     functions to user-visible names such as ck_alloc(). */

  #define ck_alloc DFL_ck_alloc
  #define ck_alloc_nozero DFL_ck_alloc_nozero
  #define ck_realloc DFL_ck_realloc
  #define ck_strdup DFL_ck_strdup
  #define ck_free DFL_ck_free

  #define alloc_report()

#else
  // This is the original alloc-inl of stock afl

  /* User-facing macro to sprintf() to a dynamically allocated buffer. */

  #define alloc_printf(_str...)                        \
    ({                                                 \
                                                       \
      u8 *_tmp;                                        \
      s32 _len = snprintf(NULL, 0, _str);              \
      if (_len < 0) FATAL("Whoa, snprintf() fails?!"); \
      _tmp = ck_alloc(_len + 1);                       \
      snprintf((char *)_tmp, _len + 1, _str);          \
      _tmp;                                            \
                                                       \
    })

  /* Macro to enforce allocation limits as a last-resort defense against
     integer overflows. */
  #define ALLOC_CHECK_SIZE(_s)                                          \
    do {                                                                \
                                                                        \
      if ((_s) > MAX_ALLOC) ABORT("Bad alloc request: %u bytes", (_s)); \
                                                                        \
    } while (0)

  /* Macro to check malloc() failures and the like. */

  #define ALLOC_CHECK_RESULT(_r, _s)                                    \
    do {                                                                \
                                                                        \
      if (!(_r)) ABORT("Out of memory: can't allocate %u bytes", (_s)); \
                                                                        \
    } while (0)

  /* Magic tokens used to mark used / freed chunks. */

  #define ALLOC_MAGIC_C1 0xFF00FF00                   /* Used head (dword)  */
  #define ALLOC_MAGIC_F 0xFE00FE00                    /* Freed head (dword) */
  #define ALLOC_MAGIC_C2 0xF0                         /* Used tail (byte)   */

  /* Positions of guard tokens in relation to the user-visible pointer. */

  #define ALLOC_C1(_ptr) (((u32 *)(_ptr))[-2])
  #define ALLOC_S(_ptr) (((u32 *)(_ptr))[-1])
  #define ALLOC_C2(_ptr) (((u8 *)(_ptr))[ALLOC_S(_ptr)])

  #define ALLOC_OFF_HEAD 8
  #define ALLOC_OFF_TOTAL (ALLOC_OFF_HEAD + 1)

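  /* Chunk layout sketch (derived from the macros above; illustrative only):

       [ C1 magic ][ size ][ user data .................. ][ C2 magic ]
         4 bytes    4 bytes            size bytes             1 byte
                            ^-- user-visible pointer

     ALLOC_OFF_HEAD (8) is the distance from the raw malloc() pointer to the
     user-visible pointer, and ALLOC_OFF_TOTAL (9) is the per-allocation
     bookkeeping overhead. */
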
  /* Sanity-checking macros for pointers. */

  #define CHECK_PTR(_p)                            \
    do {                                           \
                                                   \
      if (_p) {                                    \
                                                   \
        if (ALLOC_C1(_p) ^ ALLOC_MAGIC_C1) {       \
                                                   \
          if (ALLOC_C1(_p) == ALLOC_MAGIC_F)       \
            ABORT("Use after free.");              \
          else                                     \
            ABORT("Corrupted head alloc canary."); \
                                                   \
        }                                          \
        if (ALLOC_C2(_p) ^ ALLOC_MAGIC_C2)         \
          ABORT("Corrupted tail alloc canary.");   \
                                                   \
      }                                            \
                                                   \
    } while (0)

  #define CHECK_PTR_EXPR(_p)  \
    ({                        \
                              \
      typeof(_p) _tmp = (_p); \
      CHECK_PTR(_tmp);        \
      _tmp;                   \
                              \
    })

/* Allocate a buffer, explicitly not zeroing it. Returns NULL for zero-sized
   requests. */

static inline void *DFL_ck_alloc_nozero(u32 size) {

  void *ret;

  if (!size) return NULL;

  ALLOC_CHECK_SIZE(size);
  ret = malloc(size + ALLOC_OFF_TOTAL);
  ALLOC_CHECK_RESULT(ret, size);

  ret = (char *)ret + ALLOC_OFF_HEAD;

  ALLOC_C1(ret) = ALLOC_MAGIC_C1;
  ALLOC_S(ret) = size;
  ALLOC_C2(ret) = ALLOC_MAGIC_C2;

  return ret;

}

/* Allocate a buffer, returning zeroed memory. */

static inline void *DFL_ck_alloc(u32 size) {

  void *mem;

  if (!size) return NULL;
  mem = DFL_ck_alloc_nozero(size);

  return memset(mem, 0, size);

}

/* Free memory, checking for double free and corrupted heap. When DEBUG_BUILD
   is set, the old memory will also be clobbered with 0xFF. */

static inline void DFL_ck_free(void *mem) {

  if (!mem) return;

  CHECK_PTR(mem);
  #ifdef DEBUG_BUILD

  /* Catch pointer issues sooner. */
  memset(mem, 0xFF, ALLOC_S(mem));

  #endif                                                     /* DEBUG_BUILD */

  ALLOC_C1(mem) = ALLOC_MAGIC_F;

  free((char *)mem - ALLOC_OFF_HEAD);

}

/* Re-allocate a buffer, checking for issues and zeroing any newly-added tail.
   With DEBUG_BUILD, the buffer is always reallocated to a new address and the
   old memory is clobbered with 0xFF. */

static inline void *DFL_ck_realloc(void *orig, u32 size) {

  void *ret;
  u32   old_size = 0;

  if (!size) {

    DFL_ck_free(orig);
    return NULL;

  }

  if (orig) {

    CHECK_PTR(orig);

  #ifndef DEBUG_BUILD
    ALLOC_C1(orig) = ALLOC_MAGIC_F;
  #endif                                                    /* !DEBUG_BUILD */

    old_size = ALLOC_S(orig);
    orig = (char *)orig - ALLOC_OFF_HEAD;

    ALLOC_CHECK_SIZE(old_size);

  }

  ALLOC_CHECK_SIZE(size);

  #ifndef DEBUG_BUILD

  ret = realloc(orig, size + ALLOC_OFF_TOTAL);
  ALLOC_CHECK_RESULT(ret, size);

  #else

  /* Catch pointer issues sooner: force relocation and make sure that the
     original buffer is wiped. */

  ret = malloc(size + ALLOC_OFF_TOTAL);
  ALLOC_CHECK_RESULT(ret, size);

  if (orig) {

    memcpy((char *)ret + ALLOC_OFF_HEAD, (char *)orig + ALLOC_OFF_HEAD,
           MIN(size, old_size));
    memset((char *)orig + ALLOC_OFF_HEAD, 0xFF, old_size);

    ALLOC_C1((char *)orig + ALLOC_OFF_HEAD) = ALLOC_MAGIC_F;

    free(orig);

  }

  #endif                                                   /* ^!DEBUG_BUILD */

  ret = (char *)ret + ALLOC_OFF_HEAD;

  ALLOC_C1(ret) = ALLOC_MAGIC_C1;
  ALLOC_S(ret) = size;
  ALLOC_C2(ret) = ALLOC_MAGIC_C2;

  if (size > old_size) memset((char *)ret + old_size, 0, size - old_size);

  return ret;

}

/* Create a buffer with a copy of a string. Returns NULL for NULL inputs. */

static inline u8 *DFL_ck_strdup(u8 *str) {

  void *ret;
  u32   size;

  if (!str) return NULL;

  size = strlen((char *)str) + 1;

  ALLOC_CHECK_SIZE(size);
  ret = malloc(size + ALLOC_OFF_TOTAL);
  ALLOC_CHECK_RESULT(ret, size);

  ret = (char *)ret + ALLOC_OFF_HEAD;

  ALLOC_C1(ret) = ALLOC_MAGIC_C1;
  ALLOC_S(ret) = size;
  ALLOC_C2(ret) = ALLOC_MAGIC_C2;

  return memcpy(ret, str, size);

}

  #ifndef DEBUG_BUILD

    /* In non-debug mode, we just do straightforward aliasing of the above
       functions to user-visible names such as ck_alloc(). */

    #define ck_alloc DFL_ck_alloc
    #define ck_alloc_nozero DFL_ck_alloc_nozero
    #define ck_realloc DFL_ck_realloc
    #define ck_strdup DFL_ck_strdup
    #define ck_free DFL_ck_free

    #define alloc_report()

  #else

    /* In debugging mode, we also track allocations to detect memory leaks, and
       the flow goes through one more layer of indirection. */

    /* Alloc tracking data structures: */

    #define ALLOC_BUCKETS 4096

struct TRK_obj {

  void *ptr;
  char *file, *func;
  u32   line;

};

    #ifdef AFL_MAIN

struct TRK_obj *TRK[ALLOC_BUCKETS];
u32             TRK_cnt[ALLOC_BUCKETS];

      #define alloc_report() TRK_report()

    #else

extern struct TRK_obj *TRK[ALLOC_BUCKETS];
extern u32             TRK_cnt[ALLOC_BUCKETS];

      #define alloc_report()

    #endif                                                     /* ^AFL_MAIN */

    /* Bucket-assigning function for a given pointer: */

    #define TRKH(_ptr) (((((u32)(_ptr)) >> 16) ^ ((u32)(_ptr))) % ALLOC_BUCKETS)

/* Add a new entry to the list of allocated objects. */

static inline void TRK_alloc_buf(void *ptr, const char *file, const char *func,
                                 u32 line) {

  u32 i, bucket;

  if (!ptr) return;

  bucket = TRKH(ptr);

  /* Find a free slot in the list of entries for that bucket. */

  for (i = 0; i < TRK_cnt[bucket]; i++)

    if (!TRK[bucket][i].ptr) {

      TRK[bucket][i].ptr = ptr;
      TRK[bucket][i].file = (char *)file;
      TRK[bucket][i].func = (char *)func;
      TRK[bucket][i].line = line;
      return;

    }

  /* No space available - allocate more. */

  TRK[bucket] = DFL_ck_realloc(TRK[bucket],
                               (TRK_cnt[bucket] + 1) * sizeof(struct TRK_obj));

  TRK[bucket][i].ptr = ptr;
  TRK[bucket][i].file = (char *)file;
  TRK[bucket][i].func = (char *)func;
  TRK[bucket][i].line = line;

  TRK_cnt[bucket]++;

}

/* Remove entry from the list of allocated objects. */

static inline void TRK_free_buf(void *ptr, const char *file, const char *func,
                                u32 line) {

  u32 i, bucket;

  if (!ptr) return;

  bucket = TRKH(ptr);

  /* Find the element on the list... */

  for (i = 0; i < TRK_cnt[bucket]; i++)

    if (TRK[bucket][i].ptr == ptr) {

      TRK[bucket][i].ptr = 0;
      return;

    }

  WARNF("ALLOC: Attempt to free non-allocated memory in %s (%s:%u)", func, file,
        line);

}

/* Do a final report on all non-deallocated objects. */

static inline void TRK_report(void) {

  u32 i, bucket;

  fflush(0);

  for (bucket = 0; bucket < ALLOC_BUCKETS; bucket++)
    for (i = 0; i < TRK_cnt[bucket]; i++)
      if (TRK[bucket][i].ptr)
        WARNF("ALLOC: Memory never freed, created in %s (%s:%u)",
              TRK[bucket][i].func, TRK[bucket][i].file, TRK[bucket][i].line);

}

/* Simple wrappers for non-debugging functions: */

static inline void *TRK_ck_alloc(u32 size, const char *file, const char *func,
                                 u32 line) {

  void *ret = DFL_ck_alloc(size);
  TRK_alloc_buf(ret, file, func, line);
  return ret;

}

static inline void *TRK_ck_realloc(void *orig, u32 size, const char *file,
                                   const char *func, u32 line) {

  void *ret = DFL_ck_realloc(orig, size);
  TRK_free_buf(orig, file, func, line);
  TRK_alloc_buf(ret, file, func, line);
  return ret;

}

static inline void *TRK_ck_strdup(u8 *str, const char *file, const char *func,
                                  u32 line) {

  void *ret = DFL_ck_strdup(str);
  TRK_alloc_buf(ret, file, func, line);
  return ret;

}

static inline void TRK_ck_free(void *ptr, const char *file, const char *func,
                               u32 line) {

  TRK_free_buf(ptr, file, func, line);
  DFL_ck_free(ptr);

}

    /* Aliasing user-facing names to tracking functions: */

    #define ck_alloc(_p1) TRK_ck_alloc(_p1, __FILE__, __FUNCTION__, __LINE__)

    #define ck_alloc_nozero(_p1) \
      TRK_ck_alloc(_p1, __FILE__, __FUNCTION__, __LINE__)

    #define ck_realloc(_p1, _p2) \
      TRK_ck_realloc(_p1, _p2, __FILE__, __FUNCTION__, __LINE__)

    #define ck_strdup(_p1) TRK_ck_strdup(_p1, __FILE__, __FUNCTION__, __LINE__)

    #define ck_free(_p1) TRK_ck_free(_p1, __FILE__, __FUNCTION__, __LINE__)
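
    /* Usage sketch (illustrative, not part of the original header): with
       DEBUG_BUILD defined, call alloc_report() just before exiting to list
       any ck_alloc()ed buffers that were never ck_free()d.

         ...
         alloc_report();
         exit(0);
    */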

  #endif                                                   /* ^!DEBUG_BUILD */

#endif                                          /* _WANT_ORIGINAL_AFL_ALLOC */

/* This function calculates the next power of 2 greater than or equal to its
   argument.
   @return The rounded up power of 2 (if no overflow) or 0 on overflow.
*/
static inline size_t next_pow2(size_t in) {

  // Commented this out as this behavior doesn't change, according to unittests
  // if (in == 0 || in > (size_t)-1) {

  //
  //   return 0;                  /* avoid undefined behaviour under-/overflow
  //   */
  //
  // }

  size_t out = in - 1;
  out |= out >> 1;
  out |= out >> 2;
  out |= out >> 4;
  out |= out >> 8;
  out |= out >> 16;
  return out + 1;

}
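
/* Worked examples (illustrative, not part of the original header):
   next_pow2(1) == 1, next_pow2(5) == 8, next_pow2(64) == 64,
   next_pow2(65) == 128. Since the shift chain only smears the low 32 bits,
   results are exact for inputs up to 2^32, which is plenty for these buffers. */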

/* AFL alloc buffer, the struct is here so we don't need to do fancy ptr
 * arithmetic */
struct afl_alloc_buf {

  /* The complete allocated size, including the header of len
   * AFL_ALLOC_SIZE_OFFSET */
  size_t complete_size;
  /* ptr to the first element of the actual buffer */
  u8 buf[0];

};

#define AFL_ALLOC_SIZE_OFFSET (offsetof(struct afl_alloc_buf, buf))
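
/* Layout sketch (derived from the struct above; illustrative only):

     [ complete_size : size_t ][ user data ... ]
     ^ afl_alloc_bufptr()       ^ pointer handed out by afl_realloc()

   AFL_ALLOC_SIZE_OFFSET is therefore sizeof(size_t) on typical targets. */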

/* Returns the container element to this ptr */
static inline struct afl_alloc_buf *afl_alloc_bufptr(void *buf) {

  return (struct afl_alloc_buf *)((u8 *)buf - AFL_ALLOC_SIZE_OFFSET);

}

/* Gets the maximum size of the buf contents (ptr->complete_size -
 * AFL_ALLOC_SIZE_OFFSET) */
static inline size_t afl_alloc_bufsize(void *buf) {

  return afl_alloc_bufptr(buf)->complete_size - AFL_ALLOC_SIZE_OFFSET;

}

/* This function makes sure that the allocated size of *buf is at least
   size_needed after the call, reallocating *buf if necessary.
   The allocation grows exponentially, as per:
   https://blog.mozilla.org/nnethercote/2014/11/04/please-grow-your-buffers-exponentially/
   Returns NULL and frees *buf if the reallocation fails.
   @return For convenience, this function returns *buf.
 */
static inline void *afl_realloc(void **buf, size_t size_needed) {

  struct afl_alloc_buf *new_buf = NULL;

  size_t current_size = 0;
  size_t next_size = 0;

  if (likely(*buf)) {

    /* the size is always stored at buf - 1*size_t */
    new_buf = (struct afl_alloc_buf *)afl_alloc_bufptr(*buf);
    current_size = new_buf->complete_size;

  }

  size_needed += AFL_ALLOC_SIZE_OFFSET;

  /* No need to realloc */
  if (likely(current_size >= size_needed)) { return *buf; }

  /* No initial size was set */
  if (size_needed < INITIAL_GROWTH_SIZE) {

    next_size = INITIAL_GROWTH_SIZE;

  } else {

    /* grow exponentially */
    next_size = next_pow2(size_needed);

    /* handle overflow: fall back to the original size_needed */
    if (unlikely(!next_size)) { next_size = size_needed; }

  }

  /* alloc */
  struct afl_alloc_buf *newer_buf =
      (struct afl_alloc_buf *)realloc(new_buf, next_size);
  if (unlikely(!newer_buf)) {

    free(new_buf);  // avoid a leak
    *buf = NULL;
    return NULL;

  }

  new_buf = newer_buf;
  memset(((u8 *)new_buf) + current_size, 0, next_size - current_size);

  new_buf->complete_size = next_size;
  *buf = (void *)(new_buf->buf);
  return *buf;

}
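
/* Usage sketch (illustrative; "out_buf", "src" and "len" are hypothetical):

     static u8 *out_buf = NULL;
     ...
     u8 *ptr = afl_realloc((void **)&out_buf, len);
     if (unlikely(!ptr)) { PFATAL("alloc"); }
     memcpy(ptr, src, len);
     ...
     afl_free(out_buf);
     out_buf = NULL;
*/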

/* afl_realloc_exact uses afl alloc buffers but resizes them to an exact,
   caller-specified size */

static inline void *afl_realloc_exact(void **buf, size_t size_needed) {

  struct afl_alloc_buf *new_buf = NULL;

  size_t current_size = 0;

  if (likely(*buf)) {

    /* the size is always stored at buf - 1*size_t */
    new_buf = (struct afl_alloc_buf *)afl_alloc_bufptr(*buf);
    current_size = new_buf->complete_size;

  }

  size_needed += AFL_ALLOC_SIZE_OFFSET;

  /* No need to realloc */
  if (unlikely(current_size == size_needed)) { return *buf; }

  /* alloc */
  struct afl_alloc_buf *newer_buf =
      (struct afl_alloc_buf *)realloc(new_buf, size_needed);
  if (unlikely(!newer_buf)) {

    free(new_buf);  // avoid a leak
    *buf = NULL;
    return NULL;

  } else {

    new_buf = newer_buf;

  }

  new_buf->complete_size = size_needed;
  *buf = (void *)(new_buf->buf);
  return *buf;

}

static inline void afl_free(void *buf) {

  if (buf) { free(afl_alloc_bufptr(buf)); }

}

/* Swaps buf1 ptr and buf2 ptr, as well as their sizes */
static inline void afl_swap_bufs(void **buf1, void **buf2) {

  void *scratch_buf = *buf1;
  *buf1 = *buf2;
  *buf2 = scratch_buf;

}
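
/* Usage sketch (illustrative; "in_buf" and "out_buf" are hypothetical
   afl_realloc()-managed buffers): swap the two pointers after a mutation
   round instead of copying data back and forth.

     afl_swap_bufs((void **)&in_buf, (void **)&out_buf);
*/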

#undef INITIAL_GROWTH_SIZE

#endif                                               /* ! _HAVE_ALLOC_INL_H */
