/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_COMPILER_H
#define ZSTD_COMPILER_H

#include <stddef.h>

#include "portability_macros.h"

/*-*******************************************************
*  Compiler specifics
*********************************************************/
/* force inlining */

#if !defined(ZSTD_NO_INLINE)
#if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L  /* C99 */
#  define INLINE_KEYWORD inline
#else
#  define INLINE_KEYWORD
#endif

#if defined(__GNUC__) || defined(__ICCARM__)
#  define FORCE_INLINE_ATTR __attribute__((always_inline))
#elif defined(_MSC_VER)
#  define FORCE_INLINE_ATTR __forceinline
#else
#  define FORCE_INLINE_ATTR
#endif

#else

#define INLINE_KEYWORD
#define FORCE_INLINE_ATTR

#endif

/**
  On MSVC, qsort requires that functions passed into it use the __cdecl calling convention (CC).
  This explicitly marks such functions as __cdecl so that the code will still compile
  if a CC other than __cdecl has been made the default.
*/
#if defined(_MSC_VER)
#  define WIN_CDECL __cdecl
#else
#  define WIN_CDECL
#endif
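
/* Illustrative sketch (not part of this header, names are hypothetical): a
 * qsort comparator marked WIN_CDECL so it keeps the __cdecl CC even when a
 * different default CC is selected for the build. Kept under `#if 0` so it is
 * never compiled. */
#if 0
#include <stdlib.h>   /* qsort */
static int WIN_CDECL exampleCompareBytes(const void* a, const void* b)
{
    unsigned char const x = *(unsigned char const*)a;
    unsigned char const y = *(unsigned char const*)b;
    return (x > y) - (x < y);   /* avoids the overflow risk of plain subtraction */
}
/* usage: qsort(buffer, bufferSize, 1, exampleCompareBytes); */
#endif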

/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */
#if defined(__GNUC__)
#  define UNUSED_ATTR __attribute__((unused))
#else
#  define UNUSED_ATTR
#endif

/**
 * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
 * parameters. They must be inlined for the compiler to eliminate the constant
 * branches.
 */
#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR UNUSED_ATTR
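
/* Illustrative sketch (hypothetical helpers, not part of zstd): a C "template"
 * takes a constant selector parameter, and each wrapper passes a literal, so
 * after forced inlining the compiler folds the selector and drops the dead
 * branch. Kept under `#if 0` so it is never compiled. */
#if 0
FORCE_INLINE_TEMPLATE size_t
example_count_generic(const unsigned char* p, size_t n, int countZeroes)
{
    size_t total = 0, i;
    for (i = 0; i < n; i++) {
        if (countZeroes) total += (p[i] == 0);   /* branch on the constant parameter */
        else             total += (p[i] != 0);
    }
    return total;
}
/* each instantiation passes a compile-time constant selector */
static size_t example_countZeroes (const unsigned char* p, size_t n) { return example_count_generic(p, n, 1); }
static size_t example_countNonZero(const unsigned char* p, size_t n) { return example_count_generic(p, n, 0); }
#endif
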
/**
 * HINT_INLINE is used to help the compiler generate better code. It is *not*
 * used for "templates", so it can be tweaked based on the compiler's
 * performance.
 *
 * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the
 * always_inline attribute.
 *
 * clang up to 5.0.0 (trunk) benefits tremendously from the always_inline
 * attribute.
 */
#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5
#  define HINT_INLINE static INLINE_KEYWORD
#else
#  define HINT_INLINE FORCE_INLINE_TEMPLATE
#endif

/* "soft" inline :
 * The compiler is free to decide whether or not it's a good idea to inline.
 * The main objective is to silence compiler warnings
 * when a defined function is included but not used.
 *
 * Note : this macro is prefixed `MEM_` because it used to be provided by the `mem.h` unit.
 * Updating the prefix is probably preferable, but requires a fairly large codemod,
 * since this name is used everywhere.
 */
#ifndef MEM_STATIC  /* already defined in Linux Kernel mem.h */
#if defined(__GNUC__)
#  define MEM_STATIC static __inline UNUSED_ATTR
#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#  define MEM_STATIC static inline
#elif defined(_MSC_VER)
#  define MEM_STATIC static __inline
#else
#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif
#endif
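
/* Illustrative usage (hypothetical helper, never compiled): a small
 * header-defined utility declared MEM_STATIC, so each including unit may
 * inline it, and units that never call it do not warn about an unused
 * static function. */
#if 0
MEM_STATIC unsigned example_isPowerOf2(size_t v)
{
    return (v != 0) && ((v & (v - 1)) == 0);   /* 0 is not a power of 2 */
}
#endif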

/* force no inlining */
#ifdef _MSC_VER
#  define FORCE_NOINLINE static __declspec(noinline)
#else
#  if defined(__GNUC__) || defined(__ICCARM__)
#    define FORCE_NOINLINE static __attribute__((__noinline__))
#  else
#    define FORCE_NOINLINE static
#  endif
#endif


/* target attribute */
#if defined(__GNUC__) || defined(__ICCARM__)
#  define TARGET_ATTRIBUTE(target) __attribute__((__target__(target)))
#else
#  define TARGET_ATTRIBUTE(target)
#endif

/* Target attribute for BMI2 dynamic dispatch.
 * Enable lzcnt, bmi, and bmi2.
 * We test for bmi1 & bmi2. lzcnt is included in bmi1.
 */
#define BMI2_TARGET_ATTRIBUTE TARGET_ATTRIBUTE("lzcnt,bmi,bmi2")
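
/* Illustrative sketch (hypothetical functions, never compiled): the usual
 * dynamic-dispatch pattern pairs a BMI2_TARGET_ATTRIBUTE variant with a plain
 * fallback, and a wrapper picks one at runtime from a cpuid-derived flag. */
#if 0
BMI2_TARGET_ATTRIBUTE static size_t example_scan_bmi2(const unsigned char* p, size_t n)
{
    /* body may freely use lzcnt/bmi/bmi2 instructions or intrinsics */
    size_t i = 0; while (i < n && p[i] == 0) i++; return i;
}
static size_t example_scan_default(const unsigned char* p, size_t n)
{
    size_t i = 0; while (i < n && p[i] == 0) i++; return i;
}
static size_t example_scan(const unsigned char* p, size_t n, int bmi2)
{
    return bmi2 ? example_scan_bmi2(p, n) : example_scan_default(p, n);
}
#endif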

/* prefetch
 * can be disabled by declaring the NO_PREFETCH build macro */
#if defined(NO_PREFETCH)
#  define PREFETCH_L1(ptr)  do { (void)(ptr); } while (0)  /* disabled */
#  define PREFETCH_L2(ptr)  do { (void)(ptr); } while (0)  /* disabled */
#else
#  if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86)) && !defined(_M_ARM64EC)  /* _mm_prefetch() is not defined outside of x86/x64 */
#    include <mmintrin.h>   /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
#    define PREFETCH_L1(ptr)  _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
#    define PREFETCH_L2(ptr)  _mm_prefetch((const char*)(ptr), _MM_HINT_T1)
#  elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
#    define PREFETCH_L1(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
#    define PREFETCH_L2(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 2 /* locality */)
#  elif defined(__aarch64__)
#    define PREFETCH_L1(ptr)  do { __asm__ __volatile__("prfm pldl1keep, %0" ::"Q"(*(ptr))); } while (0)
#    define PREFETCH_L2(ptr)  do { __asm__ __volatile__("prfm pldl2keep, %0" ::"Q"(*(ptr))); } while (0)
#  else
#    define PREFETCH_L1(ptr)  do { (void)(ptr); } while (0)  /* disabled */
#    define PREFETCH_L2(ptr)  do { (void)(ptr); } while (0)  /* disabled */
#  endif
#endif  /* NO_PREFETCH */

#define CACHELINE_SIZE 64

#define PREFETCH_AREA(p, s)                                   \
    do {                                                      \
        const char* const _ptr = (const char*)(p);            \
        size_t const _size = (size_t)(s);                     \
        size_t _pos;                                          \
        for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) {      \
            PREFETCH_L2(_ptr + _pos);                         \
        }                                                     \
    } while (0)
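
/* Illustrative usage (hypothetical function, never compiled): warm the cache
 * ahead of a table that is about to be scanned. Prefetches are hints only;
 * correctness never depends on them. */
#if 0
static void example_warmTable(const void* table, size_t tableSize)
{
    PREFETCH_AREA(table, tableSize);     /* one L2 prefetch per cache line */
    PREFETCH_L1((const char*)table);     /* pull the first line even closer */
}
#endif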

/* vectorization
 * older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax,
 * and some compilers, like Intel ICC and MCST LCC, do not support it at all. */
#if !defined(__INTEL_COMPILER) && !defined(__clang__) && defined(__GNUC__) && !defined(__LCC__)
#  if (__GNUC__ == 4 && __GNUC_MINOR__ > 3) || (__GNUC__ >= 5)
#    define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
#  else
#    define DONT_VECTORIZE _Pragma("GCC optimize(\"no-tree-vectorize\")")
#  endif
#else
#  define DONT_VECTORIZE
#endif

/* Tell the compiler that a branch is likely or unlikely.
 * Only use these macros if it causes the compiler to generate better code.
 * If you can remove a LIKELY/UNLIKELY annotation without speed changes in gcc
 * and clang, please do.
 */
#if defined(__GNUC__)
#define LIKELY(x) (__builtin_expect((x), 1))
#define UNLIKELY(x) (__builtin_expect((x), 0))
#else
#define LIKELY(x) (x)
#define UNLIKELY(x) (x)
#endif
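
/* Illustrative usage (hypothetical function, never compiled): annotate the
 * rare error path so the compiler keeps the hot path on the fall-through
 * side. Per the note above, keep the annotation only if it measurably helps. */
#if 0
static size_t example_copy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    size_t i;
    if (UNLIKELY(srcSize > dstCapacity)) return (size_t)-1;   /* rare error path */
    for (i = 0; i < srcSize; i++) ((char*)dst)[i] = ((const char*)src)[i];
    return srcSize;
}
#endif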

#if __has_builtin(__builtin_unreachable) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)))
#  define ZSTD_UNREACHABLE do { assert(0), __builtin_unreachable(); } while (0)
#else
#  define ZSTD_UNREACHABLE do { assert(0); } while (0)
#endif
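
/* Illustrative usage (hypothetical function, never compiled): mark a branch
 * the caller guarantees is impossible; it asserts in debug builds, and where
 * __builtin_unreachable() is available the optimizer may drop the branch. */
#if 0
static unsigned example_bitsPerKind(int kind)   /* contract: kind is 0, 1 or 2 */
{
    switch (kind) {
        case 0: return 8;
        case 1: return 16;
        case 2: return 32;
        default:
            ZSTD_UNREACHABLE;   /* unreachable by contract */
            return 0;           /* keeps compilers without the builtin quiet */
    }
}
#endif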

/* disable warnings */
#ifdef _MSC_VER    /* Visual Studio */
#  include <intrin.h>              /* For Visual 2005 */
#  pragma warning(disable : 4100)  /* disable: C4100: unreferenced formal parameter */
#  pragma warning(disable : 4127)  /* disable: C4127: conditional expression is constant */
#  pragma warning(disable : 4204)  /* disable: C4204: non-constant aggregate initializer */
#  pragma warning(disable : 4214)  /* disable: C4214: non-int bitfields */
#  pragma warning(disable : 4324)  /* disable: C4324: padded structure */
#endif

/* Like DYNAMIC_BMI2, but for compile-time determination of BMI2 support */
#ifndef STATIC_BMI2
#  if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86))
#    ifdef __AVX2__  /* MSVC does not have a BMI2-specific flag, but every CPU that supports AVX2 also supports BMI2 */
#      define STATIC_BMI2 1
#    endif
#  elif defined(__BMI2__) && defined(__x86_64__) && defined(__GNUC__)
#    define STATIC_BMI2 1
#  endif
#endif

#ifndef STATIC_BMI2
#  define STATIC_BMI2 0
#endif

/* compile time determination of SIMD support */
#if !defined(ZSTD_NO_INTRINSICS)
#  if defined(__SSE2__) || defined(_M_AMD64) || (defined (_M_IX86) && defined(_M_IX86_FP) && (_M_IX86_FP >= 2))
#    define ZSTD_ARCH_X86_SSE2
#  endif
#  if defined(__ARM_NEON) || defined(_M_ARM64)
#    define ZSTD_ARCH_ARM_NEON
#  endif
#
#  if defined(ZSTD_ARCH_X86_SSE2)
#    include <emmintrin.h>
#  elif defined(ZSTD_ARCH_ARM_NEON)
#    include <arm_neon.h>
#  endif
#endif

/* C-language Attributes are added in C23. */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && defined(__has_c_attribute)
#  define ZSTD_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
#else
#  define ZSTD_HAS_C_ATTRIBUTE(x) 0
#endif

/* Only use C++ attributes in C++. Some compilers report support for C++
 * attributes when compiling with C.
 */
#if defined(__cplusplus) && defined(__has_cpp_attribute)
#  define ZSTD_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
#else
#  define ZSTD_HAS_CPP_ATTRIBUTE(x) 0
#endif

/* Define ZSTD_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute.
 * - C23: https://en.cppreference.com/w/c/language/attributes/fallthrough
 * - CPP17: https://en.cppreference.com/w/cpp/language/attributes/fallthrough
 * - Else: __attribute__((__fallthrough__))
 */
#ifndef ZSTD_FALLTHROUGH
#  if ZSTD_HAS_C_ATTRIBUTE(fallthrough)
#    define ZSTD_FALLTHROUGH [[fallthrough]]
#  elif ZSTD_HAS_CPP_ATTRIBUTE(fallthrough)
#    define ZSTD_FALLTHROUGH [[fallthrough]]
#  elif __has_attribute(__fallthrough__)
/* Leading semicolon is to satisfy gcc-11 with -pedantic. Without the semicolon
 * gcc complains about: a label can only be part of a statement and a declaration is not a statement.
 */
#    define ZSTD_FALLTHROUGH ; __attribute__((__fallthrough__))
#  else
#    define ZSTD_FALLTHROUGH
#  endif
#endif
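
/* Illustrative usage (hypothetical function, never compiled): annotate an
 * intentional case fall-through so warnings such as -Wimplicit-fallthrough
 * (and the C23/C++17 [[fallthrough]] checks) stay quiet. */
#if 0
static unsigned example_sumTail(const unsigned char* p, size_t n)   /* contract: n <= 3 */
{
    unsigned sum = 0;
    switch (n) {
        case 3: sum += p[2];
                ZSTD_FALLTHROUGH;
        case 2: sum += p[1];
                ZSTD_FALLTHROUGH;
        case 1: sum += p[0];
                ZSTD_FALLTHROUGH;
        case 0:
        default: break;
    }
    return sum;
}
#endif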

/*-**************************************************************
*  Alignment check
*****************************************************************/

/* this test was initially positioned in mem.h,
 * but that file is removed (or replaced) for the linux kernel,
 * so the check is now hosted in compiler.h,
 * which remains valid for both user & kernel spaces.
 */

#ifndef ZSTD_ALIGNOF
#  if defined(__GNUC__) || defined(_MSC_VER)
   /* covers gcc, clang & MSVC */
   /* note : this section must come first, before C11,
    * due to a limitation in the kernel source generator */
#    define ZSTD_ALIGNOF(T) __alignof(T)

#  elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
   /* C11 support */
#    include <stdalign.h>
#    define ZSTD_ALIGNOF(T) alignof(T)

#  else
   /* No known support for alignof() - imperfect backup */
#    define ZSTD_ALIGNOF(T) (sizeof(void*) < sizeof(T) ? sizeof(void*) : sizeof(T))

#  endif
#endif  /* ZSTD_ALIGNOF */
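
/* Illustrative usage (hypothetical check, never compiled): verify that a
 * workspace pointer is suitably aligned before placing objects of type T in
 * it; the mask trick assumes the alignment is a power of 2. */
#if 0
static int example_isAligned(const void* ptr, size_t alignment)
{
    return ((size_t)ptr & (alignment - 1)) == 0;
}
/* usage: example_isAligned(workspace, ZSTD_ALIGNOF(unsigned)) */
#endif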

/*-**************************************************************
*  Sanitizer
*****************************************************************/

/**
 * Zstd relies on pointer overflow in its decompressor.
 * We add this attribute to functions that rely on pointer overflow.
 */
#ifndef ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
#  if __has_attribute(no_sanitize)
#    if !defined(__clang__) && defined(__GNUC__) && __GNUC__ < 8
       /* gcc < 8 only has signed-integer-overflow, which triggers on pointer overflow */
#      define ZSTD_ALLOW_POINTER_OVERFLOW_ATTR __attribute__((no_sanitize("signed-integer-overflow")))
#    else
       /* older versions of clang [3.7, 5.0) will warn that pointer-overflow is ignored. */
#      define ZSTD_ALLOW_POINTER_OVERFLOW_ATTR __attribute__((no_sanitize("pointer-overflow")))
#    endif
#  else
#    define ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
#  endif
#endif

/**
 * Helper function to perform a wrapped pointer difference without triggering
 * UBSAN.
 *
 * @returns lhs - rhs with wrapping
 */
MEM_STATIC
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
ptrdiff_t ZSTD_wrappedPtrDiff(unsigned char const* lhs, unsigned char const* rhs)
{
    return lhs - rhs;
}

/**
 * Helper function to perform a wrapped pointer add without triggering UBSAN.
 *
 * @return ptr + add with wrapping
 */
MEM_STATIC
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
unsigned char const* ZSTD_wrappedPtrAdd(unsigned char const* ptr, ptrdiff_t add)
{
    return ptr + add;
}

/**
 * Helper function to perform a wrapped pointer subtraction without triggering
 * UBSAN.
 *
 * @return ptr - sub with wrapping
 */
MEM_STATIC
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
unsigned char const* ZSTD_wrappedPtrSub(unsigned char const* ptr, ptrdiff_t sub)
{
    return ptr - sub;
}

/**
 * Helper function to add to a pointer that works around C's undefined behavior
 * of adding 0 to NULL.
 *
 * @returns `ptr + add` except it defines `NULL + 0 == NULL`.
 */
MEM_STATIC
unsigned char* ZSTD_maybeNullPtrAdd(unsigned char* ptr, ptrdiff_t add)
{
    return add > 0 ? ptr + add : ptr;
}

/* Issue #3240 reports an ASAN failure on an llvm-mingw build. Out of an
 * abundance of caution, disable our custom poisoning on mingw. */
#ifdef __MINGW32__
#ifndef ZSTD_ASAN_DONT_POISON_WORKSPACE
#define ZSTD_ASAN_DONT_POISON_WORKSPACE 1
#endif
#ifndef ZSTD_MSAN_DONT_POISON_WORKSPACE
#define ZSTD_MSAN_DONT_POISON_WORKSPACE 1
#endif
#endif

#if ZSTD_MEMORY_SANITIZER && !defined(ZSTD_MSAN_DONT_POISON_WORKSPACE)
/* Not all platforms that support msan provide sanitizers/msan_interface.h.
 * We therefore declare the functions we need ourselves, rather than trying to
 * include the header file... */
#include <stddef.h>  /* size_t */
#define ZSTD_DEPS_NEED_STDINT
#include "zstd_deps.h"  /* intptr_t */

/* Make memory region fully initialized (without changing its contents). */
void __msan_unpoison(const volatile void *a, size_t size);

/* Make memory region fully uninitialized (without changing its contents).
   This is a legacy interface that does not update origin information. Use
   __msan_allocated_memory() instead. */
void __msan_poison(const volatile void *a, size_t size);

/* Returns the offset of the first (at least partially) poisoned byte in the
   memory range, or -1 if the whole range is good. */
intptr_t __msan_test_shadow(const volatile void *x, size_t size);

/* Print shadow and origin for the memory range to stderr in a human-readable
   format. */
void __msan_print_shadow(const volatile void *x, size_t size);
#endif

#if ZSTD_ADDRESS_SANITIZER && !defined(ZSTD_ASAN_DONT_POISON_WORKSPACE)
/* Not all platforms that support asan provide sanitizers/asan_interface.h.
 * We therefore declare the functions we need ourselves, rather than trying to
 * include the header file... */
#include <stddef.h>  /* size_t */

/**
 * Marks a memory region (<c>[addr, addr+size)</c>) as unaddressable.
 *
 * This memory must be previously allocated by your program. Instrumented
 * code is forbidden from accessing addresses in this region until it is
 * unpoisoned. This function is not guaranteed to poison the entire region -
 * it could poison only a subregion of <c>[addr, addr+size)</c> due to ASan
 * alignment restrictions.
 *
 * \note This function is not thread-safe because no two threads can poison or
 * unpoison memory in the same memory region simultaneously.
 *
 * \param addr Start of memory region.
 * \param size Size of memory region. */
void __asan_poison_memory_region(void const volatile *addr, size_t size);

/**
 * Marks a memory region (<c>[addr, addr+size)</c>) as addressable.
 *
 * This memory must be previously allocated by your program. Accessing
 * addresses in this region is allowed until this region is poisoned again.
 * This function could unpoison a super-region of <c>[addr, addr+size)</c> due
 * to ASan alignment restrictions.
 *
 * \note This function is not thread-safe because no two threads can
 * poison or unpoison memory in the same memory region simultaneously.
 *
 * \param addr Start of memory region.
 * \param size Size of memory region. */
void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
#endif
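
/* Illustrative sketch (hypothetical workspace helpers, never compiled):
 * poison a reserved-but-unused region so stray accesses trap under ASan,
 * and unpoison it right before handing it back out, similar in spirit to
 * how zstd's workspace allocator uses these hooks. */
#if 0
static void example_retireRegion(void* start, size_t size)
{
    __asan_poison_memory_region(start, size);     /* accesses now report under ASan */
}
static void* example_reuseRegion(void* start, size_t size)
{
    __asan_unpoison_memory_region(start, size);   /* region is addressable again */
    return start;
}
#endif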

#endif /* ZSTD_COMPILER_H */