// Copyright 2017 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_CLANG_GCC_IMPL_H_
#define PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_CLANG_GCC_IMPL_H_

#include <cassert>
#include <limits>
#include <type_traits>

#include "partition_alloc/partition_alloc_base/numerics/safe_conversions.h"

#if !defined(__native_client__) && (defined(__ARMEL__) || defined(__arch64__))
#include "partition_alloc/partition_alloc_base/numerics/safe_math_arm_impl.h"
#define PA_BASE_HAS_ASSEMBLER_SAFE_MATH (1)
#else
#define PA_BASE_HAS_ASSEMBLER_SAFE_MATH (0)
#endif

namespace partition_alloc::internal::base::internal {

// These are non-functional placeholder implementations of the optimized safe
// math routines, defined only when no assembler implementations are
// available. They report is_supported == false and force a compile failure
// if ever instantiated.
#if !PA_BASE_HAS_ASSEMBLER_SAFE_MATH
template <typename T, typename U>
struct CheckedMulFastAsmOp {
  static const bool is_supported = false;
  template <typename V>
  static constexpr bool Do(T, U, V*) {
    // Force a compile failure if instantiated.
    return CheckOnFailure::template HandleFailure<bool>();
  }
};

template <typename T, typename U>
struct ClampedAddFastAsmOp {
  static const bool is_supported = false;
  template <typename V>
  static constexpr V Do(T, U) {
    // Force a compile failure if instantiated.
    return CheckOnFailure::template HandleFailure<V>();
  }
};

template <typename T, typename U>
struct ClampedSubFastAsmOp {
  static const bool is_supported = false;
  template <typename V>
  static constexpr V Do(T, U) {
    // Force a compile failure if instantiated.
    return CheckOnFailure::template HandleFailure<V>();
  }
};

template <typename T, typename U>
struct ClampedMulFastAsmOp {
  static const bool is_supported = false;
  template <typename V>
  static constexpr V Do(T, U) {
    // Force a compile failure if instantiated.
    return CheckOnFailure::template HandleFailure<V>();
  }
};
#endif  // PA_BASE_HAS_ASSEMBLER_SAFE_MATH
#undef PA_BASE_HAS_ASSEMBLER_SAFE_MATH

template <typename T, typename U>
struct CheckedAddFastOp {
  static const bool is_supported = true;
  template <typename V>
  __attribute__((always_inline)) static constexpr bool Do(T x, U y, V* result) {
    return !__builtin_add_overflow(x, y, result);
  }
};
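
// Illustrative usage sketch (comment only, not part of this header). The
// types and values are arbitrary examples; production code typically reaches
// these ops through the checked-numeric wrappers rather than calling them
// directly.
//
//   int32_t sum;
//   // INT32_MAX + 1 overflows int32_t, so Do() returns false; on success it
//   // returns true and writes the sum into |sum|.
//   bool ok = CheckedAddFastOp<int32_t, int32_t>::Do(
//       int32_t{0x7fffffff}, int32_t{1}, &sum);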

template <typename T, typename U>
struct CheckedSubFastOp {
  static const bool is_supported = true;
  template <typename V>
  __attribute__((always_inline)) static constexpr bool Do(T x, U y, V* result) {
    return !__builtin_sub_overflow(x, y, result);
  }
};

template <typename T, typename U>
struct CheckedMulFastOp {
#if defined(__clang__)
  // TODO(jschuh): Get the Clang runtime library issues sorted out so we can
  // support full-width, mixed-sign multiply builtins.
  // https://crbug.com/613003
  // We can support intptr_t, uintptr_t, or a smaller common type.
  static const bool is_supported =
      (IsTypeInRangeForNumericType<intptr_t, T>::value &&
       IsTypeInRangeForNumericType<intptr_t, U>::value) ||
      (IsTypeInRangeForNumericType<uintptr_t, T>::value &&
       IsTypeInRangeForNumericType<uintptr_t, U>::value);
#else
  static const bool is_supported = true;
#endif
  template <typename V>
  __attribute__((always_inline)) static constexpr bool Do(T x, U y, V* result) {
    return CheckedMulFastAsmOp<T, U>::is_supported
               ? CheckedMulFastAsmOp<T, U>::Do(x, y, result)
               : !__builtin_mul_overflow(x, y, result);
  }
};
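
// Illustrative note (comment only): under Clang the is_supported condition
// above means, for example, that CheckedMulFastOp<int32_t, int32_t> is
// supported on a 64-bit target (both operand types fit in intptr_t), while a
// mixed-sign CheckedMulFastOp<int64_t, uint64_t> is not, because neither
// intptr_t nor uintptr_t can represent both operand ranges. Callers are
// expected to consult is_supported and take a generic checked-math path for
// unsupported combinations.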

template <typename T, typename U>
struct ClampedAddFastOp {
  static const bool is_supported = ClampedAddFastAsmOp<T, U>::is_supported;
  template <typename V>
  __attribute__((always_inline)) static V Do(T x, U y) {
    return ClampedAddFastAsmOp<T, U>::template Do<V>(x, y);
  }
};

template <typename T, typename U>
struct ClampedSubFastOp {
  static const bool is_supported = ClampedSubFastAsmOp<T, U>::is_supported;
  template <typename V>
  __attribute__((always_inline)) static V Do(T x, U y) {
    return ClampedSubFastAsmOp<T, U>::template Do<V>(x, y);
  }
};

template <typename T, typename U>
struct ClampedMulFastOp {
  static const bool is_supported = ClampedMulFastAsmOp<T, U>::is_supported;
  template <typename V>
  __attribute__((always_inline)) static V Do(T x, U y) {
    return ClampedMulFastAsmOp<T, U>::template Do<V>(x, y);
  }
};
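
// Illustrative note (comment only): the three Clamped*FastOp wrappers above
// are thin forwarders to their *FastAsmOp counterparts, so they are only
// usable when the assembler implementations are compiled in. A sketch of the
// caller-side gating pattern, with hypothetical operand types:
//
//   if constexpr (ClampedAddFastOp<int32_t, int32_t>::is_supported) {
//     result = ClampedAddFastOp<int32_t, int32_t>::Do<int32_t>(x, y);
//   } else {
//     // ... use a portable clamped-add implementation instead.
//   }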

template <typename T>
struct ClampedNegFastOp {
  static const bool is_supported = std::is_signed_v<T>;
  __attribute__((always_inline)) static T Do(T value) {
    // Use this when there is no assembler path available.
    if (!ClampedSubFastAsmOp<T, T>::is_supported) {
      T result;
      return !__builtin_sub_overflow(T(0), value, &result)
                 ? result
                 : std::numeric_limits<T>::max();
    }

    // Otherwise negate via the assembler-backed clamped subtraction path.
    return ClampedSubFastOp<T, T>::template Do<T>(T(0), value);
  }
};
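
// Illustrative sketch (comment only): for a signed type the most negative
// value has no representable negation, so ClampedNegFastOp clamps it to the
// maximum instead. For example, with T = int32_t:
//
//   ClampedNegFastOp<int32_t>::Do(std::numeric_limits<int32_t>::min())
//       == std::numeric_limits<int32_t>::max();
//   ClampedNegFastOp<int32_t>::Do(5) == -5;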

}  // namespace partition_alloc::internal::base::internal

#endif  // PARTITION_ALLOC_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_CLANG_GCC_IMPL_H_
156