/*
 * Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// This file contains some simple inline math methods commonly used elsewhere
// within SecAgg. No error checking or bounds checking is performed. The
// calling code is responsible for making sure the operations do not overflow,
// except as noted.

#ifndef FCP_SECAGG_SHARED_MATH_H_
#define FCP_SECAGG_SHARED_MATH_H_

#include <cstdint>
#include <string>

#include "absl/base/internal/endian.h"
#include "absl/numeric/int128.h"
#include "fcp/base/monitoring.h"

namespace fcp {
namespace secagg {

// Integer division rounded up.
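// For example, DivideRoundUp(7, 2) == 4 and DivideRoundUp(8, 2) == 4.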
static inline uint32_t DivideRoundUp(uint32_t a, uint32_t b) {
  return (a + b - 1) / b;
}

// Addition modulo non-zero integer z.
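// For example, AddMod(5, 6, 7) == 4, since (5 + 6) % 7 == 4.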
static inline uint64_t AddMod(uint64_t a, uint64_t b, uint64_t z) {
  return (a + b) % z;
}

// Optimized version of AddMod that assumes that a and b are smaller than mod.
// This version produces code with a branchless CMOVB instruction and is at
// least 2x faster than AddMod on x64.
// TODO(team): Eventually this should replace AddMod.
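// For example, given the precondition a < mod and b < mod,
// AddModOpt(5, 6, 7) == 4: the sum 11 is not less than 7, so 7 is subtracted.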
inline uint64_t AddModOpt(uint64_t a, uint64_t b, uint64_t mod) {
#ifndef NDEBUG
  // Verify assumption that a and b are smaller than mod to start with.
  FCP_CHECK(a < mod && b < mod);
  // Make sure there is no overflow when adding a and b.
  FCP_CHECK(a <= (a + b) && b <= (a + b));
#endif
  uint64_t sum = a + b;
  return sum < mod ? sum : sum - mod;
}

// Subtraction modulo non-zero integer z. Handles underflow correctly if b > a.
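// For example, SubtractMod(2, 5, 7) == 4: the unsigned wrap-around of 2 - 5 is
// compensated by adding z before the final reduction, matching -3 mod 7 == 4.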
static inline uint64_t SubtractMod(uint64_t a, uint64_t b, uint64_t z) {
  return ((a - b) + z) % z;
}

// Optimized version of SubtractMod that assumes that a and b are smaller than
// mod. This version produces code with a branchless CMOVB instruction and is
// at least 2x faster than SubtractMod on x64.
// TODO(team): Eventually this should replace SubtractMod.
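// For example, given the precondition a < mod and b < mod,
// SubtractModOpt(2, 5, 7) == 4, computed as 7 - 5 + 2.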
inline uint64_t SubtractModOpt(uint64_t a, uint64_t b, uint64_t mod) {
#ifndef NDEBUG
  // Verify assumption that a and b are smaller than mod to start with.
  FCP_CHECK(a < mod && b < mod);
#endif
  return a >= b ? a - b : mod - b + a;
}

// Multiplication of 32-bit integers modulo a non-zero integer z.
// Guarantees the output is a 32-bit integer and avoids overflow by casting both
// factors to uint64_t first.
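// For example, MultiplyMod(0xFFFFFFFFu, 0xFFFFFFFFu, 10) == 5: the full 64-bit
// product (2^32 - 1)^2 ends in ...25, so reducing it modulo 10 yields 5.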
static inline uint32_t MultiplyMod(uint32_t a, uint32_t b, uint64_t z) {
  return static_cast<uint32_t>((uint64_t{a} * uint64_t{b}) % z);
}

// Multiplication of 64-bit integers modulo a non-zero integer z.
// Guarantees the output is a 64-bit integer and avoids overflow by casting both
// factors to uint128 first.
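// For example, MultiplyMod64(uint64_t{1} << 63, 2, 3) == 1: the product 2^64
// does not fit in 64 bits, but the 128-bit intermediate keeps it exact, and
// 2^64 mod 3 == 1.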
static inline uint64_t MultiplyMod64(uint64_t a, uint64_t b, uint64_t z) {
  return absl::Uint128Low64((absl::uint128(a) * absl::uint128(b)) %
                            absl::uint128(z));
}

// Modular inverse of a 64-bit integer modulo a prime z via Fermat's little
// theorem. Assumes that z is prime.
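// The loop below computes a^(z - 2) mod z by binary exponentiation
// (square-and-multiply). For example, InverseModPrime(3, 7) == 5, since
// 3^5 mod 7 == 5 and (3 * 5) % 7 == 1. The result is meaningful only when a is
// not a multiple of z (a multiple of z has no inverse).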
static inline uint64_t InverseModPrime(uint64_t a, uint64_t z) {
  uint64_t inverse = 1;
  uint64_t exponent = z - 2;

  while (exponent > 0) {
    if (exponent & 1) {
      inverse = MultiplyMod64(inverse, a, z);
    }

    exponent >>= 1;
    a = MultiplyMod64(a, a, z);
  }

  return inverse;
}

// Converts ints to big-endian byte string representation. Provides platform-
// independence only in converting known integer values to byte strings for use
// in cryptographic methods, not for general processing of binary data.
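// For example, IntToByteString(0x0A0B0C0D) returns a 4-byte string holding
// {0x0A, 0x0B, 0x0C, 0x0D}, most significant byte first.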
static inline std::string IntToByteString(uint32_t input) {
  char bytes[4];
  absl::big_endian::Store32(bytes, input);
  return std::string(bytes, 4);
}

}  // namespace secagg
}  // namespace fcp

#endif  // FCP_SECAGG_SHARED_MATH_H_