1 /* Copyright (c) 2020, Google Inc.
2 *
3 * Permission to use, copy, modify, and/or distribute this software for any
4 * purpose with or without fee is hereby granted, provided that the above
5 * copyright notice and this permission notice appear in all copies.
6 *
7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
10 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
12 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
13 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
14
15 // Some of this code is taken from the ref10 version of Ed25519 in SUPERCOP
16 // 20141124 (http://bench.cr.yp.to/supercop.html). That code is released as
17 // public domain. Other parts have been replaced to call into code generated by
18 // Fiat (https://github.com/mit-plv/fiat-crypto) in //third_party/fiat.
19 //
20 // The field functions are shared by Ed25519 and X25519 where possible.
21
22 #include <assert.h>
23 #include <string.h>
24
25 #include <openssl/mem.h>
26 #include <openssl/rand.h>
27 #include <openssl/sha.h>
28
29 #include "internal.h"
30 #include "../internal.h"
31
32 // Various pre-computed constants.
33 #include "./curve25519_tables.h"
34
35 #if defined(BORINGSSL_HAS_UINT128)
36 #include "../../third_party/fiat/curve25519_64.h"
37 #elif defined(OPENSSL_64_BIT)
38 #include "../../third_party/fiat/curve25519_64_msvc.h"
39 #else
40 #include "../../third_party/fiat/curve25519_32.h"
41 #endif
42
43
44 // Low-level intrinsic operations
45
// load_3 reads three little-endian bytes from |in| and returns them as the
// low 24 bits of a uint64_t.
static uint64_t load_3(const uint8_t *in) {
  return (uint64_t)in[0] | ((uint64_t)in[1] << 8) | ((uint64_t)in[2] << 16);
}
53
// load_4 reads four little-endian bytes from |in| and returns them as the
// low 32 bits of a uint64_t.
static uint64_t load_4(const uint8_t *in) {
  return (uint64_t)in[0] | ((uint64_t)in[1] << 8) | ((uint64_t)in[2] << 16) |
         ((uint64_t)in[3] << 24);
}
62
63
64 // Field operations.
65
66 #if defined(OPENSSL_64_BIT)
67
68 typedef uint64_t fe_limb_t;
69 #define FE_NUM_LIMBS 5
70
71 // assert_fe asserts that |f| satisfies bounds:
72 //
73 // [[0x0 ~> 0x8cccccccccccc],
74 // [0x0 ~> 0x8cccccccccccc],
75 // [0x0 ~> 0x8cccccccccccc],
76 // [0x0 ~> 0x8cccccccccccc],
77 // [0x0 ~> 0x8cccccccccccc]]
78 //
79 // See comments in curve25519_64.h for which functions use these bounds for
80 // inputs or outputs.
81 #define assert_fe(f) \
82 do { \
83 for (unsigned _assert_fe_i = 0; _assert_fe_i < 5; _assert_fe_i++) { \
84 declassify_assert(f[_assert_fe_i] <= UINT64_C(0x8cccccccccccc)); \
85 } \
86 } while (0)
87
88 // assert_fe_loose asserts that |f| satisfies bounds:
89 //
90 // [[0x0 ~> 0x1a666666666664],
91 // [0x0 ~> 0x1a666666666664],
92 // [0x0 ~> 0x1a666666666664],
93 // [0x0 ~> 0x1a666666666664],
94 // [0x0 ~> 0x1a666666666664]]
95 //
96 // See comments in curve25519_64.h for which functions use these bounds for
97 // inputs or outputs.
98 #define assert_fe_loose(f) \
99 do { \
100 for (unsigned _assert_fe_i = 0; _assert_fe_i < 5; _assert_fe_i++) { \
101 declassify_assert(f[_assert_fe_i] <= UINT64_C(0x1a666666666664)); \
102 } \
103 } while (0)
104
105 #else
106
107 typedef uint32_t fe_limb_t;
108 #define FE_NUM_LIMBS 10
109
110 // assert_fe asserts that |f| satisfies bounds:
111 //
112 // [[0x0 ~> 0x4666666], [0x0 ~> 0x2333333],
113 // [0x0 ~> 0x4666666], [0x0 ~> 0x2333333],
114 // [0x0 ~> 0x4666666], [0x0 ~> 0x2333333],
115 // [0x0 ~> 0x4666666], [0x0 ~> 0x2333333],
116 // [0x0 ~> 0x4666666], [0x0 ~> 0x2333333]]
117 //
118 // See comments in curve25519_32.h for which functions use these bounds for
119 // inputs or outputs.
120 #define assert_fe(f) \
121 do { \
122 for (unsigned _assert_fe_i = 0; _assert_fe_i < 10; _assert_fe_i++) { \
123 declassify_assert(f[_assert_fe_i] <= \
124 ((_assert_fe_i & 1) ? 0x2333333u : 0x4666666u)); \
125 } \
126 } while (0)
127
128 // assert_fe_loose asserts that |f| satisfies bounds:
129 //
130 // [[0x0 ~> 0xd333332], [0x0 ~> 0x6999999],
131 // [0x0 ~> 0xd333332], [0x0 ~> 0x6999999],
132 // [0x0 ~> 0xd333332], [0x0 ~> 0x6999999],
133 // [0x0 ~> 0xd333332], [0x0 ~> 0x6999999],
134 // [0x0 ~> 0xd333332], [0x0 ~> 0x6999999]]
135 //
136 // See comments in curve25519_32.h for which functions use these bounds for
137 // inputs or outputs.
138 #define assert_fe_loose(f) \
139 do { \
140 for (unsigned _assert_fe_i = 0; _assert_fe_i < 10; _assert_fe_i++) { \
141 declassify_assert(f[_assert_fe_i] <= \
142 ((_assert_fe_i & 1) ? 0x6999999u : 0xd333332u)); \
143 } \
144 } while (0)
145
146 #endif // OPENSSL_64_BIT
147
148 static_assert(sizeof(fe) == sizeof(fe_limb_t) * FE_NUM_LIMBS,
149 "fe_limb_t[FE_NUM_LIMBS] is inconsistent with fe");
150
// fe_frombytes_strict decodes the 32-byte little-endian encoding |s| into the
// field element |h|. The caller must ensure the top bit of |s| is clear.
static void fe_frombytes_strict(fe *h, const uint8_t s[32]) {
  // |fiat_25519_from_bytes| requires the top-most bit be clear.
  declassify_assert((s[31] & 0x80) == 0);
  fiat_25519_from_bytes(h->v, s);
  assert_fe(h->v);
}
157
// fe_frombytes decodes |s| into |h|, first masking off the top bit so that
// |fe_frombytes_strict|'s precondition holds for arbitrary input bytes.
static void fe_frombytes(fe *h, const uint8_t s[32]) {
  uint8_t s_copy[32];
  OPENSSL_memcpy(s_copy, s, 32);
  s_copy[31] &= 0x7f;  // Clear the unused top bit of the 255-bit encoding.
  fe_frombytes_strict(h, s_copy);
}
164
// fe_tobytes serializes the field element |f| (which must satisfy the tight
// bounds checked by |assert_fe|) into a 32-byte little-endian encoding |s|.
static void fe_tobytes(uint8_t s[32], const fe *f) {
  assert_fe(f->v);
  fiat_25519_to_bytes(s, f->v);
}
169
170 // h = 0
static void fe_0(fe *h) {
  // All-zero limbs encode the field element zero.
  OPENSSL_memset(h, 0, sizeof(fe));
}
174
// fe_loose_0 sets the loosely-reduced field element |h| to zero.
static void fe_loose_0(fe_loose *h) {
  OPENSSL_memset(h, 0, sizeof(fe_loose));
}
178
179 // h = 1
static void fe_1(fe *h) {
  // Zero all limbs, then set the lowest limb to one.
  OPENSSL_memset(h, 0, sizeof(fe));
  h->v[0] = 1;
}
184
// fe_loose_1 sets the loosely-reduced field element |h| to one.
static void fe_loose_1(fe_loose *h) {
  OPENSSL_memset(h, 0, sizeof(fe_loose));
  h->v[0] = 1;
}
189
190 // h = f + g
191 // Can overlap h with f or g.
static void fe_add(fe_loose *h, const fe *f, const fe *g) {
  // Inputs must be tightly reduced; the sum is only loosely reduced, so the
  // result type is |fe_loose|.
  assert_fe(f->v);
  assert_fe(g->v);
  fiat_25519_add(h->v, f->v, g->v);
  assert_fe_loose(h->v);
}
198
199 // h = f - g
200 // Can overlap h with f or g.
static void fe_sub(fe_loose *h, const fe *f, const fe *g) {
  // Inputs must be tightly reduced; the difference is only loosely reduced,
  // so the result type is |fe_loose|.
  assert_fe(f->v);
  assert_fe(g->v);
  fiat_25519_sub(h->v, f->v, g->v);
  assert_fe_loose(h->v);
}
207
// fe_carry reduces a loosely-bounded |f| to the tight bounds, writing the
// result to |h|.
static void fe_carry(fe *h, const fe_loose* f) {
  assert_fe_loose(f->v);
  fiat_25519_carry(h->v, f->v);
  assert_fe(h->v);
}
213
// fe_mul_impl multiplies |in1| and |in2|, writing a tightly-reduced product
// to |out|. Loosely-bounded inputs are acceptable, so both |fe| and
// |fe_loose| arguments may be passed via the typed wrappers below.
static void fe_mul_impl(fe_limb_t out[FE_NUM_LIMBS],
                        const fe_limb_t in1[FE_NUM_LIMBS],
                        const fe_limb_t in2[FE_NUM_LIMBS]) {
  assert_fe_loose(in1);
  assert_fe_loose(in2);
  fiat_25519_carry_mul(out, in1, in2);
  assert_fe(out);
}
222
// fe_mul_ltt: loose output, tight inputs. (The suffix letters give the types
// of the result and the two operands: l = fe_loose, t = fe.)
static void fe_mul_ltt(fe_loose *h, const fe *f, const fe *g) {
  fe_mul_impl(h->v, f->v, g->v);
}
226
// fe_mul_llt: loose output, loose and tight inputs.
static void fe_mul_llt(fe_loose *h, const fe_loose *f, const fe *g) {
  fe_mul_impl(h->v, f->v, g->v);
}
230
// fe_mul_ttt: tight output, tight inputs.
static void fe_mul_ttt(fe *h, const fe *f, const fe *g) {
  fe_mul_impl(h->v, f->v, g->v);
}
234
// fe_mul_tlt: tight output, loose and tight inputs.
static void fe_mul_tlt(fe *h, const fe_loose *f, const fe *g) {
  fe_mul_impl(h->v, f->v, g->v);
}
238
// fe_mul_ttl: tight output, tight and loose inputs.
static void fe_mul_ttl(fe *h, const fe *f, const fe_loose *g) {
  fe_mul_impl(h->v, f->v, g->v);
}
242
// fe_mul_tll: tight output, loose inputs.
static void fe_mul_tll(fe *h, const fe_loose *f, const fe_loose *g) {
  fe_mul_impl(h->v, f->v, g->v);
}
246
// fe_sq_tl squares the loosely-bounded |f|, writing a tightly-reduced result
// to |h|.
static void fe_sq_tl(fe *h, const fe_loose *f) {
  assert_fe_loose(f->v);
  fiat_25519_carry_square(h->v, f->v);
  assert_fe(h->v);
}
252
// fe_sq_tt squares the tightly-bounded |f| into |h|. (Tight bounds satisfy
// the loose bounds, so the loose assertion suffices here.)
static void fe_sq_tt(fe *h, const fe *f) {
  assert_fe_loose(f->v);
  fiat_25519_carry_square(h->v, f->v);
  assert_fe(h->v);
}
258
259 // Replace (f,g) with (g,f) if b == 1;
260 // replace (f,g) with (f,g) if b == 0.
261 //
262 // Preconditions: b in {0,1}.
static void fe_cswap(fe *f, fe *g, fe_limb_t b) {
  // Constant-time conditional swap: |b| is turned into an all-zeros or
  // all-ones mask, which selects whether the XOR-difference of each limb is
  // applied. No branches depend on |b|.
  b = 0-b;
  for (unsigned i = 0; i < FE_NUM_LIMBS; i++) {
    fe_limb_t x = f->v[i] ^ g->v[i];
    x &= b;
    f->v[i] ^= x;
    g->v[i] ^= x;
  }
}
272
// fe_mul121666 multiplies |f| by the curve constant 121666 ((A+2)/4 in the
// X25519 ladder), writing a tightly-reduced result to |h|.
static void fe_mul121666(fe *h, const fe_loose *f) {
  assert_fe_loose(f->v);
  fiat_25519_carry_scmul_121666(h->v, f->v);
  assert_fe(h->v);
}
278
279 // h = -f
static void fe_neg(fe_loose *h, const fe *f) {
  // The negation of a tightly-reduced element is only loosely reduced.
  assert_fe(f->v);
  fiat_25519_opp(h->v, f->v);
  assert_fe_loose(h->v);
}
285
286 // Replace (f,g) with (g,g) if b == 1;
287 // replace (f,g) with (f,g) if b == 0.
288 //
289 // Preconditions: b in {0,1}.
static void fe_cmov(fe_loose *f, const fe_loose *g, fe_limb_t b) {
  // Silence an unused function warning. |fiat_25519_selectznz| isn't quite the
  // calling convention the rest of this code wants, so implement it by hand.
  //
  // TODO(davidben): Switch to fiat's calling convention, or ask fiat to emit a
  // different one.
  (void)fiat_25519_selectznz;

  // Constant-time conditional move: expand |b| into an all-zeros or all-ones
  // mask and XOR-select each limb. No branches depend on |b|.
  b = 0-b;
  for (unsigned i = 0; i < FE_NUM_LIMBS; i++) {
    fe_limb_t x = f->v[i] ^ g->v[i];
    x &= b;
    f->v[i] ^= x;
  }
}
305
306 // h = f
static void fe_copy(fe *h, const fe *f) {
  // memmove, not memcpy: callers may pass aliasing pointers.
  OPENSSL_memmove(h, f, sizeof(fe));
}
310
// fe_copy_lt copies a tightly-reduced |f| into the loose representation |h|.
// (Tight bounds always satisfy the loose bounds, so a raw copy is valid.)
static void fe_copy_lt(fe_loose *h, const fe *f) {
  static_assert(sizeof(fe_loose) == sizeof(fe), "fe and fe_loose mismatch");
  OPENSSL_memmove(h, f, sizeof(fe));
}
315
// fe_loose_invert computes the multiplicative inverse of |z| via a fixed
// square-and-multiply addition chain (taken from the ref10 implementation;
// see the file header). Constant-time: the sequence of operations does not
// depend on the value of |z|.
static void fe_loose_invert(fe *out, const fe_loose *z) {
  fe t0;
  fe t1;
  fe t2;
  fe t3;
  int i;

  fe_sq_tl(&t0, z);
  fe_sq_tt(&t1, &t0);
  for (i = 1; i < 2; ++i) {
    fe_sq_tt(&t1, &t1);
  }
  fe_mul_tlt(&t1, z, &t1);
  fe_mul_ttt(&t0, &t0, &t1);
  fe_sq_tt(&t2, &t0);
  fe_mul_ttt(&t1, &t1, &t2);
  fe_sq_tt(&t2, &t1);
  for (i = 1; i < 5; ++i) {
    fe_sq_tt(&t2, &t2);
  }
  fe_mul_ttt(&t1, &t2, &t1);
  fe_sq_tt(&t2, &t1);
  for (i = 1; i < 10; ++i) {
    fe_sq_tt(&t2, &t2);
  }
  fe_mul_ttt(&t2, &t2, &t1);
  fe_sq_tt(&t3, &t2);
  for (i = 1; i < 20; ++i) {
    fe_sq_tt(&t3, &t3);
  }
  fe_mul_ttt(&t2, &t3, &t2);
  fe_sq_tt(&t2, &t2);
  for (i = 1; i < 10; ++i) {
    fe_sq_tt(&t2, &t2);
  }
  fe_mul_ttt(&t1, &t2, &t1);
  fe_sq_tt(&t2, &t1);
  for (i = 1; i < 50; ++i) {
    fe_sq_tt(&t2, &t2);
  }
  fe_mul_ttt(&t2, &t2, &t1);
  fe_sq_tt(&t3, &t2);
  for (i = 1; i < 100; ++i) {
    fe_sq_tt(&t3, &t3);
  }
  fe_mul_ttt(&t2, &t3, &t2);
  fe_sq_tt(&t2, &t2);
  for (i = 1; i < 50; ++i) {
    fe_sq_tt(&t2, &t2);
  }
  fe_mul_ttt(&t1, &t2, &t1);
  fe_sq_tt(&t1, &t1);
  for (i = 1; i < 5; ++i) {
    fe_sq_tt(&t1, &t1);
  }
  fe_mul_ttt(out, &t1, &t0);
}
373
// fe_invert computes the inverse of the tightly-reduced |z| by widening it to
// the loose representation and delegating to |fe_loose_invert|.
static void fe_invert(fe *out, const fe *z) {
  fe_loose l;
  fe_copy_lt(&l, z);
  fe_loose_invert(out, &l);
}
379
380 // return 0 if f == 0
381 // return 1 if f != 0
static int fe_isnonzero(const fe_loose *f) {
  // Fully reduce and serialize to the canonical 32-byte encoding, then
  // compare against zero in constant time.
  fe tight;
  fe_carry(&tight, f);
  uint8_t s[32];
  fe_tobytes(s, &tight);

  static const uint8_t zero[32] = {0};
  return CRYPTO_memcmp(s, zero, sizeof(zero)) != 0;
}
391
392 // return 1 if f is in {1,3,5,...,q-2}
393 // return 0 if f is in {0,2,4,...,q-1}
static int fe_isnegative(const fe *f) {
  // "Negative" means the low bit of the canonical encoding is set, per the
  // Ed25519 sign convention.
  uint8_t s[32];
  fe_tobytes(s, f);
  return s[0] & 1;
}
399
// fe_sq2_tt computes h = 2 * f^2 with a tightly-reduced result.
static void fe_sq2_tt(fe *h, const fe *f) {
  // h = f^2
  fe_sq_tt(h, f);

  // h = h + h
  fe_loose tmp;
  fe_add(&tmp, h, h);
  fe_carry(h, &tmp);
}
409
// fe_pow22523 raises |z| to the power (q-5)/8 using a fixed addition chain
// (taken from the ref10 implementation; see the file header). This exponent
// is used when computing square roots during point decompression.
static void fe_pow22523(fe *out, const fe *z) {
  fe t0;
  fe t1;
  fe t2;
  int i;

  fe_sq_tt(&t0, z);
  fe_sq_tt(&t1, &t0);
  for (i = 1; i < 2; ++i) {
    fe_sq_tt(&t1, &t1);
  }
  fe_mul_ttt(&t1, z, &t1);
  fe_mul_ttt(&t0, &t0, &t1);
  fe_sq_tt(&t0, &t0);
  fe_mul_ttt(&t0, &t1, &t0);
  fe_sq_tt(&t1, &t0);
  for (i = 1; i < 5; ++i) {
    fe_sq_tt(&t1, &t1);
  }
  fe_mul_ttt(&t0, &t1, &t0);
  fe_sq_tt(&t1, &t0);
  for (i = 1; i < 10; ++i) {
    fe_sq_tt(&t1, &t1);
  }
  fe_mul_ttt(&t1, &t1, &t0);
  fe_sq_tt(&t2, &t1);
  for (i = 1; i < 20; ++i) {
    fe_sq_tt(&t2, &t2);
  }
  fe_mul_ttt(&t1, &t2, &t1);
  fe_sq_tt(&t1, &t1);
  for (i = 1; i < 10; ++i) {
    fe_sq_tt(&t1, &t1);
  }
  fe_mul_ttt(&t0, &t1, &t0);
  fe_sq_tt(&t1, &t0);
  for (i = 1; i < 50; ++i) {
    fe_sq_tt(&t1, &t1);
  }
  fe_mul_ttt(&t1, &t1, &t0);
  fe_sq_tt(&t2, &t1);
  for (i = 1; i < 100; ++i) {
    fe_sq_tt(&t2, &t2);
  }
  fe_mul_ttt(&t1, &t2, &t1);
  fe_sq_tt(&t1, &t1);
  for (i = 1; i < 50; ++i) {
    fe_sq_tt(&t1, &t1);
  }
  fe_mul_ttt(&t0, &t1, &t0);
  fe_sq_tt(&t0, &t0);
  for (i = 1; i < 2; ++i) {
    fe_sq_tt(&t0, &t0);
  }
  fe_mul_ttt(out, &t0, z);
}
466
467
468 // Group operations.
469
// x25519_ge_tobytes encodes the projective point |h| as 32 bytes: the affine
// y-coordinate (X/Z, Y/Z recovered by inverting Z) with the sign of x stored
// in the top bit, per the Ed25519 encoding.
void x25519_ge_tobytes(uint8_t s[32], const ge_p2 *h) {
  fe recip;
  fe x;
  fe y;

  fe_invert(&recip, &h->Z);
  fe_mul_ttt(&x, &h->X, &recip);
  fe_mul_ttt(&y, &h->Y, &recip);
  fe_tobytes(s, &y);
  s[31] ^= fe_isnegative(&x) << 7;
}
481
// ge_p3_tobytes encodes the extended point |h| as 32 bytes: the affine
// y-coordinate with the sign of x in the top bit. (Same as |x25519_ge_tobytes|
// but for the ge_p3 representation; the T coordinate is unused here.)
static void ge_p3_tobytes(uint8_t s[32], const ge_p3 *h) {
  fe recip;
  fe x;
  fe y;

  fe_invert(&recip, &h->Z);
  fe_mul_ttt(&x, &h->X, &recip);
  fe_mul_ttt(&y, &h->Y, &recip);
  fe_tobytes(s, &y);
  s[31] ^= fe_isnegative(&x) << 7;
}
493
// x25519_ge_frombytes_vartime decompresses the 32-byte encoding |s| into the
// point |h|, recovering x from y via the curve equation. Returns 1 on success
// and 0 if |s| does not encode a curve point. Variable-time: only use with
// public inputs.
int x25519_ge_frombytes_vartime(ge_p3 *h, const uint8_t s[32]) {
  fe u;
  fe_loose v;
  fe w;
  fe vxx;
  fe_loose check;

  fe_frombytes(&h->Y, s);
  fe_1(&h->Z);
  fe_sq_tt(&w, &h->Y);
  fe_mul_ttt(&vxx, &w, &d);
  fe_sub(&v, &w, &h->Z);  // u = y^2-1
  fe_carry(&u, &v);
  fe_add(&v, &vxx, &h->Z);  // v = dy^2+1

  // Candidate square root: x = (u/v)^((q+3)/8), computed as u * (uv)^((q-5)/8).
  fe_mul_ttl(&w, &u, &v);  // w = u*v
  fe_pow22523(&h->X, &w);  // x = w^((q-5)/8)
  fe_mul_ttt(&h->X, &h->X, &u);  // x = u*w^((q-5)/8)

  // Check v*x^2 == u; if instead v*x^2 == -u, multiply x by sqrt(-1).
  // Otherwise |s| is not on the curve.
  fe_sq_tt(&vxx, &h->X);
  fe_mul_ttl(&vxx, &vxx, &v);
  fe_sub(&check, &vxx, &u);
  if (fe_isnonzero(&check)) {
    fe_add(&check, &vxx, &u);
    if (fe_isnonzero(&check)) {
      return 0;
    }
    fe_mul_ttt(&h->X, &h->X, &sqrtm1);
  }

  // Select the root whose sign matches the encoded sign bit.
  if (fe_isnegative(&h->X) != (s[31] >> 7)) {
    fe_loose t;
    fe_neg(&t, &h->X);
    fe_carry(&h->X, &t);
  }

  fe_mul_ttt(&h->T, &h->X, &h->Y);
  return 1;
}
533
// ge_p2_0 sets |h| to the neutral element (0 : 1 : 1).
static void ge_p2_0(ge_p2 *h) {
  fe_0(&h->X);
  fe_1(&h->Y);
  fe_1(&h->Z);
}
539
// ge_p3_0 sets |h| to the neutral element (0 : 1 : 1 : 0).
static void ge_p3_0(ge_p3 *h) {
  fe_0(&h->X);
  fe_1(&h->Y);
  fe_1(&h->Z);
  fe_0(&h->T);
}
546
// ge_cached_0 sets |h| to the neutral element in cached form
// (Y+X = 1, Y-X = 1, Z = 1, 2dT = 0).
static void ge_cached_0(ge_cached *h) {
  fe_loose_1(&h->YplusX);
  fe_loose_1(&h->YminusX);
  fe_loose_1(&h->Z);
  fe_loose_0(&h->T2d);
}
553
// ge_precomp_0 sets |h| to the neutral element in precomputed form
// (y+x = 1, y-x = 1, 2dxy = 0).
static void ge_precomp_0(ge_precomp *h) {
  fe_loose_1(&h->yplusx);
  fe_loose_1(&h->yminusx);
  fe_loose_0(&h->xy2d);
}
559
560 // r = p
static void ge_p3_to_p2(ge_p2 *r, const ge_p3 *p) {
  // Converting extended to projective coordinates just drops T.
  fe_copy(&r->X, &p->X);
  fe_copy(&r->Y, &p->Y);
  fe_copy(&r->Z, &p->Z);
}
566
567 // r = p
void x25519_ge_p3_to_cached(ge_cached *r, const ge_p3 *p) {
  // Precompute the combinations used by |x25519_ge_add|/|x25519_ge_sub|:
  // Y+X, Y-X, Z, and 2d*T.
  fe_add(&r->YplusX, &p->Y, &p->X);
  fe_sub(&r->YminusX, &p->Y, &p->X);
  fe_copy_lt(&r->Z, &p->Z);
  fe_mul_ltt(&r->T2d, &p->T, &d2);
}
574
575 // r = p
void x25519_ge_p1p1_to_p2(ge_p2 *r, const ge_p1p1 *p) {
  // Collapse the completed (p1p1) representation to projective coordinates.
  fe_mul_tll(&r->X, &p->X, &p->T);
  fe_mul_tll(&r->Y, &p->Y, &p->Z);
  fe_mul_tll(&r->Z, &p->Z, &p->T);
}
581
582 // r = p
void x25519_ge_p1p1_to_p3(ge_p3 *r, const ge_p1p1 *p) {
  // Collapse the completed (p1p1) representation to extended coordinates,
  // additionally computing T = X*Y (so that X*Y = Z*T holds).
  fe_mul_tll(&r->X, &p->X, &p->T);
  fe_mul_tll(&r->Y, &p->Y, &p->Z);
  fe_mul_tll(&r->Z, &p->Z, &p->T);
  fe_mul_tll(&r->T, &p->X, &p->Y);
}
589
590 // r = p
// ge_p1p1_to_cached converts a completed point to cached form, going through
// the extended representation.
static void ge_p1p1_to_cached(ge_cached *r, const ge_p1p1 *p) {
  ge_p3 t;
  x25519_ge_p1p1_to_p3(&t, p);
  x25519_ge_p3_to_cached(r, &t);
}
596
597 // r = 2 * p
static void ge_p2_dbl(ge_p1p1 *r, const ge_p2 *p) {
  // Point doubling from projective coordinates into the completed
  // representation (taken from ref10; see the file header). |r|'s fields are
  // reused as scratch space before receiving their final values.
  fe trX, trZ, trT;
  fe t0;

  fe_sq_tt(&trX, &p->X);
  fe_sq_tt(&trZ, &p->Y);
  fe_sq2_tt(&trT, &p->Z);
  fe_add(&r->Y, &p->X, &p->Y);
  fe_sq_tl(&t0, &r->Y);

  fe_add(&r->Y, &trZ, &trX);
  fe_sub(&r->Z, &trZ, &trX);
  fe_carry(&trZ, &r->Y);
  fe_sub(&r->X, &t0, &trZ);
  fe_carry(&trZ, &r->Z);
  fe_sub(&r->T, &trT, &trZ);
}
615
616 // r = 2 * p
static void ge_p3_dbl(ge_p1p1 *r, const ge_p3 *p) {
  // Doubling only needs X, Y, Z, so drop to projective and reuse ge_p2_dbl.
  ge_p2 q;
  ge_p3_to_p2(&q, p);
  ge_p2_dbl(r, &q);
}
622
623 // r = p + q
static void ge_madd(ge_p1p1 *r, const ge_p3 *p, const ge_precomp *q) {
  // Mixed addition: |q| is a precomputed point stored as (y+x, y-x, 2dxy),
  // saving the Z multiplication of a full addition. |r|'s fields double as
  // scratch space before receiving their final values.
  fe trY, trZ, trT;

  fe_add(&r->X, &p->Y, &p->X);
  fe_sub(&r->Y, &p->Y, &p->X);
  fe_mul_tll(&trZ, &r->X, &q->yplusx);
  fe_mul_tll(&trY, &r->Y, &q->yminusx);
  fe_mul_tlt(&trT, &q->xy2d, &p->T);
  fe_add(&r->T, &p->Z, &p->Z);
  fe_sub(&r->X, &trZ, &trY);
  fe_add(&r->Y, &trZ, &trY);
  fe_carry(&trZ, &r->T);
  fe_add(&r->Z, &trZ, &trT);
  fe_sub(&r->T, &trZ, &trT);
}
639
640 // r = p - q
static void ge_msub(ge_p1p1 *r, const ge_p3 *p, const ge_precomp *q) {
  // Mixed subtraction: identical to |ge_madd| but with yplusx/yminusx
  // exchanged and the final Z/T add/sub swapped, which negates |q|.
  fe trY, trZ, trT;

  fe_add(&r->X, &p->Y, &p->X);
  fe_sub(&r->Y, &p->Y, &p->X);
  fe_mul_tll(&trZ, &r->X, &q->yminusx);
  fe_mul_tll(&trY, &r->Y, &q->yplusx);
  fe_mul_tlt(&trT, &q->xy2d, &p->T);
  fe_add(&r->T, &p->Z, &p->Z);
  fe_sub(&r->X, &trZ, &trY);
  fe_add(&r->Y, &trZ, &trY);
  fe_carry(&trZ, &r->T);
  fe_sub(&r->Z, &trZ, &trT);
  fe_add(&r->T, &trZ, &trT);
}
656
657 // r = p + q
void x25519_ge_add(ge_p1p1 *r, const ge_p3 *p, const ge_cached *q) {
  // Full addition against a cached point (Y+X, Y-X, Z, 2dT). |r|'s fields
  // double as scratch space before receiving their final values.
  fe trX, trY, trZ, trT;

  fe_add(&r->X, &p->Y, &p->X);
  fe_sub(&r->Y, &p->Y, &p->X);
  fe_mul_tll(&trZ, &r->X, &q->YplusX);
  fe_mul_tll(&trY, &r->Y, &q->YminusX);
  fe_mul_tlt(&trT, &q->T2d, &p->T);
  fe_mul_ttl(&trX, &p->Z, &q->Z);
  fe_add(&r->T, &trX, &trX);
  fe_sub(&r->X, &trZ, &trY);
  fe_add(&r->Y, &trZ, &trY);
  fe_carry(&trZ, &r->T);
  fe_add(&r->Z, &trZ, &trT);
  fe_sub(&r->T, &trZ, &trT);
}
674
675 // r = p - q
void x25519_ge_sub(ge_p1p1 *r, const ge_p3 *p, const ge_cached *q) {
  // Full subtraction: identical to |x25519_ge_add| but with YplusX/YminusX
  // exchanged and the final Z/T add/sub swapped, which negates |q|.
  fe trX, trY, trZ, trT;

  fe_add(&r->X, &p->Y, &p->X);
  fe_sub(&r->Y, &p->Y, &p->X);
  fe_mul_tll(&trZ, &r->X, &q->YminusX);
  fe_mul_tll(&trY, &r->Y, &q->YplusX);
  fe_mul_tlt(&trT, &q->T2d, &p->T);
  fe_mul_ttl(&trX, &p->Z, &q->Z);
  fe_add(&r->T, &trX, &trX);
  fe_sub(&r->X, &trZ, &trY);
  fe_add(&r->Y, &trZ, &trY);
  fe_carry(&trZ, &r->T);
  fe_sub(&r->Z, &trZ, &trT);
  fe_add(&r->T, &trZ, &trT);
}
692
// cmov conditionally copies |u| into |t| when b == 1, in constant time.
// Preconditions: b in {0,1}.
static void cmov(ge_precomp *t, const ge_precomp *u, uint8_t b) {
  fe_cmov(&t->yplusx, &u->yplusx, b);
  fe_cmov(&t->yminusx, &u->yminusx, b);
  fe_cmov(&t->xy2d, &u->xy2d, b);
}
698
// x25519_ge_scalarmult_small_precomp computes h = a * B using a small (15
// entry) precomputed table of affine points, trading speed for table size.
// The table lookup is done in constant time via |cmov|.
void x25519_ge_scalarmult_small_precomp(
    ge_p3 *h, const uint8_t a[32], const uint8_t precomp_table[15 * 2 * 32]) {
  // precomp_table is first expanded into matching |ge_precomp|
  // elements.
  ge_precomp multiples[15];

  unsigned i;
  for (i = 0; i < 15; i++) {
    // The precomputed table is assumed to already clear the top bit, so
    // |fe_frombytes_strict| may be used directly.
    const uint8_t *bytes = &precomp_table[i*(2 * 32)];
    fe x, y;
    fe_frombytes_strict(&x, bytes);
    fe_frombytes_strict(&y, bytes + 32);

    // Convert the affine (x, y) pair into (y+x, y-x, 2dxy) form.
    ge_precomp *out = &multiples[i];
    fe_add(&out->yplusx, &y, &x);
    fe_sub(&out->yminusx, &y, &x);
    fe_mul_ltt(&out->xy2d, &x, &y);
    fe_mul_llt(&out->xy2d, &out->xy2d, &d2);
  }

  // See the comment above |k25519SmallPrecomp| about the structure of the
  // precomputed elements. This loop does 64 additions and 64 doublings to
  // calculate the result.
  ge_p3_0(h);

  // i is unsigned, so the loop ends when i wraps around past zero.
  for (i = 63; i < 64; i--) {
    unsigned j;
    signed char index = 0;

    // Gather one bit from each of the four 64-bit quarters of |a|.
    for (j = 0; j < 4; j++) {
      const uint8_t bit = 1 & (a[(8 * j) + (i / 8)] >> (i & 7));
      index |= (bit << j);
    }

    // Constant-time table lookup of multiples[index - 1] (identity if 0).
    ge_precomp e;
    ge_precomp_0(&e);

    for (j = 1; j < 16; j++) {
      cmov(&e, &multiples[j-1], 1&constant_time_eq_w(index, j));
    }

    // h = 2h + e.
    ge_cached cached;
    ge_p1p1 r;
    x25519_ge_p3_to_cached(&cached, h);
    x25519_ge_add(&r, h, &cached);
    x25519_ge_p1p1_to_p3(h, &r);

    ge_madd(&r, h, &e);
    x25519_ge_p1p1_to_p3(h, &r);
  }
}
752
753 #if defined(OPENSSL_SMALL)
754
// In the OPENSSL_SMALL build, base-point multiplication uses the small
// precomputed table rather than the large one.
void x25519_ge_scalarmult_base(ge_p3 *h, const uint8_t a[32]) {
  x25519_ge_scalarmult_small_precomp(h, a, k25519SmallPrecomp);
}
758
759 #else
760
// table_select sets |t| to the entry of |k25519Precomp[pos]| corresponding to
// the signed digit |b| in [-8, 8], in constant time: the scan touches every
// table entry and negation is applied via a masked conditional move.
static void table_select(ge_precomp *t, const int pos, const signed char b) {
  // bnegative is all-ones if b is negative, zero otherwise; babs = |b|.
  uint8_t bnegative = constant_time_msb_w(b);
  uint8_t babs = b - ((bnegative & b) << 1);

  // Start from the encoding of the identity (y+x = 1, y-x = 1, xy2d = 0),
  // then XOR in the matching table entry, if any.
  uint8_t t_bytes[3][32] = {
      {constant_time_is_zero_w(b) & 1}, {constant_time_is_zero_w(b) & 1}, {0}};
#if defined(__clang__) // materialize for vectorization, 6% speedup
  __asm__("" : "+m" (t_bytes) : /*no inputs*/);
#endif
  static_assert(sizeof(t_bytes) == sizeof(k25519Precomp[pos][0]), "");
  for (int i = 0; i < 8; i++) {
    constant_time_conditional_memxor(t_bytes, k25519Precomp[pos][i],
                                     sizeof(t_bytes),
                                     constant_time_eq_w(babs, 1 + i));
  }

  fe yplusx, yminusx, xy2d;
  fe_frombytes_strict(&yplusx, t_bytes[0]);
  fe_frombytes_strict(&yminusx, t_bytes[1]);
  fe_frombytes_strict(&xy2d, t_bytes[2]);

  fe_copy_lt(&t->yplusx, &yplusx);
  fe_copy_lt(&t->yminusx, &yminusx);
  fe_copy_lt(&t->xy2d, &xy2d);

  // If b was negative, replace t with -t: swap y+x and y-x, negate xy2d.
  ge_precomp minust;
  fe_copy_lt(&minust.yplusx, &yminusx);
  fe_copy_lt(&minust.yminusx, &yplusx);
  fe_neg(&minust.xy2d, &xy2d);
  cmov(t, &minust, bnegative>>7);
}
792
793 // h = a * B
794 // where a = a[0]+256*a[1]+...+256^31 a[31]
795 // B is the Ed25519 base point (x,4/5) with x positive.
796 //
797 // Preconditions:
798 // a[31] <= 127
void x25519_ge_scalarmult_base(ge_p3 *h, const uint8_t a[32]) {
#if defined(BORINGSSL_FE25519_ADX)
  // Use the assembly implementation when BMI1/BMI2/ADX are all available.
  if (CRYPTO_is_BMI1_capable() && CRYPTO_is_BMI2_capable() &&
      CRYPTO_is_ADX_capable()) {
    uint8_t t[4][32];
    x25519_ge_scalarmult_base_adx(t, a);
    fiat_25519_from_bytes(h->X.v, t[0]);
    fiat_25519_from_bytes(h->Y.v, t[1]);
    fiat_25519_from_bytes(h->Z.v, t[2]);
    fiat_25519_from_bytes(h->T.v, t[3]);
    return;
  }
#endif
  signed char e[64];
  signed char carry;
  ge_p1p1 r;
  ge_p2 s;
  ge_precomp t;
  int i;

  // Split the scalar into 64 radix-16 digits.
  for (i = 0; i < 32; ++i) {
    e[2 * i + 0] = (a[i] >> 0) & 15;
    e[2 * i + 1] = (a[i] >> 4) & 15;
  }
  // each e[i] is between 0 and 15
  // e[63] is between 0 and 7

  // Recode to signed digits in [-8, 8] by propagating carries upwards.
  carry = 0;
  for (i = 0; i < 63; ++i) {
    e[i] += carry;
    carry = e[i] + 8;
    carry >>= 4;
    e[i] -= carry << 4;
  }
  e[63] += carry;
  // each e[i] is between -8 and 8

  // Process the odd digits with precomputed odd-position tables...
  ge_p3_0(h);
  for (i = 1; i < 64; i += 2) {
    table_select(&t, i / 2, e[i]);
    ge_madd(&r, h, &t);
    x25519_ge_p1p1_to_p3(h, &r);
  }

  // ...then shift by 16 (four doublings)...
  ge_p3_dbl(&r, h);
  x25519_ge_p1p1_to_p2(&s, &r);
  ge_p2_dbl(&r, &s);
  x25519_ge_p1p1_to_p2(&s, &r);
  ge_p2_dbl(&r, &s);
  x25519_ge_p1p1_to_p2(&s, &r);
  ge_p2_dbl(&r, &s);
  x25519_ge_p1p1_to_p3(h, &r);

  // ...and add in the even digits.
  for (i = 0; i < 64; i += 2) {
    table_select(&t, i / 2, e[i]);
    ge_madd(&r, h, &t);
    x25519_ge_p1p1_to_p3(h, &r);
  }
}
858
859 #endif
860
// cmov_cached conditionally copies |u| into |t| when b == 1, in constant
// time. Preconditions: b in {0,1}.
static void cmov_cached(ge_cached *t, ge_cached *u, uint8_t b) {
  fe_cmov(&t->YplusX, &u->YplusX, b);
  fe_cmov(&t->YminusX, &u->YminusX, b);
  fe_cmov(&t->Z, &u->Z, b);
  fe_cmov(&t->T2d, &u->T2d, b);
}
867
868 // r = scalar * A.
869 // where a = a[0]+256*a[1]+...+256^31 a[31].
// x25519_ge_scalarmult computes r = scalar * A using a fixed-window method
// with window width 4 and constant-time table lookups.
void x25519_ge_scalarmult(ge_p2 *r, const uint8_t *scalar, const ge_p3 *A) {
  ge_p2 Ai_p2[8];
  ge_cached Ai[16];
  ge_p1p1 t;

  // Build the table Ai[j] = j*A for j in 0..15. Even multiples come from
  // doubling, odd multiples from adding A; Ai_p2 holds the projective forms
  // needed for the doublings.
  ge_cached_0(&Ai[0]);
  x25519_ge_p3_to_cached(&Ai[1], A);
  ge_p3_to_p2(&Ai_p2[1], A);

  unsigned i;
  for (i = 2; i < 16; i += 2) {
    ge_p2_dbl(&t, &Ai_p2[i / 2]);
    ge_p1p1_to_cached(&Ai[i], &t);
    if (i < 8) {
      x25519_ge_p1p1_to_p2(&Ai_p2[i], &t);
    }
    x25519_ge_add(&t, A, &Ai[i]);
    ge_p1p1_to_cached(&Ai[i + 1], &t);
    if (i < 7) {
      x25519_ge_p1p1_to_p2(&Ai_p2[i + 1], &t);
    }
  }

  ge_p2_0(r);
  ge_p3 u;

  // Consume the scalar four bits at a time, most significant nibble first:
  // four doublings then one constant-time table addition per window.
  for (i = 0; i < 256; i += 4) {
    ge_p2_dbl(&t, r);
    x25519_ge_p1p1_to_p2(r, &t);
    ge_p2_dbl(&t, r);
    x25519_ge_p1p1_to_p2(r, &t);
    ge_p2_dbl(&t, r);
    x25519_ge_p1p1_to_p2(r, &t);
    ge_p2_dbl(&t, r);
    x25519_ge_p1p1_to_p3(&u, &t);

    uint8_t index = scalar[31 - i/8];
    index >>= 4 - (i & 4);
    index &= 0xf;

    // Constant-time lookup of Ai[index]: scan the whole table.
    unsigned j;
    ge_cached selected;
    ge_cached_0(&selected);
    for (j = 0; j < 16; j++) {
      cmov_cached(&selected, &Ai[j], 1&constant_time_eq_w(index, j));
    }

    x25519_ge_add(&t, &u, &selected);
    x25519_ge_p1p1_to_p2(r, &t);
  }
}
921
// slide recodes the 256-bit little-endian value |a| into |r|, a 256-entry
// sliding-window form whose non-zero digits are odd and lie in [-15, 15].
// Variable-time: only used on public values.
static void slide(signed char *r, const uint8_t *a) {
  // Start from the plain binary expansion: r[i] = bit i of |a|.
  for (int pos = 0; pos < 256; ++pos) {
    r[pos] = 1 & (a[pos >> 3] >> (pos & 7));
  }

  // Greedily merge higher bits into the digit at |pos| while it stays within
  // [-15, 15], borrowing upward when subtraction is used.
  for (int pos = 0; pos < 256; ++pos) {
    if (!r[pos]) {
      continue;
    }
    for (int off = 1; off <= 6 && pos + off < 256; ++off) {
      if (!r[pos + off]) {
        continue;
      }
      int shifted = r[pos + off] << off;
      if (r[pos] + shifted <= 15) {
        r[pos] += shifted;
        r[pos + off] = 0;
      } else if (r[pos] - shifted >= -15) {
        r[pos] -= shifted;
        // Propagate a carry upward to the next zero digit.
        int k = pos + off;
        while (k < 256 && r[k]) {
          r[k] = 0;
          ++k;
        }
        if (k < 256) {
          r[k] = 1;
        }
      } else {
        break;
      }
    }
  }
}
955
956 // r = a * A + b * B
957 // where a = a[0]+256*a[1]+...+256^31 a[31].
958 // and b = b[0]+256*b[1]+...+256^31 b[31].
959 // B is the Ed25519 base point (x,4/5) with x positive.
static void ge_double_scalarmult_vartime(ge_p2 *r, const uint8_t *a,
                                         const ge_p3 *A, const uint8_t *b) {
  // Variable-time double-base scalar multiplication using sliding windows:
  // odd multiples of A are computed on the fly; odd multiples of the base
  // point B come from the static |Bi| table. Only safe for public inputs
  // (e.g. signature verification).
  signed char aslide[256];
  signed char bslide[256];
  ge_cached Ai[8];  // A,3A,5A,7A,9A,11A,13A,15A
  ge_p1p1 t;
  ge_p3 u;
  ge_p3 A2;
  int i;

  slide(aslide, a);
  slide(bslide, b);

  // Fill Ai with the odd multiples A, 3A, ..., 15A by repeatedly adding 2A.
  x25519_ge_p3_to_cached(&Ai[0], A);
  ge_p3_dbl(&t, A);
  x25519_ge_p1p1_to_p3(&A2, &t);
  x25519_ge_add(&t, &A2, &Ai[0]);
  x25519_ge_p1p1_to_p3(&u, &t);
  x25519_ge_p3_to_cached(&Ai[1], &u);
  x25519_ge_add(&t, &A2, &Ai[1]);
  x25519_ge_p1p1_to_p3(&u, &t);
  x25519_ge_p3_to_cached(&Ai[2], &u);
  x25519_ge_add(&t, &A2, &Ai[2]);
  x25519_ge_p1p1_to_p3(&u, &t);
  x25519_ge_p3_to_cached(&Ai[3], &u);
  x25519_ge_add(&t, &A2, &Ai[3]);
  x25519_ge_p1p1_to_p3(&u, &t);
  x25519_ge_p3_to_cached(&Ai[4], &u);
  x25519_ge_add(&t, &A2, &Ai[4]);
  x25519_ge_p1p1_to_p3(&u, &t);
  x25519_ge_p3_to_cached(&Ai[5], &u);
  x25519_ge_add(&t, &A2, &Ai[5]);
  x25519_ge_p1p1_to_p3(&u, &t);
  x25519_ge_p3_to_cached(&Ai[6], &u);
  x25519_ge_add(&t, &A2, &Ai[6]);
  x25519_ge_p1p1_to_p3(&u, &t);
  x25519_ge_p3_to_cached(&Ai[7], &u);

  ge_p2_0(r);

  // Skip leading zero digits of both windowed scalars.
  for (i = 255; i >= 0; --i) {
    if (aslide[i] || bslide[i]) {
      break;
    }
  }

  // Double-and-add over the remaining digits. A non-zero digit d selects the
  // precomputed multiple |d|A (or |d|B) for addition or subtraction.
  for (; i >= 0; --i) {
    ge_p2_dbl(&t, r);

    if (aslide[i] > 0) {
      x25519_ge_p1p1_to_p3(&u, &t);
      x25519_ge_add(&t, &u, &Ai[aslide[i] / 2]);
    } else if (aslide[i] < 0) {
      x25519_ge_p1p1_to_p3(&u, &t);
      x25519_ge_sub(&t, &u, &Ai[(-aslide[i]) / 2]);
    }

    if (bslide[i] > 0) {
      x25519_ge_p1p1_to_p3(&u, &t);
      ge_madd(&t, &u, &Bi[bslide[i] / 2]);
    } else if (bslide[i] < 0) {
      x25519_ge_p1p1_to_p3(&u, &t);
      ge_msub(&t, &u, &Bi[(-bslide[i]) / 2]);
    }

    x25519_ge_p1p1_to_p2(r, &t);
  }
}
1028
1029 // int64_lshift21 returns |a << 21| but is defined when shifting bits into the
1030 // sign bit. This works around a language flaw in C.
static inline int64_t int64_lshift21(int64_t a) {
  // Shift in unsigned arithmetic, where moving bits into the top bit is
  // well-defined, then convert back to signed.
  uint64_t shifted = (uint64_t)a << 21;
  return (int64_t)shifted;
}
1034
// The set of scalars is \Z/l
// where l = 2^252 + 27742317777372353535851937790883648493.

// Input:
//   s[0]+256*s[1]+...+256^63*s[63] = s
//
// Output:
//   s[0]+256*s[1]+...+256^31*s[31] = s mod l
//   where l = 2^252 + 27742317777372353535851937790883648493.
//   Overwrites s in place.
//
// The 512-bit input is split into 24 limbs of 21 bits each (s23 keeps the
// remaining top bits). High limbs are then folded into lower ones using
// 2^252 ≡ -q (mod l), where q = l - 2^252 and q's signed 21-bit limbs are
// 666643, 470296, 654183, -997805, 136657 and -683901. Interleaved carry
// passes keep every limb small enough that no product overflows int64_t.
void x25519_sc_reduce(uint8_t s[64]) {
  // Unpack the 64 little-endian bytes into 21-bit limbs s0..s23.
  int64_t s0 = 2097151 & load_3(s);
  int64_t s1 = 2097151 & (load_4(s + 2) >> 5);
  int64_t s2 = 2097151 & (load_3(s + 5) >> 2);
  int64_t s3 = 2097151 & (load_4(s + 7) >> 7);
  int64_t s4 = 2097151 & (load_4(s + 10) >> 4);
  int64_t s5 = 2097151 & (load_3(s + 13) >> 1);
  int64_t s6 = 2097151 & (load_4(s + 15) >> 6);
  int64_t s7 = 2097151 & (load_3(s + 18) >> 3);
  int64_t s8 = 2097151 & load_3(s + 21);
  int64_t s9 = 2097151 & (load_4(s + 23) >> 5);
  int64_t s10 = 2097151 & (load_3(s + 26) >> 2);
  int64_t s11 = 2097151 & (load_4(s + 28) >> 7);
  int64_t s12 = 2097151 & (load_4(s + 31) >> 4);
  int64_t s13 = 2097151 & (load_3(s + 34) >> 1);
  int64_t s14 = 2097151 & (load_4(s + 36) >> 6);
  int64_t s15 = 2097151 & (load_3(s + 39) >> 3);
  int64_t s16 = 2097151 & load_3(s + 42);
  int64_t s17 = 2097151 & (load_4(s + 44) >> 5);
  int64_t s18 = 2097151 & (load_3(s + 47) >> 2);
  int64_t s19 = 2097151 & (load_4(s + 49) >> 7);
  int64_t s20 = 2097151 & (load_4(s + 52) >> 4);
  int64_t s21 = 2097151 & (load_3(s + 55) >> 1);
  int64_t s22 = 2097151 & (load_4(s + 57) >> 6);
  int64_t s23 = (load_4(s + 60) >> 3);
  int64_t carry0;
  int64_t carry1;
  int64_t carry2;
  int64_t carry3;
  int64_t carry4;
  int64_t carry5;
  int64_t carry6;
  int64_t carry7;
  int64_t carry8;
  int64_t carry9;
  int64_t carry10;
  int64_t carry11;
  int64_t carry12;
  int64_t carry13;
  int64_t carry14;
  int64_t carry15;
  int64_t carry16;

  // Eliminate limbs s23..s18: a limb at position 21*k (k >= 12) equals that
  // limb times -q placed at position 21*(k-12), using q's limbs above.
  s11 += s23 * 666643;
  s12 += s23 * 470296;
  s13 += s23 * 654183;
  s14 -= s23 * 997805;
  s15 += s23 * 136657;
  s16 -= s23 * 683901;
  s23 = 0;

  s10 += s22 * 666643;
  s11 += s22 * 470296;
  s12 += s22 * 654183;
  s13 -= s22 * 997805;
  s14 += s22 * 136657;
  s15 -= s22 * 683901;
  s22 = 0;

  s9 += s21 * 666643;
  s10 += s21 * 470296;
  s11 += s21 * 654183;
  s12 -= s21 * 997805;
  s13 += s21 * 136657;
  s14 -= s21 * 683901;
  s21 = 0;

  s8 += s20 * 666643;
  s9 += s20 * 470296;
  s10 += s20 * 654183;
  s11 -= s20 * 997805;
  s12 += s20 * 136657;
  s13 -= s20 * 683901;
  s20 = 0;

  s7 += s19 * 666643;
  s8 += s19 * 470296;
  s9 += s19 * 654183;
  s10 -= s19 * 997805;
  s11 += s19 * 136657;
  s12 -= s19 * 683901;
  s19 = 0;

  s6 += s18 * 666643;
  s7 += s18 * 470296;
  s8 += s18 * 654183;
  s9 -= s18 * 997805;
  s10 += s18 * 136657;
  s11 -= s18 * 683901;
  s18 = 0;

  // Partial carry passes (even limbs, then odd) to re-bound the limbs before
  // the next fold. The (1 << 20) bias rounds towards a centered remainder.
  carry6 = (s6 + (1 << 20)) >> 21;
  s7 += carry6;
  s6 -= int64_lshift21(carry6);
  carry8 = (s8 + (1 << 20)) >> 21;
  s9 += carry8;
  s8 -= int64_lshift21(carry8);
  carry10 = (s10 + (1 << 20)) >> 21;
  s11 += carry10;
  s10 -= int64_lshift21(carry10);
  carry12 = (s12 + (1 << 20)) >> 21;
  s13 += carry12;
  s12 -= int64_lshift21(carry12);
  carry14 = (s14 + (1 << 20)) >> 21;
  s15 += carry14;
  s14 -= int64_lshift21(carry14);
  carry16 = (s16 + (1 << 20)) >> 21;
  s17 += carry16;
  s16 -= int64_lshift21(carry16);

  carry7 = (s7 + (1 << 20)) >> 21;
  s8 += carry7;
  s7 -= int64_lshift21(carry7);
  carry9 = (s9 + (1 << 20)) >> 21;
  s10 += carry9;
  s9 -= int64_lshift21(carry9);
  carry11 = (s11 + (1 << 20)) >> 21;
  s12 += carry11;
  s11 -= int64_lshift21(carry11);
  carry13 = (s13 + (1 << 20)) >> 21;
  s14 += carry13;
  s13 -= int64_lshift21(carry13);
  carry15 = (s15 + (1 << 20)) >> 21;
  s16 += carry15;
  s15 -= int64_lshift21(carry15);

  // Eliminate limbs s17..s12 the same way.
  s5 += s17 * 666643;
  s6 += s17 * 470296;
  s7 += s17 * 654183;
  s8 -= s17 * 997805;
  s9 += s17 * 136657;
  s10 -= s17 * 683901;
  s17 = 0;

  s4 += s16 * 666643;
  s5 += s16 * 470296;
  s6 += s16 * 654183;
  s7 -= s16 * 997805;
  s8 += s16 * 136657;
  s9 -= s16 * 683901;
  s16 = 0;

  s3 += s15 * 666643;
  s4 += s15 * 470296;
  s5 += s15 * 654183;
  s6 -= s15 * 997805;
  s7 += s15 * 136657;
  s8 -= s15 * 683901;
  s15 = 0;

  s2 += s14 * 666643;
  s3 += s14 * 470296;
  s4 += s14 * 654183;
  s5 -= s14 * 997805;
  s6 += s14 * 136657;
  s7 -= s14 * 683901;
  s14 = 0;

  s1 += s13 * 666643;
  s2 += s13 * 470296;
  s3 += s13 * 654183;
  s4 -= s13 * 997805;
  s5 += s13 * 136657;
  s6 -= s13 * 683901;
  s13 = 0;

  s0 += s12 * 666643;
  s1 += s12 * 470296;
  s2 += s12 * 654183;
  s3 -= s12 * 997805;
  s4 += s12 * 136657;
  s5 -= s12 * 683901;
  s12 = 0;

  // Carry passes over the low limbs (even, then odd); overflow from s11
  // collects in s12.
  carry0 = (s0 + (1 << 20)) >> 21;
  s1 += carry0;
  s0 -= int64_lshift21(carry0);
  carry2 = (s2 + (1 << 20)) >> 21;
  s3 += carry2;
  s2 -= int64_lshift21(carry2);
  carry4 = (s4 + (1 << 20)) >> 21;
  s5 += carry4;
  s4 -= int64_lshift21(carry4);
  carry6 = (s6 + (1 << 20)) >> 21;
  s7 += carry6;
  s6 -= int64_lshift21(carry6);
  carry8 = (s8 + (1 << 20)) >> 21;
  s9 += carry8;
  s8 -= int64_lshift21(carry8);
  carry10 = (s10 + (1 << 20)) >> 21;
  s11 += carry10;
  s10 -= int64_lshift21(carry10);

  carry1 = (s1 + (1 << 20)) >> 21;
  s2 += carry1;
  s1 -= int64_lshift21(carry1);
  carry3 = (s3 + (1 << 20)) >> 21;
  s4 += carry3;
  s3 -= int64_lshift21(carry3);
  carry5 = (s5 + (1 << 20)) >> 21;
  s6 += carry5;
  s5 -= int64_lshift21(carry5);
  carry7 = (s7 + (1 << 20)) >> 21;
  s8 += carry7;
  s7 -= int64_lshift21(carry7);
  carry9 = (s9 + (1 << 20)) >> 21;
  s10 += carry9;
  s9 -= int64_lshift21(carry9);
  carry11 = (s11 + (1 << 20)) >> 21;
  s12 += carry11;
  s11 -= int64_lshift21(carry11);

  // Fold the stray s12 back into the low limbs.
  s0 += s12 * 666643;
  s1 += s12 * 470296;
  s2 += s12 * 654183;
  s3 -= s12 * 997805;
  s4 += s12 * 136657;
  s5 -= s12 * 683901;
  s12 = 0;

  // Full sequential carry chain; any remaining overflow ends up in s12.
  carry0 = s0 >> 21;
  s1 += carry0;
  s0 -= int64_lshift21(carry0);
  carry1 = s1 >> 21;
  s2 += carry1;
  s1 -= int64_lshift21(carry1);
  carry2 = s2 >> 21;
  s3 += carry2;
  s2 -= int64_lshift21(carry2);
  carry3 = s3 >> 21;
  s4 += carry3;
  s3 -= int64_lshift21(carry3);
  carry4 = s4 >> 21;
  s5 += carry4;
  s4 -= int64_lshift21(carry4);
  carry5 = s5 >> 21;
  s6 += carry5;
  s5 -= int64_lshift21(carry5);
  carry6 = s6 >> 21;
  s7 += carry6;
  s6 -= int64_lshift21(carry6);
  carry7 = s7 >> 21;
  s8 += carry7;
  s7 -= int64_lshift21(carry7);
  carry8 = s8 >> 21;
  s9 += carry8;
  s8 -= int64_lshift21(carry8);
  carry9 = s9 >> 21;
  s10 += carry9;
  s9 -= int64_lshift21(carry9);
  carry10 = s10 >> 21;
  s11 += carry10;
  s10 -= int64_lshift21(carry10);
  carry11 = s11 >> 21;
  s12 += carry11;
  s11 -= int64_lshift21(carry11);

  // One last fold of s12 and a final carry chain leave twelve fully reduced
  // non-negative 21-bit limbs.
  s0 += s12 * 666643;
  s1 += s12 * 470296;
  s2 += s12 * 654183;
  s3 -= s12 * 997805;
  s4 += s12 * 136657;
  s5 -= s12 * 683901;
  s12 = 0;

  carry0 = s0 >> 21;
  s1 += carry0;
  s0 -= int64_lshift21(carry0);
  carry1 = s1 >> 21;
  s2 += carry1;
  s1 -= int64_lshift21(carry1);
  carry2 = s2 >> 21;
  s3 += carry2;
  s2 -= int64_lshift21(carry2);
  carry3 = s3 >> 21;
  s4 += carry3;
  s3 -= int64_lshift21(carry3);
  carry4 = s4 >> 21;
  s5 += carry4;
  s4 -= int64_lshift21(carry4);
  carry5 = s5 >> 21;
  s6 += carry5;
  s5 -= int64_lshift21(carry5);
  carry6 = s6 >> 21;
  s7 += carry6;
  s6 -= int64_lshift21(carry6);
  carry7 = s7 >> 21;
  s8 += carry7;
  s7 -= int64_lshift21(carry7);
  carry8 = s8 >> 21;
  s9 += carry8;
  s8 -= int64_lshift21(carry8);
  carry9 = s9 >> 21;
  s10 += carry9;
  s9 -= int64_lshift21(carry9);
  carry10 = s10 >> 21;
  s11 += carry10;
  s10 -= int64_lshift21(carry10);

  // Pack the twelve 21-bit limbs back into 32 little-endian bytes.
  s[0] = s0 >> 0;
  s[1] = s0 >> 8;
  s[2] = (s0 >> 16) | (s1 << 5);
  s[3] = s1 >> 3;
  s[4] = s1 >> 11;
  s[5] = (s1 >> 19) | (s2 << 2);
  s[6] = s2 >> 6;
  s[7] = (s2 >> 14) | (s3 << 7);
  s[8] = s3 >> 1;
  s[9] = s3 >> 9;
  s[10] = (s3 >> 17) | (s4 << 4);
  s[11] = s4 >> 4;
  s[12] = s4 >> 12;
  s[13] = (s4 >> 20) | (s5 << 1);
  s[14] = s5 >> 7;
  s[15] = (s5 >> 15) | (s6 << 6);
  s[16] = s6 >> 2;
  s[17] = s6 >> 10;
  s[18] = (s6 >> 18) | (s7 << 3);
  s[19] = s7 >> 5;
  s[20] = s7 >> 13;
  s[21] = s8 >> 0;
  s[22] = s8 >> 8;
  s[23] = (s8 >> 16) | (s9 << 5);
  s[24] = s9 >> 3;
  s[25] = s9 >> 11;
  s[26] = (s9 >> 19) | (s10 << 2);
  s[27] = s10 >> 6;
  s[28] = (s10 >> 14) | (s11 << 7);
  s[29] = s11 >> 1;
  s[30] = s11 >> 9;
  s[31] = s11 >> 17;
}
1377
// Input:
//   a[0]+256*a[1]+...+256^31*a[31] = a
//   b[0]+256*b[1]+...+256^31*b[31] = b
//   c[0]+256*c[1]+...+256^31*c[31] = c
//
// Output:
//   s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l
//   where l = 2^252 + 27742317777372353535851937790883648493.
//
// Each 256-bit input is split into twelve 21-bit limbs; a schoolbook product
// of a and b plus c yields 24 limbs, which are then reduced modulo l with
// the same fold-and-carry scheme as x25519_sc_reduce.
static void sc_muladd(uint8_t *s, const uint8_t *a, const uint8_t *b,
                      const uint8_t *c) {
  // Unpack a, b and c into 21-bit limbs (the top limb keeps the high bits).
  int64_t a0 = 2097151 & load_3(a);
  int64_t a1 = 2097151 & (load_4(a + 2) >> 5);
  int64_t a2 = 2097151 & (load_3(a + 5) >> 2);
  int64_t a3 = 2097151 & (load_4(a + 7) >> 7);
  int64_t a4 = 2097151 & (load_4(a + 10) >> 4);
  int64_t a5 = 2097151 & (load_3(a + 13) >> 1);
  int64_t a6 = 2097151 & (load_4(a + 15) >> 6);
  int64_t a7 = 2097151 & (load_3(a + 18) >> 3);
  int64_t a8 = 2097151 & load_3(a + 21);
  int64_t a9 = 2097151 & (load_4(a + 23) >> 5);
  int64_t a10 = 2097151 & (load_3(a + 26) >> 2);
  int64_t a11 = (load_4(a + 28) >> 7);
  int64_t b0 = 2097151 & load_3(b);
  int64_t b1 = 2097151 & (load_4(b + 2) >> 5);
  int64_t b2 = 2097151 & (load_3(b + 5) >> 2);
  int64_t b3 = 2097151 & (load_4(b + 7) >> 7);
  int64_t b4 = 2097151 & (load_4(b + 10) >> 4);
  int64_t b5 = 2097151 & (load_3(b + 13) >> 1);
  int64_t b6 = 2097151 & (load_4(b + 15) >> 6);
  int64_t b7 = 2097151 & (load_3(b + 18) >> 3);
  int64_t b8 = 2097151 & load_3(b + 21);
  int64_t b9 = 2097151 & (load_4(b + 23) >> 5);
  int64_t b10 = 2097151 & (load_3(b + 26) >> 2);
  int64_t b11 = (load_4(b + 28) >> 7);
  int64_t c0 = 2097151 & load_3(c);
  int64_t c1 = 2097151 & (load_4(c + 2) >> 5);
  int64_t c2 = 2097151 & (load_3(c + 5) >> 2);
  int64_t c3 = 2097151 & (load_4(c + 7) >> 7);
  int64_t c4 = 2097151 & (load_4(c + 10) >> 4);
  int64_t c5 = 2097151 & (load_3(c + 13) >> 1);
  int64_t c6 = 2097151 & (load_4(c + 15) >> 6);
  int64_t c7 = 2097151 & (load_3(c + 18) >> 3);
  int64_t c8 = 2097151 & load_3(c + 21);
  int64_t c9 = 2097151 & (load_4(c + 23) >> 5);
  int64_t c10 = 2097151 & (load_3(c + 26) >> 2);
  int64_t c11 = (load_4(c + 28) >> 7);
  int64_t s0;
  int64_t s1;
  int64_t s2;
  int64_t s3;
  int64_t s4;
  int64_t s5;
  int64_t s6;
  int64_t s7;
  int64_t s8;
  int64_t s9;
  int64_t s10;
  int64_t s11;
  int64_t s12;
  int64_t s13;
  int64_t s14;
  int64_t s15;
  int64_t s16;
  int64_t s17;
  int64_t s18;
  int64_t s19;
  int64_t s20;
  int64_t s21;
  int64_t s22;
  int64_t s23;
  int64_t carry0;
  int64_t carry1;
  int64_t carry2;
  int64_t carry3;
  int64_t carry4;
  int64_t carry5;
  int64_t carry6;
  int64_t carry7;
  int64_t carry8;
  int64_t carry9;
  int64_t carry10;
  int64_t carry11;
  int64_t carry12;
  int64_t carry13;
  int64_t carry14;
  int64_t carry15;
  int64_t carry16;
  int64_t carry17;
  int64_t carry18;
  int64_t carry19;
  int64_t carry20;
  int64_t carry21;
  int64_t carry22;

  // Schoolbook multiplication a*b, with c added into the low limbs,
  // producing 24 limbs s0..s23.
  s0 = c0 + a0 * b0;
  s1 = c1 + a0 * b1 + a1 * b0;
  s2 = c2 + a0 * b2 + a1 * b1 + a2 * b0;
  s3 = c3 + a0 * b3 + a1 * b2 + a2 * b1 + a3 * b0;
  s4 = c4 + a0 * b4 + a1 * b3 + a2 * b2 + a3 * b1 + a4 * b0;
  s5 = c5 + a0 * b5 + a1 * b4 + a2 * b3 + a3 * b2 + a4 * b1 + a5 * b0;
  s6 = c6 + a0 * b6 + a1 * b5 + a2 * b4 + a3 * b3 + a4 * b2 + a5 * b1 + a6 * b0;
  s7 = c7 + a0 * b7 + a1 * b6 + a2 * b5 + a3 * b4 + a4 * b3 + a5 * b2 +
       a6 * b1 + a7 * b0;
  s8 = c8 + a0 * b8 + a1 * b7 + a2 * b6 + a3 * b5 + a4 * b4 + a5 * b3 +
       a6 * b2 + a7 * b1 + a8 * b0;
  s9 = c9 + a0 * b9 + a1 * b8 + a2 * b7 + a3 * b6 + a4 * b5 + a5 * b4 +
       a6 * b3 + a7 * b2 + a8 * b1 + a9 * b0;
  s10 = c10 + a0 * b10 + a1 * b9 + a2 * b8 + a3 * b7 + a4 * b6 + a5 * b5 +
        a6 * b4 + a7 * b3 + a8 * b2 + a9 * b1 + a10 * b0;
  s11 = c11 + a0 * b11 + a1 * b10 + a2 * b9 + a3 * b8 + a4 * b7 + a5 * b6 +
        a6 * b5 + a7 * b4 + a8 * b3 + a9 * b2 + a10 * b1 + a11 * b0;
  s12 = a1 * b11 + a2 * b10 + a3 * b9 + a4 * b8 + a5 * b7 + a6 * b6 + a7 * b5 +
        a8 * b4 + a9 * b3 + a10 * b2 + a11 * b1;
  s13 = a2 * b11 + a3 * b10 + a4 * b9 + a5 * b8 + a6 * b7 + a7 * b6 + a8 * b5 +
        a9 * b4 + a10 * b3 + a11 * b2;
  s14 = a3 * b11 + a4 * b10 + a5 * b9 + a6 * b8 + a7 * b7 + a8 * b6 + a9 * b5 +
        a10 * b4 + a11 * b3;
  s15 = a4 * b11 + a5 * b10 + a6 * b9 + a7 * b8 + a8 * b7 + a9 * b6 + a10 * b5 +
        a11 * b4;
  s16 = a5 * b11 + a6 * b10 + a7 * b9 + a8 * b8 + a9 * b7 + a10 * b6 + a11 * b5;
  s17 = a6 * b11 + a7 * b10 + a8 * b9 + a9 * b8 + a10 * b7 + a11 * b6;
  s18 = a7 * b11 + a8 * b10 + a9 * b9 + a10 * b8 + a11 * b7;
  s19 = a8 * b11 + a9 * b10 + a10 * b9 + a11 * b8;
  s20 = a9 * b11 + a10 * b10 + a11 * b9;
  s21 = a10 * b11 + a11 * b10;
  s22 = a11 * b11;
  s23 = 0;

  // Partial carry passes (even limbs, then odd) to bound the product limbs.
  // The (1 << 20) bias rounds towards a centered remainder.
  carry0 = (s0 + (1 << 20)) >> 21;
  s1 += carry0;
  s0 -= int64_lshift21(carry0);
  carry2 = (s2 + (1 << 20)) >> 21;
  s3 += carry2;
  s2 -= int64_lshift21(carry2);
  carry4 = (s4 + (1 << 20)) >> 21;
  s5 += carry4;
  s4 -= int64_lshift21(carry4);
  carry6 = (s6 + (1 << 20)) >> 21;
  s7 += carry6;
  s6 -= int64_lshift21(carry6);
  carry8 = (s8 + (1 << 20)) >> 21;
  s9 += carry8;
  s8 -= int64_lshift21(carry8);
  carry10 = (s10 + (1 << 20)) >> 21;
  s11 += carry10;
  s10 -= int64_lshift21(carry10);
  carry12 = (s12 + (1 << 20)) >> 21;
  s13 += carry12;
  s12 -= int64_lshift21(carry12);
  carry14 = (s14 + (1 << 20)) >> 21;
  s15 += carry14;
  s14 -= int64_lshift21(carry14);
  carry16 = (s16 + (1 << 20)) >> 21;
  s17 += carry16;
  s16 -= int64_lshift21(carry16);
  carry18 = (s18 + (1 << 20)) >> 21;
  s19 += carry18;
  s18 -= int64_lshift21(carry18);
  carry20 = (s20 + (1 << 20)) >> 21;
  s21 += carry20;
  s20 -= int64_lshift21(carry20);
  carry22 = (s22 + (1 << 20)) >> 21;
  s23 += carry22;
  s22 -= int64_lshift21(carry22);

  carry1 = (s1 + (1 << 20)) >> 21;
  s2 += carry1;
  s1 -= int64_lshift21(carry1);
  carry3 = (s3 + (1 << 20)) >> 21;
  s4 += carry3;
  s3 -= int64_lshift21(carry3);
  carry5 = (s5 + (1 << 20)) >> 21;
  s6 += carry5;
  s5 -= int64_lshift21(carry5);
  carry7 = (s7 + (1 << 20)) >> 21;
  s8 += carry7;
  s7 -= int64_lshift21(carry7);
  carry9 = (s9 + (1 << 20)) >> 21;
  s10 += carry9;
  s9 -= int64_lshift21(carry9);
  carry11 = (s11 + (1 << 20)) >> 21;
  s12 += carry11;
  s11 -= int64_lshift21(carry11);
  carry13 = (s13 + (1 << 20)) >> 21;
  s14 += carry13;
  s13 -= int64_lshift21(carry13);
  carry15 = (s15 + (1 << 20)) >> 21;
  s16 += carry15;
  s15 -= int64_lshift21(carry15);
  carry17 = (s17 + (1 << 20)) >> 21;
  s18 += carry17;
  s17 -= int64_lshift21(carry17);
  carry19 = (s19 + (1 << 20)) >> 21;
  s20 += carry19;
  s19 -= int64_lshift21(carry19);
  carry21 = (s21 + (1 << 20)) >> 21;
  s22 += carry21;
  s21 -= int64_lshift21(carry21);

  // Eliminate s23..s18 using 2^252 ≡ -q (mod l), where q = l - 2^252 with
  // signed 21-bit limbs 666643, 470296, 654183, -997805, 136657, -683901.
  s11 += s23 * 666643;
  s12 += s23 * 470296;
  s13 += s23 * 654183;
  s14 -= s23 * 997805;
  s15 += s23 * 136657;
  s16 -= s23 * 683901;
  s23 = 0;

  s10 += s22 * 666643;
  s11 += s22 * 470296;
  s12 += s22 * 654183;
  s13 -= s22 * 997805;
  s14 += s22 * 136657;
  s15 -= s22 * 683901;
  s22 = 0;

  s9 += s21 * 666643;
  s10 += s21 * 470296;
  s11 += s21 * 654183;
  s12 -= s21 * 997805;
  s13 += s21 * 136657;
  s14 -= s21 * 683901;
  s21 = 0;

  s8 += s20 * 666643;
  s9 += s20 * 470296;
  s10 += s20 * 654183;
  s11 -= s20 * 997805;
  s12 += s20 * 136657;
  s13 -= s20 * 683901;
  s20 = 0;

  s7 += s19 * 666643;
  s8 += s19 * 470296;
  s9 += s19 * 654183;
  s10 -= s19 * 997805;
  s11 += s19 * 136657;
  s12 -= s19 * 683901;
  s19 = 0;

  s6 += s18 * 666643;
  s7 += s18 * 470296;
  s8 += s18 * 654183;
  s9 -= s18 * 997805;
  s10 += s18 * 136657;
  s11 -= s18 * 683901;
  s18 = 0;

  // Re-bound the middle limbs (even pass, then odd pass).
  carry6 = (s6 + (1 << 20)) >> 21;
  s7 += carry6;
  s6 -= int64_lshift21(carry6);
  carry8 = (s8 + (1 << 20)) >> 21;
  s9 += carry8;
  s8 -= int64_lshift21(carry8);
  carry10 = (s10 + (1 << 20)) >> 21;
  s11 += carry10;
  s10 -= int64_lshift21(carry10);
  carry12 = (s12 + (1 << 20)) >> 21;
  s13 += carry12;
  s12 -= int64_lshift21(carry12);
  carry14 = (s14 + (1 << 20)) >> 21;
  s15 += carry14;
  s14 -= int64_lshift21(carry14);
  carry16 = (s16 + (1 << 20)) >> 21;
  s17 += carry16;
  s16 -= int64_lshift21(carry16);

  carry7 = (s7 + (1 << 20)) >> 21;
  s8 += carry7;
  s7 -= int64_lshift21(carry7);
  carry9 = (s9 + (1 << 20)) >> 21;
  s10 += carry9;
  s9 -= int64_lshift21(carry9);
  carry11 = (s11 + (1 << 20)) >> 21;
  s12 += carry11;
  s11 -= int64_lshift21(carry11);
  carry13 = (s13 + (1 << 20)) >> 21;
  s14 += carry13;
  s13 -= int64_lshift21(carry13);
  carry15 = (s15 + (1 << 20)) >> 21;
  s16 += carry15;
  s15 -= int64_lshift21(carry15);

  // Eliminate s17..s12 the same way.
  s5 += s17 * 666643;
  s6 += s17 * 470296;
  s7 += s17 * 654183;
  s8 -= s17 * 997805;
  s9 += s17 * 136657;
  s10 -= s17 * 683901;
  s17 = 0;

  s4 += s16 * 666643;
  s5 += s16 * 470296;
  s6 += s16 * 654183;
  s7 -= s16 * 997805;
  s8 += s16 * 136657;
  s9 -= s16 * 683901;
  s16 = 0;

  s3 += s15 * 666643;
  s4 += s15 * 470296;
  s5 += s15 * 654183;
  s6 -= s15 * 997805;
  s7 += s15 * 136657;
  s8 -= s15 * 683901;
  s15 = 0;

  s2 += s14 * 666643;
  s3 += s14 * 470296;
  s4 += s14 * 654183;
  s5 -= s14 * 997805;
  s6 += s14 * 136657;
  s7 -= s14 * 683901;
  s14 = 0;

  s1 += s13 * 666643;
  s2 += s13 * 470296;
  s3 += s13 * 654183;
  s4 -= s13 * 997805;
  s5 += s13 * 136657;
  s6 -= s13 * 683901;
  s13 = 0;

  s0 += s12 * 666643;
  s1 += s12 * 470296;
  s2 += s12 * 654183;
  s3 -= s12 * 997805;
  s4 += s12 * 136657;
  s5 -= s12 * 683901;
  s12 = 0;

  // Carry passes over the low limbs; overflow from s11 collects in s12.
  carry0 = (s0 + (1 << 20)) >> 21;
  s1 += carry0;
  s0 -= int64_lshift21(carry0);
  carry2 = (s2 + (1 << 20)) >> 21;
  s3 += carry2;
  s2 -= int64_lshift21(carry2);
  carry4 = (s4 + (1 << 20)) >> 21;
  s5 += carry4;
  s4 -= int64_lshift21(carry4);
  carry6 = (s6 + (1 << 20)) >> 21;
  s7 += carry6;
  s6 -= int64_lshift21(carry6);
  carry8 = (s8 + (1 << 20)) >> 21;
  s9 += carry8;
  s8 -= int64_lshift21(carry8);
  carry10 = (s10 + (1 << 20)) >> 21;
  s11 += carry10;
  s10 -= int64_lshift21(carry10);

  carry1 = (s1 + (1 << 20)) >> 21;
  s2 += carry1;
  s1 -= int64_lshift21(carry1);
  carry3 = (s3 + (1 << 20)) >> 21;
  s4 += carry3;
  s3 -= int64_lshift21(carry3);
  carry5 = (s5 + (1 << 20)) >> 21;
  s6 += carry5;
  s5 -= int64_lshift21(carry5);
  carry7 = (s7 + (1 << 20)) >> 21;
  s8 += carry7;
  s7 -= int64_lshift21(carry7);
  carry9 = (s9 + (1 << 20)) >> 21;
  s10 += carry9;
  s9 -= int64_lshift21(carry9);
  carry11 = (s11 + (1 << 20)) >> 21;
  s12 += carry11;
  s11 -= int64_lshift21(carry11);

  // Fold the stray s12 back into the low limbs.
  s0 += s12 * 666643;
  s1 += s12 * 470296;
  s2 += s12 * 654183;
  s3 -= s12 * 997805;
  s4 += s12 * 136657;
  s5 -= s12 * 683901;
  s12 = 0;

  // Full sequential carry chain; any remaining overflow ends up in s12.
  carry0 = s0 >> 21;
  s1 += carry0;
  s0 -= int64_lshift21(carry0);
  carry1 = s1 >> 21;
  s2 += carry1;
  s1 -= int64_lshift21(carry1);
  carry2 = s2 >> 21;
  s3 += carry2;
  s2 -= int64_lshift21(carry2);
  carry3 = s3 >> 21;
  s4 += carry3;
  s3 -= int64_lshift21(carry3);
  carry4 = s4 >> 21;
  s5 += carry4;
  s4 -= int64_lshift21(carry4);
  carry5 = s5 >> 21;
  s6 += carry5;
  s5 -= int64_lshift21(carry5);
  carry6 = s6 >> 21;
  s7 += carry6;
  s6 -= int64_lshift21(carry6);
  carry7 = s7 >> 21;
  s8 += carry7;
  s7 -= int64_lshift21(carry7);
  carry8 = s8 >> 21;
  s9 += carry8;
  s8 -= int64_lshift21(carry8);
  carry9 = s9 >> 21;
  s10 += carry9;
  s9 -= int64_lshift21(carry9);
  carry10 = s10 >> 21;
  s11 += carry10;
  s10 -= int64_lshift21(carry10);
  carry11 = s11 >> 21;
  s12 += carry11;
  s11 -= int64_lshift21(carry11);

  // One last fold of s12 and a final carry chain leave twelve fully reduced
  // non-negative 21-bit limbs.
  s0 += s12 * 666643;
  s1 += s12 * 470296;
  s2 += s12 * 654183;
  s3 -= s12 * 997805;
  s4 += s12 * 136657;
  s5 -= s12 * 683901;
  s12 = 0;

  carry0 = s0 >> 21;
  s1 += carry0;
  s0 -= int64_lshift21(carry0);
  carry1 = s1 >> 21;
  s2 += carry1;
  s1 -= int64_lshift21(carry1);
  carry2 = s2 >> 21;
  s3 += carry2;
  s2 -= int64_lshift21(carry2);
  carry3 = s3 >> 21;
  s4 += carry3;
  s3 -= int64_lshift21(carry3);
  carry4 = s4 >> 21;
  s5 += carry4;
  s4 -= int64_lshift21(carry4);
  carry5 = s5 >> 21;
  s6 += carry5;
  s5 -= int64_lshift21(carry5);
  carry6 = s6 >> 21;
  s7 += carry6;
  s6 -= int64_lshift21(carry6);
  carry7 = s7 >> 21;
  s8 += carry7;
  s7 -= int64_lshift21(carry7);
  carry8 = s8 >> 21;
  s9 += carry8;
  s8 -= int64_lshift21(carry8);
  carry9 = s9 >> 21;
  s10 += carry9;
  s9 -= int64_lshift21(carry9);
  carry10 = s10 >> 21;
  s11 += carry10;
  s10 -= int64_lshift21(carry10);

  // Pack the twelve 21-bit limbs back into 32 little-endian bytes.
  s[0] = s0 >> 0;
  s[1] = s0 >> 8;
  s[2] = (s0 >> 16) | (s1 << 5);
  s[3] = s1 >> 3;
  s[4] = s1 >> 11;
  s[5] = (s1 >> 19) | (s2 << 2);
  s[6] = s2 >> 6;
  s[7] = (s2 >> 14) | (s3 << 7);
  s[8] = s3 >> 1;
  s[9] = s3 >> 9;
  s[10] = (s3 >> 17) | (s4 << 4);
  s[11] = s4 >> 4;
  s[12] = s4 >> 12;
  s[13] = (s4 >> 20) | (s5 << 1);
  s[14] = s5 >> 7;
  s[15] = (s5 >> 15) | (s6 << 6);
  s[16] = s6 >> 2;
  s[17] = s6 >> 10;
  s[18] = (s6 >> 18) | (s7 << 3);
  s[19] = s7 >> 5;
  s[20] = s7 >> 13;
  s[21] = s8 >> 0;
  s[22] = s8 >> 8;
  s[23] = (s8 >> 16) | (s9 << 5);
  s[24] = s9 >> 3;
  s[25] = s9 >> 11;
  s[26] = (s9 >> 19) | (s10 << 2);
  s[27] = s10 >> 6;
  s[28] = (s10 >> 14) | (s11 << 7);
  s[29] = s11 >> 1;
  s[30] = s11 >> 9;
  s[31] = s11 >> 17;
}
1866
// ED25519_keypair generates a fresh Ed25519 key pair: a random 32-byte seed
// is drawn from the system RNG and expanded into the 32-byte public key and
// the 64-byte private key.
void ED25519_keypair(uint8_t out_public_key[32], uint8_t out_private_key[64]) {
  uint8_t entropy[32];
  RAND_bytes(entropy, sizeof(entropy));
  ED25519_keypair_from_seed(out_public_key, out_private_key, entropy);
}
1872
// ED25519_sign computes an Ed25519 signature (RFC 8032, section 5.1.6) of
// |message| under |private_key| (seed || public key) and writes the 64-byte
// signature R || S to |out_sig|. It returns one on success.
int ED25519_sign(uint8_t out_sig[64], const uint8_t *message,
                 size_t message_len, const uint8_t private_key[64]) {
  // NOTE: The documentation on this function says that it returns zero on
  // allocation failure. While that can't happen with the current
  // implementation, we want to reserve the ability to allocate in this
  // implementation in the future.

  // Expand the 32-byte seed half of the private key with SHA-512: the low
  // half of |az| becomes the secret scalar, the high half the nonce prefix.
  uint8_t az[SHA512_DIGEST_LENGTH];
  SHA512(private_key, 32, az);

  // Clamp the scalar as required by the specification.
  az[0] &= 248;
  az[31] &= 63;
  az[31] |= 64;

  // nonce = SHA-512(prefix || message), then reduced modulo the group order.
  SHA512_CTX hash_ctx;
  SHA512_Init(&hash_ctx);
  SHA512_Update(&hash_ctx, az + 32, 32);
  SHA512_Update(&hash_ctx, message, message_len);
  uint8_t nonce[SHA512_DIGEST_LENGTH];
  SHA512_Final(nonce, &hash_ctx);

  x25519_sc_reduce(nonce);
  // R = [nonce]B is encoded as the first 32 bytes of the signature.
  ge_p3 R;
  x25519_ge_scalarmult_base(&R, nonce);
  ge_p3_tobytes(out_sig, &R);

  // hram = SHA-512(R || public key || message) mod l.
  SHA512_Init(&hash_ctx);
  SHA512_Update(&hash_ctx, out_sig, 32);
  SHA512_Update(&hash_ctx, private_key + 32, 32);
  SHA512_Update(&hash_ctx, message, message_len);
  uint8_t hram[SHA512_DIGEST_LENGTH];
  SHA512_Final(hram, &hash_ctx);

  x25519_sc_reduce(hram);
  // S = (hram * az + nonce) mod l is the second half of the signature.
  sc_muladd(out_sig + 32, hram, az, nonce);

  // The signature is computed from the private key, but is public.
  CONSTTIME_DECLASSIFY(out_sig, 64);
  return 1;
}
1913
// ED25519_verify checks that |signature| (R || S) is a valid Ed25519
// signature of |message| under |public_key| (RFC 8032, section 5.1.7). It
// returns one if the signature verifies and zero otherwise.
int ED25519_verify(const uint8_t *message, size_t message_len,
                   const uint8_t signature[64], const uint8_t public_key[32]) {
  // Reject if the top three bits of the encoded S are set, or if the public
  // key does not decode to a point on the curve.
  ge_p3 A;
  if ((signature[63] & 224) != 0 ||
      !x25519_ge_frombytes_vartime(&A, public_key)) {
    return 0;
  }

  // Negate A (flip X and T) so the double-scalarmult below effectively
  // computes [S]B - [h]A.
  fe_loose t;
  fe_neg(&t, &A.X);
  fe_carry(&A.X, &t);
  fe_neg(&t, &A.T);
  fe_carry(&A.T, &t);

  uint8_t pkcopy[32];
  OPENSSL_memcpy(pkcopy, public_key, 32);
  uint8_t rcopy[32];
  OPENSSL_memcpy(rcopy, signature, 32);
  uint8_t scopy[32];
  OPENSSL_memcpy(scopy, signature + 32, 32);

  // https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in
  // the range [0, order) in order to prevent signature malleability.

  // kOrder is the order of Curve25519 in little-endian form.
  static const uint64_t kOrder[4] = {
      UINT64_C(0x5812631a5cf5d3ed),
      UINT64_C(0x14def9dea2f79cd6),
      0,
      UINT64_C(0x1000000000000000),
  };
  // Compare s against kOrder word by word, most significant first. Reject
  // when s >= kOrder; the i == 0 case catches exact equality.
  for (size_t i = 3;; i--) {
    uint64_t word = CRYPTO_load_u64_le(scopy + i * 8);
    if (word > kOrder[i]) {
      return 0;
    } else if (word < kOrder[i]) {
      break;
    } else if (i == 0) {
      return 0;
    }
  }

  // h = SHA-512(R || A || message) mod l.
  SHA512_CTX hash_ctx;
  SHA512_Init(&hash_ctx);
  SHA512_Update(&hash_ctx, signature, 32);
  SHA512_Update(&hash_ctx, public_key, 32);
  SHA512_Update(&hash_ctx, message, message_len);
  uint8_t h[SHA512_DIGEST_LENGTH];
  SHA512_Final(h, &hash_ctx);

  x25519_sc_reduce(h);

  // R' = [h](-A) + [s]B; the signature is valid iff R' encodes to R.
  ge_p2 R;
  ge_double_scalarmult_vartime(&R, h, &A, scopy);

  uint8_t rcheck[32];
  x25519_ge_tobytes(rcheck, &R);

  return CRYPTO_memcmp(rcheck, rcopy, sizeof(rcheck)) == 0;
}
1974
ED25519_keypair_from_seed(uint8_t out_public_key[32],uint8_t out_private_key[64],const uint8_t seed[32])1975 void ED25519_keypair_from_seed(uint8_t out_public_key[32],
1976 uint8_t out_private_key[64],
1977 const uint8_t seed[32]) {
1978 uint8_t az[SHA512_DIGEST_LENGTH];
1979 SHA512(seed, 32, az);
1980
1981 az[0] &= 248;
1982 az[31] &= 127;
1983 az[31] |= 64;
1984
1985 ge_p3 A;
1986 x25519_ge_scalarmult_base(&A, az);
1987 ge_p3_tobytes(out_public_key, &A);
1988 // The public key is derived from the private key, but it is public.
1989 CONSTTIME_DECLASSIFY(out_public_key, 32);
1990
1991 OPENSSL_memcpy(out_private_key, seed, 32);
1992 OPENSSL_memcpy(out_private_key + 32, out_public_key, 32);
1993 }
1994
1995
// x25519_scalar_mult_generic computes the X25519 function (RFC 7748,
// section 5): it writes to |out| the u-coordinate of [scalar]P, where P is
// the point with u-coordinate |point|, using a constant-time Montgomery
// ladder over the field implementation generated by Fiat.
static void x25519_scalar_mult_generic(uint8_t out[32],
                                       const uint8_t scalar[32],
                                       const uint8_t point[32]) {
  fe x1, x2, z2, x3, z3, tmp0, tmp1;
  fe_loose x2l, z2l, x3l, tmp0l, tmp1l;

  // Clamp the scalar per RFC 7748, section 5.
  uint8_t e[32];
  OPENSSL_memcpy(e, scalar, 32);
  e[0] &= 248;
  e[31] &= 127;
  e[31] |= 64;

  // The following implementation was transcribed to Coq and proven to
  // correspond to unary scalar multiplication in affine coordinates given that
  // x1 != 0 is the x coordinate of some point on the curve. It was also checked
  // in Coq that doing a ladderstep with x1 = x3 = 0 gives z2' = z3' = 0, and z2
  // = z3 = 0 gives z2' = z3' = 0. The statement was quantified over the
  // underlying field, so it applies to Curve25519 itself and the quadratic
  // twist of Curve25519. It was not proven in Coq that prime-field arithmetic
  // correctly simulates extension-field arithmetic on prime-field values.
  // The decoding of the byte array representation of e was not considered.
  // Specification of Montgomery curves in affine coordinates:
  // <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Spec/MontgomeryCurve.v#L27>
  // Proof that these form a group that is isomorphic to a Weierstrass curve:
  // <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/AffineProofs.v#L35>
  // Coq transcription and correctness proof of the loop (where scalarbits=255):
  // <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZ.v#L118>
  // <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L278>
  // preconditions: 0 <= e < 2^255 (not necessarily e < order), fe_invert(0) = 0
  fe_frombytes(&x1, point);
  fe_1(&x2);
  fe_0(&z2);
  fe_copy(&x3, &x1);
  fe_1(&z3);

  // Conditional swaps keep the ladder constant-time: |swap| tracks whether
  // (x2, z2) and (x3, z3) are currently exchanged.
  unsigned swap = 0;
  int pos;
  for (pos = 254; pos >= 0; --pos) {
    // loop invariant as of right before the test, for the case where x1 != 0:
    //   pos >= -1; if z2 = 0 then x2 is nonzero; if z3 = 0 then x3 is nonzero
    //   let r := e >> (pos+1) in the following equalities of projective points:
    //   to_xz (r*P)     === if swap then (x3, z3) else (x2, z2)
    //   to_xz ((r+1)*P) === if swap then (x2, z2) else (x3, z3)
    //   x1 is the nonzero x coordinate of the nonzero point (r*P-(r+1)*P)
    unsigned b = 1 & (e[pos / 8] >> (pos & 7));
    swap ^= b;
    fe_cswap(&x2, &x3, swap);
    fe_cswap(&z2, &z3, swap);
    swap = b;
    // Coq transcription of ladderstep formula (called from transcribed loop):
    // <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZ.v#L89>
    // <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L131>
    // x1 != 0 <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L217>
    // x1 = 0 <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L147>
    fe_sub(&tmp0l, &x3, &z3);
    fe_sub(&tmp1l, &x2, &z2);
    fe_add(&x2l, &x2, &z2);
    fe_add(&z2l, &x3, &z3);
    fe_mul_tll(&z3, &tmp0l, &x2l);
    fe_mul_tll(&z2, &z2l, &tmp1l);
    fe_sq_tl(&tmp0, &tmp1l);
    fe_sq_tl(&tmp1, &x2l);
    fe_add(&x3l, &z3, &z2);
    fe_sub(&z2l, &z3, &z2);
    fe_mul_ttt(&x2, &tmp1, &tmp0);
    fe_sub(&tmp1l, &tmp1, &tmp0);
    fe_sq_tl(&z2, &z2l);
    fe_mul121666(&z3, &tmp1l);
    fe_sq_tl(&x3, &x3l);
    fe_add(&tmp0l, &tmp0, &z3);
    fe_mul_ttt(&z3, &x1, &z2);
    fe_mul_tll(&z2, &tmp1l, &tmp0l);
  }
  // here pos=-1, so r=e, so to_xz (e*P) === if swap then (x3, z3) else (x2, z2)
  fe_cswap(&x2, &x3, swap);
  fe_cswap(&z2, &z3, swap);

  // Convert projective (x2 : z2) to the affine u-coordinate u = x2/z2. Per
  // the precondition above, fe_invert(0) = 0, so z2 = 0 yields out = 0.
  fe_invert(&z2, &z2);
  fe_mul_ttt(&x2, &x2, &z2);
  fe_tobytes(out, &x2);
}
2077
// x25519_scalar_mult dispatches to an optimized assembly implementation when
// the build and CPU support one, and otherwise falls back to the generic C
// Montgomery ladder.
static void x25519_scalar_mult(uint8_t out[32], const uint8_t scalar[32],
                               const uint8_t point[32]) {
#if defined(BORINGSSL_X25519_NEON)
  if (CRYPTO_is_NEON_capable()) {
    x25519_NEON(out, scalar, point);
    return;
  }
#elif defined(BORINGSSL_FE25519_ADX)
  // The ADX implementation also relies on BMI1/BMI2 instructions.
  if (CRYPTO_is_BMI1_capable() && CRYPTO_is_BMI2_capable() &&
      CRYPTO_is_ADX_capable()) {
    x25519_scalar_mult_adx(out, scalar, point);
    return;
  }
#endif

  x25519_scalar_mult_generic(out, scalar, point);
}
2095
// X25519_keypair generates a random X25519 key pair: a 32-byte private key
// and the corresponding 32-byte public value.
void X25519_keypair(uint8_t out_public_value[32], uint8_t out_private_key[32]) {
  RAND_bytes(out_private_key, 32);

  // All X25519 implementations should decode scalars correctly (see
  // https://tools.ietf.org/html/rfc7748#section-5). However, if an
  // implementation doesn't then it might interoperate with random keys a
  // fraction of the time because they'll, randomly, happen to be correctly
  // formed.
  //
  // Thus we do the opposite of the masking here to make sure that our private
  // keys are never correctly masked and so, hopefully, any incorrect
  // implementations are deterministically broken.
  //
  // This does not affect security because, although we're throwing away
  // entropy, a valid implementation of scalarmult should throw away the exact
  // same bits anyway.
  out_private_key[0] |= ~248;
  out_private_key[31] &= ~64;
  out_private_key[31] |= ~127;

  X25519_public_from_private(out_public_value, out_private_key);
}
2118
// X25519 computes the shared secret between |private_key| and
// |peer_public_value| and writes it to |out_shared_key|. It returns one on
// success and zero if the peer's point was of small order.
int X25519(uint8_t out_shared_key[32], const uint8_t private_key[32],
           const uint8_t peer_public_value[32]) {
  static const uint8_t kAllZeros[32] = {0};
  x25519_scalar_mult(out_shared_key, private_key, peer_public_value);
  // A small-order peer point collapses the result to all zeros; reject it.
  int nonzero = constant_time_declassify_int(
                    CRYPTO_memcmp(kAllZeros, out_shared_key, 32)) != 0;
  return nonzero;
}
2127
X25519_public_from_private(uint8_t out_public_value[32],const uint8_t private_key[32])2128 void X25519_public_from_private(uint8_t out_public_value[32],
2129 const uint8_t private_key[32]) {
2130 #if defined(BORINGSSL_X25519_NEON)
2131 if (CRYPTO_is_NEON_capable()) {
2132 static const uint8_t kMongomeryBasePoint[32] = {9};
2133 x25519_NEON(out_public_value, private_key, kMongomeryBasePoint);
2134 return;
2135 }
2136 #endif
2137
2138 uint8_t e[32];
2139 OPENSSL_memcpy(e, private_key, 32);
2140 e[0] &= 248;
2141 e[31] &= 127;
2142 e[31] |= 64;
2143
2144 ge_p3 A;
2145 x25519_ge_scalarmult_base(&A, e);
2146
2147 // We only need the u-coordinate of the curve25519 point. The map is
2148 // u=(y+1)/(1-y). Since y=Y/Z, this gives u=(Z+Y)/(Z-Y).
2149 fe_loose zplusy, zminusy;
2150 fe zminusy_inv;
2151 fe_add(&zplusy, &A.Z, &A.Y);
2152 fe_sub(&zminusy, &A.Z, &A.Y);
2153 fe_loose_invert(&zminusy_inv, &zminusy);
2154 fe_mul_tlt(&zminusy_inv, &zplusy, &zminusy_inv);
2155 fe_tobytes(out_public_value, &zminusy_inv);
2156 CONSTTIME_DECLASSIFY(out_public_value, 32);
2157 }
2158