// Copyright 2016 Brian Smith.
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
// SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
// OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
// CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

use super::{
    elem::{binary_op, binary_op_assign},
    elem_sqr_mul, elem_sqr_mul_acc, Modulus, *,
};
use core::marker::PhantomData;

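// Note: `limbs!` is given four extra zero words here so that P-256 values
// fill the same fixed-width limb arrays shared with the larger curves; the
// exact target width is determined by the `limbs!` macro in the parent module
// (an assumption based on the four trailing zero words).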
macro_rules! p256_limbs {
    [ $($limb:expr),+ ] => {
        limbs![$($limb),+, 0, 0, 0, 0]
    };
}

pub static COMMON_OPS: CommonOps = CommonOps {
    num_limbs: 256 / LIMB_BITS,

    q: Modulus {
        p: p256_limbs![
            0xffffffff, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000001,
            0xffffffff
        ],
        rr: p256_limbs![
            0x00000003, 0x00000000, 0xffffffff, 0xfffffffb, 0xfffffffe, 0xffffffff, 0xfffffffd,
            0x00000004
        ],
    },

    n: Elem {
        limbs: p256_limbs![
            0xfc632551, 0xf3b9cac2, 0xa7179e84, 0xbce6faad, 0xffffffff, 0xffffffff, 0x00000000,
            0xffffffff
        ],
        m: PhantomData,
        encoding: PhantomData, // Unencoded
    },

    a: Elem {
        limbs: p256_limbs![
            0xfffffffc, 0xffffffff, 0xffffffff, 0x00000003, 0x00000000, 0x00000000, 0x00000004,
            0xfffffffc
        ],
        m: PhantomData,
        encoding: PhantomData, // R
    },
    b: Elem {
        limbs: p256_limbs![
            0x29c4bddf, 0xd89cdf62, 0x78843090, 0xacf005cd, 0xf7212ed6, 0xe5a220ab, 0x04874834,
            0xdc30061d
        ],
        m: PhantomData,
        encoding: PhantomData, // R
    },

    elem_mul_mont: p256_mul_mont,
    elem_sqr_mont: p256_sqr_mont,

    point_add_jacobian_impl: p256_point_add,
};

pub static PRIVATE_KEY_OPS: PrivateKeyOps = PrivateKeyOps {
    common: &COMMON_OPS,
    elem_inv_squared: p256_elem_inv_squared,
    point_mul_base_impl: p256_point_mul_base_impl,
    point_mul_impl: p256_point_mul,
};

fn p256_elem_inv_squared(a: &Elem<R>) -> Elem<R> {
    // Calculate a**-2 (mod q) == a**(q - 3) (mod q)
    //
    // The exponent (q - 3) is:
    //
    //    0xffffffff00000001000000000000000000000000fffffffffffffffffffffffc
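    //
    // Since q is prime and a is nonzero, Fermat's Little Theorem gives
    // a**(q - 1) == 1 (mod q), so a**(q - 3) == a**-2 (mod q).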

    #[inline]
    fn sqr_mul(a: &Elem<R>, squarings: usize, b: &Elem<R>) -> Elem<R> {
        elem_sqr_mul(&COMMON_OPS, a, squarings, b)
    }

    #[inline]
    fn sqr_mul_acc(a: &mut Elem<R>, squarings: usize, b: &Elem<R>) {
        elem_sqr_mul_acc(&COMMON_OPS, a, squarings, b)
    }

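    // Build up powers of `a` whose exponents are runs of 1 bits. The variable
    // names describe those exponents: the `b_*` values are written in binary
    // and the rest in hex with trailing binary digits after the underscore
    // (so `fff_111` has an exponent of fifteen consecutive 1 bits).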
    let b_1 = &a;
    let b_11 = sqr_mul(b_1, 1, b_1);
    let b_111 = sqr_mul(&b_11, 1, b_1);
    let f_11 = sqr_mul(&b_111, 3, &b_111);
    let fff = sqr_mul(&f_11, 6, &f_11);
    let fff_111 = sqr_mul(&fff, 3, &b_111);
    let fffffff_11 = sqr_mul(&fff_111, 15, &fff_111);
    let ffffffff = sqr_mul(&fffffff_11, 2, &b_11);

    // ffffffff00000001
    let mut acc = sqr_mul(&ffffffff, 31 + 1, b_1);

    // ffffffff00000001000000000000000000000000ffffffff
    sqr_mul_acc(&mut acc, 96 + 32, &ffffffff);

    // ffffffff00000001000000000000000000000000ffffffffffffffff
    sqr_mul_acc(&mut acc, 32, &ffffffff);

    // ffffffff00000001000000000000000000000000fffffffffffffffffffffff_11
    sqr_mul_acc(&mut acc, 30, &fffffff_11);

    // ffffffff00000001000000000000000000000000fffffffffffffffffffffffc
    COMMON_OPS.elem_square(&mut acc);
    COMMON_OPS.elem_square(&mut acc);

    acc
}

fn p256_point_mul_base_impl(g_scalar: &Scalar) -> Point {
    prefixed_extern! {
        fn p256_point_mul_base(
            r: *mut Limb,          // [3][COMMON_OPS.num_limbs]
            g_scalar: *const Limb, // [COMMON_OPS.num_limbs]
        );
    }

    let mut r = Point::new_at_infinity();
    unsafe {
        p256_point_mul_base(r.xyz.as_mut_ptr(), g_scalar.limbs.as_ptr());
    }
    r
}

pub static PUBLIC_KEY_OPS: PublicKeyOps = PublicKeyOps {
    common: &COMMON_OPS,
};

pub static SCALAR_OPS: ScalarOps = ScalarOps {
    common: &COMMON_OPS,
    scalar_inv_to_mont_impl: p256_scalar_inv_to_mont,
    scalar_mul_mont: p256_scalar_mul_mont,
};

pub static PUBLIC_SCALAR_OPS: PublicScalarOps = PublicScalarOps {
    scalar_ops: &SCALAR_OPS,
    public_key_ops: &PUBLIC_KEY_OPS,
    private_key_ops: &PRIVATE_KEY_OPS,

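    // q - n == 0x4319055358e8617b0c46353d039cdaae, which fits in the low four
    // 32-bit words, so the remaining limb values are zero.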
    q_minus_n: Elem {
        limbs: p256_limbs![0x039cdaae, 0x0c46353d, 0x58e8617b, 0x43190553, 0, 0, 0, 0],
        m: PhantomData,
        encoding: PhantomData, // Unencoded
    },
};

pub static PRIVATE_SCALAR_OPS: PrivateScalarOps = PrivateScalarOps {
    scalar_ops: &SCALAR_OPS,

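    // oneRR_mod_n == 1 * R**2 (mod n), where R is the Montgomery factor
    // (2**256 here); Montgomery-multiplying an unencoded scalar by this value
    // converts it to the Montgomery (R) encoding.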
    oneRR_mod_n: Scalar {
        limbs: p256_limbs![
            0xbe79eea2, 0x83244c95, 0x49bd6fa6, 0x4699799c, 0x2b6bec59, 0x2845b239, 0xf3d95620,
            0x66e12d94
        ],
        m: PhantomData,
        encoding: PhantomData, // R
    },
};

fn p256_scalar_inv_to_mont(a: &Scalar<Unencoded>) -> Scalar<R> {
    // Calculate the modular inverse of scalar |a| using Fermat's Little
    // Theorem:
    //
    //    a**-1 (mod n) == a**(n - 2) (mod n)
    //
    // The exponent (n - 2) is:
    //
    //    0xffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc63254f

    #[inline]
    fn mul(a: &Scalar<R>, b: &Scalar<R>) -> Scalar<R> {
        binary_op(p256_scalar_mul_mont, a, b)
    }

    #[inline]
    fn sqr(a: &Scalar<R>) -> Scalar<R> {
        let mut tmp = Scalar::zero();
        unsafe { p256_scalar_sqr_rep_mont(tmp.limbs.as_mut_ptr(), a.limbs.as_ptr(), 1) }
        tmp
    }

    // Returns (`a` squared `squarings` times) * `b`.
    fn sqr_mul(a: &Scalar<R>, squarings: Limb, b: &Scalar<R>) -> Scalar<R> {
        debug_assert!(squarings >= 1);
        let mut tmp = Scalar::zero();
        unsafe { p256_scalar_sqr_rep_mont(tmp.limbs.as_mut_ptr(), a.limbs.as_ptr(), squarings) }
        mul(&tmp, b)
    }

    // Sets `acc` = (`acc` squared `squarings` times) * `b`.
    fn sqr_mul_acc(acc: &mut Scalar<R>, squarings: Limb, b: &Scalar<R>) {
        debug_assert!(squarings >= 1);
        unsafe { p256_scalar_sqr_rep_mont(acc.limbs.as_mut_ptr(), acc.limbs.as_ptr(), squarings) }
        binary_op_assign(p256_scalar_mul_mont, acc, b);
    }

    fn to_mont(a: &Scalar) -> Scalar<R> {
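        // N_RR has the same limbs as `oneRR_mod_n` above (R**2 mod n);
        // Montgomery multiplication by it maps the unencoded scalar into the
        // Montgomery (R) domain.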
        static N_RR: Scalar<Unencoded> = Scalar {
            limbs: p256_limbs![
                0xbe79eea2, 0x83244c95, 0x49bd6fa6, 0x4699799c, 0x2b6bec59, 0x2845b239, 0xf3d95620,
                0x66e12d94
            ],
            m: PhantomData,
            encoding: PhantomData,
        };
        binary_op(p256_scalar_mul_mont, a, &N_RR)
    }

    // Indexes into `d`.
    const B_1: usize = 0;
    const B_10: usize = 1;
    const B_11: usize = 2;
    const B_101: usize = 3;
    const B_111: usize = 4;
    const B_1111: usize = 5;
    const B_10101: usize = 6;
    const B_101111: usize = 7;
    const DIGIT_COUNT: usize = 8;

    let mut d = [Scalar::zero(); DIGIT_COUNT];

    d[B_1] = to_mont(a);
    d[B_10] = sqr(&d[B_1]);
    d[B_11] = mul(&d[B_10], &d[B_1]);
    d[B_101] = mul(&d[B_10], &d[B_11]);
    d[B_111] = mul(&d[B_101], &d[B_10]);
    let b_1010 = sqr(&d[B_101]);
    d[B_1111] = mul(&b_1010, &d[B_101]);
    d[B_10101] = sqr_mul(&b_1010, 0 + 1, &d[B_1]);
    let b_101010 = sqr(&d[B_10101]);
    d[B_101111] = mul(&b_101010, &d[B_101]);
    let b_111111 = mul(&b_101010, &d[B_10101]);

    let ff = sqr_mul(&b_111111, 0 + 2, &d[B_11]);
    let ffff = sqr_mul(&ff, 0 + 8, &ff);
    let ffffffff = sqr_mul(&ffff, 0 + 16, &ffff);

    // ffffffff00000000ffffffff
    let mut acc = sqr_mul(&ffffffff, 32 + 32, &ffffffff);

    // ffffffff00000000ffffffffffffffff
    sqr_mul_acc(&mut acc, 0 + 32, &ffffffff);

    // The rest of the exponent, in binary, is:
    //
    //    1011110011100110111110101010110110100111000101111001111010000100
    //    1111001110111001110010101100001011111100011000110010010101001111

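    // Each entry below is (squarings, digit index): `acc` is squared
    // `squarings` times and then multiplied by `d[digit]`. A squaring count
    // written as `z + w` means `z` leading zero bits of the exponent followed
    // by the `w`-bit window named by the digit.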
    static REMAINING_WINDOWS: [(u8, u8); 26] = [
        (6, B_101111 as u8),
        (2 + 3, B_111 as u8),
        (2 + 2, B_11 as u8),
        (1 + 4, B_1111 as u8),
        (5, B_10101 as u8),
        (1 + 3, B_101 as u8),
        (3, B_101 as u8),
        (3, B_101 as u8),
        (2 + 3, B_111 as u8),
        (3 + 6, B_101111 as u8),
        (2 + 4, B_1111 as u8),
        (1 + 1, B_1 as u8),
        (4 + 1, B_1 as u8),
        (2 + 4, B_1111 as u8),
        (2 + 3, B_111 as u8),
        (1 + 3, B_111 as u8),
        (2 + 3, B_111 as u8),
        (2 + 3, B_101 as u8),
        (1 + 2, B_11 as u8),
        (4 + 6, B_101111 as u8),
        (2, B_11 as u8),
        (3 + 2, B_11 as u8),
        (3 + 2, B_11 as u8),
        (2 + 1, B_1 as u8),
        (2 + 5, B_10101 as u8),
        (2 + 4, B_1111 as u8),
    ];

    for &(squarings, digit) in &REMAINING_WINDOWS {
        sqr_mul_acc(&mut acc, Limb::from(squarings), &d[usize::from(digit)]);
    }

    acc
}

prefixed_extern! {
    pub(super) fn p256_mul_mont(
        r: *mut Limb,   // [COMMON_OPS.num_limbs]
        a: *const Limb, // [COMMON_OPS.num_limbs]
        b: *const Limb, // [COMMON_OPS.num_limbs]
    );
    pub(super) fn p256_sqr_mont(
        r: *mut Limb,   // [COMMON_OPS.num_limbs]
        a: *const Limb, // [COMMON_OPS.num_limbs]
    );

    fn p256_point_add(
        r: *mut Limb,   // [3][COMMON_OPS.num_limbs]
        a: *const Limb, // [3][COMMON_OPS.num_limbs]
        b: *const Limb, // [3][COMMON_OPS.num_limbs]
    );
    fn p256_point_mul(
        r: *mut Limb,          // [3][COMMON_OPS.num_limbs]
        p_scalar: *const Limb, // [COMMON_OPS.num_limbs]
        p_x: *const Limb,      // [COMMON_OPS.num_limbs]
        p_y: *const Limb,      // [COMMON_OPS.num_limbs]
    );

    fn p256_scalar_mul_mont(
        r: *mut Limb,   // [COMMON_OPS.num_limbs]
        a: *const Limb, // [COMMON_OPS.num_limbs]
        b: *const Limb, // [COMMON_OPS.num_limbs]
    );
    fn p256_scalar_sqr_rep_mont(
        r: *mut Limb,   // [COMMON_OPS.num_limbs]
        a: *const Limb, // [COMMON_OPS.num_limbs]
        rep: Limb,
    );
}
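
#[cfg(test)]
mod tests {
    use super::*;

    // Lightweight sanity-check sketch: the limb count declared in
    // `COMMON_OPS` should account for exactly the 256 bits of a P-256 field
    // element or scalar. (Assumes `num_limbs` and `LIMB_BITS` are plain
    // `usize` values, as suggested by the `256 / LIMB_BITS` initializer.)
    #[test]
    fn num_limbs_covers_256_bits() {
        assert_eq!(COMMON_OPS.num_limbs * LIMB_BITS, 256);
    }
}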