/* Copyright (C) 1995-1998 Eric Young ([email protected])
 * All rights reserved.
 *
 * This package is an SSL implementation written
 * by Eric Young ([email protected]).
 * The implementation was written so as to conform with Netscapes SSL.
 *
 * This library is free for commercial and non-commercial use as long as
 * the following conditions are aheared to.  The following conditions
 * apply to all code found in this distribution, be it the RC4, RSA,
 * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
 * included with this distribution is covered by the same copyright terms
 * except that the holder is Tim Hudson ([email protected]).
 *
 * Copyright remains Eric Young's, and as such any Copyright notices in
 * the code are not to be removed.
 * If this package is used in a product, Eric Young should be given attribution
 * as the author of the parts of the library used.
 * This can be in the form of a textual message at program startup or
 * in documentation (online or textual) provided with the package.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    "This product includes cryptographic software written by
 *     Eric Young ([email protected])"
 *    The word 'cryptographic' can be left out if the rouines from the library
 *    being used are not cryptographic related :-).
 * 4. If you include any Windows specific code (or a derivative thereof) from
 *    the apps directory (application code) you must include an acknowledgement:
 *    "This product includes software written by Tim Hudson ([email protected])"
 *
 * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * The licence and distribution terms for any publically available version or
 * derivative of this code cannot be changed.  i.e. this code cannot simply be
 * copied and put under another distribution licence
 * [including the GNU Public Licence.] */

#include <openssl/sha.h>

#include <string.h>

#include <openssl/mem.h>

#include "../../internal.h"
#include "../digest/md32_common.h"
#include "../service_indicator/internal.h"
#include "internal.h"


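// The constants below are the SHA-1 initial hash value H(0), i.e. the five
// 32-bit words specified in FIPS 180-4, section 5.3.1.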
int SHA1_Init(SHA_CTX *sha) {
  OPENSSL_memset(sha, 0, sizeof(SHA_CTX));
  sha->h[0] = 0x67452301UL;
  sha->h[1] = 0xefcdab89UL;
  sha->h[2] = 0x98badcfeUL;
  sha->h[3] = 0x10325476UL;
  sha->h[4] = 0xc3d2e1f0UL;
  return 1;
}

uint8_t *SHA1(const uint8_t *data, size_t len, uint8_t out[SHA_DIGEST_LENGTH]) {
  SHA_CTX ctx;
  SHA1_Init(&ctx);
  SHA1_Update(&ctx, data, len);
  SHA1_Final(out, &ctx);
  OPENSSL_cleanse(&ctx, sizeof(ctx));
  return out;
}
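
// Usage sketch (an illustrative comment only, not part of this file's code):
// the one-shot SHA1() above hashes a buffer in a single call. |out| must
// point to SHA_DIGEST_LENGTH (20) bytes and is also returned.
//
//   uint8_t digest[SHA_DIGEST_LENGTH];
//   SHA1((const uint8_t *)"abc", 3, digest);
//   // digest now holds a9993e364706816aba3e25717850c26c9cd0d89d.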

#if !defined(SHA1_ASM)
static void sha1_block_data_order(uint32_t state[5], const uint8_t *data,
                                  size_t num);
#endif

void SHA1_Transform(SHA_CTX *c, const uint8_t data[SHA_CBLOCK]) {
  sha1_block_data_order(c->h, data, 1);
}

int SHA1_Update(SHA_CTX *c, const void *data, size_t len) {
  crypto_md32_update(&sha1_block_data_order, c->h, c->data, SHA_CBLOCK, &c->num,
                     &c->Nh, &c->Nl, data, len);
  return 1;
}

static void sha1_output_state(uint8_t out[SHA_DIGEST_LENGTH],
                              const SHA_CTX *ctx) {
  CRYPTO_store_u32_be(out, ctx->h[0]);
  CRYPTO_store_u32_be(out + 4, ctx->h[1]);
  CRYPTO_store_u32_be(out + 8, ctx->h[2]);
  CRYPTO_store_u32_be(out + 12, ctx->h[3]);
  CRYPTO_store_u32_be(out + 16, ctx->h[4]);
}

int SHA1_Final(uint8_t out[SHA_DIGEST_LENGTH], SHA_CTX *c) {
  crypto_md32_final(&sha1_block_data_order, c->h, c->data, SHA_CBLOCK, &c->num,
                    c->Nh, c->Nl, /*is_big_endian=*/1);

  sha1_output_state(out, c);
  FIPS_service_indicator_update_state();
  return 1;
}

void CRYPTO_fips_186_2_prf(uint8_t *out, size_t out_len,
                           const uint8_t xkey[SHA_DIGEST_LENGTH]) {
  // XKEY and XVAL are 160-bit values, but are internally right-padded up to
  // block size. See FIPS 186-2, Appendix 3.3. This buffer maintains both the
  // current value of XKEY and the padding.
  uint8_t block[SHA_CBLOCK] = {0};
  OPENSSL_memcpy(block, xkey, SHA_DIGEST_LENGTH);

  while (out_len != 0) {
    // We always use a zero XSEED, so we can merge the inner and outer loops.
    // XVAL is also always equal to XKEY.
    SHA_CTX ctx;
    SHA1_Init(&ctx);
    SHA1_Transform(&ctx, block);

    // XKEY = (1 + XKEY + w_i) mod 2^b
    uint32_t carry = 1;
    for (int i = 4; i >= 0; i--) {
      uint32_t tmp = CRYPTO_load_u32_be(block + i * 4);
      tmp = CRYPTO_addc_u32(tmp, ctx.h[i], carry, &carry);
      CRYPTO_store_u32_be(block + i * 4, tmp);
    }

    // Output w_i.
    if (out_len < SHA_DIGEST_LENGTH) {
      uint8_t buf[SHA_DIGEST_LENGTH];
      sha1_output_state(buf, &ctx);
      OPENSSL_memcpy(out, buf, out_len);
      break;
    }
    sha1_output_state(out, &ctx);
    out += SHA_DIGEST_LENGTH;
    out_len -= SHA_DIGEST_LENGTH;
  }
}
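
// Minimal usage sketch (an illustrative comment only; the seed value is a
// placeholder): derive 40 bytes of output from a 20-byte XKEY supplied by a
// protocol that still relies on the FIPS 186-2 construction.
//
//   uint8_t xkey[SHA_DIGEST_LENGTH] = {0};
//   uint8_t derived[40];
//   CRYPTO_fips_186_2_prf(derived, sizeof(derived), xkey);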

#define Xupdate(a, ix, ia, ib, ic, id)      \
  do {                                      \
    (a) = ((ia) ^ (ib) ^ (ic) ^ (id));      \
    (ix) = (a) = CRYPTO_rotl_u32((a), 1);   \
  } while (0)

#define K_00_19 0x5a827999UL
#define K_20_39 0x6ed9eba1UL
#define K_40_59 0x8f1bbcdcUL
#define K_60_79 0xca62c1d6UL

// As pointed out by Wei Dai <[email protected]>, F() below can be simplified
// to the code in F_00_19. Wei attributes these optimisations to Peter
// Gutmann's SHS code, and he attributes it to Rich Schroeppel.
//
//   #define F(x, y, z) (((x) & (y)) | ((~(x)) & (z)))
//
// I've just become aware of another tweak to be made, again from Wei Dai,
// in F_40_59, (x&a)|(y&a) -> (x|y)&a
#define F_00_19(b, c, d) ((((c) ^ (d)) & (b)) ^ (d))
#define F_20_39(b, c, d) ((b) ^ (c) ^ (d))
#define F_40_59(b, c, d) (((b) & (c)) | (((b) | (c)) & (d)))
#define F_60_79(b, c, d) F_20_39(b, c, d)
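
// A short aside on the two non-obvious round functions above (a derivation,
// not new behaviour): F_00_19 is the usual "choice" function
// Ch(b, c, d) = (b & c) | (~b & d); selecting between c and d by b can be
// rewritten as masking their XOR, which gives (((c) ^ (d)) & (b)) ^ (d) with
// one fewer operation. F_40_59 is the "majority" function
// Maj(b, c, d) = (b & c) | (b & d) | (c & d); folding the last two terms
// together gives ((b) & (c)) | (((b) | (c)) & (d)).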

#define BODY_00_15(i, a, b, c, d, e, f, xi)                   \
  do {                                                        \
    (f) = (xi) + (e) + K_00_19 + CRYPTO_rotl_u32((a), 5) +    \
          F_00_19((b), (c), (d));                             \
    (b) = CRYPTO_rotl_u32((b), 30);                           \
  } while (0)

#define BODY_16_19(i, a, b, c, d, e, f, xi, xa, xb, xc, xd)                  \
  do {                                                                       \
    Xupdate(f, xi, xa, xb, xc, xd);                                          \
    (f) += (e) + K_00_19 + CRYPTO_rotl_u32((a), 5) + F_00_19((b), (c), (d)); \
    (b) = CRYPTO_rotl_u32((b), 30);                                          \
  } while (0)

#define BODY_20_31(i, a, b, c, d, e, f, xi, xa, xb, xc, xd)                  \
  do {                                                                       \
    Xupdate(f, xi, xa, xb, xc, xd);                                          \
    (f) += (e) + K_20_39 + CRYPTO_rotl_u32((a), 5) + F_20_39((b), (c), (d)); \
    (b) = CRYPTO_rotl_u32((b), 30);                                          \
  } while (0)

#define BODY_32_39(i, a, b, c, d, e, f, xa, xb, xc, xd)                      \
  do {                                                                       \
    Xupdate(f, xa, xa, xb, xc, xd);                                          \
    (f) += (e) + K_20_39 + CRYPTO_rotl_u32((a), 5) + F_20_39((b), (c), (d)); \
    (b) = CRYPTO_rotl_u32((b), 30);                                          \
  } while (0)

#define BODY_40_59(i, a, b, c, d, e, f, xa, xb, xc, xd)                      \
  do {                                                                       \
    Xupdate(f, xa, xa, xb, xc, xd);                                          \
    (f) += (e) + K_40_59 + CRYPTO_rotl_u32((a), 5) + F_40_59((b), (c), (d)); \
    (b) = CRYPTO_rotl_u32((b), 30);                                          \
  } while (0)

#define BODY_60_79(i, a, b, c, d, e, f, xa, xb, xc, xd)       \
  do {                                                        \
    Xupdate(f, xa, xa, xb, xc, xd);                           \
    (f) = (xa) + (e) + K_60_79 + CRYPTO_rotl_u32((a), 5) +    \
          F_60_79((b), (c), (d));                             \
    (b) = CRYPTO_rotl_u32((b), 30);                           \
  } while (0)
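
// The BODY_* macros above each implement one SHA-1 round. They are invoked
// with the five working variables plus a sixth temporary, T. Rather than
// shifting values between variables on every round,
// sha1_block_data_order_nohw below rotates which variable plays the role of
// a, b, c, d and e from one invocation to the next, so each round only
// writes its result into the temporary slot and rotates b in place.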

#ifdef X
#undef X
#endif

/* Originally X was an array. As it's automatic, it's natural to expect a
 * RISC compiler to accommodate at least part of it in the register bank,
 * isn't it? Unfortunately not all compilers "find" this expectation
 * reasonable:-( In order to make such compilers generate better code I
 * replace X[] with a bunch of X0, X1, etc. See the function body below...
 * <[email protected]> */
#define X(i) XX##i

#if !defined(SHA1_ASM)

#if !defined(SHA1_ASM_NOHW)
static void sha1_block_data_order_nohw(uint32_t state[5], const uint8_t *data,
                                       size_t num) {
  register uint32_t A, B, C, D, E, T;
  uint32_t XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7, XX8, XX9, XX10,
      XX11, XX12, XX13, XX14, XX15;

  A = state[0];
  B = state[1];
  C = state[2];
  D = state[3];
  E = state[4];

  for (;;) {
    X(0) = CRYPTO_load_u32_be(data);
    data += 4;
    X(1) = CRYPTO_load_u32_be(data);
    data += 4;
    BODY_00_15(0, A, B, C, D, E, T, X(0));
    X(2) = CRYPTO_load_u32_be(data);
    data += 4;
    BODY_00_15(1, T, A, B, C, D, E, X(1));
    X(3) = CRYPTO_load_u32_be(data);
    data += 4;
    BODY_00_15(2, E, T, A, B, C, D, X(2));
    X(4) = CRYPTO_load_u32_be(data);
    data += 4;
    BODY_00_15(3, D, E, T, A, B, C, X(3));
    X(5) = CRYPTO_load_u32_be(data);
    data += 4;
    BODY_00_15(4, C, D, E, T, A, B, X(4));
    X(6) = CRYPTO_load_u32_be(data);
    data += 4;
    BODY_00_15(5, B, C, D, E, T, A, X(5));
    X(7) = CRYPTO_load_u32_be(data);
    data += 4;
    BODY_00_15(6, A, B, C, D, E, T, X(6));
    X(8) = CRYPTO_load_u32_be(data);
    data += 4;
    BODY_00_15(7, T, A, B, C, D, E, X(7));
    X(9) = CRYPTO_load_u32_be(data);
    data += 4;
    BODY_00_15(8, E, T, A, B, C, D, X(8));
    X(10) = CRYPTO_load_u32_be(data);
    data += 4;
    BODY_00_15(9, D, E, T, A, B, C, X(9));
    X(11) = CRYPTO_load_u32_be(data);
    data += 4;
    BODY_00_15(10, C, D, E, T, A, B, X(10));
    X(12) = CRYPTO_load_u32_be(data);
    data += 4;
    BODY_00_15(11, B, C, D, E, T, A, X(11));
    X(13) = CRYPTO_load_u32_be(data);
    data += 4;
    BODY_00_15(12, A, B, C, D, E, T, X(12));
    X(14) = CRYPTO_load_u32_be(data);
    data += 4;
    BODY_00_15(13, T, A, B, C, D, E, X(13));
    X(15) = CRYPTO_load_u32_be(data);
    data += 4;
    BODY_00_15(14, E, T, A, B, C, D, X(14));
    BODY_00_15(15, D, E, T, A, B, C, X(15));

    BODY_16_19(16, C, D, E, T, A, B, X(0), X(0), X(2), X(8), X(13));
    BODY_16_19(17, B, C, D, E, T, A, X(1), X(1), X(3), X(9), X(14));
    BODY_16_19(18, A, B, C, D, E, T, X(2), X(2), X(4), X(10), X(15));
    BODY_16_19(19, T, A, B, C, D, E, X(3), X(3), X(5), X(11), X(0));

    BODY_20_31(20, E, T, A, B, C, D, X(4), X(4), X(6), X(12), X(1));
    BODY_20_31(21, D, E, T, A, B, C, X(5), X(5), X(7), X(13), X(2));
    BODY_20_31(22, C, D, E, T, A, B, X(6), X(6), X(8), X(14), X(3));
    BODY_20_31(23, B, C, D, E, T, A, X(7), X(7), X(9), X(15), X(4));
    BODY_20_31(24, A, B, C, D, E, T, X(8), X(8), X(10), X(0), X(5));
    BODY_20_31(25, T, A, B, C, D, E, X(9), X(9), X(11), X(1), X(6));
    BODY_20_31(26, E, T, A, B, C, D, X(10), X(10), X(12), X(2), X(7));
    BODY_20_31(27, D, E, T, A, B, C, X(11), X(11), X(13), X(3), X(8));
    BODY_20_31(28, C, D, E, T, A, B, X(12), X(12), X(14), X(4), X(9));
    BODY_20_31(29, B, C, D, E, T, A, X(13), X(13), X(15), X(5), X(10));
    BODY_20_31(30, A, B, C, D, E, T, X(14), X(14), X(0), X(6), X(11));
    BODY_20_31(31, T, A, B, C, D, E, X(15), X(15), X(1), X(7), X(12));

    BODY_32_39(32, E, T, A, B, C, D, X(0), X(2), X(8), X(13));
    BODY_32_39(33, D, E, T, A, B, C, X(1), X(3), X(9), X(14));
    BODY_32_39(34, C, D, E, T, A, B, X(2), X(4), X(10), X(15));
    BODY_32_39(35, B, C, D, E, T, A, X(3), X(5), X(11), X(0));
    BODY_32_39(36, A, B, C, D, E, T, X(4), X(6), X(12), X(1));
    BODY_32_39(37, T, A, B, C, D, E, X(5), X(7), X(13), X(2));
    BODY_32_39(38, E, T, A, B, C, D, X(6), X(8), X(14), X(3));
    BODY_32_39(39, D, E, T, A, B, C, X(7), X(9), X(15), X(4));

    BODY_40_59(40, C, D, E, T, A, B, X(8), X(10), X(0), X(5));
    BODY_40_59(41, B, C, D, E, T, A, X(9), X(11), X(1), X(6));
    BODY_40_59(42, A, B, C, D, E, T, X(10), X(12), X(2), X(7));
    BODY_40_59(43, T, A, B, C, D, E, X(11), X(13), X(3), X(8));
    BODY_40_59(44, E, T, A, B, C, D, X(12), X(14), X(4), X(9));
    BODY_40_59(45, D, E, T, A, B, C, X(13), X(15), X(5), X(10));
    BODY_40_59(46, C, D, E, T, A, B, X(14), X(0), X(6), X(11));
    BODY_40_59(47, B, C, D, E, T, A, X(15), X(1), X(7), X(12));
    BODY_40_59(48, A, B, C, D, E, T, X(0), X(2), X(8), X(13));
    BODY_40_59(49, T, A, B, C, D, E, X(1), X(3), X(9), X(14));
    BODY_40_59(50, E, T, A, B, C, D, X(2), X(4), X(10), X(15));
    BODY_40_59(51, D, E, T, A, B, C, X(3), X(5), X(11), X(0));
    BODY_40_59(52, C, D, E, T, A, B, X(4), X(6), X(12), X(1));
    BODY_40_59(53, B, C, D, E, T, A, X(5), X(7), X(13), X(2));
    BODY_40_59(54, A, B, C, D, E, T, X(6), X(8), X(14), X(3));
    BODY_40_59(55, T, A, B, C, D, E, X(7), X(9), X(15), X(4));
    BODY_40_59(56, E, T, A, B, C, D, X(8), X(10), X(0), X(5));
    BODY_40_59(57, D, E, T, A, B, C, X(9), X(11), X(1), X(6));
    BODY_40_59(58, C, D, E, T, A, B, X(10), X(12), X(2), X(7));
    BODY_40_59(59, B, C, D, E, T, A, X(11), X(13), X(3), X(8));

    BODY_60_79(60, A, B, C, D, E, T, X(12), X(14), X(4), X(9));
    BODY_60_79(61, T, A, B, C, D, E, X(13), X(15), X(5), X(10));
    BODY_60_79(62, E, T, A, B, C, D, X(14), X(0), X(6), X(11));
    BODY_60_79(63, D, E, T, A, B, C, X(15), X(1), X(7), X(12));
    BODY_60_79(64, C, D, E, T, A, B, X(0), X(2), X(8), X(13));
    BODY_60_79(65, B, C, D, E, T, A, X(1), X(3), X(9), X(14));
    BODY_60_79(66, A, B, C, D, E, T, X(2), X(4), X(10), X(15));
    BODY_60_79(67, T, A, B, C, D, E, X(3), X(5), X(11), X(0));
    BODY_60_79(68, E, T, A, B, C, D, X(4), X(6), X(12), X(1));
    BODY_60_79(69, D, E, T, A, B, C, X(5), X(7), X(13), X(2));
    BODY_60_79(70, C, D, E, T, A, B, X(6), X(8), X(14), X(3));
    BODY_60_79(71, B, C, D, E, T, A, X(7), X(9), X(15), X(4));
    BODY_60_79(72, A, B, C, D, E, T, X(8), X(10), X(0), X(5));
    BODY_60_79(73, T, A, B, C, D, E, X(9), X(11), X(1), X(6));
    BODY_60_79(74, E, T, A, B, C, D, X(10), X(12), X(2), X(7));
    BODY_60_79(75, D, E, T, A, B, C, X(11), X(13), X(3), X(8));
    BODY_60_79(76, C, D, E, T, A, B, X(12), X(14), X(4), X(9));
    BODY_60_79(77, B, C, D, E, T, A, X(13), X(15), X(5), X(10));
    BODY_60_79(78, A, B, C, D, E, T, X(14), X(0), X(6), X(11));
    BODY_60_79(79, T, A, B, C, D, E, X(15), X(1), X(7), X(12));

    state[0] = (state[0] + E) & 0xffffffffL;
    state[1] = (state[1] + T) & 0xffffffffL;
    state[2] = (state[2] + A) & 0xffffffffL;
    state[3] = (state[3] + B) & 0xffffffffL;
    state[4] = (state[4] + C) & 0xffffffffL;

    if (--num == 0) {
      break;
    }

    A = state[0];
    B = state[1];
    C = state[2];
    D = state[3];
    E = state[4];
  }
}
#endif  // !SHA1_ASM_NOHW

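// Dispatch to the fastest SHA-1 block function available at run time:
// dedicated SHA instructions first, then AVX2, AVX, SSSE3 or NEON, and
// finally the portable C implementation above.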
static void sha1_block_data_order(uint32_t state[5], const uint8_t *data,
                                  size_t num) {
#if defined(SHA1_ASM_HW)
  if (sha1_hw_capable()) {
    sha1_block_data_order_hw(state, data, num);
    return;
  }
#endif
#if defined(SHA1_ASM_AVX2)
  if (sha1_avx2_capable()) {
    sha1_block_data_order_avx2(state, data, num);
    return;
  }
#endif
#if defined(SHA1_ASM_AVX)
  if (sha1_avx_capable()) {
    sha1_block_data_order_avx(state, data, num);
    return;
  }
#endif
#if defined(SHA1_ASM_SSSE3)
  if (sha1_ssse3_capable()) {
    sha1_block_data_order_ssse3(state, data, num);
    return;
  }
#endif
#if defined(SHA1_ASM_NEON)
  if (CRYPTO_is_NEON_capable()) {
    sha1_block_data_order_neon(state, data, num);
    return;
  }
#endif
  sha1_block_data_order_nohw(state, data, num);
}

#endif  // !SHA1_ASM

#undef Xupdate
#undef K_00_19
#undef K_20_39
#undef K_40_59
#undef K_60_79
#undef F_00_19
#undef F_20_39
#undef F_40_59
#undef F_60_79
#undef BODY_00_15
#undef BODY_16_19
#undef BODY_20_31
#undef BODY_32_39
#undef BODY_40_59
#undef BODY_60_79
#undef X