/* SHA-256 and SHA-512 implementation based on code by Olivier Gay
 * <[email protected]> under a BSD-style license. See below.
 */

/*
 * FIPS 180-2 SHA-224/256/384/512 implementation
 * Last update: 02/02/2007
 * Issue date: 04/30/2005
 *
 * Copyright (C) 2005, 2007 Olivier Gay <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "../avb_sha.h"
#include "avb_crypto_ops_impl.h"

#define SHFR(x, n) (x >> n)
#define ROTR(x, n) ((x >> n) | (x << ((sizeof(x) << 3) - n)))
#define ROTL(x, n) ((x << n) | (x >> ((sizeof(x) << 3) - n)))
#define CH(x, y, z) ((x & y) ^ (~x & z))
#define MAJ(x, y, z) ((x & y) ^ (x & z) ^ (y & z))

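/* SHA512_F1/SHA512_F2 are the FIPS 180-2 "big sigma" functions (Sigma_0 and
 * Sigma_1) applied to the working variables in each round; SHA512_F3/SHA512_F4
 * are the "small sigma" functions (sigma_0 and sigma_1) used to expand the
 * message schedule. */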
#define SHA512_F1(x) (ROTR(x, 28) ^ ROTR(x, 34) ^ ROTR(x, 39))
#define SHA512_F2(x) (ROTR(x, 14) ^ ROTR(x, 18) ^ ROTR(x, 41))
#define SHA512_F3(x) (ROTR(x, 1) ^ ROTR(x, 8) ^ SHFR(x, 7))
#define SHA512_F4(x) (ROTR(x, 19) ^ ROTR(x, 61) ^ SHFR(x, 6))

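/* UNPACK32/UNPACK64 store a 32/64-bit word into a byte string in big-endian
 * order; PACK64 reads a big-endian 64-bit word from a byte string, as required
 * by the SHA-2 specification. */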
#define UNPACK32(x, str)                 \
  {                                      \
    *((str) + 3) = (uint8_t)((x));       \
    *((str) + 2) = (uint8_t)((x) >> 8);  \
    *((str) + 1) = (uint8_t)((x) >> 16); \
    *((str) + 0) = (uint8_t)((x) >> 24); \
  }

#define UNPACK64(x, str)                         \
  {                                              \
    *((str) + 7) = (uint8_t)x;                   \
    *((str) + 6) = (uint8_t)((uint64_t)x >> 8);  \
    *((str) + 5) = (uint8_t)((uint64_t)x >> 16); \
    *((str) + 4) = (uint8_t)((uint64_t)x >> 24); \
    *((str) + 3) = (uint8_t)((uint64_t)x >> 32); \
    *((str) + 2) = (uint8_t)((uint64_t)x >> 40); \
    *((str) + 1) = (uint8_t)((uint64_t)x >> 48); \
    *((str) + 0) = (uint8_t)((uint64_t)x >> 56); \
  }

#define PACK64(str, x)                                                        \
  {                                                                           \
    *(x) =                                                                    \
        ((uint64_t) * ((str) + 7)) | ((uint64_t) * ((str) + 6) << 8) |        \
        ((uint64_t) * ((str) + 5) << 16) | ((uint64_t) * ((str) + 4) << 24) | \
        ((uint64_t) * ((str) + 3) << 32) | ((uint64_t) * ((str) + 2) << 40) | \
        ((uint64_t) * ((str) + 1) << 48) | ((uint64_t) * ((str) + 0) << 56);  \
  }

/* Macros used for loop unrolling */

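/* Computes message schedule word w[i] from earlier schedule words. */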
#define SHA512_SCR(i) \
  { w[i] = SHA512_F4(w[i - 2]) + w[i - 7] + SHA512_F3(w[i - 15]) + w[i - 16]; }

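/* Performs one SHA-512 compression round for message/constant index j. The
 * (a, ..., h) arguments are rotated by the caller so the working variables do
 * not have to be shuffled between rounds. */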
#define SHA512_EXP(a, b, c, d, e, f, g, h, j)                                \
  {                                                                          \
    t1 = wv[h] + SHA512_F2(wv[e]) + CH(wv[e], wv[f], wv[g]) + sha512_k[j] +  \
         w[j];                                                               \
    t2 = SHA512_F1(wv[a]) + MAJ(wv[a], wv[b], wv[c]);                        \
    wv[d] += t1;                                                             \
    wv[h] = t1 + t2;                                                         \
  }

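/* Initial hash values H0..H7: the first 64 bits of the fractional parts of the
 * square roots of the first eight prime numbers (FIPS 180-2). */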
static const uint64_t sha512_h0[8] = {0x6a09e667f3bcc908ULL,
                                      0xbb67ae8584caa73bULL,
                                      0x3c6ef372fe94f82bULL,
                                      0xa54ff53a5f1d36f1ULL,
                                      0x510e527fade682d1ULL,
                                      0x9b05688c2b3e6c1fULL,
                                      0x1f83d9abfb41bd6bULL,
                                      0x5be0cd19137e2179ULL};

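/* Round constants K0..K79: the first 64 bits of the fractional parts of the
 * cube roots of the first eighty prime numbers (FIPS 180-2). */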
static const uint64_t sha512_k[80] = {
    0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL,
    0xe9b5dba58189dbbcULL, 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL,
    0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL, 0xd807aa98a3030242ULL,
    0x12835b0145706fbeULL, 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL,
    0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL, 0x9bdc06a725c71235ULL,
    0xc19bf174cf692694ULL, 0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL,
    0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL, 0x2de92c6f592b0275ULL,
    0x4a7484aa6ea6e483ULL, 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL,
    0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL, 0xb00327c898fb213fULL,
    0xbf597fc7beef0ee4ULL, 0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL,
    0x06ca6351e003826fULL, 0x142929670a0e6e70ULL, 0x27b70a8546d22ffcULL,
    0x2e1b21385c26c926ULL, 0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL,
    0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL, 0x81c2c92e47edaee6ULL,
    0x92722c851482353bULL, 0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL,
    0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL, 0xd192e819d6ef5218ULL,
    0xd69906245565a910ULL, 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL,
    0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL, 0x2748774cdf8eeb99ULL,
    0x34b0bcb5e19b48a8ULL, 0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL,
    0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL, 0x748f82ee5defb2fcULL,
    0x78a5636f43172f60ULL, 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL,
    0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL, 0xbef9a3f7b2c67915ULL,
    0xc67178f2e372532bULL, 0xca273eceea26619cULL, 0xd186b8c721c0c207ULL,
    0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL, 0x06f067aa72176fbaULL,
    0x0a637dc5a2c898a6ULL, 0x113f9804bef90daeULL, 0x1b710b35131c471bULL,
    0x28db77f523047d84ULL, 0x32caab7b40c72493ULL, 0x3c9ebe0a15c9bebcULL,
    0x431d67c49c100d4cULL, 0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL,
    0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL};

/* SHA-512 implementation */

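/* Typical usage sketch (it is assumed here that AvbSHA512Ctx and its 64-byte
 * digest buffer |buf| are declared in ../avb_sha.h):
 *
 *   AvbSHA512Ctx ctx;
 *   avb_sha512_init(&ctx);
 *   avb_sha512_update(&ctx, data, data_len);   // may be called repeatedly
 *   uint8_t* digest = avb_sha512_final(&ctx);  // 64 bytes, points into |ctx|
 */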
void avb_sha512_init(AvbSHA512Ctx* avb_ctx) {
  AvbSHA512ImplCtx* ctx = (AvbSHA512ImplCtx*)avb_ctx->reserved;
#ifdef UNROLL_LOOPS_SHA512
  ctx->h[0] = sha512_h0[0];
  ctx->h[1] = sha512_h0[1];
  ctx->h[2] = sha512_h0[2];
  ctx->h[3] = sha512_h0[3];
  ctx->h[4] = sha512_h0[4];
  ctx->h[5] = sha512_h0[5];
  ctx->h[6] = sha512_h0[6];
  ctx->h[7] = sha512_h0[7];
#else
  int i;

  for (i = 0; i < 8; i++)
    ctx->h[i] = sha512_h0[i];
#endif /* UNROLL_LOOPS_SHA512 */

  ctx->len = 0;
  ctx->tot_len = 0;
}

static void SHA512_transform(AvbSHA512ImplCtx* ctx,
                             const uint8_t* message,
                             size_t block_nb) {
  uint64_t w[80];
  uint64_t wv[8];
  uint64_t t1, t2;
  const uint8_t* sub_block;
  size_t i, j;

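  /* Compress each 128-byte (1024-bit) block of |message| into the running
   * hash state ctx->h. */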
  for (i = 0; i < block_nb; i++) {
    sub_block = message + (i << 7);

#ifdef UNROLL_LOOPS_SHA512
    PACK64(&sub_block[0], &w[0]);
    PACK64(&sub_block[8], &w[1]);
    PACK64(&sub_block[16], &w[2]);
    PACK64(&sub_block[24], &w[3]);
    PACK64(&sub_block[32], &w[4]);
    PACK64(&sub_block[40], &w[5]);
    PACK64(&sub_block[48], &w[6]);
    PACK64(&sub_block[56], &w[7]);
    PACK64(&sub_block[64], &w[8]);
    PACK64(&sub_block[72], &w[9]);
    PACK64(&sub_block[80], &w[10]);
    PACK64(&sub_block[88], &w[11]);
    PACK64(&sub_block[96], &w[12]);
    PACK64(&sub_block[104], &w[13]);
    PACK64(&sub_block[112], &w[14]);
    PACK64(&sub_block[120], &w[15]);

    SHA512_SCR(16);
    SHA512_SCR(17);
    SHA512_SCR(18);
    SHA512_SCR(19);
    SHA512_SCR(20);
    SHA512_SCR(21);
    SHA512_SCR(22);
    SHA512_SCR(23);
    SHA512_SCR(24);
    SHA512_SCR(25);
    SHA512_SCR(26);
    SHA512_SCR(27);
    SHA512_SCR(28);
    SHA512_SCR(29);
    SHA512_SCR(30);
    SHA512_SCR(31);
    SHA512_SCR(32);
    SHA512_SCR(33);
    SHA512_SCR(34);
    SHA512_SCR(35);
    SHA512_SCR(36);
    SHA512_SCR(37);
    SHA512_SCR(38);
    SHA512_SCR(39);
    SHA512_SCR(40);
    SHA512_SCR(41);
    SHA512_SCR(42);
    SHA512_SCR(43);
    SHA512_SCR(44);
    SHA512_SCR(45);
    SHA512_SCR(46);
    SHA512_SCR(47);
    SHA512_SCR(48);
    SHA512_SCR(49);
    SHA512_SCR(50);
    SHA512_SCR(51);
    SHA512_SCR(52);
    SHA512_SCR(53);
    SHA512_SCR(54);
    SHA512_SCR(55);
    SHA512_SCR(56);
    SHA512_SCR(57);
    SHA512_SCR(58);
    SHA512_SCR(59);
    SHA512_SCR(60);
    SHA512_SCR(61);
    SHA512_SCR(62);
    SHA512_SCR(63);
    SHA512_SCR(64);
    SHA512_SCR(65);
    SHA512_SCR(66);
    SHA512_SCR(67);
    SHA512_SCR(68);
    SHA512_SCR(69);
    SHA512_SCR(70);
    SHA512_SCR(71);
    SHA512_SCR(72);
    SHA512_SCR(73);
    SHA512_SCR(74);
    SHA512_SCR(75);
    SHA512_SCR(76);
    SHA512_SCR(77);
    SHA512_SCR(78);
    SHA512_SCR(79);

    wv[0] = ctx->h[0];
    wv[1] = ctx->h[1];
    wv[2] = ctx->h[2];
    wv[3] = ctx->h[3];
    wv[4] = ctx->h[4];
    wv[5] = ctx->h[5];
    wv[6] = ctx->h[6];
    wv[7] = ctx->h[7];

    j = 0;

    do {
      SHA512_EXP(0, 1, 2, 3, 4, 5, 6, 7, j);
      j++;
      SHA512_EXP(7, 0, 1, 2, 3, 4, 5, 6, j);
      j++;
      SHA512_EXP(6, 7, 0, 1, 2, 3, 4, 5, j);
      j++;
      SHA512_EXP(5, 6, 7, 0, 1, 2, 3, 4, j);
      j++;
      SHA512_EXP(4, 5, 6, 7, 0, 1, 2, 3, j);
      j++;
      SHA512_EXP(3, 4, 5, 6, 7, 0, 1, 2, j);
      j++;
      SHA512_EXP(2, 3, 4, 5, 6, 7, 0, 1, j);
      j++;
      SHA512_EXP(1, 2, 3, 4, 5, 6, 7, 0, j);
      j++;
    } while (j < 80);

    ctx->h[0] += wv[0];
    ctx->h[1] += wv[1];
    ctx->h[2] += wv[2];
    ctx->h[3] += wv[3];
    ctx->h[4] += wv[4];
    ctx->h[5] += wv[5];
    ctx->h[6] += wv[6];
    ctx->h[7] += wv[7];
#else
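    /* Load the sixteen 64-bit message words of this block (big-endian). */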
    for (j = 0; j < 16; j++) {
      PACK64(&sub_block[j << 3], &w[j]);
    }

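    /* Expand the message schedule to 80 words. */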
    for (j = 16; j < 80; j++) {
      SHA512_SCR(j);
    }

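    /* Initialize the working variables a..h from the current hash state. */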
    for (j = 0; j < 8; j++) {
      wv[j] = ctx->h[j];
    }

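    /* 80 compression rounds. */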
    for (j = 0; j < 80; j++) {
      t1 = wv[7] + SHA512_F2(wv[4]) + CH(wv[4], wv[5], wv[6]) + sha512_k[j] +
           w[j];
      t2 = SHA512_F1(wv[0]) + MAJ(wv[0], wv[1], wv[2]);
      wv[7] = wv[6];
      wv[6] = wv[5];
      wv[5] = wv[4];
      wv[4] = wv[3] + t1;
      wv[3] = wv[2];
      wv[2] = wv[1];
      wv[1] = wv[0];
      wv[0] = t1 + t2;
    }

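    /* Add the compressed block into the intermediate hash value. */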
    for (j = 0; j < 8; j++)
      ctx->h[j] += wv[j];
#endif /* UNROLL_LOOPS_SHA512 */
  }
}

void avb_sha512_update(AvbSHA512Ctx* avb_ctx, const uint8_t* data, size_t len) {
  AvbSHA512ImplCtx* ctx = (AvbSHA512ImplCtx*)avb_ctx->reserved;
  size_t block_nb;
  size_t new_len, rem_len, tmp_len;
  const uint8_t* shifted_data;

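  /* First top up the partial block buffered in ctx->block. */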
  tmp_len = AVB_SHA512_BLOCK_SIZE - ctx->len;
  rem_len = len < tmp_len ? len : tmp_len;

  avb_memcpy(&ctx->block[ctx->len], data, rem_len);

  if (ctx->len + len < AVB_SHA512_BLOCK_SIZE) {
    ctx->len += len;
    return;
  }

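  /* Compress the now-full buffered block, then any whole blocks remaining in
   * |data|, and buffer whatever is left over. */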
  new_len = len - rem_len;
  block_nb = new_len / AVB_SHA512_BLOCK_SIZE;

  shifted_data = data + rem_len;

  SHA512_transform(ctx, ctx->block, 1);
  SHA512_transform(ctx, shifted_data, block_nb);

  rem_len = new_len % AVB_SHA512_BLOCK_SIZE;

  avb_memcpy(ctx->block, &shifted_data[block_nb << 7], rem_len);

  ctx->len = rem_len;
  ctx->tot_len += (block_nb + 1) << 7;
}

uint8_t* avb_sha512_final(AvbSHA512Ctx* avb_ctx) {
  AvbSHA512ImplCtx* ctx = (AvbSHA512ImplCtx*)avb_ctx->reserved;
  size_t block_nb;
  size_t pm_len;
  uint64_t len_b;

#ifndef UNROLL_LOOPS_SHA512
  size_t i;
#endif

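  /* Padding needs a 0x80 marker plus a 16-byte length field; if fewer than 17
   * bytes are free in the buffered block, a second padding block is used. */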
  block_nb =
      1 + ((AVB_SHA512_BLOCK_SIZE - 17) < (ctx->len % AVB_SHA512_BLOCK_SIZE));

  len_b = (ctx->tot_len + ctx->len) << 3;
  pm_len = block_nb << 7;

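  /* Append the 0x80 marker and zero padding, then write the message length in
   * bits into the last eight bytes; the upper half of the 128-bit length field
   * is left as the zeros written by the memset. */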
  avb_memset(ctx->block + ctx->len, 0, pm_len - ctx->len);
  ctx->block[ctx->len] = 0x80;
  UNPACK64(len_b, ctx->block + pm_len - 8);

  SHA512_transform(ctx, ctx->block, block_nb);

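  /* Serialize the final hash state big-endian into the caller-visible digest
   * buffer. */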
#ifdef UNROLL_LOOPS_SHA512
  UNPACK64(ctx->h[0], &avb_ctx->buf[0]);
  UNPACK64(ctx->h[1], &avb_ctx->buf[8]);
  UNPACK64(ctx->h[2], &avb_ctx->buf[16]);
  UNPACK64(ctx->h[3], &avb_ctx->buf[24]);
  UNPACK64(ctx->h[4], &avb_ctx->buf[32]);
  UNPACK64(ctx->h[5], &avb_ctx->buf[40]);
  UNPACK64(ctx->h[6], &avb_ctx->buf[48]);
  UNPACK64(ctx->h[7], &avb_ctx->buf[56]);
#else
  for (i = 0; i < 8; i++)
    UNPACK64(ctx->h[i], &avb_ctx->buf[i << 3]);
#endif /* UNROLL_LOOPS_SHA512 */

  return avb_ctx->buf;
}