/* cmac_mode.c - TinyCrypt CMAC mode implementation */

/*
 *  Copyright (C) 2017 by Intel Corporation, All Rights Reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *    - Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 *    - Neither the name of Intel Corporation nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

#include <tinycrypt/aes.h>
#include <tinycrypt/cmac_mode.h>
#include <tinycrypt/constants.h>
#include <tinycrypt/utils.h>

/* Maximum number of calls allowed before the key must be changed (2^48). */
static const uint64_t MAX_CALLS = ((uint64_t)1 << 48);

/*
 *  gf_wrap -- In our implementation, GF(2^128) is represented as a 16 byte
 *  array with byte 0 the most significant and byte 15 the least significant.
 *  High bit carry reduction is based on the primitive polynomial
 *
 *                     X^128 + X^7 + X^2 + X + 1,
 *
 *  which leads to the reduction formula X^128 = X^7 + X^2 + X + 1. Indeed,
 *  since 0 = (X^128 + X^7 + X^2 + X + 1) mod (X^128 + X^7 + X^2 + X + 1) and
 *  since addition of polynomials with coefficients in Z/Z(2) is just XOR, we
 *  can add X^128 to both sides to get
 *
 *       X^128 = (X^7 + X^2 + X + 1) mod (X^128 + X^7 + X^2 + X + 1)
 *
 *  and the coefficients of the polynomial on the right hand side form the
 *  string 1000 0111 = 0x87, which is the value of gf_wrap.
 *
 *  This gets used in the following way. Doubling in GF(2^128) is just a left
 *  shift by 1 bit, except when the most significant bit is 1. In the latter
 *  case, the relation X^128 = X^7 + X^2 + X + 1 says that the high order bit
 *  that overflows beyond 128 bits can be replaced by addition of
 *  X^7 + X^2 + X + 1 <--> 0x87 to the low order 128 bits. Since addition
 *  in GF(2^128) is represented by XOR, we therefore only have to XOR 0x87
 *  into the low order byte after a left shift when the starting high order
 *  bit is 1.
 */
const unsigned char gf_wrap = 0x87;
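
/*
 * A quick worked example of the doubling rule above: if the input is the
 * 16-byte value 80 00 ... 00 (only the top bit set), the left shift drops
 * that bit out of the 128-bit field, so the result is the reduction value
 * itself, 00 ... 00 87. If the input is 40 00 ... 00, there is no carry and
 * the result is simply 80 00 ... 00.
 */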

/*
 *  assumes: out != NULL and points to a GF(2^n) value to receive the
 *            doubled value;
 *           in != NULL and points to a 16 byte GF(2^n) value
 *            to double;
 *           the in and out buffers do not overlap.
 *  effects: doubles the GF(2^n) value pointed to by "in" and places
 *           the result in the GF(2^n) value pointed to by "out."
 */
void gf_double(uint8_t *out, uint8_t *in)
{

	/* start with low order byte */
	uint8_t *x = in + (TC_AES_BLOCK_SIZE - 1);

	/* if msb == 1, we need to add the gf_wrap value, otherwise add 0 */
	uint8_t carry = (in[0] >> 7) ? gf_wrap : 0;

	out += (TC_AES_BLOCK_SIZE - 1);
	for (;;) {
		*out-- = (*x << 1) ^ carry;
		if (x == in) {
			break;
		}
		carry = *x-- >> 7;
	}
}

int tc_cmac_setup(TCCmacState_t s, const uint8_t *key, TCAesKeySched_t sched)
{

	/* input sanity check: */
	if (s == (TCCmacState_t) 0 ||
	    key == (const uint8_t *) 0) {
		return TC_CRYPTO_FAIL;
	}

	/* put s into a known state */
	_set(s, 0, sizeof(*s));
	s->sched = sched;

	/* configure the encryption key used by the underlying block cipher */
	tc_aes128_set_encrypt_key(s->sched, key);

	/* compute s->K1 and s->K2 from s->iv using the key schedule */
	_set(s->iv, 0, TC_AES_BLOCK_SIZE);
	tc_aes_encrypt(s->iv, s->iv, s->sched);
	gf_double(s->K1, s->iv);
	gf_double(s->K2, s->K1);

	/* reset s->iv to 0 in case someone wants to compute now */
	tc_cmac_init(s);

	return TC_CRYPTO_SUCCESS;
}
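
/*
 * Typical use of this module, as a sketch (assumes struct tc_cmac_struct and
 * struct tc_aes_key_sched_struct as declared in cmac_mode.h and aes.h in
 * upstream TinyCrypt; key, msg and msg_len are placeholders supplied by the
 * caller):
 *
 *	struct tc_cmac_struct state;
 *	struct tc_aes_key_sched_struct sched;
 *	uint8_t tag[TC_AES_BLOCK_SIZE];
 *
 *	tc_cmac_setup(&state, key, &sched);
 *	tc_cmac_update(&state, msg, msg_len);
 *	tc_cmac_final(tag, &state);
 *
 * tc_cmac_update() may be called any number of times before tc_cmac_final().
 * tc_cmac_final() erases the whole state, including the derived subkeys, so
 * tc_cmac_setup() must be called again before computing another tag.
 */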

int tc_cmac_erase(TCCmacState_t s)
{
	if (s == (TCCmacState_t) 0) {
		return TC_CRYPTO_FAIL;
	}

	/* destroy the current state */
	_set(s, 0, sizeof(*s));

	return TC_CRYPTO_SUCCESS;
}

int tc_cmac_init(TCCmacState_t s)
{
	/* input sanity check: */
	if (s == (TCCmacState_t) 0) {
		return TC_CRYPTO_FAIL;
	}

	/* CMAC starts with an all zero initialization vector */
	_set(s->iv, 0, TC_AES_BLOCK_SIZE);

	/* and the leftover buffer is empty */
	_set(s->leftover, 0, TC_AES_BLOCK_SIZE);
	s->leftover_offset = 0;

	/* Set countdown to max number of calls allowed before re-keying: */
	s->countdown = MAX_CALLS;

	return TC_CRYPTO_SUCCESS;
}

int tc_cmac_update(TCCmacState_t s, const uint8_t *data, size_t data_length)
{
	unsigned int i;

	/* input sanity check: */
	if (s == (TCCmacState_t) 0) {
		return TC_CRYPTO_FAIL;
	}
	if (data_length == 0) {
		return TC_CRYPTO_SUCCESS;
	}
	if (data == (const uint8_t *) 0) {
		return TC_CRYPTO_FAIL;
	}

	if (s->countdown == 0) {
		return TC_CRYPTO_FAIL;
	}

	s->countdown--;

	if (s->leftover_offset > 0) {
		/* last data added to s didn't end on a TC_AES_BLOCK_SIZE byte boundary */
		size_t remaining_space = TC_AES_BLOCK_SIZE - s->leftover_offset;

		if (data_length < remaining_space) {
			/* still not enough data to encrypt this time either */
			_copy(&s->leftover[s->leftover_offset], data_length, data, data_length);
			s->leftover_offset += data_length;
			return TC_CRYPTO_SUCCESS;
		}
		/* leftover block is now full; encrypt it first */
		_copy(&s->leftover[s->leftover_offset],
		      remaining_space,
		      data,
		      remaining_space);
		data_length -= remaining_space;
		data += remaining_space;
		s->leftover_offset = 0;

		for (i = 0; i < TC_AES_BLOCK_SIZE; ++i) {
			s->iv[i] ^= s->leftover[i];
		}
		tc_aes_encrypt(s->iv, s->iv, s->sched);
	}

	/* CBC encrypt each (except the last) of the data blocks */
	while (data_length > TC_AES_BLOCK_SIZE) {
		for (i = 0; i < TC_AES_BLOCK_SIZE; ++i) {
			s->iv[i] ^= data[i];
		}
		tc_aes_encrypt(s->iv, s->iv, s->sched);
		data += TC_AES_BLOCK_SIZE;
		data_length -= TC_AES_BLOCK_SIZE;
	}

	if (data_length > 0) {
		/* save leftover data for next time */
		_copy(s->leftover, data_length, data, data_length);
		s->leftover_offset = data_length;
	}

	return TC_CRYPTO_SUCCESS;
}
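
/*
 * In the notation of NIST SP 800-38B / RFC 4493, the loops above maintain the
 * running CBC-MAC value C_i = AES_K(C_{i-1} XOR M_i), with C_0 = 0^128, for
 * every complete message block M_i except the final one. The final (possibly
 * partial) block is kept in s->leftover so that tc_cmac_final() can XOR it
 * with the appropriate subkey (K1 or K2) before the last encryption.
 */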

int tc_cmac_final(uint8_t *tag, TCCmacState_t s)
{
	uint8_t *k;
	unsigned int i;

	/* input sanity check: */
	if (tag == (uint8_t *) 0 ||
	    s == (TCCmacState_t) 0) {
		return TC_CRYPTO_FAIL;
	}

	if (s->leftover_offset == TC_AES_BLOCK_SIZE) {
		/* the last message block is a full-sized block */
		k = (uint8_t *) s->K1;
	} else {
		/* the final message block is not a full-sized block */
		size_t remaining = TC_AES_BLOCK_SIZE - s->leftover_offset;

		_set(&s->leftover[s->leftover_offset], 0, remaining);
		s->leftover[s->leftover_offset] = TC_CMAC_PADDING;
		k = (uint8_t *) s->K2;
	}
	for (i = 0; i < TC_AES_BLOCK_SIZE; ++i) {
		s->iv[i] ^= s->leftover[i] ^ k[i];
	}

	tc_aes_encrypt(tag, s->iv, s->sched);

	/* erasing state: */
	tc_cmac_erase(s);

	return TC_CRYPTO_SUCCESS;
}
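
/*
 * A quick self-check sketch for this module, using the published AES-CMAC
 * test vectors from RFC 4493 (Examples 1 and 2): with the key
 * 2b7e1516 28aed2a6 abf71588 09cf4f3c, the tag over the empty message is
 * bb1d6929 e9593728 7fa37d12 9b756746, and the tag over the single block
 * 6bc1bee2 2e409f96 e93d7e11 7393172a is 070a16b4 6b4d4144 f79bdd9d d04a287c.
 * Running the setup/update/final sequence shown above with those inputs
 * should reproduce those tags.
 */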