1 // SPDX-License-Identifier: GPL-2.0-only
2 // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 /*
4  * Crypto driver to handle block cipher algorithms using NVIDIA Security Engine.
5  */
6 
7 #include <linux/clk.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/module.h>
10 #include <linux/of_device.h>
11 #include <linux/platform_device.h>
12 
13 #include <crypto/aead.h>
14 #include <crypto/aes.h>
15 #include <crypto/engine.h>
16 #include <crypto/gcm.h>
17 #include <crypto/scatterwalk.h>
18 #include <crypto/xts.h>
19 #include <crypto/internal/aead.h>
20 #include <crypto/internal/hash.h>
21 #include <crypto/internal/skcipher.h>
22 
23 #include "tegra-se.h"
24 
25 struct tegra_aes_ctx {
26 	struct tegra_se *se;
27 	u32 alg;
28 	u32 ivsize;
29 	u32 key1_id;
30 	u32 key2_id;
31 	u32 keylen;
32 	u8 key1[AES_MAX_KEY_SIZE];
33 	u8 key2[AES_MAX_KEY_SIZE];
34 };
35 
36 struct tegra_aes_reqctx {
37 	struct tegra_se_datbuf datbuf;
38 	bool encrypt;
39 	u32 config;
40 	u32 crypto_config;
41 	u32 len;
42 	u32 *iv;
43 };
44 
45 struct tegra_aead_ctx {
46 	struct tegra_se *se;
47 	unsigned int authsize;
48 	u32 alg;
49 	u32 key_id;
50 	u32 keylen;
51 	u8 key[AES_MAX_KEY_SIZE];
52 };
53 
54 struct tegra_aead_reqctx {
55 	struct tegra_se_datbuf inbuf;
56 	struct tegra_se_datbuf outbuf;
57 	struct scatterlist *src_sg;
58 	struct scatterlist *dst_sg;
59 	unsigned int assoclen;
60 	unsigned int cryptlen;
61 	unsigned int authsize;
62 	bool encrypt;
63 	u32 crypto_config;
64 	u32 config;
65 	u32 key_id;
66 	u32 iv[4];
67 	u8 authdata[16];
68 };
69 
70 struct tegra_cmac_ctx {
71 	struct tegra_se *se;
72 	unsigned int alg;
73 	u32 key_id;
74 	u32 keylen;
75 	u8 key[AES_MAX_KEY_SIZE];
76 	struct crypto_shash *fallback_tfm;
77 };
78 
79 struct tegra_cmac_reqctx {
80 	struct scatterlist *src_sg;
81 	struct tegra_se_datbuf datbuf;
82 	struct tegra_se_datbuf residue;
83 	unsigned int total_len;
84 	unsigned int blk_size;
85 	unsigned int task;
86 	u32 crypto_config;
87 	u32 config;
88 	u32 key_id;
89 	u32 *iv;
90 	u32 result[CMAC_RESULT_REG_COUNT];
91 };
92 
93 /* Increment the big-endian 128-bit counter by 'nums' (number of blocks processed) */
94 static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums)
95 {
96 	do {
97 		--bits;
98 		nums += counter[bits];
99 		counter[bits] = nums & 0xff;
100 		nums >>= 8;
101 	} while (bits && nums);
102 }
103 
104 static void tegra_cbc_iv_copyback(struct skcipher_request *req, struct tegra_aes_ctx *ctx)
105 {
106 	struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
107 	unsigned int offset;
108 
109 	offset = req->cryptlen - ctx->ivsize;
110 
111 	if (rctx->encrypt)
112 		memcpy(req->iv, rctx->datbuf.buf + offset, ctx->ivsize);
113 	else
114 		scatterwalk_map_and_copy(req->iv, req->src, offset, ctx->ivsize, 0);
115 }
116 
117 static void tegra_aes_update_iv(struct skcipher_request *req, struct tegra_aes_ctx *ctx)
118 {
119 	int num;
120 
121 	if (ctx->alg == SE_ALG_CBC) {
122 		tegra_cbc_iv_copyback(req, ctx);
123 	} else if (ctx->alg == SE_ALG_CTR) {
124 		num = req->cryptlen / ctx->ivsize;
125 		if (req->cryptlen % ctx->ivsize)
126 			num++;
127 
128 		ctr_iv_inc(req->iv, ctx->ivsize, num);
129 	}
130 }
131 
132 static int tegra234_aes_crypto_cfg(u32 alg, bool encrypt)
133 {
134 	switch (alg) {
135 	case SE_ALG_CMAC:
136 	case SE_ALG_GMAC:
137 	case SE_ALG_GCM:
138 	case SE_ALG_GCM_FINAL:
139 		return 0;
140 	case SE_ALG_CBC:
141 		if (encrypt)
142 			return SE_CRYPTO_CFG_CBC_ENCRYPT;
143 		else
144 			return SE_CRYPTO_CFG_CBC_DECRYPT;
145 	case SE_ALG_ECB:
146 		if (encrypt)
147 			return SE_CRYPTO_CFG_ECB_ENCRYPT;
148 		else
149 			return SE_CRYPTO_CFG_ECB_DECRYPT;
150 	case SE_ALG_XTS:
151 		if (encrypt)
152 			return SE_CRYPTO_CFG_XTS_ENCRYPT;
153 		else
154 			return SE_CRYPTO_CFG_XTS_DECRYPT;
155 
156 	case SE_ALG_CTR:
157 		return SE_CRYPTO_CFG_CTR;
158 	case SE_ALG_CBC_MAC:
159 		return SE_CRYPTO_CFG_CBC_MAC;
160 
161 	default:
162 		break;
163 	}
164 
165 	return -EINVAL;
166 }
167 
168 static int tegra234_aes_cfg(u32 alg, bool encrypt)
169 {
170 	switch (alg) {
171 	case SE_ALG_CBC:
172 	case SE_ALG_ECB:
173 	case SE_ALG_XTS:
174 	case SE_ALG_CTR:
175 		if (encrypt)
176 			return SE_CFG_AES_ENCRYPT;
177 		else
178 			return SE_CFG_AES_DECRYPT;
179 
180 	case SE_ALG_GMAC:
181 		if (encrypt)
182 			return SE_CFG_GMAC_ENCRYPT;
183 		else
184 			return SE_CFG_GMAC_DECRYPT;
185 
186 	case SE_ALG_GCM:
187 		if (encrypt)
188 			return SE_CFG_GCM_ENCRYPT;
189 		else
190 			return SE_CFG_GCM_DECRYPT;
191 
192 	case SE_ALG_GCM_FINAL:
193 		if (encrypt)
194 			return SE_CFG_GCM_FINAL_ENCRYPT;
195 		else
196 			return SE_CFG_GCM_FINAL_DECRYPT;
197 
198 	case SE_ALG_CMAC:
199 		return SE_CFG_CMAC;
200 
201 	case SE_ALG_CBC_MAC:
202 		return SE_AES_ENC_ALG_AES_ENC |
203 		       SE_AES_DST_HASH_REG;
204 	}
205 	return -EINVAL;
206 }
207 
208 static unsigned int tegra_aes_prep_cmd(struct tegra_aes_ctx *ctx,
209 				       struct tegra_aes_reqctx *rctx)
210 {
211 	unsigned int data_count, res_bits, i = 0, j;
212 	struct tegra_se *se = ctx->se;
213 	u32 *cpuvaddr = se->cmdbuf->addr;
214 	dma_addr_t addr = rctx->datbuf.addr;
215 
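	/*
	 * Build the host1x command stream: program the IV (if any) into the
	 * linear counter registers, set the last-block/residual-bits info,
	 * write config, crypto_config and the source/destination addresses,
	 * kick off the operation and finally request a syncpoint increment
	 * so completion can be waited on.
	 */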
216 	data_count = rctx->len / AES_BLOCK_SIZE;
217 	res_bits = (rctx->len % AES_BLOCK_SIZE) * 8;
218 
219 	/*
220 	 * Hardware processes data_count + 1 blocks.
221 	 * Reduce 1 block if there is no residue
222 	 */
223 	if (!res_bits)
224 		data_count--;
225 
226 	if (rctx->iv) {
227 		cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
228 		cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
229 		for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
230 			cpuvaddr[i++] = rctx->iv[j];
231 	}
232 
233 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
234 	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
235 			SE_LAST_BLOCK_RES_BITS(res_bits);
236 
237 	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
238 	cpuvaddr[i++] = rctx->config;
239 	cpuvaddr[i++] = rctx->crypto_config;
240 
241 	/* Source address setting */
242 	cpuvaddr[i++] = lower_32_bits(addr);
243 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) | SE_ADDR_HI_SZ(rctx->len);
244 
245 	/* Destination address setting */
246 	cpuvaddr[i++] = lower_32_bits(addr);
247 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) |
248 			SE_ADDR_HI_SZ(rctx->len);
249 
250 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
251 	cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF |
252 			SE_AES_OP_START;
253 
254 	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
255 	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
256 			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
257 
258 	dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);
259 
260 	return i;
261 }
262 
263 static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
264 {
265 	struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
266 	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
267 	struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
268 	struct tegra_se *se = ctx->se;
269 	unsigned int cmdlen, key1_id, key2_id;
270 	int ret;
271 
272 	rctx->iv = (ctx->alg == SE_ALG_ECB) ? NULL : (u32 *)req->iv;
273 	rctx->len = req->cryptlen;
274 	key1_id = ctx->key1_id;
275 	key2_id = ctx->key2_id;
276 
277 	/* Pad input to AES Block size */
278 	if (ctx->alg != SE_ALG_XTS) {
279 		if (rctx->len % AES_BLOCK_SIZE)
280 			rctx->len += AES_BLOCK_SIZE - (rctx->len % AES_BLOCK_SIZE);
281 	}
282 
283 	rctx->datbuf.size = rctx->len;
284 	rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
285 					      &rctx->datbuf.addr, GFP_KERNEL);
286 	if (!rctx->datbuf.buf) {
287 		ret = -ENOMEM;
288 		goto out_finalize;
289 	}
290 
291 	scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0);
292 
293 	rctx->config = tegra234_aes_cfg(ctx->alg, rctx->encrypt);
294 	rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, rctx->encrypt);
295 
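	/*
	 * A zero key ID means no keyslot was reserved at setkey time; program
	 * the cached key into a reserved slot for this request only. It is
	 * invalidated again below once the request completes.
	 */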
296 	if (!key1_id) {
297 		ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key1,
298 						    ctx->keylen, ctx->alg, &key1_id);
299 		if (ret)
300 			goto out;
301 	}
302 
303 	rctx->crypto_config |= SE_AES_KEY_INDEX(key1_id);
304 
305 	if (ctx->alg == SE_ALG_XTS) {
306 		if (!key2_id) {
307 			ret = tegra_key_submit_reserved_xts(ctx->se, ctx->key2,
308 							    ctx->keylen, ctx->alg, &key2_id);
309 			if (ret)
310 				goto out;
311 		}
312 
313 		rctx->crypto_config |= SE_AES_KEY2_INDEX(key2_id);
314 	}
315 
316 	/* Prepare the command and submit for execution */
317 	cmdlen = tegra_aes_prep_cmd(ctx, rctx);
318 	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
319 
320 	/* Update the IV and copy back the result */
321 	tegra_aes_update_iv(req, ctx);
322 	scatterwalk_map_and_copy(rctx->datbuf.buf, req->dst, 0, req->cryptlen, 1);
323 
324 out:
325 	/* Free the buffer */
326 	dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
327 			  rctx->datbuf.buf, rctx->datbuf.addr);
328 
329 	if (tegra_key_is_reserved(key1_id))
330 		tegra_key_invalidate_reserved(ctx->se, key1_id, ctx->alg);
331 
332 	if (tegra_key_is_reserved(key2_id))
333 		tegra_key_invalidate_reserved(ctx->se, key2_id, ctx->alg);
334 
335 out_finalize:
336 	crypto_finalize_skcipher_request(se->engine, req, ret);
337 
338 	return 0;
339 }
340 
341 static int tegra_aes_cra_init(struct crypto_skcipher *tfm)
342 {
343 	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
344 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
345 	struct tegra_se_alg *se_alg;
346 	const char *algname;
347 	int ret;
348 
349 	se_alg = container_of(alg, struct tegra_se_alg, alg.skcipher.base);
350 
351 	crypto_skcipher_set_reqsize(tfm, sizeof(struct tegra_aes_reqctx));
352 
353 	ctx->ivsize = crypto_skcipher_ivsize(tfm);
354 	ctx->se = se_alg->se_dev;
355 	ctx->key1_id = 0;
356 	ctx->key2_id = 0;
357 	ctx->keylen = 0;
358 
359 	algname = crypto_tfm_alg_name(&tfm->base);
360 	ret = se_algname_to_algid(algname);
361 	if (ret < 0) {
362 		dev_err(ctx->se->dev, "invalid algorithm\n");
363 		return ret;
364 	}
365 
366 	ctx->alg = ret;
367 
368 	return 0;
369 }
370 
371 static void tegra_aes_cra_exit(struct crypto_skcipher *tfm)
372 {
373 	struct tegra_aes_ctx *ctx = crypto_tfm_ctx(&tfm->base);
374 
375 	if (ctx->key1_id)
376 		tegra_key_invalidate(ctx->se, ctx->key1_id, ctx->alg);
377 
378 	if (ctx->key2_id)
379 		tegra_key_invalidate(ctx->se, ctx->key2_id, ctx->alg);
380 }
381 
382 static int tegra_aes_setkey(struct crypto_skcipher *tfm,
383 			    const u8 *key, u32 keylen)
384 {
385 	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
386 	int ret;
387 
388 	if (aes_check_keylen(keylen)) {
389 		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
390 		return -EINVAL;
391 	}
392 
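	/*
	 * If no keyslot is available, cache the key so it can be programmed
	 * into a reserved slot when the request is actually run.
	 */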
393 	ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id);
394 	if (ret) {
395 		ctx->keylen = keylen;
396 		memcpy(ctx->key1, key, keylen);
397 	}
398 
399 	return 0;
400 }
401 
402 static int tegra_xts_setkey(struct crypto_skcipher *tfm,
403 			    const u8 *key, u32 keylen)
404 {
405 	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
406 	u32 len = keylen / 2;
407 	int ret;
408 
409 	ret = xts_verify_key(tfm, key, keylen);
410 	if (ret || aes_check_keylen(len)) {
411 		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
412 		return -EINVAL;
413 	}
414 
415 	ret = tegra_key_submit(ctx->se, key, len,
416 			       ctx->alg, &ctx->key1_id);
417 	if (ret) {
418 		ctx->keylen = len;
419 		memcpy(ctx->key1, key, len);
420 	}
421 
422 	ret = tegra_key_submit(ctx->se, key + len, len,
423 			       ctx->alg, &ctx->key2_id);
424 	if (ret) {
425 		ctx->keylen = len;
426 		memcpy(ctx->key2, key + len, len);
427 	}
428 
429 	return 0;
430 }
431 
432 static int tegra_aes_kac_manifest(u32 user, u32 alg, u32 keylen)
433 {
434 	int manifest;
435 
436 	manifest = SE_KAC_USER_NS;
437 
438 	switch (alg) {
439 	case SE_ALG_CBC:
440 	case SE_ALG_ECB:
441 	case SE_ALG_CTR:
442 		manifest |= SE_KAC_ENC;
443 		break;
444 	case SE_ALG_XTS:
445 		manifest |= SE_KAC_XTS;
446 		break;
447 	case SE_ALG_GCM:
448 		manifest |= SE_KAC_GCM;
449 		break;
450 	case SE_ALG_CMAC:
451 		manifest |= SE_KAC_CMAC;
452 		break;
453 	case SE_ALG_CBC_MAC:
454 		manifest |= SE_KAC_ENC;
455 		break;
456 	default:
457 		return -EINVAL;
458 	}
459 
460 	switch (keylen) {
461 	case AES_KEYSIZE_128:
462 		manifest |= SE_KAC_SIZE_128;
463 		break;
464 	case AES_KEYSIZE_192:
465 		manifest |= SE_KAC_SIZE_192;
466 		break;
467 	case AES_KEYSIZE_256:
468 		manifest |= SE_KAC_SIZE_256;
469 		break;
470 	default:
471 		return -EINVAL;
472 	}
473 
474 	return manifest;
475 }
476 
477 static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt)
479 {
480 	struct crypto_skcipher *tfm;
481 	struct tegra_aes_ctx *ctx;
482 	struct tegra_aes_reqctx *rctx;
483 
484 	tfm = crypto_skcipher_reqtfm(req);
485 	ctx  = crypto_skcipher_ctx(tfm);
486 	rctx = skcipher_request_ctx(req);
487 
488 	if (ctx->alg != SE_ALG_XTS) {
489 		if (!IS_ALIGNED(req->cryptlen, crypto_skcipher_blocksize(tfm))) {
490 			dev_dbg(ctx->se->dev, "invalid length (%d)\n", req->cryptlen);
491 			return -EINVAL;
492 		}
493 	} else if (req->cryptlen < XTS_BLOCK_SIZE) {
494 		dev_dbg(ctx->se->dev, "invalid length (%d)\n", req->cryptlen);
495 		return -EINVAL;
496 	}
497 
498 	if (!req->cryptlen)
499 		return 0;
500 
501 	rctx->encrypt = encrypt;
502 
503 	return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req);
504 }
505 
506 static int tegra_aes_encrypt(struct skcipher_request *req)
507 {
508 	return tegra_aes_crypt(req, true);
509 }
510 
511 static int tegra_aes_decrypt(struct skcipher_request *req)
512 {
513 	return tegra_aes_crypt(req, false);
514 }
515 
516 static struct tegra_se_alg tegra_aes_algs[] = {
517 	{
518 		.alg.skcipher.op.do_one_request	= tegra_aes_do_one_req,
519 		.alg.skcipher.base = {
520 			.init = tegra_aes_cra_init,
521 			.exit = tegra_aes_cra_exit,
522 			.setkey	= tegra_aes_setkey,
523 			.encrypt = tegra_aes_encrypt,
524 			.decrypt = tegra_aes_decrypt,
525 			.min_keysize = AES_MIN_KEY_SIZE,
526 			.max_keysize = AES_MAX_KEY_SIZE,
527 			.ivsize	= AES_BLOCK_SIZE,
528 			.base = {
529 				.cra_name = "cbc(aes)",
530 				.cra_driver_name = "cbc-aes-tegra",
531 				.cra_priority = 500,
532 				.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
533 				.cra_blocksize = AES_BLOCK_SIZE,
534 				.cra_ctxsize = sizeof(struct tegra_aes_ctx),
535 				.cra_alignmask = 0xf,
536 				.cra_module = THIS_MODULE,
537 			},
538 		}
539 	}, {
540 		.alg.skcipher.op.do_one_request	= tegra_aes_do_one_req,
541 		.alg.skcipher.base = {
542 			.init = tegra_aes_cra_init,
543 			.exit = tegra_aes_cra_exit,
544 			.setkey	= tegra_aes_setkey,
545 			.encrypt = tegra_aes_encrypt,
546 			.decrypt = tegra_aes_decrypt,
547 			.min_keysize = AES_MIN_KEY_SIZE,
548 			.max_keysize = AES_MAX_KEY_SIZE,
549 			.base = {
550 				.cra_name = "ecb(aes)",
551 				.cra_driver_name = "ecb-aes-tegra",
552 				.cra_priority = 500,
553 				.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
554 				.cra_blocksize = AES_BLOCK_SIZE,
555 				.cra_ctxsize = sizeof(struct tegra_aes_ctx),
556 				.cra_alignmask = 0xf,
557 				.cra_module = THIS_MODULE,
558 			},
559 		}
560 	}, {
561 		.alg.skcipher.op.do_one_request	= tegra_aes_do_one_req,
562 		.alg.skcipher.base = {
563 			.init = tegra_aes_cra_init,
564 			.exit = tegra_aes_cra_exit,
565 			.setkey = tegra_aes_setkey,
566 			.encrypt = tegra_aes_encrypt,
567 			.decrypt = tegra_aes_decrypt,
568 			.min_keysize = AES_MIN_KEY_SIZE,
569 			.max_keysize = AES_MAX_KEY_SIZE,
570 			.ivsize	= AES_BLOCK_SIZE,
571 			.base = {
572 				.cra_name = "ctr(aes)",
573 				.cra_driver_name = "ctr-aes-tegra",
574 				.cra_priority = 500,
575 				.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
576 				.cra_blocksize = 1,
577 				.cra_ctxsize = sizeof(struct tegra_aes_ctx),
578 				.cra_alignmask = 0xf,
579 				.cra_module = THIS_MODULE,
580 			},
581 		}
582 	}, {
583 		.alg.skcipher.op.do_one_request	= tegra_aes_do_one_req,
584 		.alg.skcipher.base = {
585 			.init = tegra_aes_cra_init,
586 			.exit = tegra_aes_cra_exit,
587 			.setkey	= tegra_xts_setkey,
588 			.encrypt = tegra_aes_encrypt,
589 			.decrypt = tegra_aes_decrypt,
590 			.min_keysize = 2 * AES_MIN_KEY_SIZE,
591 			.max_keysize = 2 * AES_MAX_KEY_SIZE,
592 			.ivsize	= AES_BLOCK_SIZE,
593 			.base = {
594 				.cra_name = "xts(aes)",
595 				.cra_driver_name = "xts-aes-tegra",
596 				.cra_priority = 500,
597 				.cra_blocksize = AES_BLOCK_SIZE,
598 				.cra_ctxsize	   = sizeof(struct tegra_aes_ctx),
599 				.cra_alignmask	   = (__alignof__(u64) - 1),
600 				.cra_module	   = THIS_MODULE,
601 			},
602 		}
603 	},
604 };
605 
606 static unsigned int tegra_gmac_prep_cmd(struct tegra_aead_ctx *ctx,
607 					struct tegra_aead_reqctx *rctx)
608 {
609 	unsigned int data_count, res_bits, i = 0;
610 	struct tegra_se *se = ctx->se;
611 	u32 *cpuvaddr = se->cmdbuf->addr;
612 
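	/*
	 * GMAC hashes only the associated data, so this is issued as a single
	 * INIT + FINAL operation with no destination buffer.
	 */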
613 	data_count = (rctx->assoclen / AES_BLOCK_SIZE);
614 	res_bits = (rctx->assoclen % AES_BLOCK_SIZE) * 8;
615 
616 	/*
617 	 * Hardware processes data_count + 1 blocks.
618 	 * Reduce 1 block if there is no residue
619 	 */
620 	if (!res_bits)
621 		data_count--;
622 
623 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
624 	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
625 			SE_LAST_BLOCK_RES_BITS(res_bits);
626 
627 	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 4);
628 	cpuvaddr[i++] = rctx->config;
629 	cpuvaddr[i++] = rctx->crypto_config;
630 	cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
631 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
632 			SE_ADDR_HI_SZ(rctx->assoclen);
633 
634 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
635 	cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
636 			SE_AES_OP_INIT | SE_AES_OP_LASTBUF |
637 			SE_AES_OP_START;
638 
639 	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
640 	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
641 			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
642 
643 	return i;
644 }
645 
646 static unsigned int tegra_gcm_crypt_prep_cmd(struct tegra_aead_ctx *ctx,
647 					     struct tegra_aead_reqctx *rctx)
648 {
649 	unsigned int data_count, res_bits, i = 0, j;
650 	struct tegra_se *se = ctx->se;
651 	u32 *cpuvaddr = se->cmdbuf->addr, op;
652 
653 	data_count = (rctx->cryptlen / AES_BLOCK_SIZE);
654 	res_bits = (rctx->cryptlen % AES_BLOCK_SIZE) * 8;
655 	op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
656 	     SE_AES_OP_LASTBUF | SE_AES_OP_START;
657 
658 	/*
659 	 * If there is no assoc data,
660 	 * this will be the init command
661 	 */
662 	if (!rctx->assoclen)
663 		op |= SE_AES_OP_INIT;
664 
665 	/*
666 	 * Hardware processes data_count + 1 blocks.
667 	 * Reduce 1 block if there is no residue
668 	 */
669 	if (!res_bits)
670 		data_count--;
671 
672 	cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
673 	cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
674 	for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
675 		cpuvaddr[i++] = rctx->iv[j];
676 
677 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
678 	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
679 			SE_LAST_BLOCK_RES_BITS(res_bits);
680 
681 	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
682 	cpuvaddr[i++] = rctx->config;
683 	cpuvaddr[i++] = rctx->crypto_config;
684 
685 	/* Source Address */
686 	cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
687 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
688 			SE_ADDR_HI_SZ(rctx->cryptlen);
689 
690 	/* Destination Address */
691 	cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
692 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
693 			SE_ADDR_HI_SZ(rctx->cryptlen);
694 
695 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
696 	cpuvaddr[i++] = op;
697 
698 	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
699 	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
700 			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
701 
702 	dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);
703 	return i;
704 }
705 
706 static int tegra_gcm_prep_final_cmd(struct tegra_se *se, u32 *cpuvaddr,
707 				    struct tegra_aead_reqctx *rctx)
708 {
709 	unsigned int i = 0, j;
710 	u32 op;
711 
712 	op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
713 	     SE_AES_OP_LASTBUF | SE_AES_OP_START;
714 
715 	/*
716 	 * Set init for zero sized vector
717 	 */
718 	if (!rctx->assoclen && !rctx->cryptlen)
719 		op |= SE_AES_OP_INIT;
720 
721 	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->aad_len, 2);
722 	cpuvaddr[i++] = rctx->assoclen * 8;
723 	cpuvaddr[i++] = 0;
724 
725 	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->cryp_msg_len, 2);
726 	cpuvaddr[i++] = rctx->cryptlen * 8;
727 	cpuvaddr[i++] = 0;
728 
729 	cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
730 	cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
731 	for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
732 		cpuvaddr[i++] = rctx->iv[j];
733 
734 	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
735 	cpuvaddr[i++] = rctx->config;
736 	cpuvaddr[i++] = rctx->crypto_config;
737 	cpuvaddr[i++] = 0;
738 	cpuvaddr[i++] = 0;
739 
740 	/* Destination Address */
741 	cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
742 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
743 			SE_ADDR_HI_SZ(0x10); /* HW always generates 128-bit tag */
744 
745 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
746 	cpuvaddr[i++] = op;
747 
748 	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
749 	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
750 			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
751 
752 	dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);
753 
754 	return i;
755 }
756 
757 static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
758 {
759 	struct tegra_se *se = ctx->se;
760 	unsigned int cmdlen;
761 
762 	scatterwalk_map_and_copy(rctx->inbuf.buf,
763 				 rctx->src_sg, 0, rctx->assoclen, 0);
764 
765 	rctx->config = tegra234_aes_cfg(SE_ALG_GMAC, rctx->encrypt);
766 	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GMAC, rctx->encrypt) |
767 			      SE_AES_KEY_INDEX(rctx->key_id);
768 
769 	cmdlen = tegra_gmac_prep_cmd(ctx, rctx);
770 
771 	return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
772 }
773 
774 static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
775 {
776 	struct tegra_se *se = ctx->se;
777 	int cmdlen, ret;
778 
779 	scatterwalk_map_and_copy(rctx->inbuf.buf, rctx->src_sg,
780 				 rctx->assoclen, rctx->cryptlen, 0);
781 
782 	rctx->config = tegra234_aes_cfg(SE_ALG_GCM, rctx->encrypt);
783 	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM, rctx->encrypt) |
784 			      SE_AES_KEY_INDEX(rctx->key_id);
785 
786 	/* Prepare command and submit */
787 	cmdlen = tegra_gcm_crypt_prep_cmd(ctx, rctx);
788 	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
789 	if (ret)
790 		return ret;
791 
792 	/* Copy the result */
793 	scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
794 				 rctx->assoclen, rctx->cryptlen, 1);
795 
796 	return 0;
797 }
798 
799 static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
800 {
801 	struct tegra_se *se = ctx->se;
802 	u32 *cpuvaddr = se->cmdbuf->addr;
803 	int cmdlen, ret, offset;
804 
805 	rctx->config = tegra234_aes_cfg(SE_ALG_GCM_FINAL, rctx->encrypt);
806 	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM_FINAL, rctx->encrypt) |
807 			      SE_AES_KEY_INDEX(rctx->key_id);
808 
809 	/* Prepare command and submit */
810 	cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx);
811 	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
812 	if (ret)
813 		return ret;
814 
815 	if (rctx->encrypt) {
816 		/* Copy the result */
817 		offset = rctx->assoclen + rctx->cryptlen;
818 		scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
819 					 offset, rctx->authsize, 1);
820 	}
821 
822 	return 0;
823 }
824 
825 static int tegra_gcm_do_verify(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
826 {
827 	unsigned int offset;
828 	u8 mac[16];
829 
830 	offset = rctx->assoclen + rctx->cryptlen;
831 	scatterwalk_map_and_copy(mac, rctx->src_sg, offset, rctx->authsize, 0);
832 
833 	if (crypto_memneq(rctx->outbuf.buf, mac, rctx->authsize))
834 		return -EBADMSG;
835 
836 	return 0;
837 }
838 
839 static inline int tegra_ccm_check_iv(const u8 *iv)
840 {
841 	/* iv[0] gives value of q-1
842 	 * 2 <= q <= 8 as per NIST 800-38C notation
843 	 * 2 <= L <= 8, so 1 <= L' <= 7. as per rfc 3610 notation
844 	 */
845 	if (iv[0] < 1 || iv[0] > 7) {
846 		pr_debug("ccm_check_iv failed %d\n", iv[0]);
847 		return -EINVAL;
848 	}
849 
850 	return 0;
851 }
852 
853 static unsigned int tegra_cbcmac_prep_cmd(struct tegra_aead_ctx *ctx,
854 					  struct tegra_aead_reqctx *rctx)
855 {
856 	unsigned int data_count, i = 0;
857 	struct tegra_se *se = ctx->se;
858 	u32 *cpuvaddr = se->cmdbuf->addr;
859 
860 	data_count = (rctx->inbuf.size / AES_BLOCK_SIZE) - 1;
861 
862 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
863 	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count);
864 
865 	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
866 	cpuvaddr[i++] = rctx->config;
867 	cpuvaddr[i++] = rctx->crypto_config;
868 
869 	cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
870 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
871 			SE_ADDR_HI_SZ(rctx->inbuf.size);
872 
873 	cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
874 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
875 			SE_ADDR_HI_SZ(0x10); /* HW always generates 128 bit tag */
876 
877 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
878 	cpuvaddr[i++] = SE_AES_OP_WRSTALL |
879 			SE_AES_OP_LASTBUF | SE_AES_OP_START;
880 
881 	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
882 	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
883 			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
884 
885 	return i;
886 }
887 
888 static unsigned int tegra_ctr_prep_cmd(struct tegra_aead_ctx *ctx,
889 				       struct tegra_aead_reqctx *rctx)
890 {
891 	unsigned int i = 0, j;
892 	struct tegra_se *se = ctx->se;
893 	u32 *cpuvaddr = se->cmdbuf->addr;
894 
895 	cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
896 	cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
897 	for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
898 		cpuvaddr[i++] = rctx->iv[j];
899 
900 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
901 	cpuvaddr[i++] = (rctx->inbuf.size / AES_BLOCK_SIZE) - 1;
902 	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
903 	cpuvaddr[i++] = rctx->config;
904 	cpuvaddr[i++] = rctx->crypto_config;
905 
906 	/* Source address setting */
907 	cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
908 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
909 			SE_ADDR_HI_SZ(rctx->inbuf.size);
910 
911 	/* Destination address setting */
912 	cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
913 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
914 			SE_ADDR_HI_SZ(rctx->inbuf.size);
915 
916 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
917 	cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF |
918 			SE_AES_OP_START;
919 
920 	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
921 	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
922 			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
923 
924 	dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n",
925 		rctx->config, rctx->crypto_config);
926 
927 	return i;
928 }
929 
930 static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
931 {
932 	struct tegra_se *se = ctx->se;
933 	int cmdlen;
934 
935 	rctx->config = tegra234_aes_cfg(SE_ALG_CBC_MAC, rctx->encrypt);
936 	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CBC_MAC,
937 						      rctx->encrypt) |
938 						      SE_AES_KEY_INDEX(rctx->key_id);
939 
940 	/* Prepare command and submit */
941 	cmdlen = tegra_cbcmac_prep_cmd(ctx, rctx);
942 
943 	return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
944 }
945 
946 static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
947 {
948 	__be32 data;
949 
950 	memset(block, 0, csize);
951 	block += csize;
952 
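	/*
	 * msglen is a 32-bit value, so at most the last four bytes of the
	 * length field can be non-zero; for shorter fields make sure the
	 * length actually fits.
	 */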
953 	if (csize >= 4)
954 		csize = 4;
955 	else if (msglen > (1 << (8 * csize)))
956 		return -EOVERFLOW;
957 
958 	data = cpu_to_be32(msglen);
959 	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
960 
961 	return 0;
962 }
963 
964 static int tegra_ccm_format_nonce(struct tegra_aead_reqctx *rctx, u8 *nonce)
965 {
966 	unsigned int q, t;
967 	u8 *q_ptr, *iv = (u8 *)rctx->iv;
968 
969 	memcpy(nonce, rctx->iv, 16);
970 
971 	/*** 1. Prepare Flags Octet ***/
972 
973 	/* Encode t (mac length) */
974 	t = rctx->authsize;
975 	nonce[0] |= (((t - 2) / 2) << 3);
976 
977 	/* Adata */
978 	if (rctx->assoclen)
979 		nonce[0] |= (1 << 6);
980 
981 	/*** Encode Q - message length ***/
982 	q = iv[0] + 1;
983 	q_ptr = nonce + 16 - q;
984 
985 	return tegra_ccm_set_msg_len(q_ptr, rctx->cryptlen, q);
986 }
987 
988 static int tegra_ccm_format_adata(u8 *adata, unsigned int a)
989 {
990 	int len = 0;
991 
992 	/* add control info for associated data
993 	 * RFC 3610 and NIST Special Publication 800-38C
994 	 */
995 	if (a < 65280) {
996 		*(__be16 *)adata = cpu_to_be16(a);
997 		len = 2;
998 	} else	{
999 		*(__be16 *)adata = cpu_to_be16(0xfffe);
1000 		*(__be32 *)&adata[2] = cpu_to_be32(a);
1001 		len = 6;
1002 	}
1003 
1004 	return len;
1005 }
1006 
1007 static int tegra_ccm_add_padding(u8 *buf, unsigned int len)
1008 {
1009 	unsigned int padlen = 16 - (len % 16);
1010 	u8 padding[16] = {0};
1011 
1012 	if (padlen == 16)
1013 		return 0;
1014 
1015 	memcpy(buf, padding, padlen);
1016 
1017 	return padlen;
1018 }
1019 
1020 static int tegra_ccm_format_blocks(struct tegra_aead_reqctx *rctx)
1021 {
1022 	unsigned int alen = 0, offset = 0;
1023 	u8 nonce[16], adata[16];
1024 	int ret;
1025 
1026 	ret = tegra_ccm_format_nonce(rctx, nonce);
1027 	if (ret)
1028 		return ret;
1029 
1030 	memcpy(rctx->inbuf.buf, nonce, 16);
1031 	offset = 16;
1032 
1033 	if (rctx->assoclen) {
1034 		alen = tegra_ccm_format_adata(adata, rctx->assoclen);
1035 		memcpy(rctx->inbuf.buf + offset, adata, alen);
1036 		offset += alen;
1037 
1038 		scatterwalk_map_and_copy(rctx->inbuf.buf + offset,
1039 					 rctx->src_sg, 0, rctx->assoclen, 0);
1040 
1041 		offset += rctx->assoclen;
1042 		offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset,
1043 					 rctx->assoclen + alen);
1044 	}
1045 
1046 	return offset;
1047 }
1048 
1049 static int tegra_ccm_mac_result(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
1050 {
1051 	u32 result[16];
1052 	int i, ret;
1053 
1054 	/* Read and clear Result */
1055 	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1056 		result[i] = readl(se->base + se->hw->regs->result + (i * 4));
1057 
1058 	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1059 		writel(0, se->base + se->hw->regs->result + (i * 4));
1060 
1061 	if (rctx->encrypt) {
1062 		memcpy(rctx->authdata, result, rctx->authsize);
1063 	} else {
1064 		ret = crypto_memneq(rctx->authdata, result, rctx->authsize);
1065 		if (ret)
1066 			return -EBADMSG;
1067 	}
1068 
1069 	return 0;
1070 }
1071 
1072 static int tegra_ccm_ctr_result(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
1073 {
1074 	/* Copy result */
1075 	scatterwalk_map_and_copy(rctx->outbuf.buf + 16, rctx->dst_sg,
1076 				 rctx->assoclen, rctx->cryptlen, 1);
1077 
1078 	if (rctx->encrypt)
1079 		scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
1080 					 rctx->assoclen + rctx->cryptlen,
1081 					 rctx->authsize, 1);
1082 	else
1083 		memcpy(rctx->authdata, rctx->outbuf.buf, rctx->authsize);
1084 
1085 	return 0;
1086 }
1087 
1088 static int tegra_ccm_compute_auth(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
1089 {
1090 	struct tegra_se *se = ctx->se;
1091 	struct scatterlist *sg;
1092 	int offset, ret;
1093 
1094 	offset = tegra_ccm_format_blocks(rctx);
1095 	if (offset < 0)
1096 		return -EINVAL;
1097 
1098 	/* Copy plain text to the buffer */
1099 	sg = rctx->encrypt ? rctx->src_sg : rctx->dst_sg;
1100 
1101 	scatterwalk_map_and_copy(rctx->inbuf.buf + offset,
1102 				 sg, rctx->assoclen,
1103 				 rctx->cryptlen, 0);
1104 	offset += rctx->cryptlen;
1105 	offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->cryptlen);
1106 
1107 	rctx->inbuf.size = offset;
1108 
1109 	ret = tegra_ccm_do_cbcmac(ctx, rctx);
1110 	if (ret)
1111 		return ret;
1112 
1113 	return tegra_ccm_mac_result(se, rctx);
1114 }
1115 
1116 static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
1117 {
1118 	struct tegra_se *se = ctx->se;
1119 	unsigned int cmdlen, offset = 0;
1120 	struct scatterlist *sg = rctx->src_sg;
1121 	int ret;
1122 
1123 	rctx->config = tegra234_aes_cfg(SE_ALG_CTR, rctx->encrypt);
1124 	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CTR, rctx->encrypt) |
1125 			      SE_AES_KEY_INDEX(rctx->key_id);
1126 
1127 	/* Place the authdata at the start of the buffer for encryption/decryption */
1128 	if (rctx->encrypt)
1129 		memcpy(rctx->inbuf.buf, rctx->authdata, rctx->authsize);
1130 	else
1131 		scatterwalk_map_and_copy(rctx->inbuf.buf, sg,
1132 					 rctx->assoclen + rctx->cryptlen,
1133 					 rctx->authsize, 0);
1134 
1135 	offset += rctx->authsize;
1136 	offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->authsize);
1137 
1138 	/* If there is no cryptlen, proceed to submit the task */
1139 	if (rctx->cryptlen) {
1140 		scatterwalk_map_and_copy(rctx->inbuf.buf + offset, sg,
1141 					 rctx->assoclen, rctx->cryptlen, 0);
1142 		offset += rctx->cryptlen;
1143 		offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->cryptlen);
1144 	}
1145 
1146 	rctx->inbuf.size = offset;
1147 
1148 	/* Prepare command and submit */
1149 	cmdlen = tegra_ctr_prep_cmd(ctx, rctx);
1150 	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
1151 	if (ret)
1152 		return ret;
1153 
1154 	return tegra_ccm_ctr_result(se, rctx);
1155 }
1156 
1157 static int tegra_ccm_crypt_init(struct aead_request *req, struct tegra_se *se,
1158 				struct tegra_aead_reqctx *rctx)
1159 {
1160 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1161 	u8 *iv = (u8 *)rctx->iv;
1162 	int ret, i;
1163 
1164 	rctx->src_sg = req->src;
1165 	rctx->dst_sg = req->dst;
1166 	rctx->assoclen = req->assoclen;
1167 	rctx->authsize = crypto_aead_authsize(tfm);
1168 
1169 	if (rctx->encrypt)
1170 		rctx->cryptlen = req->cryptlen;
1171 	else
1172 		rctx->cryptlen = req->cryptlen - rctx->authsize;
1173 
1174 	memcpy(iv, req->iv, 16);
1175 
1176 	ret = tegra_ccm_check_iv(iv);
1177 	if (ret)
1178 		return ret;
1179 
1180 	/* Note: rfc 3610 and NIST 800-38C require counter (ctr_0) of
1181 	 * zero to encrypt auth tag.
1182 	 * req->iv has the formatted ctr_0 (i.e. Flags || N || 0).
1183 	 */
1184 	memset(iv + 15 - iv[0], 0, iv[0] + 1);
1185 
1186 	/* Clear any previous result */
1187 	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1188 		writel(0, se->base + se->hw->regs->result + (i * 4));
1189 
1190 	return 0;
1191 }
1192 
1193 static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
1194 {
1195 	struct aead_request *req = container_of(areq, struct aead_request, base);
1196 	struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
1197 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1198 	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1199 	struct tegra_se *se = ctx->se;
1200 	int ret;
1201 
1202 	ret = tegra_ccm_crypt_init(req, se, rctx);
1203 	if (ret)
1204 		goto out_finalize;
1205 
1206 	rctx->key_id = ctx->key_id;
1207 
1208 	/* Allocate buffers required */
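	/*
	 * The extra 100 bytes leave headroom for the 16-byte B0 block, the
	 * encoded adata length and padding up to the AES block size.
	 */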
1209 	rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100;
1210 	rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size,
1211 					     &rctx->inbuf.addr, GFP_KERNEL);
1212 	if (!rctx->inbuf.buf) {
		ret = -ENOMEM;
1213 		goto out_finalize;
	}
1214 
1215 	rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100;
1216 	rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size,
1217 					      &rctx->outbuf.addr, GFP_KERNEL);
1218 	if (!rctx->outbuf.buf) {
1219 		ret = -ENOMEM;
1220 		goto out_free_inbuf;
1221 	}
1222 
1223 	if (!ctx->key_id) {
1224 		ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
1225 						    ctx->keylen, ctx->alg, &rctx->key_id);
1226 		if (ret)
1227 			goto out;
1228 	}
1229 
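	/*
	 * CCM encryption MACs the plaintext first and then encrypts it.
	 * Decryption must run CTR first to recover the plaintext (and the
	 * received MAC) before the CBC-MAC can be recomputed and compared.
	 */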
1230 	if (rctx->encrypt) {
1231 		/* CBC MAC Operation */
1232 		ret = tegra_ccm_compute_auth(ctx, rctx);
1233 		if (ret)
1234 			goto out;
1235 
1236 		/* CTR operation */
1237 		ret = tegra_ccm_do_ctr(ctx, rctx);
1238 		if (ret)
1239 			goto out;
1240 	} else {
1241 		/* CTR operation */
1242 		ret = tegra_ccm_do_ctr(ctx, rctx);
1243 		if (ret)
1244 			goto out;
1245 
1246 		/* CBC MAC Operation */
1247 		ret = tegra_ccm_compute_auth(ctx, rctx);
1248 		if (ret)
1249 			goto out;
1250 	}
1251 
1252 out:
1253 	dma_free_coherent(ctx->se->dev, rctx->outbuf.size,
1254 			  rctx->outbuf.buf, rctx->outbuf.addr);
1255 
1256 out_free_inbuf:
1257 	dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
1258 			  rctx->inbuf.buf, rctx->inbuf.addr);
1259 
1260 	if (tegra_key_is_reserved(rctx->key_id))
1261 		tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
1262 
1263 out_finalize:
1264 	crypto_finalize_aead_request(ctx->se->engine, req, ret);
1265 
1266 	return 0;
1267 }
1268 
1269 static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
1270 {
1271 	struct aead_request *req = container_of(areq, struct aead_request, base);
1272 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1273 	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1274 	struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
1275 	int ret;
1276 
1277 	rctx->src_sg = req->src;
1278 	rctx->dst_sg = req->dst;
1279 	rctx->assoclen = req->assoclen;
1280 	rctx->authsize = crypto_aead_authsize(tfm);
1281 
1282 	if (rctx->encrypt)
1283 		rctx->cryptlen = req->cryptlen;
1284 	else
1285 		rctx->cryptlen = req->cryptlen - ctx->authsize;
1286 
1287 	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
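	/* J0 = IV || 0^31 || 1 for a 96-bit IV; 1 << 24 is 1 in big-endian byte order */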
1288 	rctx->iv[3] = (1 << 24);
1289 
1290 	rctx->key_id = ctx->key_id;
1291 
1292 	/* Allocate buffers required */
1293 	rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen;
1294 	rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size,
1295 					     &rctx->inbuf.addr, GFP_KERNEL);
1296 	if (!rctx->inbuf.buf) {
1297 		ret = -ENOMEM;
1298 		goto out_finalize;
1299 	}
1300 
1301 	rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen;
1302 	rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size,
1303 					      &rctx->outbuf.addr, GFP_KERNEL);
1304 	if (!rctx->outbuf.buf) {
1305 		ret = -ENOMEM;
1306 		goto out_free_inbuf;
1307 	}
1308 
1309 	if (!ctx->key_id) {
1310 		ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
1311 						    ctx->keylen, ctx->alg, &rctx->key_id);
1312 		if (ret)
1313 			goto out;
1314 	}
1315 
1316 	/* If there is associated data perform GMAC operation */
1317 	if (rctx->assoclen) {
1318 		ret = tegra_gcm_do_gmac(ctx, rctx);
1319 		if (ret)
1320 			goto out;
1321 	}
1322 
1323 	/* GCM Encryption/Decryption operation */
1324 	if (rctx->cryptlen) {
1325 		ret = tegra_gcm_do_crypt(ctx, rctx);
1326 		if (ret)
1327 			goto out;
1328 	}
1329 
1330 	/* GCM_FINAL operation */
1331 	ret = tegra_gcm_do_final(ctx, rctx);
1332 	if (ret)
1333 		goto out;
1334 
1335 	if (!rctx->encrypt)
1336 		ret = tegra_gcm_do_verify(ctx->se, rctx);
1337 
1338 out:
1339 	dma_free_coherent(ctx->se->dev, rctx->outbuf.size,
1340 			  rctx->outbuf.buf, rctx->outbuf.addr);
1341 
1342 out_free_inbuf:
1343 	dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
1344 			  rctx->inbuf.buf, rctx->inbuf.addr);
1345 
1346 	if (tegra_key_is_reserved(rctx->key_id))
1347 		tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
1348 
1349 out_finalize:
1350 	crypto_finalize_aead_request(ctx->se->engine, req, ret);
1351 
1352 	return 0;
1353 }
1354 
1355 static int tegra_aead_cra_init(struct crypto_aead *tfm)
1356 {
1357 	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1358 	struct aead_alg *alg = crypto_aead_alg(tfm);
1359 	struct tegra_se_alg *se_alg;
1360 	const char *algname;
1361 	int ret;
1362 
1363 	algname = crypto_tfm_alg_name(&tfm->base);
1364 
1365 	se_alg = container_of(alg, struct tegra_se_alg, alg.aead.base);
1366 
1367 	crypto_aead_set_reqsize(tfm, sizeof(struct tegra_aead_reqctx));
1368 
1369 	ctx->se = se_alg->se_dev;
1370 	ctx->key_id = 0;
1371 	ctx->keylen = 0;
1372 
1373 	ret = se_algname_to_algid(algname);
1374 	if (ret < 0) {
1375 		dev_err(ctx->se->dev, "invalid algorithm\n");
1376 		return ret;
1377 	}
1378 
1379 	ctx->alg = ret;
1380 
1381 	return 0;
1382 }
1383 
1384 static int tegra_ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1385 {
1386 	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1387 
1388 	switch (authsize) {
1389 	case 4:
1390 	case 6:
1391 	case 8:
1392 	case 10:
1393 	case 12:
1394 	case 14:
1395 	case 16:
1396 		break;
1397 	default:
1398 		return -EINVAL;
1399 	}
1400 
1401 	ctx->authsize = authsize;
1402 
1403 	return 0;
1404 }
1405 
1406 static int tegra_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1407 {
1408 	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1409 	int ret;
1410 
1411 	ret = crypto_gcm_check_authsize(authsize);
1412 	if (ret)
1413 		return ret;
1414 
1415 	ctx->authsize = authsize;
1416 
1417 	return 0;
1418 }
1419 
1420 static void tegra_aead_cra_exit(struct crypto_aead *tfm)
1421 {
1422 	struct tegra_aead_ctx *ctx = crypto_tfm_ctx(&tfm->base);
1423 
1424 	if (ctx->key_id)
1425 		tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
1426 }
1427 
1428 static int tegra_aead_crypt(struct aead_request *req, bool encrypt)
1429 {
1430 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1431 	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1432 	struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
1433 
1434 	rctx->encrypt = encrypt;
1435 
1436 	return crypto_transfer_aead_request_to_engine(ctx->se->engine, req);
1437 }
1438 
1439 static int tegra_aead_encrypt(struct aead_request *req)
1440 {
1441 	return tegra_aead_crypt(req, true);
1442 }
1443 
1444 static int tegra_aead_decrypt(struct aead_request *req)
1445 {
1446 	return tegra_aead_crypt(req, false);
1447 }
1448 
1449 static int tegra_aead_setkey(struct crypto_aead *tfm,
1450 			     const u8 *key, u32 keylen)
1451 {
1452 	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1453 	int ret;
1454 
1455 	if (aes_check_keylen(keylen)) {
1456 		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
1457 		return -EINVAL;
1458 	}
1459 
1460 	ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
1461 	if (ret) {
1462 		ctx->keylen = keylen;
1463 		memcpy(ctx->key, key, keylen);
1464 	}
1465 
1466 	return 0;
1467 }
1468 
1469 static unsigned int tegra_cmac_prep_cmd(struct tegra_cmac_ctx *ctx,
1470 					struct tegra_cmac_reqctx *rctx)
1471 {
1472 	unsigned int data_count, res_bits = 0, i = 0, j;
1473 	struct tegra_se *se = ctx->se;
1474 	u32 *cpuvaddr = se->cmdbuf->addr, op;
1475 
1476 	data_count = (rctx->datbuf.size / AES_BLOCK_SIZE);
1477 
1478 	op = SE_AES_OP_WRSTALL | SE_AES_OP_START | SE_AES_OP_LASTBUF;
1479 
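	/*
	 * Only the final task lets the engine consume a partial block
	 * (residual bits) and apply CMAC finalization; intermediate updates
	 * always process whole blocks.
	 */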
1480 	if (!(rctx->task & SHA_UPDATE)) {
1481 		op |= SE_AES_OP_FINAL;
1482 		res_bits = (rctx->datbuf.size % AES_BLOCK_SIZE) * 8;
1483 	}
1484 
1485 	if (!res_bits && data_count)
1486 		data_count--;
1487 
1488 	if (rctx->task & SHA_FIRST) {
1489 		rctx->task &= ~SHA_FIRST;
1490 
1491 		cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
1492 		cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
1493 		/* Load 0 IV */
1494 		for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
1495 			cpuvaddr[i++] = 0;
1496 	}
1497 
1498 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
1499 	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
1500 			SE_LAST_BLOCK_RES_BITS(res_bits);
1501 
1502 	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
1503 	cpuvaddr[i++] = rctx->config;
1504 	cpuvaddr[i++] = rctx->crypto_config;
1505 
1506 	/* Source Address */
1507 	cpuvaddr[i++] = lower_32_bits(rctx->datbuf.addr);
1508 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->datbuf.addr)) |
1509 			SE_ADDR_HI_SZ(rctx->datbuf.size);
1510 	cpuvaddr[i++] = 0;
1511 	cpuvaddr[i++] = SE_ADDR_HI_SZ(AES_BLOCK_SIZE);
1512 
1513 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
1514 	cpuvaddr[i++] = op;
1515 
1516 	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
1517 	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
1518 			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
1519 
1520 	return i;
1521 }
1522 
1523 static void tegra_cmac_copy_result(struct tegra_se *se, struct tegra_cmac_reqctx *rctx)
1524 {
1525 	int i;
1526 
1527 	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1528 		rctx->result[i] = readl(se->base + se->hw->regs->result + (i * 4));
1529 }
1530 
1531 static void tegra_cmac_paste_result(struct tegra_se *se, struct tegra_cmac_reqctx *rctx)
1532 {
1533 	int i;
1534 
1535 	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1536 		writel(rctx->result[i],
1537 		       se->base + se->hw->regs->result + (i * 4));
1538 }
1539 
1540 static int tegra_cmac_do_init(struct ahash_request *req)
1541 {
1542 	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1543 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1544 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1545 	struct tegra_se *se = ctx->se;
1546 	int i;
1547 
1548 	rctx->total_len = 0;
1549 	rctx->datbuf.size = 0;
1550 	rctx->residue.size = 0;
1551 	rctx->key_id = ctx->key_id;
1552 	rctx->task |= SHA_FIRST;
1553 	rctx->blk_size = crypto_ahash_blocksize(tfm);
1554 
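	/* The residue buffer holds at most one full block that is held back for final() */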
1555 	rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
1556 					       &rctx->residue.addr, GFP_KERNEL);
1557 	if (!rctx->residue.buf)
1558 		return -ENOMEM;
1559 
1560 	rctx->residue.size = 0;
1561 
1562 	/* Clear any previous result */
1563 	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1564 		writel(0, se->base + se->hw->regs->result + (i * 4));
1565 
1566 	return 0;
1567 }
1568 
1569 static int tegra_cmac_do_update(struct ahash_request *req)
1570 {
1571 	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1572 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1573 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1574 	struct tegra_se *se = ctx->se;
1575 	unsigned int nblks, nresidue, cmdlen;
1576 	int ret;
1577 
1578 	if (!req->nbytes)
1579 		return 0;
1580 
1581 	nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size;
1582 	nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size;
1583 
1584 	/*
1585 	 * Hold the last full block back as residue so that CMAC finalization
1586 	 * (the subkey XOR) can be applied to it in final().
	 */
1587 	if (!nresidue && nblks) {
1588 		nresidue += rctx->blk_size;
1589 		nblks--;
1590 	}
1591 
1592 	rctx->src_sg = req->src;
1593 	rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue;
1594 	rctx->total_len += rctx->datbuf.size;
1595 	rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
1596 	rctx->crypto_config = SE_AES_KEY_INDEX(rctx->key_id);
1597 
1598 	/*
1599 	 * Less than one full block of data: stash it all in the residue
1600 	 * buffer and return; the bytes will be processed in final().
1601 	 */
1602 	if (nblks < 1) {
1603 		scatterwalk_map_and_copy(rctx->residue.buf + rctx->residue.size,
1604 					 rctx->src_sg, 0, req->nbytes, 0);
1605 
1606 		rctx->residue.size += req->nbytes;
1607 		return 0;
1608 	}
1609 
1610 	rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
1611 					      &rctx->datbuf.addr, GFP_KERNEL);
1612 	if (!rctx->datbuf.buf)
1613 		return -ENOMEM;
1614 
1615 	/* Copy the previous residue first */
1616 	if (rctx->residue.size)
1617 		memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
1618 
1619 	scatterwalk_map_and_copy(rctx->datbuf.buf + rctx->residue.size,
1620 				 rctx->src_sg, 0, req->nbytes - nresidue, 0);
1621 
1622 	scatterwalk_map_and_copy(rctx->residue.buf, rctx->src_sg,
1623 				 req->nbytes - nresidue, nresidue, 0);
1624 
1625 	/* Update residue value with the residue after current block */
1626 	rctx->residue.size = nresidue;
1627 
1628 	/*
1629 	 * If this is not the first task, paste the previous copied
1630 	 * intermediate results to the registers so that it gets picked up.
1631 	 */
1632 	if (!(rctx->task & SHA_FIRST))
1633 		tegra_cmac_paste_result(ctx->se, rctx);
1634 
1635 	cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
1636 	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
1637 
1638 	tegra_cmac_copy_result(ctx->se, rctx);
1639 
1640 	dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
1641 			  rctx->datbuf.buf, rctx->datbuf.addr);
1642 
1643 	return ret;
1644 }
1645 
1646 static int tegra_cmac_do_final(struct ahash_request *req)
1647 {
1648 	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1649 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1650 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1651 	struct tegra_se *se = ctx->se;
1652 	u32 *result = (u32 *)req->result;
1653 	int ret = 0, i, cmdlen;
1654 
1655 	if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) {
1656 		return crypto_shash_tfm_digest(ctx->fallback_tfm,
1657 					NULL, 0, req->result);
1658 	}
1659 
1660 	if (rctx->residue.size) {
1661 		rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size,
1662 						      &rctx->datbuf.addr, GFP_KERNEL);
1663 		if (!rctx->datbuf.buf) {
1664 			ret = -ENOMEM;
1665 			goto out_free;
1666 		}
1667 
1668 		memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
1669 	}
1670 
1671 	rctx->datbuf.size = rctx->residue.size;
1672 	rctx->total_len += rctx->residue.size;
1673 	rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
1674 
1675 	/*
1676 	 * If this is not the first task, paste the previous copied
1677 	 * intermediate results to the registers so that it gets picked up.
1678 	 */
1679 	if (!(rctx->task & SHA_FIRST))
1680 		tegra_cmac_paste_result(ctx->se, rctx);
1681 
1682 	/* Prepare command and submit */
1683 	cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
1684 	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
1685 	if (ret)
1686 		goto out;
1687 
1688 	/* Read and clear Result register */
1689 	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1690 		result[i] = readl(se->base + se->hw->regs->result + (i * 4));
1691 
1692 	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1693 		writel(0, se->base + se->hw->regs->result + (i * 4));
1694 
1695 out:
1696 	if (rctx->residue.size)
1697 		dma_free_coherent(se->dev, rctx->datbuf.size,
1698 				  rctx->datbuf.buf, rctx->datbuf.addr);
1699 out_free:
1700 	dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2,
1701 			  rctx->residue.buf, rctx->residue.addr);
1702 	return ret;
1703 }
1704 
1705 static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq)
1706 {
1707 	struct ahash_request *req = ahash_request_cast(areq);
1708 	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1709 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1710 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1711 	struct tegra_se *se = ctx->se;
1712 	int ret = 0;
1713 
1714 	if (rctx->task & SHA_INIT) {
1715 		ret = tegra_cmac_do_init(req);
1716 		if (ret)
1717 			goto out;
1718 
1719 		rctx->task &= ~SHA_INIT;
1720 	}
1721 
1722 	if (!ctx->key_id) {
1723 		ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
1724 						    ctx->keylen, ctx->alg, &rctx->key_id);
1725 		if (ret)
1726 			goto out;
1727 	}
1728 
1729 	if (rctx->task & SHA_UPDATE) {
1730 		ret = tegra_cmac_do_update(req);
1731 		if (ret)
1732 			goto out;
1733 
1734 		rctx->task &= ~SHA_UPDATE;
1735 	}
1736 
1737 	if (rctx->task & SHA_FINAL) {
1738 		ret = tegra_cmac_do_final(req);
1739 		if (ret)
1740 			goto out;
1741 
1742 		rctx->task &= ~SHA_FINAL;
1743 	}
1744 out:
1745 	if (tegra_key_is_reserved(rctx->key_id))
1746 		tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
1747 
1748 	crypto_finalize_hash_request(se->engine, req, ret);
1749 
1750 	return 0;
1751 }
1752 
1753 static void tegra_cmac_init_fallback(struct crypto_ahash *tfm, struct tegra_cmac_ctx *ctx,
1754 				     const char *algname)
1755 {
1756 	unsigned int statesize;
1757 
1758 	ctx->fallback_tfm = crypto_alloc_shash(algname, 0, CRYPTO_ALG_NEED_FALLBACK);
1759 
1760 	if (IS_ERR(ctx->fallback_tfm)) {
1761 		dev_warn(ctx->se->dev, "failed to allocate fallback for %s\n", algname);
1762 		ctx->fallback_tfm = NULL;
1763 		return;
1764 	}
1765 
1766 	statesize = crypto_shash_statesize(ctx->fallback_tfm);
1767 
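	/*
	 * Make sure the ahash state is large enough to also carry the
	 * fallback's exported state.
	 */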
1768 	if (statesize > sizeof(struct tegra_cmac_reqctx))
1769 		crypto_ahash_set_statesize(tfm, statesize);
1770 }
1771 
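/* Per-transform init: bind the SE device, resolve the algorithm ID and set up the fallback. */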
static int tegra_cmac_cra_init(struct crypto_tfm *tfm)
{
	struct tegra_cmac_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_ahash *ahash_tfm = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
	struct tegra_se_alg *se_alg;
	const char *algname;
	int ret;

	algname = crypto_tfm_alg_name(tfm);
	se_alg = container_of(alg, struct tegra_se_alg, alg.ahash.base);

	crypto_ahash_set_reqsize(ahash_tfm, sizeof(struct tegra_cmac_reqctx));

	ctx->se = se_alg->se_dev;
	ctx->key_id = 0;
	ctx->keylen = 0;

	ret = se_algname_to_algid(algname);
	if (ret < 0) {
		dev_err(ctx->se->dev, "invalid algorithm\n");
		return ret;
	}

	ctx->alg = ret;

	tegra_cmac_init_fallback(ahash_tfm, ctx, algname);

	return 0;
}

static void tegra_cmac_cra_exit(struct crypto_tfm *tfm)
{
	struct tegra_cmac_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->fallback_tfm)
		crypto_free_shash(ctx->fallback_tfm);

	tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
}

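/*
 * Set the CMAC key. The key is also fed to the software fallback (if any)
 * and submitted to a hardware keyslot; if that submission fails (e.g. no
 * free keyslot), the key is kept in the context and loaded into a reserved
 * slot when the request is actually processed.
 */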
static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	int ret;

	if (aes_check_keylen(keylen)) {
		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
		return -EINVAL;
	}

	if (ctx->fallback_tfm)
		crypto_shash_setkey(ctx->fallback_tfm, key, keylen);

	ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
	if (ret) {
		ctx->keylen = keylen;
		memcpy(ctx->key, key, keylen);
	}

	return 0;
}

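/*
 * The ahash entry points below only record the requested operation in the
 * request context and hand the request to the crypto engine; the hardware
 * work is done in tegra_cmac_do_one_req().
 */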
static int tegra_cmac_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

	rctx->task = SHA_INIT;

	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}

static int tegra_cmac_update(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

	rctx->task |= SHA_UPDATE;

	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}

static int tegra_cmac_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

	rctx->task |= SHA_FINAL;

	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}

static int tegra_cmac_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

	rctx->task |= SHA_UPDATE | SHA_FINAL;

	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}

static int tegra_cmac_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

	rctx->task |= SHA_INIT | SHA_UPDATE | SHA_FINAL;

	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}

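/* Export/import snapshot the whole request context. */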
static int tegra_cmac_export(struct ahash_request *req, void *out)
{
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

	memcpy(out, rctx, sizeof(*rctx));

	return 0;
}

static int tegra_cmac_import(struct ahash_request *req, const void *in)
{
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

	memcpy(rctx, in, sizeof(*rctx));

	return 0;
}

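/* AEAD (GCM/CCM) algorithm descriptors registered on the crypto engine */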
static struct tegra_se_alg tegra_aead_algs[] = {
	{
		.alg.aead.op.do_one_request = tegra_gcm_do_one_req,
		.alg.aead.base = {
			.init = tegra_aead_cra_init,
			.exit = tegra_aead_cra_exit,
			.setkey = tegra_aead_setkey,
			.setauthsize = tegra_gcm_setauthsize,
			.encrypt = tegra_aead_encrypt,
			.decrypt = tegra_aead_decrypt,
			.maxauthsize = AES_BLOCK_SIZE,
			.ivsize	= GCM_AES_IV_SIZE,
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-tegra",
				.cra_priority = 500,
				.cra_blocksize = 1,
				.cra_ctxsize = sizeof(struct tegra_aead_ctx),
				.cra_alignmask = 0xf,
				.cra_module = THIS_MODULE,
			},
		}
	}, {
		.alg.aead.op.do_one_request = tegra_ccm_do_one_req,
		.alg.aead.base = {
			.init = tegra_aead_cra_init,
			.exit = tegra_aead_cra_exit,
			.setkey	= tegra_aead_setkey,
			.setauthsize = tegra_ccm_setauthsize,
			.encrypt = tegra_aead_encrypt,
			.decrypt = tegra_aead_decrypt,
			.maxauthsize = AES_BLOCK_SIZE,
			.ivsize	= AES_BLOCK_SIZE,
			.chunksize = AES_BLOCK_SIZE,
			.base = {
				.cra_name = "ccm(aes)",
				.cra_driver_name = "ccm-aes-tegra",
				.cra_priority = 500,
				.cra_blocksize = 1,
				.cra_ctxsize = sizeof(struct tegra_aead_ctx),
				.cra_alignmask = 0xf,
				.cra_module = THIS_MODULE,
			},
		}
	}
};

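/* AES-CMAC ahash descriptor registered on the crypto engine */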
static struct tegra_se_alg tegra_cmac_algs[] = {
	{
		.alg.ahash.op.do_one_request = tegra_cmac_do_one_req,
		.alg.ahash.base = {
			.init = tegra_cmac_init,
			.setkey	= tegra_cmac_setkey,
			.update = tegra_cmac_update,
			.final = tegra_cmac_final,
			.finup = tegra_cmac_finup,
			.digest = tegra_cmac_digest,
			.export = tegra_cmac_export,
			.import = tegra_cmac_import,
			.halg.digestsize = AES_BLOCK_SIZE,
			.halg.statesize = sizeof(struct tegra_cmac_reqctx),
			.halg.base = {
				.cra_name = "cmac(aes)",
				.cra_driver_name = "tegra-se-cmac",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH,
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct tegra_cmac_ctx),
				.cra_alignmask = 0,
				.cra_module = THIS_MODULE,
				.cra_init = tegra_cmac_cra_init,
				.cra_exit = tegra_cmac_cra_exit,
			}
		}
	}
};

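/*
 * Register the AES skcipher, AEAD and CMAC algorithms with the crypto
 * engine. On any failure the algorithms registered so far are unregistered
 * in reverse order before the error is returned.
 */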
int tegra_init_aes(struct tegra_se *se)
{
	struct aead_engine_alg *aead_alg;
	struct ahash_engine_alg *ahash_alg;
	struct skcipher_engine_alg *sk_alg;
	int i, ret;

	se->manifest = tegra_aes_kac_manifest;

	for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++) {
		sk_alg = &tegra_aes_algs[i].alg.skcipher;
		tegra_aes_algs[i].se_dev = se;

		ret = crypto_engine_register_skcipher(sk_alg);
		if (ret) {
			dev_err(se->dev, "failed to register %s\n",
				sk_alg->base.base.cra_name);
			goto err_aes;
		}
	}

	for (i = 0; i < ARRAY_SIZE(tegra_aead_algs); i++) {
		aead_alg = &tegra_aead_algs[i].alg.aead;
		tegra_aead_algs[i].se_dev = se;

		ret = crypto_engine_register_aead(aead_alg);
		if (ret) {
			dev_err(se->dev, "failed to register %s\n",
				aead_alg->base.base.cra_name);
			goto err_aead;
		}
	}

	for (i = 0; i < ARRAY_SIZE(tegra_cmac_algs); i++) {
		ahash_alg = &tegra_cmac_algs[i].alg.ahash;
		tegra_cmac_algs[i].se_dev = se;

		ret = crypto_engine_register_ahash(ahash_alg);
		if (ret) {
			dev_err(se->dev, "failed to register %s\n",
				ahash_alg->base.halg.base.cra_name);
			goto err_cmac;
		}
	}

	return 0;

err_cmac:
	while (i--)
		crypto_engine_unregister_ahash(&tegra_cmac_algs[i].alg.ahash);

	i = ARRAY_SIZE(tegra_aead_algs);
err_aead:
	while (i--)
		crypto_engine_unregister_aead(&tegra_aead_algs[i].alg.aead);

	i = ARRAY_SIZE(tegra_aes_algs);
err_aes:
	while (i--)
		crypto_engine_unregister_skcipher(&tegra_aes_algs[i].alg.skcipher);

	return ret;
}

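/* Unregister everything that tegra_init_aes() registered. */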
void tegra_deinit_aes(struct tegra_se *se)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++)
		crypto_engine_unregister_skcipher(&tegra_aes_algs[i].alg.skcipher);

	for (i = 0; i < ARRAY_SIZE(tegra_aead_algs); i++)
		crypto_engine_unregister_aead(&tegra_aead_algs[i].alg.aead);

	for (i = 0; i < ARRAY_SIZE(tegra_cmac_algs); i++)
		crypto_engine_unregister_ahash(&tegra_cmac_algs[i].alg.ahash);
}