// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Glue code for accelerated AES-GCM stitched implementation for ppc64le.
 *
 * Copyright 2022- IBM Inc. All rights reserved
 */

#include <linux/unaligned.h>
#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/gcm.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/types.h>

#define	PPC_ALIGN		16
#define GCM_IV_SIZE		12
#define RFC4106_NONCE_SIZE	4

MODULE_DESCRIPTION("PPC64le AES-GCM with Stitched implementation");
MODULE_AUTHOR("Danny Tsen <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("aes");

asmlinkage int aes_p10_set_encrypt_key(const u8 *userKey, const int bits,
				       void *key);
asmlinkage void aes_p10_encrypt(const u8 *in, u8 *out, const void *key);
asmlinkage void aes_p10_gcm_encrypt(u8 *in, u8 *out, size_t len,
				    void *rkey, u8 *iv, void *Xi);
asmlinkage void aes_p10_gcm_decrypt(u8 *in, u8 *out, size_t len,
				    void *rkey, u8 *iv, void *Xi);
asmlinkage void gcm_init_htable(unsigned char htable[], unsigned char Xi[]);
asmlinkage void gcm_ghash_p10(unsigned char *Xi, unsigned char *Htable,
			      unsigned char *aad, unsigned int alen);
asmlinkage void gcm_update(u8 *iv, void *Xi);

struct aes_key {
	u8 key[AES_MAX_KEYLENGTH];
	u64 rounds;
};

struct gcm_ctx {
	u8 iv[16];
	u8 ivtag[16];
	u8 aad_hash[16];
	u64 aadLen;
	u64 Plen;	/* offset 56 - used in aes_p10_gcm_{en/de}crypt */
	u8 pblock[16];
};
struct Hash_ctx {
	u8 H[16];	/* subkey */
	u8 Htable[256];	/* Xi, Hash table(offset 32) */
};

struct p10_aes_gcm_ctx {
	struct aes_key enc_key;
	u8 nonce[RFC4106_NONCE_SIZE];
};

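/*
 * The stitched assembly uses VSX registers.  These helpers bracket every
 * call into it: preemption and page faults must stay disabled while the
 * kernel thread borrows the vector unit.
 */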
static void vsx_begin(void)
{
	preempt_disable();
	pagefault_disable();
	enable_kernel_vsx();
}

static void vsx_end(void)
{
	disable_kernel_vsx();
	pagefault_enable();
	preempt_enable();
}

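/*
 * Convert the encrypted all-zero block (the hash subkey H) from the
 * big-endian byte order produced by AES into the two host-endian 64-bit
 * halves that the gcm_init_htable() assembly consumes.
 */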
static void set_subkey(unsigned char *hash)
{
	*(u64 *)&hash[0] = be64_to_cpup((__be64 *)&hash[0]);
	*(u64 *)&hash[8] = be64_to_cpup((__be64 *)&hash[8]);
}

/*
 * Process the AAD, if any:
 *   - hash the AAD and copy the result to Xi.
 */
static void set_aad(struct gcm_ctx *gctx, struct Hash_ctx *hash,
		    unsigned char *aad, int alen)
{
	int i;
	u8 nXi[16] = {0, };

	gctx->aadLen = alen;
	i = alen & ~0xf;
	if (i) {
		gcm_ghash_p10(nXi, hash->Htable+32, aad, i);
		aad += i;
		alen -= i;
	}
	if (alen) {
		for (i = 0; i < alen; i++)
			nXi[i] ^= aad[i];

		memset(gctx->aad_hash, 0, 16);
		gcm_ghash_p10(gctx->aad_hash, hash->Htable+32, nXi, 16);
	} else {
		memcpy(gctx->aad_hash, nXi, 16);
	}

	memcpy(hash->Htable, gctx->aad_hash, 16);
}

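/*
 * One-time GCM setup for a request: derive the hash subkey H by encrypting
 * the all-zero block (hash->H is zeroed by the caller), build the GHASH
 * table, encrypt the counter block J0 (96-bit IV || 0x00000001) to get the
 * value folded into the final tag, advance the counter to 2 for the payload,
 * and hash the AAD.
 */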
static void gcmp10_init(struct gcm_ctx *gctx, u8 *iv, unsigned char *rdkey,
			struct Hash_ctx *hash, u8 *assoc, unsigned int assoclen)
{
	__be32 counter = cpu_to_be32(1);

	aes_p10_encrypt(hash->H, hash->H, rdkey);
	set_subkey(hash->H);
	gcm_init_htable(hash->Htable+32, hash->H);

	*((__be32 *)(iv+12)) = counter;

	gctx->Plen = 0;

	/*
	 * Encrypt counter vector as iv tag and increment counter.
	 */
	aes_p10_encrypt(iv, gctx->ivtag, rdkey);

	counter = cpu_to_be32(2);
	*((__be32 *)(iv+12)) = counter;
	memcpy(gctx->iv, iv, 16);

	gctx->aadLen = assoclen;
	memset(gctx->aad_hash, 0, 16);
	if (assoclen)
		set_aad(gctx, hash, assoc, assoclen);
}

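/*
 * Produce the final tag in hash->Htable: GHASH the 128-bit block holding the
 * AAD and ciphertext bit lengths, then XOR in E_K(J0) (gctx->ivtag).  When
 * both AAD and payload are empty, the tag reduces to E_K(J0) itself.
 */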
static void finish_tag(struct gcm_ctx *gctx, struct Hash_ctx *hash, int len)
{
	int i;
	unsigned char len_ac[16 + PPC_ALIGN];
	unsigned char *aclen = PTR_ALIGN((void *)len_ac, PPC_ALIGN);
	__be64 clen = cpu_to_be64(len << 3);
	__be64 alen = cpu_to_be64(gctx->aadLen << 3);

	if (len == 0 && gctx->aadLen == 0) {
		memcpy(hash->Htable, gctx->ivtag, 16);
		return;
	}

	/*
	 * Len is in bits.
	 */
	*((__be64 *)(aclen)) = alen;
	*((__be64 *)(aclen+8)) = clen;

	/*
	 * hash (AAD len and len)
	 */
	gcm_ghash_p10(hash->Htable, hash->Htable+32, aclen, 16);

	for (i = 0; i < 16; i++)
		hash->Htable[i] ^= gctx->ivtag[i];
}

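/*
 * Accept the tag lengths GCM permits: 4, 8, and 12 through 16 bytes.
 */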
static int set_authsize(struct crypto_aead *tfm, unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

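/*
 * Expand the AES key with the P10 assembly.  The key schedule is assumed to
 * use VSX registers itself, hence the vsx_begin()/vsx_end() bracket.
 */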
static int p10_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			      unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct p10_aes_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	vsx_begin();
	ret = aes_p10_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
	vsx_end();

	return ret ? -EINVAL : 0;
}

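/*
 * Common encrypt/decrypt path.  Per-request GCM and hash state live in
 * 16-byte-aligned on-stack buffers; the AAD is linearized (copied to the
 * heap if not already contiguous), the payload is processed through a
 * skcipher walk, and the tag is either appended to the destination (encrypt)
 * or compared against the one in the source (decrypt).
 */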
static int p10_aes_gcm_crypt(struct aead_request *req, u8 *riv,
			     int assoclen, int enc)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct p10_aes_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
	u8 databuf[sizeof(struct gcm_ctx) + PPC_ALIGN];
	struct gcm_ctx *gctx = PTR_ALIGN((void *)databuf, PPC_ALIGN);
	u8 hashbuf[sizeof(struct Hash_ctx) + PPC_ALIGN];
	struct Hash_ctx *hash = PTR_ALIGN((void *)hashbuf, PPC_ALIGN);
	struct skcipher_walk walk;
	u8 *assocmem = NULL;
	u8 *assoc;
	unsigned int cryptlen = req->cryptlen;
	unsigned char ivbuf[AES_BLOCK_SIZE+PPC_ALIGN];
	unsigned char *iv = PTR_ALIGN((void *)ivbuf, PPC_ALIGN);
	int ret;
	unsigned long auth_tag_len = crypto_aead_authsize(__crypto_aead_cast(tfm));
	u8 otag[16];
	int total_processed = 0;
	int nbytes;

	memset(databuf, 0, sizeof(databuf));
	memset(hashbuf, 0, sizeof(hashbuf));
	memset(ivbuf, 0, sizeof(ivbuf));
	memcpy(iv, riv, GCM_IV_SIZE);

	/* Linearize assoc, if not already linear */
	if (req->src->length >= assoclen && req->src->length) {
		assoc = sg_virt(req->src); /* ppc64 is !HIGHMEM */
	} else {
		gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			      GFP_KERNEL : GFP_ATOMIC;

		/* assoc can be any length, so must be on heap */
		assocmem = kmalloc(assoclen, flags);
		if (unlikely(!assocmem))
			return -ENOMEM;
		assoc = assocmem;

		scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
	}

	vsx_begin();
	gcmp10_init(gctx, iv, (unsigned char *) &ctx->enc_key, hash, assoc, assoclen);
	vsx_end();

	kfree(assocmem);

	if (enc)
		ret = skcipher_walk_aead_encrypt(&walk, req, false);
	else
		ret = skcipher_walk_aead_decrypt(&walk, req, false);
	if (ret)
		return ret;

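	/*
	 * Bulk pass: each chunk from the walk runs through the stitched
	 * AES-CTR + GHASH assembly inside one VSX section.  A tail shorter
	 * than a block is bounced through an on-stack buffer, presumably so
	 * the assembly can always touch a full AES_BLOCK_SIZE of memory.
	 */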
	while ((nbytes = walk.nbytes) > 0 && ret == 0) {
		u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;
		u8 buf[AES_BLOCK_SIZE];

		if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE))
			src = dst = memcpy(buf, src, nbytes);

		vsx_begin();
		if (enc)
			aes_p10_gcm_encrypt(src, dst, nbytes,
					    &ctx->enc_key, gctx->iv, hash->Htable);
		else
			aes_p10_gcm_decrypt(src, dst, nbytes,
					    &ctx->enc_key, gctx->iv, hash->Htable);

		if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE))
			memcpy(walk.dst.virt.addr, buf, nbytes);

		vsx_end();

		total_processed += walk.nbytes;
		ret = skcipher_walk_done(&walk, 0);
	}

	if (ret)
		return ret;

	/* Finalize hash */
	vsx_begin();
	gcm_update(gctx->iv, hash->Htable);
	finish_tag(gctx, hash, total_processed);
	vsx_end();

	/* copy Xi to end of dst */
	if (enc) {
		scatterwalk_map_and_copy(hash->Htable, req->dst, req->assoclen + cryptlen,
					 auth_tag_len, 1);
	} else {
		scatterwalk_map_and_copy(otag, req->src,
					 req->assoclen + cryptlen - auth_tag_len,
					 auth_tag_len, 0);

		if (crypto_memneq(otag, hash->Htable, auth_tag_len)) {
			memzero_explicit(hash->Htable, 16);
			return -EBADMSG;
		}
	}

	return 0;
}

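/*
 * RFC 4106 key material is the AES key followed by a 4-byte nonce (salt);
 * split the salt off and keep it for IV construction.
 */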
static int rfc4106_setkey(struct crypto_aead *tfm, const u8 *inkey,
			  unsigned int keylen)
{
	struct p10_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
	int err;

	keylen -= RFC4106_NONCE_SIZE;
	err = p10_aes_gcm_setkey(tfm, inkey, keylen);
	if (err)
		return err;

	memcpy(ctx->nonce, inkey + keylen, RFC4106_NONCE_SIZE);
	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	return crypto_rfc4106_check_authsize(authsize);
}

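/*
 * For RFC 4106 the 96-bit GCM IV is the 4-byte salt from setkey followed by
 * the 8-byte explicit IV carried in the request.  req->assoclen includes the
 * explicit IV, so it is subtracted before entering the common crypt path.
 */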
static int rfc4106_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct p10_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);
	u8 iv[AES_BLOCK_SIZE];

	memcpy(iv, ctx->nonce, RFC4106_NONCE_SIZE);
	memcpy(iv + RFC4106_NONCE_SIZE, req->iv, GCM_RFC4106_IV_SIZE);

	return crypto_ipsec_check_assoclen(req->assoclen) ?:
	       p10_aes_gcm_crypt(req, iv, req->assoclen - GCM_RFC4106_IV_SIZE, 1);
}

static int rfc4106_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct p10_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);
	u8 iv[AES_BLOCK_SIZE];

	memcpy(iv, ctx->nonce, RFC4106_NONCE_SIZE);
	memcpy(iv + RFC4106_NONCE_SIZE, req->iv, GCM_RFC4106_IV_SIZE);

	return crypto_ipsec_check_assoclen(req->assoclen) ?:
	       p10_aes_gcm_crypt(req, iv, req->assoclen - GCM_RFC4106_IV_SIZE, 0);
}

static int p10_aes_gcm_encrypt(struct aead_request *req)
{
	return p10_aes_gcm_crypt(req, req->iv, req->assoclen, 1);
}

static int p10_aes_gcm_decrypt(struct aead_request *req)
{
	return p10_aes_gcm_crypt(req, req->iv, req->assoclen, 0);
}

static struct aead_alg gcm_aes_algs[] = {{
	.ivsize			= GCM_IV_SIZE,
	.maxauthsize		= 16,

	.setauthsize		= set_authsize,
	.setkey			= p10_aes_gcm_setkey,
	.encrypt		= p10_aes_gcm_encrypt,
	.decrypt		= p10_aes_gcm_decrypt,

	.base.cra_name		= "__gcm(aes)",
	.base.cra_driver_name	= "__aes_gcm_p10",
	.base.cra_priority	= 2100,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct p10_aes_gcm_ctx)+
				  4 * sizeof(u64[2]),
	.base.cra_module	= THIS_MODULE,
	.base.cra_flags		= CRYPTO_ALG_INTERNAL,
}, {
	.ivsize			= GCM_RFC4106_IV_SIZE,
	.maxauthsize		= 16,
	.setkey			= rfc4106_setkey,
	.setauthsize		= rfc4106_setauthsize,
	.encrypt		= rfc4106_encrypt,
	.decrypt		= rfc4106_decrypt,

	.base.cra_name		= "__rfc4106(gcm(aes))",
	.base.cra_driver_name	= "__rfc4106_aes_gcm_p10",
	.base.cra_priority	= 2100,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct p10_aes_gcm_ctx) +
				  4 * sizeof(u64[2]),
	.base.cra_module	= THIS_MODULE,
	.base.cra_flags		= CRYPTO_ALG_INTERNAL,
}};

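/*
 * The "__" prefixed algorithms above are internal-only (CRYPTO_ALG_INTERNAL):
 * they assume the vector unit is usable.  simd_register_aeads_compat() wraps
 * each in a SIMD helper registered under the public name with the prefix
 * stripped ("gcm(aes)", "rfc4106(gcm(aes))"), so callers reach this code
 * through the usual AEAD API, e.g. crypto_alloc_aead("gcm(aes)", 0, 0).
 */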
static struct simd_aead_alg *p10_simd_aeads[ARRAY_SIZE(gcm_aes_algs)];

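/*
 * Register only on CPUs that implement Power ISA v3.1 (Power10); otherwise
 * load as a no-op so the module remains harmless on older hardware.
 */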
static int __init p10_init(void)
{
	int ret;

	if (!cpu_has_feature(CPU_FTR_ARCH_31))
		return 0;

	ret = simd_register_aeads_compat(gcm_aes_algs,
					 ARRAY_SIZE(gcm_aes_algs),
					 p10_simd_aeads);
	if (ret) {
		simd_unregister_aeads(gcm_aes_algs, ARRAY_SIZE(gcm_aes_algs),
				      p10_simd_aeads);
		return ret;
	}
	return 0;
}

static void __exit p10_exit(void)
{
	simd_unregister_aeads(gcm_aes_algs, ARRAY_SIZE(gcm_aes_algs),
			      p10_simd_aeads);
}

module_init(p10_init);
module_exit(p10_exit);