/**
 * AES GCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"


static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8           *in_key,
			      unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	case AES_KEYSIZE_192:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
		break;
	case AES_KEYSIZE_256:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
		break;
	default:
		return -EINVAL;
	}

	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA;
	memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len);

	return 0;
}
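
/*
 * Note that the key is loaded into two coprocessor parameter blocks: the
 * GCM CPB drives the combined cipher-and-MAC operations, while the GCA
 * CPB lets nx_gca() below hash the associated data as a separate pass.
 */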

static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8           *in_key,
				  unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
	char *nonce = nx_ctx->priv.gcm.nonce;
	int rc;

	if (key_len < 4)
		return -EINVAL;

	key_len -= 4;

	rc = gcm_aes_nx_set_key(tfm, in_key, key_len);
	if (rc)
		goto out;

	memcpy(nonce, in_key + key_len, 4);
out:
	return rc;
}
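
/*
 * A sketch of the rfc4106 key layout consumed above, assuming a 20-byte
 * key (AES-128 plus salt):
 *
 *	in_key[ 0..15]	AES key material, fed to gcm_aes_nx_set_key()
 *	in_key[16..19]	salt, saved and later used as the first 4 IV bytes
 *
 * 28- and 36-byte keys map onto AES-192 and AES-256 the same way.
 */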

static int gcm_aes_nx_setauthsize(struct crypto_aead *tfm,
				  unsigned int authsize)
{
	if (authsize > crypto_aead_alg(tfm)->maxauthsize)
		return -EINVAL;

	crypto_aead_crt(tfm)->authsize = authsize;

	return 0;
}

static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	crypto_aead_crt(tfm)->authsize = authsize;

	return 0;
}

static int nx_gca(struct nx_crypto_ctx  *nx_ctx,
		  struct aead_request   *req,
		  u8                    *out)
{
	int rc;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_ctx->in_sg;
	unsigned int nbytes = req->assoclen;
	unsigned int processed = 0, to_process;
	u32 max_sg_len;

	if (nbytes <= AES_BLOCK_SIZE) {
		scatterwalk_start(&walk, req->assoc);
		scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
		return 0;
	}

	NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
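
	/*
	 * Rough numbers, assuming 4 KiB NX pages and 16-byte struct nx_sg
	 * entries: max_sg_len <= 4096 / 16 = 256, so each pass of the loop
	 * below moves at most NX_PAGE_SIZE * 255 bytes (just under 1 MiB),
	 * further capped by nx_ctx->ap->databytelen.
	 */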

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen,
					  req->assoc, processed, to_process);
		nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
				req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
				csbcpb_aead->cpb.aes_gca.out_pat,
				AES_BLOCK_SIZE);
		NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);

	return rc;
}

static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *nx_sg;
	unsigned int nbytes = req->assoclen;
	unsigned int processed = 0, to_process;
	u32 max_sg_len;

	/* Set GMAC mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;

	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);

	/* Copy IV */
	memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen,
					  req->assoc, processed, to_process);
		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);
		/*
		 * GMAC authenticates only the AAD, so the data bit length
		 * stays zero and only the AAD bit length is supplied.
		 */
		csbcpb->cpb.aes_gcm.bit_length_data = 0;
		csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
			csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
			csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	return rc;
}

static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
		     int enc)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	char out[AES_BLOCK_SIZE];
	struct nx_sg *in_sg, *out_sg;

	/* For scenarios where the input message is zero length, AES CTR mode
	 * may be used. Set the source data to be a single block (16B) of all
	 * zeros, and set the input IV value to be the same as the GMAC IV
	 * value. - nx_wb 4.8.1.3 */

	/* Change to ECB mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
	memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key,
			sizeof(csbcpb->cpb.aes_ecb.key));
	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	/* Encrypt the counter/IV */
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
				 AES_BLOCK_SIZE, nx_ctx->ap->sglen);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, sizeof(out),
				  nx_ctx->ap->sglen);
	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

	/* Copy out the auth tag */
	memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out,
			crypto_aead_authsize(crypto_aead_reqtfm(req)));
out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;

	/*
	 * The ECB key occupies the same CPB region as the GCM AAD and
	 * counter fields, so it is safe to simply fill it with zeroes.
	 */
	memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key));

	return rc;
}
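
/*
 * Why one ECB encryption suffices in gcm_empty(): per NIST SP 800-38D
 * the tag is T = MSB_t(GHASH(H, A, C) ^ E_K(J0)). With empty AAD and
 * empty plaintext, GHASH processes a single all-zero length block and
 * returns 0^128, so T = MSB_t(E_K(J0)), i.e. just the encrypted
 * counter/IV block computed above.
 */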

static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct blkcipher_desc desc;
	unsigned int nbytes = req->cryptlen;
	unsigned int processed = 0, to_process;
	unsigned long irq_flags;
	u32 max_sg_len;
	int rc = -EINVAL;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	desc.info = nx_ctx->priv.gcm.iv;
	/*
	 * Initialize the counter: storing a u32 1 at the counter offset on
	 * this (big-endian) platform forms the block J0 = IV || 0^31 || 1.
	 */
	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;

	if (nbytes == 0) {
		if (req->assoclen == 0)
			rc = gcm_empty(req, &desc, enc);
		else
			rc = gmac(req, &desc);
		if (rc)
			goto out;
		else
			goto mac;
	}

	/* Process associated data */
	csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8;
	if (req->assoclen) {
		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad);
		if (rc)
			goto out;
	}

	/* Set flags for encryption */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	if (enc) {
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
		/* on decrypt, req->cryptlen includes the appended auth tag */
		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
	}

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
		desc.tfm = (struct crypto_blkcipher *) req->base.tfm;
		rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
				       req->src, to_process, processed,
				       csbcpb->cpb.aes_gcm.iv_or_cnt);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
			csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
			csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

mac:
	if (enc) {
		/* copy out the auth tag */
		scatterwalk_map_and_copy(csbcpb->cpb.aes_gcm.out_pat_or_mac,
				 req->dst, nbytes,
				 crypto_aead_authsize(crypto_aead_reqtfm(req)),
				 SCATTERWALK_TO_SG);
	} else {
		u8 *itag = nx_ctx->priv.gcm.iauth_tag;
		u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;

		scatterwalk_map_and_copy(itag, req->src, nbytes,
				 crypto_aead_authsize(crypto_aead_reqtfm(req)),
				 SCATTERWALK_FROM_SG);
		/* compare the tags in constant time */
		rc = crypto_memneq(itag, otag,
				   crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
		     -EBADMSG : 0;
	}
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

static int gcm_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	char *iv = nx_ctx->priv.gcm.iv;

	memcpy(iv, req->iv, 12);

	return gcm_aes_nx_crypt(req, 1);
}

static int gcm_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	char *iv = nx_ctx->priv.gcm.iv;

	memcpy(iv, req->iv, 12);

	return gcm_aes_nx_crypt(req, 0);
}

static int gcm4106_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	char *iv = nx_ctx->priv.gcm.iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	return gcm_aes_nx_crypt(req, 1);
}

static int gcm4106_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	char *iv = nx_ctx->priv.gcm.iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	return gcm_aes_nx_crypt(req, 0);
}
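
/*
 * A sketch of the 12-byte GCM IV assembled by the two rfc4106 entry
 * points above (see RFC 4106, section 4):
 *
 *	iv[0..3]	salt captured from the key at setkey time
 *	iv[4..11]	explicit, per-request IV taken from req->iv
 *
 * gcm_aes_nx_crypt() then appends the 4-byte block counter.
 */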

/* tell the block cipher walk routines that this is a stream cipher by
 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use
 * walk->blocksize, but instead uses the tfm's blocksize. */
struct crypto_alg nx_gcm_aes_alg = {
	.cra_name        = "gcm(aes)",
	.cra_driver_name = "gcm-aes-nx",
	.cra_priority    = 300,
	.cra_flags       = CRYPTO_ALG_TYPE_AEAD,
	.cra_blocksize   = 1,
	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
	.cra_type        = &crypto_aead_type,
	.cra_module      = THIS_MODULE,
	.cra_init        = nx_crypto_ctx_aes_gcm_init,
	.cra_exit        = nx_crypto_ctx_exit,
	.cra_aead = {
		.ivsize      = AES_BLOCK_SIZE,
		.maxauthsize = AES_BLOCK_SIZE,
		.setkey      = gcm_aes_nx_set_key,
		.setauthsize = gcm_aes_nx_setauthsize,
		.encrypt     = gcm_aes_nx_encrypt,
		.decrypt     = gcm_aes_nx_decrypt,
	}
};
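
/*
 * A minimal, illustrative sketch of driving "gcm(aes)" through the AEAD
 * API of this kernel generation; key, iv, sg_assoc and sg_data are
 * hypothetical placeholders and error handling is elided:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	struct aead_request *req = aead_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_aead_setkey(tfm, key, 16);
 *	crypto_aead_setauthsize(tfm, 16);
 *	aead_request_set_assoc(req, sg_assoc, assoclen);
 *	aead_request_set_crypt(req, sg_data, sg_data, datalen, iv);
 *	crypto_aead_encrypt(req);
 */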

struct crypto_alg nx_gcm4106_aes_alg = {
	.cra_name        = "rfc4106(gcm(aes))",
	.cra_driver_name = "rfc4106-gcm-aes-nx",
	.cra_priority    = 300,
	.cra_flags       = CRYPTO_ALG_TYPE_AEAD,
	.cra_blocksize   = 1,
	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
	.cra_type        = &crypto_nivaead_type,
	.cra_module      = THIS_MODULE,
	.cra_init        = nx_crypto_ctx_aes_gcm_init,
	.cra_exit        = nx_crypto_ctx_exit,
	.cra_aead = {
		.ivsize      = 8,
		.maxauthsize = AES_BLOCK_SIZE,
		.geniv       = "seqiv",
		.setkey      = gcm4106_aes_nx_set_key,
		.setauthsize = gcm4106_aes_nx_setauthsize,
		.encrypt     = gcm4106_aes_nx_encrypt,
		.decrypt     = gcm4106_aes_nx_decrypt,
	}
};