/**
 * AES GCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"


static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8 *in_key,
			      unsigned int key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	case AES_KEYSIZE_192:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
		break;
	case AES_KEYSIZE_256:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
		break;
	default:
		return -EINVAL;
	}

	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA;
	memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len);

	return 0;
}

static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8 *in_key,
				  unsigned int key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
	char *nonce = nx_ctx->priv.gcm.nonce;
	int rc;

	if (key_len < 4)
		return -EINVAL;

	key_len -= 4;

	rc = gcm_aes_nx_set_key(tfm, in_key, key_len);
	if (rc)
		goto out;

	memcpy(nonce, in_key + key_len, 4);
out:
	return rc;
}

static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int nx_gca(struct nx_crypto_ctx *nx_ctx,
		  struct aead_request *req,
		  u8 *out)
{
	int rc;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_ctx->in_sg;
	unsigned int nbytes = req->assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	if (nbytes <= AES_BLOCK_SIZE) {
		scatterwalk_start(&walk, req->src);
		scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
		return 0;
	}
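
	/*
	 * AAD larger than one block is digested by the GCA accelerator
	 * in chunks bounded by the sg list and data length limits,
	 * chaining out_pat back into in_pat between h-calls. Note that
	 * the op lengths below are computed as (start - end); the
	 * negative value appears to follow the driver-wide convention
	 * (see nx.c) that a negative length tells the hypervisor the
	 * parameter is a scatter/gather list, not a linear buffer.
	 */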

	NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->src, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
		       csbcpb_aead->cpb.aes_gca.out_pat,
		       AES_BLOCK_SIZE);
		NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);

	return rc;
}
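
/*
 * gmac() handles the AAD-only case (cryptlen == 0 but assoclen > 0):
 * the hardware is switched into GMAC mode so that the authentication
 * tag is computed over the associated data alone, and GCM mode is
 * restored on exit.
 */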
static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *nx_sg;
	unsigned int nbytes = req->assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	/* Set GMAC mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;

	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	/* Copy IV */
	memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->src, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
				   * sizeof(struct nx_sg);

		csbcpb->cpb.aes_gcm.bit_length_data = 0;
		csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
		       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
		       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	return rc;
}

static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
		     int enc)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	char out[AES_BLOCK_SIZE];
	struct nx_sg *in_sg, *out_sg;
	int len;

	/* For scenarios where the input message is zero length, AES CTR mode
	 * may be used. Set the source data to be a single block (16B) of all
	 * zeros, and set the input IV value to be the same as the GMAC IV
	 * value. - nx_wb 4.8.1.3 */

	/* Change to ECB mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
	memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key,
	       sizeof(csbcpb->cpb.aes_ecb.key));
	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	len = AES_BLOCK_SIZE;

	/* Encrypt the counter/IV */
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
				 &len, nx_ctx->ap->sglen);

	if (len != AES_BLOCK_SIZE)
		return -EINVAL;

	len = sizeof(out);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, &len,
				  nx_ctx->ap->sglen);

	if (len != sizeof(out))
		return -EINVAL;

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

	/* Copy the auth tag into the CPB's mac field; the crypt path
	 * copies it out to the request from there */
	memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out,
	       crypto_aead_authsize(crypto_aead_reqtfm(req)));
out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;

	/*
	 * The ECB key occupies the same CPB region as the GCM AAD and
	 * counter fields, so it's safe to just fill it with zeroes.
	 */
	memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key));

	return rc;
}
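
/*
 * Core GCM routine: zero-length payloads are handed off to gcm_empty()
 * (no AAD) or gmac() (AAD only); otherwise the AAD is digested via
 * nx_gca() and the payload is then en/decrypted in bounded chunks,
 * chaining the partial tag, S0 and counter state between h-calls.
 */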
static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct blkcipher_desc desc;
	unsigned int nbytes = req->cryptlen;
	unsigned int processed = 0, to_process;
	unsigned long irq_flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	desc.info = nx_ctx->priv.gcm.iv;
	/* initialize the counter */
	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;

	if (nbytes == 0) {
		if (req->assoclen == 0)
			rc = gcm_empty(req, &desc, enc);
		else
			rc = gmac(req, &desc);
		if (rc)
			goto out;
		else
			goto mac;
	}

	/* Process associated data */
	csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8;
	if (req->assoclen) {
		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad);
		if (rc)
			goto out;
	}

	/* Set flags for encryption */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	if (enc) {
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
	}

	do {
		to_process = nbytes - processed;

		csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
		desc.tfm = (struct crypto_blkcipher *) req->base.tfm;
		rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
				       req->src, &to_process,
				       processed + req->assoclen,
				       csbcpb->cpb.aes_gcm.iv_or_cnt);
		if (rc)
			goto out;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
		       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
		       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

mac:
	if (enc) {
		/* copy out the auth tag */
		scatterwalk_map_and_copy(
			csbcpb->cpb.aes_gcm.out_pat_or_mac,
			req->dst, req->assoclen + nbytes,
			crypto_aead_authsize(crypto_aead_reqtfm(req)),
			SCATTERWALK_TO_SG);
	} else {
		u8 *itag = nx_ctx->priv.gcm.iauth_tag;
		u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;

		scatterwalk_map_and_copy(
			itag, req->src, req->assoclen + nbytes,
			crypto_aead_authsize(crypto_aead_reqtfm(req)),
			SCATTERWALK_FROM_SG);
		rc = memcmp(itag, otag,
			    crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
		     -EBADMSG : 0;
	}
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
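
/*
 * IV construction: the gcm(aes) wrappers take the full 12-byte IV
 * from the request, while the rfc4106 wrappers build it from the
 * 4-byte nonce saved at setkey time plus the 8-byte per-request IV.
 * In both cases gcm_aes_nx_crypt() then appends the initial 32-bit
 * counter value of 1.
 */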
static int gcm_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	char *iv = nx_ctx->priv.gcm.iv;

	memcpy(iv, req->iv, 12);

	return gcm_aes_nx_crypt(req, 1);
}

static int gcm_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	char *iv = nx_ctx->priv.gcm.iv;

	memcpy(iv, req->iv, 12);

	return gcm_aes_nx_crypt(req, 0);
}

static int gcm4106_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	char *iv = nx_ctx->priv.gcm.iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	return gcm_aes_nx_crypt(req, 1);
}

static int gcm4106_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	char *iv = nx_ctx->priv.gcm.iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	return gcm_aes_nx_crypt(req, 0);
}

/* Tell the block cipher walk routines that this is a stream cipher by
 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use
 * walk->blocksize, but instead uses the tfm->blocksize. */
struct aead_alg nx_gcm_aes_alg = {
	.base = {
		.cra_name        = "gcm(aes)",
		.cra_driver_name = "gcm-aes-nx",
		.cra_priority    = 300,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_gcm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = 12,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = gcm_aes_nx_set_key,
	.encrypt     = gcm_aes_nx_encrypt,
	.decrypt     = gcm_aes_nx_decrypt,
};

struct aead_alg nx_gcm4106_aes_alg = {
	.base = {
		.cra_name        = "rfc4106(gcm(aes))",
		.cra_driver_name = "rfc4106-gcm-aes-nx",
		.cra_priority    = 300,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_gcm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = 8,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = gcm4106_aes_nx_set_key,
	.setauthsize = gcm4106_aes_nx_setauthsize,
	.encrypt     = gcm4106_aes_nx_encrypt,
	.decrypt     = gcm4106_aes_nx_decrypt,
};
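
/*
 * Both aead_alg structures are intentionally non-static: they are
 * presumably picked up and registered with the crypto API by the
 * driver core during probe (see nx.c in this driver).
 */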