// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) SHA crypto API support
 *
 * Copyright (C) 2013,2018 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"

static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
	unsigned int digest_size = crypto_ahash_digestsize(tfm);

	if (ret)
		goto e_free;

	if (rctx->hash_rem) {
		/* Save remaining data to buffer */
		unsigned int offset = rctx->nbytes - rctx->hash_rem;

		scatterwalk_map_and_copy(rctx->buf, rctx->src,
					 offset, rctx->hash_rem, 0);
		rctx->buf_count = rctx->hash_rem;
	} else {
		rctx->buf_count = 0;
	}

	/* Update result area if supplied */
	if (req->result && rctx->final)
		memcpy(req->result, rctx->ctx, digest_size);

e_free:
	sg_free_table(&rctx->data_sg);

	return ret;
}

static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
			     unsigned int final)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct scatterlist *sg;
	unsigned int block_size =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int sg_count;
	gfp_t gfp;
	u64 len;
	int ret;

	len = (u64)rctx->buf_count + (u64)nbytes;

	if (!final && (len <= block_size)) {
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
					 0, nbytes, 0);
		rctx->buf_count += nbytes;

		return 0;
	}

	rctx->src = req->src;
	rctx->nbytes = nbytes;

	rctx->final = final;
	rctx->hash_rem = final ? 0 : len & (block_size - 1);
	rctx->hash_cnt = len - rctx->hash_rem;
	if (!final && !rctx->hash_rem) {
		/* CCP can't do zero length final, so keep some data around */
		rctx->hash_cnt -= block_size;
		rctx->hash_rem = block_size;
	}

	/* Initialize the context scatterlist */
	sg_init_one(&rctx->ctx_sg, rctx->ctx, sizeof(rctx->ctx));
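
	/*
	 * Select the source scatterlist: any data buffered from a
	 * previous partial update must be hashed ahead of the new
	 * request data, so the two pieces are chained into a single
	 * table when both are present.
	 */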
	sg = NULL;
	if (rctx->buf_count && nbytes) {
		/* Build the data scatterlist table - allocate enough entries
		 * for both data pieces (buffer and input data)
		 */
		gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
			GFP_KERNEL : GFP_ATOMIC;
		sg_count = sg_nents(req->src) + 1;
		ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
		if (ret)
			return ret;

		sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
		sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
		if (!sg) {
			ret = -EINVAL;
			goto e_free;
		}
		sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
		if (!sg) {
			ret = -EINVAL;
			goto e_free;
		}
		sg_mark_end(sg);

		sg = rctx->data_sg.sgl;
	} else if (rctx->buf_count) {
		sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);

		sg = &rctx->buf_sg;
	} else if (nbytes) {
		sg = req->src;
	}

	rctx->msg_bits += (rctx->hash_cnt << 3);	/* Total in bits */

	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_SHA;
	rctx->cmd.u.sha.type = rctx->type;
	rctx->cmd.u.sha.ctx = &rctx->ctx_sg;

	switch (rctx->type) {
	case CCP_SHA_TYPE_1:
		rctx->cmd.u.sha.ctx_len = SHA1_DIGEST_SIZE;
		break;
	case CCP_SHA_TYPE_224:
		rctx->cmd.u.sha.ctx_len = SHA224_DIGEST_SIZE;
		break;
	case CCP_SHA_TYPE_256:
		rctx->cmd.u.sha.ctx_len = SHA256_DIGEST_SIZE;
		break;
	case CCP_SHA_TYPE_384:
		rctx->cmd.u.sha.ctx_len = SHA384_DIGEST_SIZE;
		break;
	case CCP_SHA_TYPE_512:
		rctx->cmd.u.sha.ctx_len = SHA512_DIGEST_SIZE;
		break;
	default:
		/* Should never get here */
		break;
	}

	rctx->cmd.u.sha.src = sg;
	rctx->cmd.u.sha.src_len = rctx->hash_cnt;
	rctx->cmd.u.sha.opad = ctx->u.sha.key_len ?
		&ctx->u.sha.opad_sg : NULL;
	rctx->cmd.u.sha.opad_len = ctx->u.sha.key_len ?
		ctx->u.sha.opad_count : 0;
	rctx->cmd.u.sha.first = rctx->first;
	rctx->cmd.u.sha.final = rctx->final;
	rctx->cmd.u.sha.msg_bits = rctx->msg_bits;

	rctx->first = 0;

	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

	return ret;

e_free:
	sg_free_table(&rctx->data_sg);

	return ret;
}

static int ccp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct ccp_crypto_ahash_alg *alg =
		ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));
	unsigned int block_size =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	memset(rctx, 0, sizeof(*rctx));

	rctx->type = alg->type;
	rctx->first = 1;

	if (ctx->u.sha.key_len) {
		/* Buffer the HMAC key for first update */
		memcpy(rctx->buf, ctx->u.sha.ipad, block_size);
		rctx->buf_count = block_size;
	}

	return 0;
}

static int ccp_sha_update(struct ahash_request *req)
{
	return ccp_do_sha_update(req, req->nbytes, 0);
}

static int ccp_sha_final(struct ahash_request *req)
{
	return ccp_do_sha_update(req, 0, 1);
}

static int ccp_sha_finup(struct ahash_request *req)
{
	return ccp_do_sha_update(req, req->nbytes, 1);
}

static int ccp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = ccp_sha_init(req);
	if (ret)
		return ret;

	return ccp_sha_finup(req);
}
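
/*
 * Export/import move a partial hash state between requests. The state
 * is staged through a local struct because the caller's 'out'/'in'
 * areas carry no alignment guarantee.
 */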
static int ccp_sha_export(struct ahash_request *req, void *out)
{
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct ccp_sha_exp_ctx state;

	/* Don't let anything leak to 'out' */
	memset(&state, 0, sizeof(state));

	state.type = rctx->type;
	state.msg_bits = rctx->msg_bits;
	state.first = rctx->first;
	memcpy(state.ctx, rctx->ctx, sizeof(state.ctx));
	state.buf_count = rctx->buf_count;
	memcpy(state.buf, rctx->buf, sizeof(state.buf));

	/* 'out' may not be aligned so memcpy from local variable */
	memcpy(out, &state, sizeof(state));

	return 0;
}

static int ccp_sha_import(struct ahash_request *req, const void *in)
{
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct ccp_sha_exp_ctx state;

	/* 'in' may not be aligned so memcpy to local variable */
	memcpy(&state, in, sizeof(state));

	memset(rctx, 0, sizeof(*rctx));
	rctx->type = state.type;
	rctx->msg_bits = state.msg_bits;
	rctx->first = state.first;
	memcpy(rctx->ctx, state.ctx, sizeof(rctx->ctx));
	rctx->buf_count = state.buf_count;
	memcpy(rctx->buf, state.buf, sizeof(rctx->buf));

	return 0;
}

static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct crypto_shash *shash = ctx->u.sha.hmac_tfm;

	SHASH_DESC_ON_STACK(sdesc, shash);

	unsigned int block_size = crypto_shash_blocksize(shash);
	unsigned int digest_size = crypto_shash_digestsize(shash);
	int i, ret;

	/* Set to zero until complete */
	ctx->u.sha.key_len = 0;

	/* Clear key area to provide zero padding for keys smaller
	 * than the block size
	 */
	memset(ctx->u.sha.key, 0, sizeof(ctx->u.sha.key));

	if (key_len > block_size) {
		/* Must hash the input key */
		sdesc->tfm = shash;

		ret = crypto_shash_digest(sdesc, key, key_len,
					  ctx->u.sha.key);
		if (ret)
			return -EINVAL;

		key_len = digest_size;
	} else {
		memcpy(ctx->u.sha.key, key, key_len);
	}

	for (i = 0; i < block_size; i++) {
		ctx->u.sha.ipad[i] = ctx->u.sha.key[i] ^ HMAC_IPAD_VALUE;
		ctx->u.sha.opad[i] = ctx->u.sha.key[i] ^ HMAC_OPAD_VALUE;
	}

	sg_init_one(&ctx->u.sha.opad_sg, ctx->u.sha.opad, block_size);
	ctx->u.sha.opad_count = block_size;

	ctx->u.sha.key_len = key_len;

	return 0;
}

static int ccp_sha_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);

	ctx->complete = ccp_sha_complete;
	ctx->u.sha.key_len = 0;

	crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_sha_req_ctx));

	return 0;
}

static void ccp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

static int ccp_hmac_sha_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm);
	struct crypto_shash *hmac_tfm;

	hmac_tfm = crypto_alloc_shash(alg->child_alg, 0, 0);
	if (IS_ERR(hmac_tfm)) {
		pr_warn("could not load driver %s needed for HMAC support\n",
			alg->child_alg);
		return PTR_ERR(hmac_tfm);
	}

	ctx->u.sha.hmac_tfm = hmac_tfm;

	return ccp_sha_cra_init(tfm);
}

static void ccp_hmac_sha_cra_exit(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->u.sha.hmac_tfm)
		crypto_free_shash(ctx->u.sha.hmac_tfm);

	ccp_sha_cra_exit(tfm);
}
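
/*
 * Table of SHA variants the driver can register, along with the
 * minimum CCP device version that implements each one.
 */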
struct ccp_sha_def {
	unsigned int version;
	const char *name;
	const char *drv_name;
	enum ccp_sha_type type;
	u32 digest_size;
	u32 block_size;
};

static struct ccp_sha_def sha_algs[] = {
	{
		.version = CCP_VERSION(3, 0),
		.name = "sha1",
		.drv_name = "sha1-ccp",
		.type = CCP_SHA_TYPE_1,
		.digest_size = SHA1_DIGEST_SIZE,
		.block_size = SHA1_BLOCK_SIZE,
	},
	{
		.version = CCP_VERSION(3, 0),
		.name = "sha224",
		.drv_name = "sha224-ccp",
		.type = CCP_SHA_TYPE_224,
		.digest_size = SHA224_DIGEST_SIZE,
		.block_size = SHA224_BLOCK_SIZE,
	},
	{
		.version = CCP_VERSION(3, 0),
		.name = "sha256",
		.drv_name = "sha256-ccp",
		.type = CCP_SHA_TYPE_256,
		.digest_size = SHA256_DIGEST_SIZE,
		.block_size = SHA256_BLOCK_SIZE,
	},
	{
		.version = CCP_VERSION(5, 0),
		.name = "sha384",
		.drv_name = "sha384-ccp",
		.type = CCP_SHA_TYPE_384,
		.digest_size = SHA384_DIGEST_SIZE,
		.block_size = SHA384_BLOCK_SIZE,
	},
	{
		.version = CCP_VERSION(5, 0),
		.name = "sha512",
		.drv_name = "sha512-ccp",
		.type = CCP_SHA_TYPE_512,
		.digest_size = SHA512_DIGEST_SIZE,
		.block_size = SHA512_BLOCK_SIZE,
	},
};

static int ccp_register_hmac_alg(struct list_head *head,
				 const struct ccp_sha_def *def,
				 const struct ccp_crypto_ahash_alg *base_alg)
{
	struct ccp_crypto_ahash_alg *ccp_alg;
	struct ahash_alg *alg;
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	/* Copy the base algorithm and only change what's necessary */
	*ccp_alg = *base_alg;
	INIT_LIST_HEAD(&ccp_alg->entry);

	snprintf(ccp_alg->child_alg, CRYPTO_MAX_ALG_NAME, "%s", def->name);

	alg = &ccp_alg->alg;
	alg->setkey = ccp_sha_setkey;

	halg = &alg->halg;

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", def->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s",
		 def->drv_name);
	base->cra_init = ccp_hmac_sha_cra_init;
	base->cra_exit = ccp_hmac_sha_cra_exit;

	ret = crypto_register_ahash(alg);
	if (ret) {
		pr_err("%s ahash algorithm registration error (%d)\n",
		       base->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	return ret;
}

static int ccp_register_sha_alg(struct list_head *head,
				const struct ccp_sha_def *def)
{
	struct ccp_crypto_ahash_alg *ccp_alg;
	struct ahash_alg *alg;
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	INIT_LIST_HEAD(&ccp_alg->entry);

	ccp_alg->type = def->type;

	alg = &ccp_alg->alg;
	alg->init = ccp_sha_init;
	alg->update = ccp_sha_update;
	alg->final = ccp_sha_final;
	alg->finup = ccp_sha_finup;
	alg->digest = ccp_sha_digest;
	alg->export = ccp_sha_export;
	alg->import = ccp_sha_import;

	halg = &alg->halg;
	halg->digestsize = def->digest_size;
	halg->statesize = sizeof(struct ccp_sha_exp_ctx);

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);
	base->cra_flags = CRYPTO_ALG_ASYNC |
			  CRYPTO_ALG_KERN_DRIVER_ONLY |
			  CRYPTO_ALG_NEED_FALLBACK;
	base->cra_blocksize = def->block_size;
	base->cra_ctxsize = sizeof(struct ccp_ctx);
	base->cra_priority = CCP_CRA_PRIORITY;
	base->cra_init = ccp_sha_cra_init;
	base->cra_exit = ccp_sha_cra_exit;
	base->cra_module = THIS_MODULE;

	ret = crypto_register_ahash(alg);
	if (ret) {
		pr_err("%s ahash algorithm registration error (%d)\n",
		       base->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	ret = ccp_register_hmac_alg(head, def, ccp_alg);

	return ret;
}
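
/*
 * Registration entry point: walk the definition table and register
 * each algorithm, skipping any that require a newer CCP than the
 * one detected.
 */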
int ccp_register_sha_algs(struct list_head *head)
{
	int i, ret;
	unsigned int ccpversion = ccp_version();

	for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
		if (sha_algs[i].version > ccpversion)
			continue;
		ret = ccp_register_sha_alg(head, &sha_algs[i]);
		if (ret)
			return ret;
	}

	return 0;
}