// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <crypto/internal/hash.h>

#include "common.h"
#include "core.h"
#include "sha.h"

struct qce_sha_saved_state {
	u8 pending_buf[QCE_SHA_MAX_BLOCKSIZE];
	u8 partial_digest[QCE_SHA_MAX_DIGESTSIZE];
	__be32 byte_count[2];
	unsigned int pending_buflen;
	unsigned int flags;
	u64 count;
	bool first_blk;
};

static LIST_HEAD(ahash_algs);

static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = {
	SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0
};

static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = {
	SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
	SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
};

static void qce_ahash_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	struct qce_result_dump *result = qce->dma.result_buf;
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	int error;
	u32 status;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error);

	dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
	dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);

	memcpy(rctx->digest, result->auth_iv, digestsize);
	if (req->result && rctx->last_blk)
		memcpy(req->result, result->auth_iv, digestsize);

	rctx->byte_count[0] = cpu_to_be32(result->auth_byte_count[0]);
	rctx->byte_count[1] = cpu_to_be32(result->auth_byte_count[1]);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "ahash operation error (%x)\n", status);

	req->src = rctx->src_orig;
	req->nbytes = rctx->nbytes_orig;
	rctx->last_blk = false;
	rctx->first_blk = false;

	qce->async_req_done(tmpl->qce, error);
}

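/*
 * Map the source and result scatterlists, program the DMA channel and
 * start the hash operation on the crypto engine. Completion is reported
 * asynchronously through qce_ahash_done().
 */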
static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	unsigned long flags = rctx->flags;
	int ret;

	if (IS_SHA_HMAC(flags)) {
		rctx->authkey = ctx->authkey;
		rctx->authklen = QCE_SHA_HMAC_KEY_SIZE;
	} else if (IS_CMAC(flags)) {
		rctx->authkey = ctx->authkey;
		rctx->authklen = AES_KEYSIZE_128;
	}

	rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (rctx->src_nents < 0) {
		dev_err(qce->dev, "Invalid number of src SG.\n");
		return rctx->src_nents;
	}

	/* dma_map_sg() returns 0 on failure, never a negative errno */
	ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
	if (!ret)
		return -EIO;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	ret = dma_map_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
	if (!ret) {
		ret = -EIO;
		goto error_unmap_src;
	}

	ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents,
			       &rctx->result_sg, 1, qce_ahash_done, async_req);
	if (ret)
		goto error_unmap_dst;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_dst:
	dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
error_unmap_src:
	dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
	return ret;
}

static int qce_ahash_init(struct ahash_request *req)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	const u32 *std_iv = tmpl->std_iv;

	memset(rctx, 0, sizeof(*rctx));
	rctx->first_blk = true;
	rctx->last_blk = false;
	rctx->flags = tmpl->alg_flags;
	memcpy(rctx->digest, std_iv, sizeof(rctx->digest));

	return 0;
}

static int qce_ahash_export(struct ahash_request *req, void *out)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_sha_saved_state *export_state = out;

	memcpy(export_state->pending_buf, rctx->buf, rctx->buflen);
	memcpy(export_state->partial_digest, rctx->digest, sizeof(rctx->digest));
	export_state->byte_count[0] = rctx->byte_count[0];
	export_state->byte_count[1] = rctx->byte_count[1];
	export_state->pending_buflen = rctx->buflen;
	export_state->count = rctx->count;
	export_state->first_blk = rctx->first_blk;
	export_state->flags = rctx->flags;

	return 0;
}

static int qce_ahash_import(struct ahash_request *req, const void *in)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	const struct qce_sha_saved_state *import_state = in;

	memset(rctx, 0, sizeof(*rctx));
	rctx->count = import_state->count;
	rctx->buflen = import_state->pending_buflen;
	rctx->first_blk = import_state->first_blk;
	rctx->flags = import_state->flags;
	rctx->byte_count[0] = import_state->byte_count[0];
	rctx->byte_count[1] = import_state->byte_count[1];
	memcpy(rctx->buf, import_state->pending_buf, rctx->buflen);
	memcpy(rctx->digest, import_state->partial_digest, sizeof(rctx->digest));

	return 0;
}

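/*
 * Queue an update. Data that does not fill a whole number of blocks is
 * buffered in the request context and either prepended to the next update
 * or hashed in qce_ahash_final(), since intermediate operations must be
 * block aligned and the engine cannot handle zero length transfers.
 */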
static int qce_ahash_update(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	struct qce_device *qce = tmpl->qce;
	struct scatterlist *sg_last, *sg;
	unsigned int total, len;
	unsigned int hash_later;
	unsigned int nbytes;
	unsigned int blocksize;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	rctx->count += req->nbytes;

	/* check for buffer from previous updates and append it */
	total = req->nbytes + rctx->buflen;

	if (total <= blocksize) {
		scatterwalk_map_and_copy(rctx->buf + rctx->buflen, req->src,
					 0, req->nbytes, 0);
		rctx->buflen += req->nbytes;
		return 0;
	}

	/* save the original req structure fields */
	rctx->src_orig = req->src;
	rctx->nbytes_orig = req->nbytes;

	/*
	 * if we have data from a previous update, copy it into the buffer.
	 * The old data will be combined with the current request bytes.
	 */
	if (rctx->buflen)
		memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);

	/* calculate how many bytes will be hashed later */
	hash_later = total % blocksize;

	/*
	 * At this point, there is more than one block size of data. If
	 * the available data to transfer is exactly a multiple of block
	 * size, save the last block to be transferred in qce_ahash_final
	 * (with the last block bit set) if this is indeed the end of the
	 * data stream. If not, this saved block will be transferred as
	 * part of the next update. If this block is not held back and this
	 * is indeed the end of the data stream, the digest obtained will be
	 * wrong since qce_ahash_final will see that rctx->buflen is 0 and
	 * return doing nothing, which in turn means that a digest will not
	 * be copied to the destination result buffer. qce_ahash_final cannot
	 * be made to alter this behavior and allowed to proceed if
	 * rctx->buflen is 0, because the crypto engine BAM does not allow
	 * zero length transfers.
	 */
	if (!hash_later)
		hash_later = blocksize;

	if (hash_later) {
		unsigned int src_offset = req->nbytes - hash_later;

		scatterwalk_map_and_copy(rctx->buf, req->src, src_offset,
					 hash_later, 0);
	}

	/* here nbytes is a multiple of blocksize */
	nbytes = total - hash_later;

	len = rctx->buflen;
	sg = sg_last = req->src;

	while (len < nbytes && sg) {
		if (len + sg_dma_len(sg) > nbytes)
			break;
		len += sg_dma_len(sg);
		sg_last = sg;
		sg = sg_next(sg);
	}

	if (!sg_last)
		return -EINVAL;

	if (rctx->buflen) {
		sg_init_table(rctx->sg, 2);
		sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen);
		sg_chain(rctx->sg, 2, req->src);
		req->src = rctx->sg;
	}

	req->nbytes = nbytes;
	rctx->buflen = hash_later;

	return qce->async_req_enqueue(tmpl->qce, &req->base);
}

static int qce_ahash_final(struct ahash_request *req)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	struct qce_device *qce = tmpl->qce;

	if (!rctx->buflen) {
		if (tmpl->hash_zero)
			memcpy(req->result, tmpl->hash_zero,
			       tmpl->alg.ahash.halg.digestsize);
		return 0;
	}

	rctx->last_blk = true;

	rctx->src_orig = req->src;
	rctx->nbytes_orig = req->nbytes;

	memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);
	sg_init_one(rctx->sg, rctx->tmpbuf, rctx->buflen);

	req->src = rctx->sg;
	req->nbytes = rctx->buflen;

	return qce->async_req_enqueue(tmpl->qce, &req->base);
}

static int qce_ahash_digest(struct ahash_request *req)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	struct qce_device *qce = tmpl->qce;
	int ret;

	ret = qce_ahash_init(req);
	if (ret)
		return ret;

	rctx->src_orig = req->src;
	rctx->nbytes_orig = req->nbytes;
	rctx->first_blk = true;
	rctx->last_blk = true;

	if (!rctx->nbytes_orig) {
		if (tmpl->hash_zero)
			memcpy(req->result, tmpl->hash_zero,
			       tmpl->alg.ahash.halg.digestsize);
		return 0;
	}

	return qce->async_req_enqueue(tmpl->qce, &req->base);
}

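/*
 * Keys up to the block size are copied into the zero-filled authkey
 * buffer as-is; longer keys are first digested with the corresponding
 * qce ahash ("sha1-qce"/"sha256-qce"), as HMAC requires.
 */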
static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
				 unsigned int keylen)
{
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base);
	struct crypto_wait wait;
	struct ahash_request *req;
	struct scatterlist sg;
	unsigned int blocksize;
	struct crypto_ahash *ahash_tfm;
	u8 *buf;
	int ret;
	const char *alg_name;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	memset(ctx->authkey, 0, sizeof(ctx->authkey));

	if (keylen <= blocksize) {
		memcpy(ctx->authkey, key, keylen);
		return 0;
	}

	if (digestsize == SHA1_DIGEST_SIZE)
		alg_name = "sha1-qce";
	else if (digestsize == SHA256_DIGEST_SIZE)
		alg_name = "sha256-qce";
	else
		return -EINVAL;

	ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
	if (IS_ERR(ahash_tfm))
		return PTR_ERR(ahash_tfm);

	req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto err_free_ahash;
	}

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	crypto_ahash_clear_flags(ahash_tfm, ~0);

	buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_free_req;
	}

	memcpy(buf, key, keylen);
	sg_init_one(&sg, buf, keylen);
	ahash_request_set_crypt(req, &sg, ctx->authkey, keylen);

	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	kfree(buf);
err_free_req:
	ahash_request_free(req);
err_free_ahash:
	crypto_free_ahash(ahash_tfm);
	return ret;
}

static int qce_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(ahash, sizeof(struct qce_sha_reqctx));
	memset(ctx, 0, sizeof(*ctx));
	return 0;
}

struct qce_ahash_def {
	unsigned long flags;
	const char *name;
	const char *drv_name;
	unsigned int digestsize;
	unsigned int blocksize;
	unsigned int statesize;
	const u32 *std_iv;
};

static const struct qce_ahash_def ahash_def[] = {
	{
		.flags = QCE_HASH_SHA1,
		.name = "sha1",
		.drv_name = "sha1-qce",
		.digestsize = SHA1_DIGEST_SIZE,
		.blocksize = SHA1_BLOCK_SIZE,
		.statesize = sizeof(struct qce_sha_saved_state),
		.std_iv = std_iv_sha1,
	},
	{
		.flags = QCE_HASH_SHA256,
		.name = "sha256",
		.drv_name = "sha256-qce",
		.digestsize = SHA256_DIGEST_SIZE,
		.blocksize = SHA256_BLOCK_SIZE,
		.statesize = sizeof(struct qce_sha_saved_state),
		.std_iv = std_iv_sha256,
	},
	{
		.flags = QCE_HASH_SHA1_HMAC,
		.name = "hmac(sha1)",
		.drv_name = "hmac-sha1-qce",
		.digestsize = SHA1_DIGEST_SIZE,
		.blocksize = SHA1_BLOCK_SIZE,
		.statesize = sizeof(struct qce_sha_saved_state),
		.std_iv = std_iv_sha1,
	},
	{
		.flags = QCE_HASH_SHA256_HMAC,
		.name = "hmac(sha256)",
		.drv_name = "hmac-sha256-qce",
		.digestsize = SHA256_DIGEST_SIZE,
		.blocksize = SHA256_BLOCK_SIZE,
		.statesize = sizeof(struct qce_sha_saved_state),
		.std_iv = std_iv_sha256,
	},
};

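/*
 * Allocate a qce_alg_template for one algorithm definition, fill in the
 * ahash callbacks and crypto_alg properties, and register it with the
 * crypto API. Registered templates are tracked on ahash_algs so they can
 * be torn down in qce_ahash_unregister().
 */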
static int qce_ahash_register_one(const struct qce_ahash_def *def,
				  struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct ahash_alg *alg;
	struct crypto_alg *base;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	tmpl->std_iv = def->std_iv;

	alg = &tmpl->alg.ahash;
	alg->init = qce_ahash_init;
	alg->update = qce_ahash_update;
	alg->final = qce_ahash_final;
	alg->digest = qce_ahash_digest;
	alg->export = qce_ahash_export;
	alg->import = qce_ahash_import;
	if (IS_SHA_HMAC(def->flags))
		alg->setkey = qce_ahash_hmac_setkey;
	alg->halg.digestsize = def->digestsize;
	alg->halg.statesize = def->statesize;

	if (IS_SHA1(def->flags))
		tmpl->hash_zero = sha1_zero_message_hash;
	else if (IS_SHA256(def->flags))
		tmpl->hash_zero = sha256_zero_message_hash;

	base = &alg->halg.base;
	base->cra_blocksize = def->blocksize;
	base->cra_priority = 300;
	base->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
	base->cra_ctxsize = sizeof(struct qce_sha_ctx);
	base->cra_alignmask = 0;
	base->cra_module = THIS_MODULE;
	base->cra_init = qce_ahash_cra_init;

	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_AHASH;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_ahash(alg);
	if (ret) {
		dev_err(qce->dev, "%s registration failed\n", base->cra_name);
		kfree(tmpl);
		return ret;
	}

	list_add_tail(&tmpl->entry, &ahash_algs);
	dev_dbg(qce->dev, "%s is registered\n", base->cra_name);
	return 0;
}

static void qce_ahash_unregister(struct qce_device *qce)
{
	struct qce_alg_template *tmpl, *n;

	list_for_each_entry_safe(tmpl, n, &ahash_algs, entry) {
		crypto_unregister_ahash(&tmpl->alg.ahash);
		list_del(&tmpl->entry);
		kfree(tmpl);
	}
}

static int qce_ahash_register(struct qce_device *qce)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(ahash_def); i++) {
		ret = qce_ahash_register_one(&ahash_def[i], qce);
		if (ret)
			goto err;
	}

	return 0;
err:
	qce_ahash_unregister(qce);
	return ret;
}

const struct qce_algo_ops ahash_ops = {
	.type = CRYPTO_ALG_TYPE_AHASH,
	.register_algs = qce_ahash_register,
	.unregister_algs = qce_ahash_unregister,
	.async_req_handle = qce_ahash_async_req_handle,
};