// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <crypto/aes.h>
#include <crypto/aead.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <crypto/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/des.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>

#include "sec.h"
#include "sec_crypto.h"

#define SEC_PRIORITY		4001
#define SEC_XTS_MIN_KEY_SIZE	(2 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MID_KEY_SIZE	(3 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MAX_KEY_SIZE	(2 * AES_MAX_KEY_SIZE)
#define SEC_DES3_2KEY_SIZE	(2 * DES_KEY_SIZE)
#define SEC_DES3_3KEY_SIZE	(3 * DES_KEY_SIZE)

/* SEC sqe(bd) bit field offsets and masks */
#define SEC_DE_OFFSET		1
#define SEC_CIPHER_OFFSET	4
#define SEC_SCENE_OFFSET	3
#define SEC_DST_SGL_OFFSET	2
#define SEC_SRC_SGL_OFFSET	7
#define SEC_CKEY_OFFSET		9
#define SEC_CMODE_OFFSET	12
#define SEC_AKEY_OFFSET		5
#define SEC_AEAD_ALG_OFFSET	11
#define SEC_AUTH_OFFSET		6

#define SEC_DE_OFFSET_V3	9
#define SEC_SCENE_OFFSET_V3	5
#define SEC_CKEY_OFFSET_V3	13
#define SEC_CTR_CNT_OFFSET	25
#define SEC_CTR_CNT_ROLLOVER	2
#define SEC_SRC_SGL_OFFSET_V3	11
#define SEC_DST_SGL_OFFSET_V3	14
#define SEC_CALG_OFFSET_V3	4
#define SEC_AKEY_OFFSET_V3	9
#define SEC_MAC_OFFSET_V3	4
#define SEC_AUTH_ALG_OFFSET_V3	15
#define SEC_CIPHER_AUTH_V3	0xbf
#define SEC_AUTH_CIPHER_V3	0x40
#define SEC_FLAG_OFFSET		7
#define SEC_FLAG_MASK		0x0780
#define SEC_TYPE_MASK		0x0F
#define SEC_DONE_MASK		0x0001
#define SEC_ICV_MASK		0x000E
#define SEC_SQE_LEN_RATE_MASK	0x3

#define SEC_TOTAL_IV_SZ(depth)	(SEC_IV_SIZE * (depth))
#define SEC_SGL_SGE_NR		128
#define SEC_CIPHER_AUTH		0xfe
#define SEC_AUTH_CIPHER		0x1
#define SEC_MAX_MAC_LEN		64
#define SEC_MAX_AAD_LEN		65535
#define SEC_MAX_CCM_AAD_LEN	65279
#define SEC_TOTAL_MAC_SZ(depth)	(SEC_MAX_MAC_LEN * (depth))

#define SEC_PBUF_SZ		512
#define SEC_PBUF_IV_OFFSET	SEC_PBUF_SZ
#define SEC_PBUF_MAC_OFFSET	(SEC_PBUF_SZ + SEC_IV_SIZE)
#define SEC_PBUF_PKG		(SEC_PBUF_SZ + SEC_IV_SIZE +	\
				 SEC_MAX_MAC_LEN * 2)
#define SEC_PBUF_NUM		(PAGE_SIZE / SEC_PBUF_PKG)
#define SEC_PBUF_PAGE_NUM(depth)	((depth) / SEC_PBUF_NUM)
#define SEC_PBUF_LEFT_SZ(depth)		(SEC_PBUF_PKG * ((depth) -	\
					 SEC_PBUF_PAGE_NUM(depth) * SEC_PBUF_NUM))
#define SEC_TOTAL_PBUF_SZ(depth)	(PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) +	\
					 SEC_PBUF_LEFT_SZ(depth))

#define SEC_SQE_LEN_RATE	4
#define SEC_SQE_CFLAG		2
#define SEC_SQE_AEAD_FLAG	3
#define SEC_SQE_DONE		0x1
#define SEC_ICV_ERR		0x2
#define MIN_MAC_LEN		4
#define MAC_LEN_MASK		0x1U
#define MAX_INPUT_DATA_LEN	0xFFFE00
#define BITS_MASK		0xFF
#define BYTE_BITS		0x8
#define SEC_XTS_NAME_SZ		0x3
#define IV_CM_CAL_NUM		2
#define IV_CL_MASK		0x7
#define IV_CL_MIN		2
#define IV_CL_MID		4
#define IV_CL_MAX		8
#define IV_FLAGS_OFFSET		0x6
#define IV_CM_OFFSET		0x3
#define IV_LAST_BYTE1		1
#define IV_LAST_BYTE2		2
#define IV_LAST_BYTE_MASK	0xFF
#define IV_CTR_INIT		0x1
#define IV_BYTE_OFFSET		0x8

static DEFINE_MUTEX(sec_algs_lock);
static unsigned int sec_available_devs;

struct sec_skcipher {
	u64 alg_msk;
	struct skcipher_alg alg;
};

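/*
 * Like struct sec_skcipher above: pairs an algorithm selection bit
 * (alg_msk) with the AEAD template it enables.
 */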
struct sec_aead {
	u64 alg_msk;
	struct aead_alg alg;
};

/* Pick an encrypt or decrypt queue cyclically to balance load across the TFM's queues */
static inline u32 sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
				ctx->hlf_q_num;

	return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
				ctx->hlf_q_num;
}

static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		atomic_dec(&ctx->enc_qcyclic);
	else
		atomic_dec(&ctx->dec_qcyclic);
}

static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
	int req_id;

	spin_lock_bh(&qp_ctx->req_lock);
	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, qp_ctx->qp->sq_depth, GFP_ATOMIC);
	spin_unlock_bh(&qp_ctx->req_lock);
	if (unlikely(req_id < 0)) {
		dev_err(req->ctx->dev, "alloc req id fail!\n");
		return req_id;
	}

	req->qp_ctx = qp_ctx;
	qp_ctx->req_list[req_id] = req;

	return req_id;
}

static void sec_free_req_id(struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int req_id = req->req_id;

	if (unlikely(req_id < 0 || req_id >= qp_ctx->qp->sq_depth)) {
		dev_err(req->ctx->dev, "free request id invalid!\n");
		return;
	}

	qp_ctx->req_list[req_id] = NULL;
	req->qp_ctx = NULL;

	spin_lock_bh(&qp_ctx->req_lock);
	idr_remove(&qp_ctx->req_idr, req_id);
	spin_unlock_bh(&qp_ctx->req_lock);
}

static u8 pre_parse_finished_bd(struct bd_status *status, void *resp)
{
	struct sec_sqe *bd = resp;

	status->done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
	status->icv = (le16_to_cpu(bd->type2.done_flag) & SEC_ICV_MASK) >> 1;
	status->flag = (le16_to_cpu(bd->type2.done_flag) &
				SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
	status->tag = le16_to_cpu(bd->type2.tag);
	status->err_type = bd->type2.error_type;

	return bd->type_cipher_auth & SEC_TYPE_MASK;
}

static u8 pre_parse_finished_bd3(struct bd_status *status, void *resp)
{
	struct sec_sqe3 *bd3 = resp;

	status->done = le16_to_cpu(bd3->done_flag) & SEC_DONE_MASK;
	status->icv = (le16_to_cpu(bd3->done_flag) & SEC_ICV_MASK) >> 1;
	status->flag = (le16_to_cpu(bd3->done_flag) &
				SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
	status->tag = le64_to_cpu(bd3->tag);
	status->err_type = bd3->error_type;

	return le32_to_cpu(bd3->bd_param) & SEC_TYPE_MASK;
}

static int sec_cb_status_check(struct sec_req *req,
			       struct bd_status *status)
{
	struct sec_ctx *ctx = req->ctx;

	if (unlikely(req->err_type || status->done != SEC_SQE_DONE)) {
		dev_err_ratelimited(ctx->dev, "err_type[%d], done[%u]\n",
				    req->err_type, status->done);
		return -EIO;
	}

	if (unlikely(ctx->alg_type == SEC_SKCIPHER)) {
		if (unlikely(status->flag != SEC_SQE_CFLAG)) {
			dev_err_ratelimited(ctx->dev, "flag[%u]\n",
					    status->flag);
			return -EIO;
		}
	} else if (unlikely(ctx->alg_type == SEC_AEAD)) {
		if (unlikely(status->flag != SEC_SQE_AEAD_FLAG ||
			     status->icv == SEC_ICV_ERR)) {
			dev_err_ratelimited(ctx->dev,
					    "flag[%u], icv[%u]\n",
					    status->flag, status->icv);
			return -EBADMSG;
		}
	}

	return 0;
}

static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
	struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
	struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
	u8 type_supported = qp_ctx->ctx->type_supported;
	struct bd_status status;
	struct sec_ctx *ctx;
	struct sec_req *req;
	int err;
	u8 type;

	if (type_supported == SEC_BD_TYPE2) {
		type = pre_parse_finished_bd(&status, resp);
		req = qp_ctx->req_list[status.tag];
	} else {
		type = pre_parse_finished_bd3(&status, resp);
		req = (void *)(uintptr_t)status.tag;
	}

	if (unlikely(type != type_supported)) {
		atomic64_inc(&dfx->err_bd_cnt);
		pr_err("err bd type [%u]\n", type);
		return;
	}

	if (unlikely(!req)) {
		atomic64_inc(&dfx->invalid_req_cnt);
		atomic_inc(&qp->qp_status.used);
		return;
	}

	req->err_type = status.err_type;
	ctx = req->ctx;
	err = sec_cb_status_check(req, &status);
	if (err)
		atomic64_inc(&dfx->done_flag_cnt);

	atomic64_inc(&dfx->recv_cnt);

	ctx->req_op->buf_unmap(ctx, req);

	ctx->req_op->callback(ctx, req, err);
}

static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int ret;

	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return -EBUSY;

	spin_lock_bh(&qp_ctx->req_lock);
	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
		list_add_tail(&req->backlog_head, &qp_ctx->backlog);
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
		atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
		spin_unlock_bh(&qp_ctx->req_lock);
		return -EBUSY;
	}
	spin_unlock_bh(&qp_ctx->req_lock);

	if (unlikely(ret == -EBUSY))
		return -ENOBUFS;

	if (likely(!ret)) {
		ret = -EINPROGRESS;
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
	}

	return ret;
}

/* Get DMA memory resources */
static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	u16 q_depth = res->depth;
	int i;

	res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
					 &res->c_ivin_dma, GFP_KERNEL);
	if (!res->c_ivin)
		return -ENOMEM;

	for (i = 1; i < q_depth; i++) {
		res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
		res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
	}

	return 0;
}

static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->c_ivin)
		dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
				  res->c_ivin, res->c_ivin_dma);
}

static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res)
{
	u16 q_depth = res->depth;
	int i;

	res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
					 &res->a_ivin_dma, GFP_KERNEL);
	if (!res->a_ivin)
		return -ENOMEM;

	for (i = 1; i < q_depth; i++) {
		res[i].a_ivin_dma = res->a_ivin_dma + i * SEC_IV_SIZE;
		res[i].a_ivin = res->a_ivin + i * SEC_IV_SIZE;
	}

	return 0;
}

static void sec_free_aiv_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->a_ivin)
		dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
				  res->a_ivin, res->a_ivin_dma);
}

static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	u16 q_depth = res->depth;
	int i;

	res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ(q_depth) << 1,
					  &res->out_mac_dma, GFP_KERNEL);
	if (!res->out_mac)
		return -ENOMEM;

	for (i = 1; i < q_depth; i++) {
		res[i].out_mac_dma = res->out_mac_dma +
				     i * (SEC_MAX_MAC_LEN << 1);
		res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
	}

	return 0;
}

static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->out_mac)
		dma_free_coherent(dev, SEC_TOTAL_MAC_SZ(res->depth) << 1,
				  res->out_mac, res->out_mac_dma);
}

static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->pbuf)
		dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ(res->depth),
				  res->pbuf, res->pbuf_dma);
}

/*
 * To improve performance, a pre-mapped pbuf is used for small packets
 * (< 512 bytes) instead of mapping them through the IOMMU per request.
 */
static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	u16 q_depth = res->depth;
	int size = SEC_PBUF_PAGE_NUM(q_depth);
	int pbuf_page_offset;
	int i, j, k;

	res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ(q_depth),
				       &res->pbuf_dma, GFP_KERNEL);
	if (!res->pbuf)
		return -ENOMEM;

	/*
	 * Each SEC_PBUF_PKG holds the data pbuf, the IV and the
	 * out_mac: <SEC_PBUF|SEC_IV|SEC_MAC>.
	 * Every page holds SEC_PBUF_NUM packages and one package is
	 * needed per queue-depth entry, so SEC_TOTAL_PBUF_SZ covers
	 * SEC_PBUF_PAGE_NUM full pages plus the remainder.
	 */
	for (i = 0; i <= size; i++) {
		pbuf_page_offset = PAGE_SIZE * i;
		for (j = 0; j < SEC_PBUF_NUM; j++) {
			k = i * SEC_PBUF_NUM + j;
			if (k == q_depth)
				break;
			res[k].pbuf = res->pbuf +
				j * SEC_PBUF_PKG + pbuf_page_offset;
			res[k].pbuf_dma = res->pbuf_dma +
				j * SEC_PBUF_PKG + pbuf_page_offset;
		}
	}

	return 0;
}

static int sec_alg_resource_alloc(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct sec_alg_res *res = qp_ctx->res;
	struct device *dev = ctx->dev;
	int ret;

	ret = sec_alloc_civ_resource(dev, res);
	if (ret)
		return ret;

	if (ctx->alg_type == SEC_AEAD) {
		ret = sec_alloc_aiv_resource(dev, res);
		if (ret)
			goto alloc_aiv_fail;

		ret = sec_alloc_mac_resource(dev, res);
		if (ret)
			goto alloc_mac_fail;
	}
	if (ctx->pbuf_supported) {
		ret = sec_alloc_pbuf_resource(dev, res);
		if (ret) {
			dev_err(dev, "fail to alloc pbuf dma resource!\n");
			goto alloc_pbuf_fail;
		}
	}

	return 0;

alloc_pbuf_fail:
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
alloc_mac_fail:
	if (ctx->alg_type == SEC_AEAD)
		sec_free_aiv_resource(dev, res);
alloc_aiv_fail:
	sec_free_civ_resource(dev, res);
	return ret;
}

static void sec_alg_resource_free(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = ctx->dev;

	sec_free_civ_resource(dev, qp_ctx->res);

	if (ctx->pbuf_supported)
		sec_free_pbuf_resource(dev, qp_ctx->res);
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
}

static int sec_alloc_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
{
	u16 q_depth = qp_ctx->qp->sq_depth;
	struct device *dev = ctx->dev;
	int ret = -ENOMEM;

	qp_ctx->req_list = kcalloc(q_depth, sizeof(struct sec_req *), GFP_KERNEL);
	if (!qp_ctx->req_list)
		return ret;

	qp_ctx->res = kcalloc(q_depth, sizeof(struct sec_alg_res), GFP_KERNEL);
	if (!qp_ctx->res)
		goto err_free_req_list;
	qp_ctx->res->depth = q_depth;

	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_in_pool)) {
		dev_err(dev, "fail to create sgl pool for input!\n");
		goto err_free_res;
	}

	qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_out_pool)) {
		dev_err(dev, "fail to create sgl pool for output!\n");
		goto err_free_c_in_pool;
	}

	ret = sec_alg_resource_alloc(ctx, qp_ctx);
	if (ret)
		goto err_free_c_out_pool;

	return 0;

err_free_c_out_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
err_free_res:
	kfree(qp_ctx->res);
err_free_req_list:
	kfree(qp_ctx->req_list);
	return ret;
}

static void sec_free_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = ctx->dev;

	sec_alg_resource_free(ctx, qp_ctx);
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
	kfree(qp_ctx->res);
	kfree(qp_ctx->req_list);
}

static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id)
{
	struct sec_qp_ctx *qp_ctx;
	struct hisi_qp *qp;
	int ret;

	qp_ctx = &ctx->qp_ctx[qp_ctx_id];
	qp = ctx->qps[qp_ctx_id];
	qp->req_type = 0;
	qp->qp_ctx = qp_ctx;
	qp_ctx->qp = qp;
	qp_ctx->ctx = ctx;

	qp->req_cb = sec_req_cb;

	spin_lock_init(&qp_ctx->req_lock);
	idr_init(&qp_ctx->req_idr);
	INIT_LIST_HEAD(&qp_ctx->backlog);

	ret = sec_alloc_qp_ctx_resource(ctx, qp_ctx);
	if (ret)
		goto err_destroy_idr;

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0)
		goto err_resource_free;

	return 0;

err_resource_free:
	sec_free_qp_ctx_resource(ctx, qp_ctx);
err_destroy_idr:
	idr_destroy(&qp_ctx->req_idr);
	return ret;
}

static void sec_release_qp_ctx(struct sec_ctx *ctx,
			       struct sec_qp_ctx *qp_ctx)
{
	hisi_qm_stop_qp(qp_ctx->qp);
	sec_free_qp_ctx_resource(ctx, qp_ctx);
	idr_destroy(&qp_ctx->req_idr);
}

static int sec_ctx_base_init(struct sec_ctx *ctx)
{
	struct sec_dev *sec;
	int i, ret;

	ctx->qps = sec_create_qps();
	if (!ctx->qps) {
		pr_err("Can not create sec qps!\n");
		return -ENODEV;
	}

	sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
	ctx->sec = sec;
	ctx->dev = &sec->qm.pdev->dev;
	ctx->hlf_q_num = sec->ctx_q_num >> 1;

	ctx->pbuf_supported = ctx->sec->iommu_used;

	/* Half of queue depth is taken as fake requests limit in the queue. */
	ctx->fake_req_limit = ctx->qps[0]->sq_depth >> 1;
	ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
			      GFP_KERNEL);
	if (!ctx->qp_ctx) {
		ret = -ENOMEM;
		goto err_destroy_qps;
	}

	for (i = 0; i < sec->ctx_q_num; i++) {
		ret = sec_create_qp_ctx(ctx, i);
		if (ret)
			goto err_sec_release_qp_ctx;
	}

	return 0;

err_sec_release_qp_ctx:
	for (i = i - 1; i >= 0; i--)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
	kfree(ctx->qp_ctx);
err_destroy_qps:
	sec_destroy_qps(ctx->qps, sec->ctx_q_num);
	return ret;
}

static void sec_ctx_base_uninit(struct sec_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->sec->ctx_q_num; i++)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);

	sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
	kfree(ctx->qp_ctx);
}

static int sec_cipher_init(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
					  &c_ctx->c_key_dma, GFP_KERNEL);
	if (!c_ctx->c_key)
		return -ENOMEM;

	return 0;
}

static void sec_cipher_uninit(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
	dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
			  c_ctx->c_key, c_ctx->c_key_dma);
}

static int sec_auth_init(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
					  &a_ctx->a_key_dma, GFP_KERNEL);
	if (!a_ctx->a_key)
		return -ENOMEM;

	return 0;
}

static void sec_auth_uninit(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	memzero_explicit(a_ctx->a_key, SEC_MAX_AKEY_SIZE);
	dma_free_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
			  a_ctx->a_key, a_ctx->a_key_dma);
}

static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm)
{
	const char *alg = crypto_tfm_alg_name(&tfm->base);
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	c_ctx->fallback = false;

	/* Currently, only XTS mode needs a fallback tfm, for 192-bit keys */
	if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ)))
		return 0;

	c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0,
						  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(c_ctx->fbtfm)) {
		pr_err("failed to alloc xts mode fallback tfm!\n");
		return PTR_ERR(c_ctx->fbtfm);
	}

	return 0;
}

static int sec_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ctx->alg_type = SEC_SKCIPHER;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		pr_err("get error skcipher iv size!\n");
		return -EINVAL;
	}

	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	ret = sec_skcipher_fbtfm_init(tfm);
	if (ret)
		goto err_fbtfm_init;

	return 0;

err_fbtfm_init:
	sec_cipher_uninit(ctx);
err_cipher_init:
	sec_ctx_base_uninit(ctx);
	return ret;
}

static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->c_ctx.fbtfm)
		crypto_free_sync_skcipher(ctx->c_ctx.fbtfm);

	sec_cipher_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}

static int sec_skcipher_3des_setkey(struct crypto_skcipher *tfm, const u8 *key, const u32 keylen)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	int ret;

	ret = verify_skcipher_des3_key(tfm, key);
	if (ret)
		return ret;

	switch (keylen) {
	case SEC_DES3_2KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
		break;
	case SEC_DES3_3KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
				       const u32 keylen,
				       const enum sec_cmode c_mode)
{
	if (c_mode == SEC_CMODE_XTS) {
		switch (keylen) {
		case SEC_XTS_MIN_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case SEC_XTS_MID_KEY_SIZE:
			c_ctx->fallback = true;
			break;
		case SEC_XTS_MAX_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: xts mode key error!\n");
			return -EINVAL;
		}
	} else {
		if (c_ctx->c_alg == SEC_CALG_SM4 &&
		    keylen != AES_KEYSIZE_128) {
			pr_err("hisi_sec2: sm4 key error!\n");
			return -EINVAL;
		} else {
			switch (keylen) {
			case AES_KEYSIZE_128:
				c_ctx->c_key_len = SEC_CKEY_128BIT;
				break;
			case AES_KEYSIZE_192:
				c_ctx->c_key_len = SEC_CKEY_192BIT;
				break;
			case AES_KEYSIZE_256:
				c_ctx->c_key_len = SEC_CKEY_256BIT;
				break;
			default:
				pr_err("hisi_sec2: aes key error!\n");
				return -EINVAL;
			}
		}
	}

	return 0;
}

static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       const u32 keylen, const enum sec_calg c_alg,
			       const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct device *dev = ctx->dev;
	int ret;

	if (c_mode == SEC_CMODE_XTS) {
		ret = xts_verify_key(tfm, key, keylen);
		if (ret) {
			dev_err(dev, "xts mode key err!\n");
			return ret;
		}
	}

	c_ctx->c_alg = c_alg;
	c_ctx->c_mode = c_mode;

	switch (c_alg) {
	case SEC_CALG_3DES:
		ret = sec_skcipher_3des_setkey(tfm, key, keylen);
		break;
	case SEC_CALG_AES:
	case SEC_CALG_SM4:
		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
		break;
	default:
		dev_err(dev, "sec c_alg err!\n");
		return -EINVAL;
	}

	if (ret) {
		dev_err(dev, "set sec key err!\n");
		return ret;
	}

	memcpy(c_ctx->c_key, key, keylen);
	if (c_ctx->fallback && c_ctx->fbtfm) {
		ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
		if (ret) {
			dev_err(dev, "failed to set fallback skcipher key!\n");
			return ret;
		}
	}
	return 0;
}

#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode)				\
static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,	\
	u32 keylen)								\
{										\
	return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode);		\
}

GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(aes_ctr, SEC_CALG_AES, SEC_CMODE_CTR)
GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(sm4_ctr, SEC_CALG_SM4, SEC_CMODE_CTR)

static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
			       struct scatterlist *src)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct aead_request *aead_req = a_req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct device *dev = ctx->dev;
	int copy_size, pbuf_length;
	int req_id = req->req_id;
	struct crypto_aead *tfm;
	size_t authsize;
	u8 *mac_offset;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = aead_req->cryptlen + aead_req->assoclen;
	else
		copy_size = c_req->c_len;

	pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
					qp_ctx->res[req_id].pbuf, copy_size);
	if (unlikely(pbuf_length != copy_size)) {
		dev_err(dev, "copy src data to pbuf error!\n");
		return -EINVAL;
	}
	if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
		tfm = crypto_aead_reqtfm(aead_req);
		authsize = crypto_aead_authsize(tfm);
		mac_offset = qp_ctx->res[req_id].pbuf + copy_size - authsize;
		memcpy(a_req->out_mac, mac_offset, authsize);
	}

	req->in_dma = qp_ctx->res[req_id].pbuf_dma;
	c_req->c_out_dma = req->in_dma;

	return 0;
}

static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
				  struct scatterlist *dst)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int copy_size, pbuf_length;
	int req_id = req->req_id;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = c_req->c_len + aead_req->assoclen;
	else
		copy_size = c_req->c_len;

	pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
					  qp_ctx->res[req_id].pbuf, copy_size);
	if (unlikely(pbuf_length != copy_size))
		dev_err(ctx->dev, "copy pbuf data to dst error!\n");
}

static int sec_aead_mac_init(struct sec_aead_req *req)
{
	struct aead_request *aead_req = req->aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
	size_t authsize = crypto_aead_authsize(tfm);
	u8 *mac_out = req->out_mac;
	struct scatterlist *sgl = aead_req->src;
	size_t copy_size;
	off_t skip_size;

	/* Copy input mac */
	skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;
	copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out,
				       authsize, skip_size);
	if (unlikely(copy_size != authsize))
		return -EINVAL;

	return 0;
}

static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
			  struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct sec_alg_res *res = &qp_ctx->res[req->req_id];
	struct device *dev = ctx->dev;
	int ret;

	if (req->use_pbuf) {
		c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
		c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
		if (ctx->alg_type == SEC_AEAD) {
			a_req->a_ivin = res->a_ivin;
			a_req->a_ivin_dma = res->a_ivin_dma;
			a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET;
			a_req->out_mac_dma = res->pbuf_dma +
					SEC_PBUF_MAC_OFFSET;
		}
		ret = sec_cipher_pbuf_map(ctx, req, src);

		return ret;
	}
	c_req->c_ivin = res->c_ivin;
	c_req->c_ivin_dma = res->c_ivin_dma;
	if (ctx->alg_type == SEC_AEAD) {
		a_req->a_ivin = res->a_ivin;
		a_req->a_ivin_dma = res->a_ivin_dma;
		a_req->out_mac = res->out_mac;
		a_req->out_mac_dma = res->out_mac_dma;
	}

	req->in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
						qp_ctx->c_in_pool,
						req->req_id,
						&req->in_dma);
	if (IS_ERR(req->in)) {
		dev_err(dev, "fail to dma map input sgl buffers!\n");
		return PTR_ERR(req->in);
	}

	if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
		ret = sec_aead_mac_init(a_req);
		if (unlikely(ret)) {
			dev_err(dev, "fail to init mac data for ICV!\n");
			hisi_acc_sg_buf_unmap(dev, src, req->in);
			return ret;
		}
	}

	if (dst == src) {
		c_req->c_out = req->in;
		c_req->c_out_dma = req->in_dma;
	} else {
		c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
							     qp_ctx->c_out_pool,
							     req->req_id,
							     &c_req->c_out_dma);

		if (IS_ERR(c_req->c_out)) {
			dev_err(dev, "fail to dma map output sgl buffers!\n");
			hisi_acc_sg_buf_unmap(dev, src, req->in);
			return PTR_ERR(c_req->c_out);
		}
	}

	return 0;
}

static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
			     struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct device *dev = ctx->dev;

	if (req->use_pbuf) {
		sec_cipher_pbuf_unmap(ctx, req, dst);
	} else {
		if (dst != src)
			hisi_acc_sg_buf_unmap(dev, src, req->in);

		hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
	}
}

static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	return sec_cipher_map(ctx, req, sq->src, sq->dst);
}

static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	sec_cipher_unmap(ctx, req, sq->src, sq->dst);
}

static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
				struct crypto_authenc_keys *keys)
{
	switch (keys->enckeylen) {
	case AES_KEYSIZE_128:
		c_ctx->c_key_len = SEC_CKEY_128BIT;
		break;
	case AES_KEYSIZE_192:
		c_ctx->c_key_len = SEC_CKEY_192BIT;
		break;
	case AES_KEYSIZE_256:
		c_ctx->c_key_len = SEC_CKEY_256BIT;
		break;
	default:
		pr_err("hisi_sec2: aead aes key error!\n");
		return -EINVAL;
	}
	memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen);

	return 0;
}

static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
				 struct crypto_authenc_keys *keys)
{
	struct crypto_shash *hash_tfm = ctx->hash_tfm;
	int blocksize, digestsize, ret;

	if (!keys->authkeylen) {
		pr_err("hisi_sec2: aead auth key error!\n");
		return -EINVAL;
	}

	blocksize = crypto_shash_blocksize(hash_tfm);
	digestsize = crypto_shash_digestsize(hash_tfm);
	if (keys->authkeylen > blocksize) {
		ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey,
					      keys->authkeylen, ctx->a_key);
		if (ret) {
			pr_err("hisi_sec2: aead auth digest error!\n");
			return -EINVAL;
		}
		ctx->a_key_len = digestsize;
	} else {
		memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
		ctx->a_key_len = keys->authkeylen;
	}

	return 0;
}

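/*
 * The requested tag length is read per request via crypto_aead_authsize(),
 * so it only needs to be forwarded to the fallback tfm when one exists.
 */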
static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize)
{
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct sec_ctx *ctx = crypto_tfm_ctx(tfm);
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	if (unlikely(a_ctx->fallback_aead_tfm))
		return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);

	return 0;
}

static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
				    struct crypto_aead *tfm, const u8 *key,
				    unsigned int keylen)
{
	crypto_aead_clear_flags(a_ctx->fallback_aead_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(a_ctx->fallback_aead_tfm,
			      crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK);
	return crypto_aead_setkey(a_ctx->fallback_aead_tfm, key, keylen);
}

static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			   const u32 keylen, const enum sec_hash_alg a_alg,
			   const enum sec_calg c_alg,
			   const enum sec_mac_len mac_len,
			   const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;
	int ret;

	ctx->a_ctx.a_alg = a_alg;
	ctx->c_ctx.c_alg = c_alg;
	ctx->a_ctx.mac_len = mac_len;
	c_ctx->c_mode = c_mode;

	if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) {
		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
		if (ret) {
			dev_err(dev, "set sec aes ccm cipher key err!\n");
			return ret;
		}
		memcpy(c_ctx->c_key, key, keylen);

		if (unlikely(a_ctx->fallback_aead_tfm)) {
			ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
			if (ret)
				return ret;
		}

		return 0;
	}

	ret = crypto_authenc_extractkeys(&keys, key, keylen);
	if (ret)
		goto bad_key;

	ret = sec_aead_aes_set_key(c_ctx, &keys);
	if (ret) {
		dev_err(dev, "set sec cipher key err!\n");
		goto bad_key;
	}

	ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
	if (ret) {
		dev_err(dev, "set sec auth key err!\n");
		goto bad_key;
	}

	if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK) ||
	    (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) {
		ret = -EINVAL;
		dev_err(dev, "MAC or AUTH key length error!\n");
		goto bad_key;
	}

	return 0;

bad_key:
	memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));
	return ret;
}

#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode)	\
static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key,	\
	u32 keylen)							\
{									\
	return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
}

GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
			 SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
			 SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
			 SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES,
			 SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES,
			 SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4,
			 SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4,
			 SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)

static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	return sec_cipher_map(ctx, req, aq->src, aq->dst);
}

static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	sec_cipher_unmap(ctx, req, aq->src, aq->dst);
}

static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
{
	int ret;

	ret = ctx->req_op->buf_map(ctx, req);
	if (unlikely(ret))
		return ret;

	ctx->req_op->do_transfer(ctx, req);

	ret = ctx->req_op->bd_fill(ctx, req);
	if (unlikely(ret))
		goto unmap_req_buf;

	return ret;

unmap_req_buf:
	ctx->req_op->buf_unmap(ctx, req);
	return ret;
}

static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
{
	ctx->req_op->buf_unmap(ctx, req);
}

static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_cipher_req *c_req = &req->c_req;

	memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
}

static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	u8 scene, sa_type, da_type;
	u8 bd_type, cipher;
	u8 de = 0;

	memset(sec_sqe, 0, sizeof(struct sec_sqe));

	sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
	sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
	sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma);
	sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);

	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
						SEC_CMODE_OFFSET);
	sec_sqe->type2.c_alg = c_ctx->c_alg;
	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
						SEC_CKEY_OFFSET);

	bd_type = SEC_BD_TYPE2;
	if (c_req->encrypt)
		cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
	else
		cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
	sec_sqe->type_cipher_auth = bd_type | cipher;

	/* Set destination and source address type */
	if (req->use_pbuf) {
		sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
		da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
	} else {
		sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
		da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
	}

	sec_sqe->sdm_addr_type |= da_type;
	scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
	if (req->in_dma != c_req->c_out_dma)
		de = 0x1 << SEC_DE_OFFSET;

	sec_sqe->sds_sa_type = (de | scene | sa_type);

	sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
	sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);

	return 0;
}

static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_cipher_req *c_req = &req->c_req;
	u32 bd_param = 0;
	u16 cipher;

	memset(sec_sqe3, 0, sizeof(struct sec_sqe3));

	sec_sqe3->c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
	sec_sqe3->no_scene.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
	sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma);
	sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma);

	sec_sqe3->c_mode_alg = ((u8)c_ctx->c_alg << SEC_CALG_OFFSET_V3) |
						c_ctx->c_mode;
	sec_sqe3->c_icv_key |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
						SEC_CKEY_OFFSET_V3);

	if (c_req->encrypt)
		cipher = SEC_CIPHER_ENC;
	else
		cipher = SEC_CIPHER_DEC;
	sec_sqe3->c_icv_key |= cpu_to_le16(cipher);

	/* Set the CTR counter mode to 128-bit rollover */
	sec_sqe3->auth_mac_key = cpu_to_le32((u32)SEC_CTR_CNT_ROLLOVER <<
						SEC_CTR_CNT_OFFSET);

	if (req->use_pbuf) {
		bd_param |= SEC_PBUF << SEC_SRC_SGL_OFFSET_V3;
		bd_param |= SEC_PBUF << SEC_DST_SGL_OFFSET_V3;
	} else {
		bd_param |= SEC_SGL << SEC_SRC_SGL_OFFSET_V3;
		bd_param |= SEC_SGL << SEC_DST_SGL_OFFSET_V3;
	}

	bd_param |= SEC_COMM_SCENE << SEC_SCENE_OFFSET_V3;
	if (req->in_dma != c_req->c_out_dma)
		bd_param |= 0x1 << SEC_DE_OFFSET_V3;

	bd_param |= SEC_BD_TYPE3;
	sec_sqe3->bd_param = cpu_to_le32(bd_param);

	sec_sqe3->c_len_ivin |= cpu_to_le32(c_req->c_len);
	sec_sqe3->tag = cpu_to_le64((unsigned long)req);

	return 0;
}

/* increment counter (128-bit int) */
static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums)
{
	do {
		--bits;
		nums += counter[bits];
		counter[bits] = nums & BITS_MASK;
		nums >>= BYTE_BITS;
	} while (bits && nums);
}

static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct skcipher_request *sk_req = req->c_req.sk_req;
	u32 iv_size = req->ctx->c_ctx.ivsize;
	struct scatterlist *sgl;
	unsigned int cryptlen;
	size_t sz;
	u8 *iv;

	if (req->c_req.encrypt)
		sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
	else
		sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;

	if (alg_type == SEC_SKCIPHER) {
		iv = sk_req->iv;
		cryptlen = sk_req->cryptlen;
	} else {
		iv = aead_req->iv;
		cryptlen = aead_req->cryptlen;
	}

	if (req->ctx->c_ctx.c_mode == SEC_CMODE_CBC) {
		sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
					cryptlen - iv_size);
		if (unlikely(sz != iv_size))
			dev_err(req->ctx->dev, "copy output iv error!\n");
	} else {
		sz = cryptlen / iv_size;
		if (cryptlen % iv_size)
			sz += 1;
		ctr_iv_inc(iv, iv_size, sz);
	}
}

static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
					  struct sec_qp_ctx *qp_ctx)
{
	struct sec_req *backlog_req = NULL;

	spin_lock_bh(&qp_ctx->req_lock);
	if (ctx->fake_req_limit >=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !list_empty(&qp_ctx->backlog)) {
		backlog_req = list_first_entry(&qp_ctx->backlog,
				typeof(*backlog_req), backlog_head);
		list_del(&backlog_req->backlog_head);
	}
	spin_unlock_bh(&qp_ctx->req_lock);

	return backlog_req;
}

static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
				  int err)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct skcipher_request *backlog_sk_req;
	struct sec_req *backlog_req;

	sec_free_req_id(req);

	/* The output IV is updated after encryption in CBC/CTR mode */
	if (!err && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
	    ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt)
		sec_update_iv(req, SEC_SKCIPHER);

	while (1) {
		backlog_req = sec_back_req_clear(ctx, qp_ctx);
		if (!backlog_req)
			break;

		backlog_sk_req = backlog_req->c_req.sk_req;
		skcipher_request_complete(backlog_sk_req, -EINPROGRESS);
		atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
	}

	skcipher_request_complete(sk_req, err);
}

static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_aead_req *a_req = &req->aead_req;
	size_t authsize = ctx->a_ctx.mac_len;
	u32 data_size = aead_req->cryptlen;
	u8 flage = 0;
	u8 cm, cl;

	/* the specification has been checked in aead_iv_demension_check() */
	cl = c_req->c_ivin[0] + 1;
	c_req->c_ivin[ctx->c_ctx.ivsize - cl] = 0x00;
	memset(&c_req->c_ivin[ctx->c_ctx.ivsize - cl], 0, cl);
	c_req->c_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] = IV_CTR_INIT;

	/* the last 3 bits are L' */
	flage |= c_req->c_ivin[0] & IV_CL_MASK;

	/* M' occupies bits 3~5, the Flags bit is bit 6 */
	cm = (authsize - IV_CM_CAL_NUM) / IV_CM_CAL_NUM;
	flage |= cm << IV_CM_OFFSET;
	if (aead_req->assoclen)
		flage |= 0x01 << IV_FLAGS_OFFSET;

	memcpy(a_req->a_ivin, c_req->c_ivin, ctx->c_ctx.ivsize);
	a_req->a_ivin[0] = flage;

	/*
	 * The last 32 bits hold the counter's initial value: the nonce
	 * occupies the first 16 bits and the tail 16 bits are filled
	 * with the cipher text length.
	 */
	if (!c_req->encrypt)
		data_size = aead_req->cryptlen - authsize;

	a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] =
			data_size & IV_LAST_BYTE_MASK;
	data_size >>= IV_BYTE_OFFSET;
	a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE2] =
			data_size & IV_LAST_BYTE_MASK;
}

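/* Fill the cipher IV from the request IV; for CCM/GCM also derive the auth IV. */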
static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
	size_t authsize = crypto_aead_authsize(tfm);
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_aead_req *a_req = &req->aead_req;

	memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);

	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM) {
		/*
		 * CCM 16Byte Cipher_IV: {1B_Flag,13B_IV,2B_counter},
		 * the counter must be set to 0x01
		 */
		ctx->a_ctx.mac_len = authsize;
		/* CCM 16Byte Auth_IV: {1B_AFlag,13B_IV,2B_Ptext_length} */
		set_aead_auth_iv(ctx, req);
	}

	/* GCM 12Byte Cipher_IV == Auth_IV */
	if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
		ctx->a_ctx.mac_len = authsize;
		memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE);
	}
}

static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir,
				 struct sec_req *req, struct sec_sqe *sec_sqe)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct aead_request *aq = a_req->aead_req;

	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
	sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)ctx->mac_len);

	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
	sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr;
	sec_sqe->type2.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
	sec_sqe->type_cipher_auth |= SEC_NO_AUTH << SEC_AUTH_OFFSET;

	if (dir)
		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
	else
		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;

	sec_sqe->type2.alen_ivllen = cpu_to_le32(aq->assoclen);
	sec_sqe->type2.auth_src_offset = cpu_to_le16(0x0);
	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
}

static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir,
				    struct sec_req *req, struct sec_sqe3 *sqe3)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct aead_request *aq = a_req->aead_req;

	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
	sqe3->c_icv_key |= cpu_to_le16((u16)ctx->mac_len << SEC_MAC_OFFSET_V3);

	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
	sqe3->a_key_addr = sqe3->c_key_addr;
	sqe3->auth_ivin.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
	sqe3->auth_mac_key |= SEC_NO_AUTH;

	if (dir)
		sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
	else
		sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;

	sqe3->a_len_key = cpu_to_le32(aq->assoclen);
	sqe3->auth_src_offset = cpu_to_le16(0x0);
	sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
	sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
}

static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
				struct sec_req *req, struct sec_sqe *sec_sqe)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct aead_request *aq = a_req->aead_req;

	sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);

	sec_sqe->type2.mac_key_alg =
			cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)((ctx->a_key_len) /
			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);

	if (dir) {
		sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;
		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
	} else {
		sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE2 << SEC_AUTH_OFFSET;
		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;
	}
	sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);

	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
}

static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	int ret;

	ret = sec_skcipher_bd_fill(ctx, req);
	if (unlikely(ret)) {
		dev_err(ctx->dev, "skcipher bd fill is error!\n");
		return ret;
	}

	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
	    ctx->c_ctx.c_mode == SEC_CMODE_GCM)
		sec_auth_bd_fill_xcm(auth_ctx, req->c_req.encrypt, req, sec_sqe);
	else
		sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);

	return 0;
}

static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
				   struct sec_req *req, struct sec_sqe3 *sqe3)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct aead_request *aq = a_req->aead_req;

	sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);

	sqe3->auth_mac_key |=
			cpu_to_le32((u32)(ctx->mac_len /
			SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3);

	sqe3->auth_mac_key |=
			cpu_to_le32((u32)(ctx->a_key_len /
			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET_V3);

	sqe3->auth_mac_key |=
			cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3);

	if (dir) {
		sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1);
		sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
	} else {
		sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE2);
		sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;
	}
	sqe3->a_len_key = cpu_to_le32(c_req->c_len + aq->assoclen);

	sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

	sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
}

static int sec_aead_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
	int ret;

	ret = sec_skcipher_bd_fill_v3(ctx, req);
	if (unlikely(ret)) {
		dev_err(ctx->dev, "skcipher bd3 fill is error!\n");
		return ret;
	}

	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
	    ctx->c_ctx.c_mode == SEC_CMODE_GCM)
		sec_auth_bd_fill_xcm_v3(auth_ctx, req->c_req.encrypt,
					req, sec_sqe3);
	else
		sec_auth_bd_fill_ex_v3(auth_ctx, req->c_req.encrypt,
				       req, sec_sqe3);

	return 0;
}

static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
{
	struct aead_request *a_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_aead_req *aead_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	size_t authsize = crypto_aead_authsize(tfm);
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct aead_request *backlog_aead_req;
	struct sec_req *backlog_req;
	size_t sz;

	if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
		sec_update_iv(req, SEC_AEAD);

	/* Copy output mac */
	if (!err && c_req->encrypt) {
		struct scatterlist *sgl = a_req->dst;

		sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
					  aead_req->out_mac,
					  authsize, a_req->cryptlen +
					  a_req->assoclen);
		if (unlikely(sz != authsize)) {
			dev_err(c->dev, "copy out mac err!\n");
			err = -EINVAL;
		}
	}

	sec_free_req_id(req);

	while (1) {
		backlog_req = sec_back_req_clear(c, qp_ctx);
		if (!backlog_req)
			break;

		backlog_aead_req = backlog_req->aead_req.aead_req;
		aead_request_complete(backlog_aead_req, -EINPROGRESS);
		atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
	}

	aead_request_complete(a_req, err);
}

static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
{
	sec_free_req_id(req);
	sec_free_queue_id(ctx, req);
}

static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx;
	int queue_id;

	/* To load balance */
	queue_id = sec_alloc_queue_id(ctx, req);
	qp_ctx = &ctx->qp_ctx[queue_id];

	req->req_id = sec_alloc_req_id(req, qp_ctx);
	if (unlikely(req->req_id < 0)) {
		sec_free_queue_id(ctx, req);
		return req->req_id;
	}

	return 0;
}

static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_req *c_req = &req->c_req;
	int ret;

	ret = sec_request_init(ctx, req);
	if (unlikely(ret))
		return ret;

	ret = sec_request_transfer(ctx, req);
	if (unlikely(ret))
		goto err_uninit_req;

	/* Save the output IV for decryption in CBC/CTR mode */
	if (!req->c_req.encrypt && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
	    ctx->c_ctx.c_mode == SEC_CMODE_CTR))
		sec_update_iv(req, ctx->alg_type);

	ret = ctx->req_op->bd_send(ctx, req);
	if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
		(ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		dev_err_ratelimited(ctx->dev, "send sec request failed!\n");
		goto err_send_req;
	}

	return ret;

err_send_req:
	/* On failure, restore the IV from the user request */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
		if (ctx->alg_type == SEC_SKCIPHER)
			memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
		else
			memcpy(req->aead_req.aead_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
	}

	sec_request_untransfer(ctx, req);
err_uninit_req:
	sec_request_uninit(ctx, req);
	return ret;
}

static const struct sec_req_op sec_skcipher_req_ops = {
	.buf_map	= sec_skcipher_sgl_map,
	.buf_unmap	= sec_skcipher_sgl_unmap,
	.do_transfer	= sec_skcipher_copy_iv,
	.bd_fill	= sec_skcipher_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_skcipher_callback,
	.process	= sec_process,
};

static const struct sec_req_op sec_aead_req_ops = {
	.buf_map	= sec_aead_sgl_map,
	.buf_unmap	= sec_aead_sgl_unmap,
	.do_transfer	= sec_aead_set_iv,
	.bd_fill	= sec_aead_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_aead_callback,
	.process	= sec_process,
};

static const struct sec_req_op sec_skcipher_req_ops_v3 = {
	.buf_map	= sec_skcipher_sgl_map,
	.buf_unmap	= sec_skcipher_sgl_unmap,
	.do_transfer	= sec_skcipher_copy_iv,
	.bd_fill	= sec_skcipher_bd_fill_v3,
	.bd_send	= sec_bd_send,
	.callback	= sec_skcipher_callback,
	.process	= sec_process,
};

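/* AEAD request operations for BD type 3 (HW v3 and later). */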
static const struct sec_req_op sec_aead_req_ops_v3 = {
	.buf_map	= sec_aead_sgl_map,
	.buf_unmap	= sec_aead_sgl_unmap,
	.do_transfer	= sec_aead_set_iv,
	.bd_fill	= sec_aead_bd_fill_v3,
	.bd_send	= sec_bd_send,
	.callback	= sec_aead_callback,
	.process	= sec_process,
};

static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = sec_skcipher_init(tfm);
	if (ret)
		return ret;

	if (ctx->sec->qm.ver < QM_HW_V3) {
		ctx->type_supported = SEC_BD_TYPE2;
		ctx->req_op = &sec_skcipher_req_ops;
	} else {
		ctx->type_supported = SEC_BD_TYPE3;
		ctx->req_op = &sec_skcipher_req_ops_v3;
	}

	return ret;
}

static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
{
	sec_skcipher_uninit(tfm);
}

static int sec_aead_init(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->alg_type = SEC_AEAD;
	ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
	if (ctx->c_ctx.ivsize < SEC_AIV_SIZE ||
	    ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		pr_err("get error aead iv size!\n");
		return -EINVAL;
	}

	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;
	if (ctx->sec->qm.ver < QM_HW_V3) {
		ctx->type_supported = SEC_BD_TYPE2;
		ctx->req_op = &sec_aead_req_ops;
	} else {
		ctx->type_supported = SEC_BD_TYPE3;
		ctx->req_op = &sec_aead_req_ops_v3;
	}

	ret = sec_auth_init(ctx);
	if (ret)
		goto err_auth_init;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	return ret;

err_cipher_init:
	sec_auth_uninit(ctx);
err_auth_init:
	sec_ctx_base_uninit(ctx);
	return ret;
}

static void sec_aead_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	sec_cipher_uninit(ctx);
	sec_auth_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}

static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	int ret;

	ret = sec_aead_init(tfm);
	if (ret) {
		pr_err("hisi_sec2: aead init error!\n");
		return ret;
	}

	auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(auth_ctx->hash_tfm)) {
		dev_err(ctx->dev, "aead alloc shash error!\n");
		sec_aead_exit(tfm);
		return PTR_ERR(auth_ctx->hash_tfm);
	}

	return 0;
}

static void sec_aead_ctx_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_shash(ctx->a_ctx.hash_tfm);
	sec_aead_exit(tfm);
}

static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	const char *aead_name = alg->base.cra_name;
	int ret;

	ret = sec_aead_init(tfm);
	if (ret) {
		dev_err(ctx->dev, "hisi_sec2: aead xcm init error!\n");
		return ret;
	}

	a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
						     CRYPTO_ALG_NEED_FALLBACK |
						     CRYPTO_ALG_ASYNC);
	if (IS_ERR(a_ctx->fallback_aead_tfm)) {
static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	const char *aead_name = alg->base.cra_name;
	int ret;

	ret = sec_aead_init(tfm);
	if (ret) {
		dev_err(ctx->dev, "hisi_sec2: aead xcm init error!\n");
		return ret;
	}

	a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
						     CRYPTO_ALG_NEED_FALLBACK |
						     CRYPTO_ALG_ASYNC);
	if (IS_ERR(a_ctx->fallback_aead_tfm)) {
		dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
		sec_aead_exit(tfm);
		return PTR_ERR(a_ctx->fallback_aead_tfm);
	}
	a_ctx->fallback = false;

	return 0;
}

static void sec_aead_xcm_ctx_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
	sec_aead_exit(tfm);
}

static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha1");
}

static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha256");
}

static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha512");
}

static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
				       struct sec_req *sreq)
{
	u32 cryptlen = sreq->c_req.sk_req->cryptlen;
	struct device *dev = ctx->dev;
	u8 c_mode = ctx->c_ctx.c_mode;
	int ret = 0;

	switch (c_mode) {
	case SEC_CMODE_XTS:
		if (unlikely(cryptlen < AES_BLOCK_SIZE)) {
			dev_err(dev, "skcipher XTS mode input length error!\n");
			ret = -EINVAL;
		}
		break;
	case SEC_CMODE_ECB:
	case SEC_CMODE_CBC:
		if (unlikely(cryptlen & (AES_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher AES input length error!\n");
			ret = -EINVAL;
		}
		break;
	case SEC_CMODE_CTR:
		if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) {
			dev_err(dev, "skcipher HW version error!\n");
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct skcipher_request *sk_req = sreq->c_req.sk_req;
	struct device *dev = ctx->dev;
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!sk_req->src || !sk_req->dst ||
		     sk_req->cryptlen > MAX_INPUT_DATA_LEN)) {
		dev_err(dev, "skcipher input param error!\n");
		return -EINVAL;
	}
	sreq->c_req.c_len = sk_req->cryptlen;

	if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	if (c_alg == SEC_CALG_3DES) {
		if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher 3des input length error!\n");
			return -EINVAL;
		}
		return 0;
	} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
		return sec_skcipher_cryptlen_check(ctx, sreq);
	}

	dev_err(dev, "skcipher algorithm error!\n");

	return -EINVAL;
}

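/*
 * Software fallback for skciphers: when c_ctx->fallback is set, the request
 * is replayed synchronously on the fallback sync tfm (c_ctx->fbtfm) instead
 * of being queued to the hardware.
 */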
static int sec_skcipher_soft_crypto(struct sec_ctx *ctx,
				    struct skcipher_request *sreq, bool encrypt)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm);
	struct device *dev = ctx->dev;
	int ret;

	if (!c_ctx->fbtfm) {
		dev_err_ratelimited(dev, "the soft tfm isn't supported in the current system.\n");
		return -EINVAL;
	}

	skcipher_request_set_sync_tfm(subreq, c_ctx->fbtfm);

	/* The software fallback runs the request in synchronous mode */
	skcipher_request_set_callback(subreq, sreq->base.flags,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, sreq->src, sreq->dst,
				   sreq->cryptlen, sreq->iv);
	if (encrypt)
		ret = crypto_skcipher_encrypt(subreq);
	else
		ret = crypto_skcipher_decrypt(subreq);

	skcipher_request_zero(subreq);

	return ret;
}

static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
	struct sec_req *req = skcipher_request_ctx(sk_req);
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	if (!sk_req->cryptlen) {
		if (ctx->c_ctx.c_mode == SEC_CMODE_XTS)
			return -EINVAL;
		return 0;
	}

	req->flag = sk_req->base.flags;
	req->c_req.sk_req = sk_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_skcipher_param_check(ctx, req);
	if (unlikely(ret))
		return -EINVAL;

	if (unlikely(ctx->c_ctx.fallback))
		return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);

	return ctx->req_op->process(ctx, req);
}

static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, true);
}

static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, false);
}

#define SEC_SKCIPHER_ALG(sec_cra_name, sec_set_key, \
	sec_min_key_size, sec_max_key_size, blk_size, iv_size)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC |\
			     CRYPTO_ALG_NEED_FALLBACK,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = sec_skcipher_ctx_init,\
	.exit = sec_skcipher_ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_skcipher_decrypt,\
	.encrypt = sec_skcipher_encrypt,\
	.min_keysize = sec_min_key_size,\
	.max_keysize = sec_max_key_size,\
	.ivsize = iv_size,\
}

static struct sec_skcipher sec_skciphers[] = {
	{
		.alg_msk = BIT(0),
		.alg = SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb, AES_MIN_KEY_SIZE,
					AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, 0),
	},
	{
		.alg_msk = BIT(1),
		.alg = SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc, AES_MIN_KEY_SIZE,
					AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(2),
		.alg = SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr, AES_MIN_KEY_SIZE,
					AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(3),
		.alg = SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts, SEC_XTS_MIN_KEY_SIZE,
					SEC_XTS_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(12),
		.alg = SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc, AES_MIN_KEY_SIZE,
					AES_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(13),
		.alg = SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr, AES_MIN_KEY_SIZE,
					AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(14),
		.alg = SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts, SEC_XTS_MIN_KEY_SIZE,
					SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(23),
		.alg = SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb, SEC_DES3_3KEY_SIZE,
					SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, 0),
	},
	{
		.alg_msk = BIT(24),
		.alg = SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc, SEC_DES3_3KEY_SIZE,
					SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE,
					DES3_EDE_BLOCK_SIZE),
	},
};

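/*
 * CCM IV sanity check: for rfc3610-style CCM the first IV byte encodes
 * L - 1, where L is the size in bytes of the message length field, so
 * cl = iv[0] + 1 must lie in [IV_CL_MIN, IV_CL_MAX]. The payload length must
 * also fit into those cl bytes; this is only checked for cl < IV_CL_MID,
 * since a wider length field cannot overflow the unsigned int cryptlen.
 */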
static int aead_iv_demension_check(struct aead_request *aead_req)
{
	u8 cl;

	cl = aead_req->iv[0] + 1;
	if (cl < IV_CL_MIN || cl > IV_CL_MAX)
		return -EINVAL;

	if (cl < IV_CL_MID && aead_req->cryptlen >> (BYTE_BITS * cl))
		return -EOVERFLOW;

	return 0;
}

static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct aead_request *req = sreq->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	size_t authsize = crypto_aead_authsize(tfm);
	u8 c_mode = ctx->c_ctx.c_mode;
	struct device *dev = ctx->dev;
	int ret;

	if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
		     req->assoclen > SEC_MAX_AAD_LEN)) {
		dev_err(dev, "aead input spec error!\n");
		return -EINVAL;
	}

	if (unlikely((c_mode == SEC_CMODE_GCM && authsize < DES_BLOCK_SIZE) ||
		     (c_mode == SEC_CMODE_CCM && (authsize < MIN_MAC_LEN ||
		      authsize & MAC_LEN_MASK)))) {
		dev_err(dev, "aead input mac length error!\n");
		return -EINVAL;
	}

	if (c_mode == SEC_CMODE_CCM) {
		if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) {
			dev_err_ratelimited(dev, "CCM input aad parameter is too long!\n");
			return -EINVAL;
		}
		ret = aead_iv_demension_check(req);
		if (ret) {
			dev_err(dev, "aead input iv param error!\n");
			return ret;
		}
	}

	if (sreq->c_req.encrypt)
		sreq->c_req.c_len = req->cryptlen;
	else
		sreq->c_req.c_len = req->cryptlen - authsize;
	if (c_mode == SEC_CMODE_CBC) {
		if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
			dev_err(dev, "aead crypto length error!\n");
			return -EINVAL;
		}
	}

	return 0;
}

static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct aead_request *req = sreq->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	size_t authsize = crypto_aead_authsize(tfm);
	struct device *dev = ctx->dev;
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!req->src || !req->dst)) {
		dev_err(dev, "aead input param error!\n");
		return -EINVAL;
	}

	if (ctx->sec->qm.ver == QM_HW_V2) {
		if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
		    req->cryptlen <= authsize))) {
			ctx->a_ctx.fallback = true;
			return -EINVAL;
		}
	}

	/* Only AES and SM4 are supported */
	if (unlikely(c_alg != SEC_CALG_AES && c_alg != SEC_CALG_SM4)) {
		dev_err(dev, "aead crypto alg error!\n");
		return -EINVAL;
	}

	if (unlikely(sec_aead_spec_check(ctx, sreq)))
		return -EINVAL;

	if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
	    SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	return 0;
}

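/*
 * Software fallback for AEAD requests. Unlike the skcipher fallback, this
 * path allocates a full asynchronous sub-request and forwards the original
 * completion callback, so the caller keeps the usual asynchronous semantics.
 */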
static int sec_aead_soft_crypto(struct sec_ctx *ctx,
				struct aead_request *aead_req,
				bool encrypt)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	struct device *dev = ctx->dev;
	struct aead_request *subreq;
	int ret;

	/*
	 * The Kunpeng920 AEAD hardware does not support zero-length input,
	 * hence this software fallback.
	 */
	if (!a_ctx->fallback_aead_tfm) {
		dev_err(dev, "aead fallback tfm is NULL!\n");
		return -EINVAL;
	}

	subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL);
	if (!subreq)
		return -ENOMEM;

	aead_request_set_tfm(subreq, a_ctx->fallback_aead_tfm);
	aead_request_set_callback(subreq, aead_req->base.flags,
				  aead_req->base.complete, aead_req->base.data);
	aead_request_set_crypt(subreq, aead_req->src, aead_req->dst,
			       aead_req->cryptlen, aead_req->iv);
	aead_request_set_ad(subreq, aead_req->assoclen);

	if (encrypt)
		ret = crypto_aead_encrypt(subreq);
	else
		ret = crypto_aead_decrypt(subreq);
	aead_request_free(subreq);

	return ret;
}

static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_req *req = aead_request_ctx(a_req);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	req->flag = a_req->base.flags;
	req->aead_req.aead_req = a_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_aead_param_check(ctx, req);
	if (unlikely(ret)) {
		if (ctx->a_ctx.fallback)
			return sec_aead_soft_crypto(ctx, a_req, encrypt);
		return -EINVAL;
	}

	return ctx->req_op->process(ctx, req);
}

static int sec_aead_encrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, true);
}

static int sec_aead_decrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, false);
}

#define SEC_AEAD_ALG(sec_cra_name, sec_set_key, ctx_init,\
		     ctx_exit, blk_size, iv_size, max_authsize)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC |\
			     CRYPTO_ALG_NEED_FALLBACK,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.setauthsize = sec_aead_setauthsize,\
	.decrypt = sec_aead_decrypt,\
	.encrypt = sec_aead_encrypt,\
	.ivsize = iv_size,\
	.maxauthsize = max_authsize,\
}

static struct sec_aead sec_aeads[] = {
	{
		.alg_msk = BIT(6),
		.alg = SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init,
				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(7),
		.alg = SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init,
				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
				    AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(17),
		.alg = SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init,
				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(18),
		.alg = SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init,
				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
				    AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(43),
		.alg = SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))", sec_setkey_aes_cbc_sha1,
				    sec_aead_sha1_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
	},
	{
		.alg_msk = BIT(44),
		.alg = SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))", sec_setkey_aes_cbc_sha256,
				    sec_aead_sha256_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
	},
	{
		.alg_msk = BIT(45),
		.alg = SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))", sec_setkey_aes_cbc_sha512,
				    sec_aead_sha512_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
	},
};

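/*
 * Registration helpers: each entry in sec_skciphers[]/sec_aeads[] carries an
 * alg_msk bit that is matched against the capability bitmap read from the
 * device (sec_get_alg_bitmap() in sec_register_to_crypto()), so only
 * algorithms the hardware actually implements are registered with the
 * crypto API.
 */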
static void sec_unregister_skcipher(u64 alg_mask, int end)
{
	int i;

	for (i = 0; i < end; i++)
		if (sec_skciphers[i].alg_msk & alg_mask)
			crypto_unregister_skcipher(&sec_skciphers[i].alg);
}

static int sec_register_skcipher(u64 alg_mask)
{
	int i, ret, count;

	count = ARRAY_SIZE(sec_skciphers);

	for (i = 0; i < count; i++) {
		if (!(sec_skciphers[i].alg_msk & alg_mask))
			continue;

		ret = crypto_register_skcipher(&sec_skciphers[i].alg);
		if (ret)
			goto err;
	}

	return 0;

err:
	sec_unregister_skcipher(alg_mask, i);

	return ret;
}

static void sec_unregister_aead(u64 alg_mask, int end)
{
	int i;

	for (i = 0; i < end; i++)
		if (sec_aeads[i].alg_msk & alg_mask)
			crypto_unregister_aead(&sec_aeads[i].alg);
}

static int sec_register_aead(u64 alg_mask)
{
	int i, ret, count;

	count = ARRAY_SIZE(sec_aeads);

	for (i = 0; i < count; i++) {
		if (!(sec_aeads[i].alg_msk & alg_mask))
			continue;

		ret = crypto_register_aead(&sec_aeads[i].alg);
		if (ret)
			goto err;
	}

	return 0;

err:
	sec_unregister_aead(alg_mask, i);

	return ret;
}

int sec_register_to_crypto(struct hisi_qm *qm)
{
	u64 alg_mask;
	int ret = 0;

	alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX,
				      SEC_DRV_ALG_BITMAP_LOW_IDX);

	mutex_lock(&sec_algs_lock);
	if (sec_available_devs) {
		sec_available_devs++;
		goto unlock;
	}

	ret = sec_register_skcipher(alg_mask);
	if (ret)
		goto unlock;

	ret = sec_register_aead(alg_mask);
	if (ret)
		goto unreg_skcipher;

	sec_available_devs++;
	mutex_unlock(&sec_algs_lock);

	return 0;

unreg_skcipher:
	sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
unlock:
	mutex_unlock(&sec_algs_lock);
	return ret;
}

void sec_unregister_from_crypto(struct hisi_qm *qm)
{
	u64 alg_mask;

	alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX,
				      SEC_DRV_ALG_BITMAP_LOW_IDX);

	mutex_lock(&sec_algs_lock);
	if (--sec_available_devs)
		goto unlock;

	sec_unregister_aead(alg_mask, ARRAY_SIZE(sec_aeads));
	sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));

unlock:
	mutex_unlock(&sec_algs_lock);
}

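/*
 * Illustrative sketch only (not part of the driver): once registered, these
 * algorithms are reached through the generic crypto API by cra_name. A
 * synchronous one-shot CBC-AES encryption from another kernel module might
 * look roughly like the following, where "key", "iv", "src_sgl", "dst_sgl"
 * and "data_len" are assumed to be set up by the caller and error handling
 * is trimmed:
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	struct skcipher_request *req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int ret;
 *
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, src_sgl, dst_sgl, data_len, iv);
 *	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */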