// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/des.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>

#include "sec.h"
#include "sec_crypto.h"

#define SEC_PRIORITY		4001
#define SEC_XTS_MIN_KEY_SIZE	(2 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MAX_KEY_SIZE	(2 * AES_MAX_KEY_SIZE)
#define SEC_DES3_2KEY_SIZE	(2 * DES_KEY_SIZE)
#define SEC_DES3_3KEY_SIZE	(3 * DES_KEY_SIZE)

/* Bit-field offsets and masks used to build SEC sqe (BD) descriptors */
#define SEC_DE_OFFSET		1
#define SEC_CIPHER_OFFSET	4
#define SEC_SCENE_OFFSET	3
#define SEC_DST_SGL_OFFSET	2
#define SEC_SRC_SGL_OFFSET	7
#define SEC_CKEY_OFFSET		9
#define SEC_CMODE_OFFSET	12
#define SEC_FLAG_OFFSET		7
#define SEC_FLAG_MASK		0x0780
#define SEC_TYPE_MASK		0x0F
#define SEC_DONE_MASK		0x0001

#define SEC_TOTAL_IV_SZ		(SEC_IV_SIZE * QM_Q_DEPTH)
#define SEC_SGL_SGE_NR		128
#define SEC_CTX_DEV(ctx)	(&(ctx)->sec->qm.pdev->dev)

static DEFINE_MUTEX(sec_algs_lock);
static unsigned int sec_active_devs;

/* Pick an encrypt/decrypt queue cyclically to balance load across the TFM's queues */
static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
				 ctx->hlf_q_num;

	return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
				 ctx->hlf_q_num;
}

static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		atomic_dec(&ctx->enc_qcyclic);
	else
		atomic_dec(&ctx->dec_qcyclic);
}

static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
	int req_id;

	mutex_lock(&qp_ctx->req_lock);

	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
				  0, QM_Q_DEPTH, GFP_ATOMIC);
	mutex_unlock(&qp_ctx->req_lock);
	if (req_id < 0) {
		dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n");
		return req_id;
	}

	req->qp_ctx = qp_ctx;
	qp_ctx->req_list[req_id] = req;
	return req_id;
}

static void sec_free_req_id(struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int req_id = req->req_id;

	if (req_id < 0 || req_id >= QM_Q_DEPTH) {
		dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n");
		return;
	}

	qp_ctx->req_list[req_id] = NULL;
	req->qp_ctx = NULL;

	mutex_lock(&qp_ctx->req_lock);
	idr_remove(&qp_ctx->req_idr, req_id);
	mutex_unlock(&qp_ctx->req_lock);
}

static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
	struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
	struct sec_sqe *bd = resp;
	u16 done, flag;
	u8 type;
	struct sec_req *req;

	type = bd->type_cipher_auth & SEC_TYPE_MASK;
	if (type == SEC_BD_TYPE2) {
		req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
		req->err_type = bd->type2.error_type;

		done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
		flag = (le16_to_cpu(bd->type2.done_flag) &
			SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
		if (req->err_type || done != 0x1 || flag != 0x2)
			dev_err(SEC_CTX_DEV(req->ctx),
				"err_type[%d],done[%d],flag[%d]\n",
				req->err_type, done, flag);
	} else {
		pr_err("err bd type [%d]\n", type);
		return;
	}

	atomic64_inc(&req->ctx->sec->debug.dfx.recv_cnt);

	req->ctx->req_op->buf_unmap(req->ctx, req);

	req->ctx->req_op->callback(req->ctx, req);
}

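/*
 * Enqueue one BD on the queue pair. hisi_qp_send() returning -EBUSY means
 * the hardware queue is full and is reported as -ENOBUFS so the caller can
 * back off; a successful enqueue returns -EBUSY or -EINPROGRESS depending
 * on whether the fake-busy threshold has been reached.
 */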
static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int ret;

	mutex_lock(&qp_ctx->req_lock);
	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
	mutex_unlock(&qp_ctx->req_lock);
	atomic64_inc(&ctx->sec->debug.dfx.send_cnt);

	if (ret == -EBUSY)
		return -ENOBUFS;

	if (!ret) {
		if (req->fake_busy)
			ret = -EBUSY;
		else
			ret = -EINPROGRESS;
	}

	return ret;
}

/* Get DMA memory resources */
static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	int i;

	res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
					 &res->c_ivin_dma, GFP_KERNEL);
	if (!res->c_ivin)
		return -ENOMEM;

	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
		res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
	}

	return 0;
}

static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->c_ivin)
		dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
				  res->c_ivin, res->c_ivin_dma);
}

static int sec_alg_resource_alloc(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = SEC_CTX_DEV(ctx);

	return sec_alloc_civ_resource(dev, qp_ctx->res);
}

static void sec_alg_resource_free(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = SEC_CTX_DEV(ctx);

	sec_free_civ_resource(dev, qp_ctx->res);
}

static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
			     int qp_ctx_id, int alg_type)
{
	struct device *dev = SEC_CTX_DEV(ctx);
	struct sec_qp_ctx *qp_ctx;
	struct hisi_qp *qp;
	int ret = -ENOMEM;

	qp = hisi_qm_create_qp(qm, alg_type);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	qp_ctx = &ctx->qp_ctx[qp_ctx_id];
	qp->req_type = 0;
	qp->qp_ctx = qp_ctx;
	qp->req_cb = sec_req_cb;
	qp_ctx->qp = qp;
	qp_ctx->ctx = ctx;

	mutex_init(&qp_ctx->req_lock);
	atomic_set(&qp_ctx->pending_reqs, 0);
	idr_init(&qp_ctx->req_idr);

	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						     SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_in_pool)) {
		dev_err(dev, "fail to create sgl pool for input!\n");
		goto err_destroy_idr;
	}

	qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						      SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_out_pool)) {
		dev_err(dev, "fail to create sgl pool for output!\n");
		goto err_free_c_in_pool;
	}

	ret = sec_alg_resource_alloc(ctx, qp_ctx);
	if (ret)
		goto err_free_c_out_pool;

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0)
		goto err_queue_free;

	return 0;

err_queue_free:
	sec_alg_resource_free(ctx, qp_ctx);
err_free_c_out_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
err_destroy_idr:
	idr_destroy(&qp_ctx->req_idr);
	hisi_qm_release_qp(qp);

	return ret;
}

static void sec_release_qp_ctx(struct sec_ctx *ctx,
			       struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = SEC_CTX_DEV(ctx);

	hisi_qm_stop_qp(qp_ctx->qp);
	sec_alg_resource_free(ctx, qp_ctx);

	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);

	idr_destroy(&qp_ctx->req_idr);
	hisi_qm_release_qp(qp_ctx->qp);
}

static int sec_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx;
	struct sec_dev *sec;
	struct device *dev;
	struct hisi_qm *qm;
	int i, ret;

	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));

	sec = sec_find_device(cpu_to_node(smp_processor_id()));
	if (!sec) {
		pr_err("Can not find proper Hisilicon SEC device!\n");
		return -ENODEV;
	}
	ctx->sec = sec;
	qm = &sec->qm;
	dev = &qm->pdev->dev;
	ctx->hlf_q_num = sec->ctx_q_num >> 1;

	/* Half of queue depth is taken as fake requests limit in the queue. */
	ctx->fake_req_limit = QM_Q_DEPTH >> 1;
	ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
			      GFP_KERNEL);
	if (!ctx->qp_ctx)
		return -ENOMEM;

	for (i = 0; i < sec->ctx_q_num; i++) {
		ret = sec_create_qp_ctx(qm, ctx, i, 0);
		if (ret)
			goto err_sec_release_qp_ctx;
	}

	c_ctx = &ctx->c_ctx;
	c_ctx->ivsize = crypto_skcipher_ivsize(tfm);
	if (c_ctx->ivsize > SEC_IV_SIZE) {
		dev_err(dev, "get error iv size!\n");
		ret = -EINVAL;
		goto err_sec_release_qp_ctx;
	}
	c_ctx->c_key = dma_alloc_coherent(dev, SEC_MAX_KEY_SIZE,
					  &c_ctx->c_key_dma, GFP_KERNEL);
	if (!c_ctx->c_key) {
		ret = -ENOMEM;
		goto err_sec_release_qp_ctx;
	}

	return 0;

err_sec_release_qp_ctx:
	for (i = i - 1; i >= 0; i--)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);

	kfree(ctx->qp_ctx);
	return ret;
}

static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	int i = 0;

	if (c_ctx->c_key) {
		dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
				  c_ctx->c_key, c_ctx->c_key_dma);
		c_ctx->c_key = NULL;
	}

	for (i = 0; i < ctx->sec->ctx_q_num; i++)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);

	kfree(ctx->qp_ctx);
}

static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx,
				    const u32 keylen,
				    const enum sec_cmode c_mode)
{
	switch (keylen) {
	case SEC_DES3_2KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
		break;
	case SEC_DES3_3KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
				       const u32 keylen,
				       const enum sec_cmode c_mode)
{
	if (c_mode == SEC_CMODE_XTS) {
		switch (keylen) {
		case SEC_XTS_MIN_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case SEC_XTS_MAX_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: xts mode key error!\n");
			return -EINVAL;
		}
	} else {
		switch (keylen) {
		case AES_KEYSIZE_128:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case AES_KEYSIZE_192:
			c_ctx->c_key_len = SEC_CKEY_192BIT;
			break;
		case AES_KEYSIZE_256:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: aes key error!\n");
			return -EINVAL;
		}
	}

	return 0;
}

static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       const u32 keylen, const enum sec_calg c_alg,
			       const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	int ret;

	if (c_mode == SEC_CMODE_XTS) {
		ret = xts_verify_key(tfm, key, keylen);
		if (ret) {
			dev_err(SEC_CTX_DEV(ctx), "xts mode key err!\n");
			return ret;
		}
	}

	c_ctx->c_alg = c_alg;
	c_ctx->c_mode = c_mode;

	switch (c_alg) {
	case SEC_CALG_3DES:
		ret = sec_skcipher_3des_setkey(c_ctx, keylen, c_mode);
		break;
	case SEC_CALG_AES:
	case SEC_CALG_SM4:
		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
		break;
	default:
		return -EINVAL;
	}

	if (ret) {
		dev_err(SEC_CTX_DEV(ctx), "set sec key err!\n");
		return ret;
	}

	memcpy(c_ctx->c_key, key, keylen);

	return 0;
}

#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode)			\
static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
	u32 keylen)							\
{									\
	return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode);	\
}

GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)

GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)

GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)

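/*
 * Map the source and destination scatterlists of one request to hardware
 * SGLs from the per-qp pools. For in-place operation (dst == src) the
 * output simply reuses the input mapping.
 */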
static int sec_cipher_map(struct device *dev, struct sec_req *req,
			  struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;

	c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
						    qp_ctx->c_in_pool,
						    req->req_id,
						    &c_req->c_in_dma);

	if (IS_ERR(c_req->c_in)) {
		dev_err(dev, "fail to dma map input sgl buffers!\n");
		return PTR_ERR(c_req->c_in);
	}

	if (dst == src) {
		c_req->c_out = c_req->c_in;
		c_req->c_out_dma = c_req->c_in_dma;
	} else {
		c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
							     qp_ctx->c_out_pool,
							     req->req_id,
							     &c_req->c_out_dma);

		if (IS_ERR(c_req->c_out)) {
			dev_err(dev, "fail to dma map output sgl buffers!\n");
			hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
			return PTR_ERR(c_req->c_out);
		}
	}

	return 0;
}

static void sec_cipher_unmap(struct device *dev, struct sec_cipher_req *req,
			     struct scatterlist *src, struct scatterlist *dst)
{
	if (dst != src)
		hisi_acc_sg_buf_unmap(dev, src, req->c_in);

	hisi_acc_sg_buf_unmap(dev, dst, req->c_out);
}

static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	return sec_cipher_map(SEC_CTX_DEV(ctx), req, sq->src, sq->dst);
}

static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct device *dev = SEC_CTX_DEV(ctx);
	struct sec_cipher_req *c_req = &req->c_req;
	struct skcipher_request *sk_req = c_req->sk_req;

	sec_cipher_unmap(dev, c_req, sk_req->src, sk_req->dst);
}

static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
{
	int ret;

	ret = ctx->req_op->buf_map(ctx, req);
	if (ret)
		return ret;

	ctx->req_op->do_transfer(ctx, req);

	ret = ctx->req_op->bd_fill(ctx, req);
	if (ret)
		goto unmap_req_buf;

	return ret;

unmap_req_buf:
	ctx->req_op->buf_unmap(ctx, req);

	return ret;
}

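/* Release the mapped buffers of a request that could not be sent */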
static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
{
	ctx->req_op->buf_unmap(ctx, req);
}

static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	u8 *c_ivin = req->qp_ctx->res[req->req_id].c_ivin;
	struct sec_cipher_req *c_req = &req->c_req;

	c_req->c_len = sk_req->cryptlen;
	memcpy(c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
}

static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	u8 scene, sa_type, da_type;
	u8 bd_type, cipher;
	u8 de = 0;

	memset(sec_sqe, 0, sizeof(struct sec_sqe));

	sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
	sec_sqe->type2.c_ivin_addr =
		cpu_to_le64(req->qp_ctx->res[req->req_id].c_ivin_dma);
	sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
	sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);

	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
						 SEC_CMODE_OFFSET);
	sec_sqe->type2.c_alg = c_ctx->c_alg;
	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
						 SEC_CKEY_OFFSET);

	bd_type = SEC_BD_TYPE2;
	if (c_req->encrypt)
		cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
	else
		cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
	sec_sqe->type_cipher_auth = bd_type | cipher;

	sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
	scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
	if (c_req->c_in_dma != c_req->c_out_dma)
		de = 0x1 << SEC_DE_OFFSET;

	sec_sqe->sds_sa_type = (de | scene | sa_type);

	/* Just set DST address type */
	da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
	sec_sqe->sdm_addr_type |= da_type;

	sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
	sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);

	return 0;
}

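/*
 * For CBC mode, copy the last ciphertext block back into sk_req->iv so it
 * can chain into the next request: taken from the destination after an
 * encrypt completes, and from the source before a decrypt is sent.
 */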
static void sec_update_iv(struct sec_req *req)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	u32 iv_size = req->ctx->c_ctx.ivsize;
	struct scatterlist *sgl;
	size_t sz;

	if (req->c_req.encrypt)
		sgl = sk_req->dst;
	else
		sgl = sk_req->src;

	sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), sk_req->iv,
				iv_size, sk_req->cryptlen - iv_size);
	if (sz != iv_size)
		dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
}

static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;

	atomic_dec(&qp_ctx->pending_reqs);
	sec_free_req_id(req);

	/* Output the IV after CBC-mode encryption */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
		sec_update_iv(req);

	if (req->fake_busy)
		sk_req->base.complete(&sk_req->base, -EINPROGRESS);

	sk_req->base.complete(&sk_req->base, req->err_type);
}

static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;

	atomic_dec(&qp_ctx->pending_reqs);
	sec_free_req_id(req);
	sec_free_queue_id(ctx, req);
}

static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx;
	int queue_id;

	/* To load balance */
	queue_id = sec_alloc_queue_id(ctx, req);
	qp_ctx = &ctx->qp_ctx[queue_id];

	req->req_id = sec_alloc_req_id(req, qp_ctx);
	if (req->req_id < 0) {
		sec_free_queue_id(ctx, req);
		return req->req_id;
	}

	if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs))
		req->fake_busy = true;
	else
		req->fake_busy = false;

	return 0;
}

static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
{
	int ret;

	ret = sec_request_init(ctx, req);
	if (ret)
		return ret;

	ret = sec_request_transfer(ctx, req);
	if (ret)
		goto err_uninit_req;

	/* Output the IV before CBC-mode decryption consumes the source */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
		sec_update_iv(req);

	ret = ctx->req_op->bd_send(ctx, req);
	if (ret != -EBUSY && ret != -EINPROGRESS) {
		dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n");
		goto err_send_req;
	}

	return ret;

err_send_req:
	/* On failure, restore the user IV from the saved copy */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
		memcpy(req->c_req.sk_req->iv,
		       req->qp_ctx->res[req->req_id].c_ivin,
		       ctx->c_ctx.ivsize);

	sec_request_untransfer(ctx, req);
err_uninit_req:
	sec_request_uninit(ctx, req);

	return ret;
}

static const struct sec_req_op sec_skcipher_req_ops = {
	.buf_map	= sec_skcipher_sgl_map,
	.buf_unmap	= sec_skcipher_sgl_unmap,
	.do_transfer	= sec_skcipher_copy_iv,
	.bd_fill	= sec_skcipher_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_skcipher_callback,
	.process	= sec_process,
};

static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->req_op = &sec_skcipher_req_ops;

	return sec_skcipher_init(tfm);
}

static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
{
	sec_skcipher_uninit(tfm);
}

static int sec_skcipher_param_check(struct sec_ctx *ctx,
				    struct skcipher_request *sk_req)
{
	u8 c_alg = ctx->c_ctx.c_alg;
	struct device *dev = SEC_CTX_DEV(ctx);

	if (!sk_req->src || !sk_req->dst) {
		dev_err(dev, "skcipher input param error!\n");
		return -EINVAL;
	}

	if (c_alg == SEC_CALG_3DES) {
		if (sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1)) {
			dev_err(dev, "skcipher 3des input length error!\n");
			return -EINVAL;
		}
		return 0;
	} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
		if (sk_req->cryptlen & (AES_BLOCK_SIZE - 1)) {
			dev_err(dev, "skcipher aes input length error!\n");
			return -EINVAL;
		}
		return 0;
	}

	dev_err(dev, "skcipher algorithm error!\n");
	return -EINVAL;
}

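/*
 * Common entry for encrypt and decrypt: a zero-length request completes
 * immediately, otherwise the parameters are validated and the request is
 * handed to the per-context request ops.
 */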
static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
	struct sec_req *req = skcipher_request_ctx(sk_req);
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	if (!sk_req->cryptlen)
		return 0;

	ret = sec_skcipher_param_check(ctx, sk_req);
	if (ret)
		return ret;

	req->c_req.sk_req = sk_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	return ctx->req_op->process(ctx, req);
}

static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, true);
}

static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, false);
}

#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \
	sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_skcipher_decrypt,\
	.encrypt = sec_skcipher_encrypt,\
	.min_keysize = sec_min_key_size,\
	.max_keysize = sec_max_key_size,\
	.ivsize = iv_size,\
},

#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
	max_key_size, blk_size, iv_size) \
	SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
	sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)

static struct skcipher_alg sec_skciphers[] = {
	SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, 0)

	SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,
			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
			 SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
			 DES3_EDE_BLOCK_SIZE, 0)

	SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
			 SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
			 DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
};

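/*
 * The algorithms are registered when the first SEC device becomes active
 * and unregistered when the last one goes away; sec_active_devs is
 * protected by sec_algs_lock.
 */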
int sec_register_to_crypto(void)
{
	int ret = 0;

	/* To avoid repeated registration */
	mutex_lock(&sec_algs_lock);
	if (++sec_active_devs == 1)
		ret = crypto_register_skciphers(sec_skciphers,
						ARRAY_SIZE(sec_skciphers));
	mutex_unlock(&sec_algs_lock);

	return ret;
}

void sec_unregister_from_crypto(void)
{
	mutex_lock(&sec_algs_lock);
	if (--sec_active_devs == 0)
		crypto_unregister_skciphers(sec_skciphers,
					    ARRAY_SIZE(sec_skciphers));
	mutex_unlock(&sec_algs_lock);
}