// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>

#include "cipher.h"

static unsigned int aes_sw_max_len = CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN;
module_param(aes_sw_max_len, uint, 0644);
MODULE_PARM_DESC(aes_sw_max_len,
		 "Only use hardware for AES requests larger than this "
		 "[0=always use hardware; anything <16 breaks AES-GCM; default="
		 __stringify(CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN)"]");

static LIST_HEAD(skcipher_algs);

/* DMA completion callback: unmap buffers, check status and complete the request. */
static void qce_skcipher_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	struct qce_result_dump *result_buf = qce->dma.result_buf;
	enum dma_data_direction dir_src, dir_dst;
	u32 status;
	int error;
	bool diff_dst;

	diff_dst = (req->src != req->dst);
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "skcipher dma termination error (%d)\n",
			error);

	if (diff_dst)
		dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);

	sg_free_table(&rctx->dst_tbl);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);

	/* Propagate the updated IV (CBC/CTR chaining value) back to the caller. */
	memcpy(rctx->iv, result_buf->encr_cntr_iv, rctx->ivsize);
	qce->async_req_done(tmpl->qce, error);
}

static int
qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(skcipher);
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	struct scatterlist *sg;
	bool diff_dst;
	gfp_t gfp;
	int dst_nents, src_nents, ret;

	rctx->iv = req->iv;
	rctx->ivsize = crypto_skcipher_ivsize(skcipher);
	rctx->cryptlen = req->cryptlen;

	diff_dst = (req->src != req->dst);
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (diff_dst)
		rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
	else
		rctx->dst_nents = rctx->src_nents;
	if (rctx->src_nents < 0) {
		dev_err(qce->dev, "Invalid number of src SG.\n");
		return rctx->src_nents;
	}
	if (rctx->dst_nents < 0) {
		dev_err(qce->dev, "Invalid number of dst SG.\n");
		return rctx->dst_nents;
	}

	/* Reserve one extra entry for the result buffer scatterlist. */
	rctx->dst_nents += 1;

	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
						GFP_KERNEL : GFP_ATOMIC;
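	/*
	 * Assemble the destination sg table: the caller's dst entries
	 * followed by one entry for the result buffer, into which the
	 * engine dumps its status registers and the updated IV (consumed
	 * in qce_skcipher_done()).
	 */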
	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
	if (ret)
		return ret;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, req->cryptlen);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg,
			     QCE_RESULT_BUF_SZ);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg_mark_end(sg);
	rctx->dst_sg = rctx->dst_tbl.sgl;

	dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
	if (!dst_nents) {
		ret = -EIO;
		goto error_free;
	}

	if (diff_dst) {
		src_nents = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
		if (!src_nents) {
			ret = -EIO;
			goto error_unmap_dst;
		}
		rctx->src_sg = req->src;
	} else {
		/*
		 * In-place operation: reuse the dst sg list as the source,
		 * minus the trailing result-buffer entry.
		 */
		rctx->src_sg = rctx->dst_sg;
		src_nents = dst_nents - 1;
	}

	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, src_nents,
			       rctx->dst_sg, dst_nents,
			       qce_skcipher_done, async_req);
	if (ret)
		goto error_unmap_src;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_src:
	if (diff_dst)
		dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
error_unmap_dst:
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
	sg_free_table(&rctx->dst_tbl);
	return ret;
}

static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
			       unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
	unsigned int __keylen;
	int ret;

	if (!key || !keylen)
		return -EINVAL;

	/*
	 * AES XTS key1 = key2 not supported by crypto engine.
	 * Revisit to request a fallback cipher in this case.
	 */
	if (IS_XTS(flags)) {
		__keylen = keylen >> 1;
		if (!memcmp(key, key + __keylen, __keylen))
			return -ENOKEY;
	} else {
		__keylen = keylen;
	}

	switch (__keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_256:
		memcpy(ctx->enc_key, key, keylen);
		break;
	case AES_KEYSIZE_192:
		/*
		 * AES-192 is not supported by the engine; the key is kept
		 * only in the fallback tfm set up below.
		 */
		break;
	default:
		return -EINVAL;
	}

	ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
	if (!ret)
		ctx->enc_keylen = keylen;
	return ret;
}

static int qce_des_setkey(struct crypto_skcipher *ablk, const u8 *key,
			  unsigned int keylen)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
	int err;

	err = verify_skcipher_des_key(ablk, key);
	if (err)
		return err;

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
}

static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key,
			   unsigned int keylen)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
	u32 _key[6];
	int err;

	err = verify_skcipher_des3_key(ablk, key);
	if (err)
		return err;

	/*
	 * The crypto engine does not support any two keys
	 * being the same for triple des algorithms. The
	 * verify_skcipher_des3_key does not check for all the
	 * below conditions. Return -ENOKEY in case any two keys
	 * are the same. Revisit to see if a fallback cipher
	 * is needed to handle this condition.
	 */
	memcpy(_key, key, DES3_EDE_KEY_SIZE);
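	/*
	 * Each 64-bit DES key spans two u32 words, so an expression like
	 * (_key[0] ^ _key[2]) | (_key[1] ^ _key[3]) evaluates to zero
	 * exactly when the corresponding pair of keys is identical.
	 */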
	if (!((_key[0] ^ _key[2]) | (_key[1] ^ _key[3])) ||
	    !((_key[2] ^ _key[4]) | (_key[3] ^ _key[5])) ||
	    !((_key[0] ^ _key[4]) | (_key[1] ^ _key[5])))
		return -ENOKEY;

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
}

static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
	unsigned int blocksize = crypto_skcipher_blocksize(tfm);
	int keylen;
	int ret;

	rctx->flags = tmpl->alg_flags;
	rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
	keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;

	/* CE does not handle 0 length messages */
	if (!req->cryptlen)
		return 0;

	/*
	 * ECB and CBC algorithms require message lengths to be
	 * multiples of block size.
	 */
	if (IS_ECB(rctx->flags) || IS_CBC(rctx->flags))
		if (!IS_ALIGNED(req->cryptlen, blocksize))
			return -EINVAL;

	/*
	 * Conditions for requesting a fallback cipher:
	 * AES-192 (not supported by crypto engine (CE))
	 * AES-XTS request with len <= aes_sw_max_len (not recommended to use CE)
	 * AES-XTS request with len > QCE_SECTOR_SIZE that is not a multiple
	 * of it (revisit this condition to check if it is needed in all
	 * versions of CE)
	 */
	if (IS_AES(rctx->flags) &&
	    ((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
	     (IS_XTS(rctx->flags) && ((req->cryptlen <= aes_sw_max_len) ||
				      (req->cryptlen > QCE_SECTOR_SIZE &&
				       req->cryptlen % QCE_SECTOR_SIZE))))) {
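		/*
		 * Hand the request to the software fallback, preserving
		 * the caller's flags and completion callback so the async
		 * contract is unchanged.
		 */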
		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&rctx->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);
		ret = encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
				crypto_skcipher_decrypt(&rctx->fallback_req);
		return ret;
	}

	return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}

static int qce_skcipher_encrypt(struct skcipher_request *req)
{
	return qce_skcipher_crypt(req, 1);
}

static int qce_skcipher_decrypt(struct skcipher_request *req)
{
	return qce_skcipher_crypt(req, 0);
}

static int qce_skcipher_init(struct crypto_skcipher *tfm)
{
	/* take the size without the fallback skcipher_request at the end */
	crypto_skcipher_set_reqsize(tfm, offsetof(struct qce_cipher_reqctx,
						  fallback_req));
	return 0;
}

static int qce_skcipher_init_fallback(struct crypto_skcipher *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
					      0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx) +
					 crypto_skcipher_reqsize(ctx->fallback));
	return 0;
}

static void qce_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->fallback);
}

struct qce_skcipher_def {
	unsigned long flags;
	const char *name;
	const char *drv_name;
	unsigned int blocksize;
	unsigned int chunksize;
	unsigned int ivsize;
	unsigned int min_keysize;
	unsigned int max_keysize;
};
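/*
 * Algorithm variants exposed by the driver. Note that ctr(aes) is a
 * stream cipher, so it advertises a blocksize of 1 with chunksize
 * AES_BLOCK_SIZE, and xts(aes) takes a double-length key (key1 || key2).
 */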
static const struct qce_skcipher_def skcipher_def[] = {
	{
		.flags = QCE_ALG_AES | QCE_MODE_ECB,
		.name = "ecb(aes)",
		.drv_name = "ecb-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = 0,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_AES | QCE_MODE_CBC,
		.name = "cbc(aes)",
		.drv_name = "cbc-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_AES | QCE_MODE_CTR,
		.name = "ctr(aes)",
		.drv_name = "ctr-aes-qce",
		.blocksize = 1,
		.chunksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_AES | QCE_MODE_XTS,
		.name = "xts(aes)",
		.drv_name = "xts-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE * 2,
		.max_keysize = AES_MAX_KEY_SIZE * 2,
	},
	{
		.flags = QCE_ALG_DES | QCE_MODE_ECB,
		.name = "ecb(des)",
		.drv_name = "ecb-des-qce",
		.blocksize = DES_BLOCK_SIZE,
		.ivsize = 0,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_DES | QCE_MODE_CBC,
		.name = "cbc(des)",
		.drv_name = "cbc-des-qce",
		.blocksize = DES_BLOCK_SIZE,
		.ivsize = DES_BLOCK_SIZE,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_3DES | QCE_MODE_ECB,
		.name = "ecb(des3_ede)",
		.drv_name = "ecb-3des-qce",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.ivsize = 0,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_3DES | QCE_MODE_CBC,
		.name = "cbc(des3_ede)",
		.drv_name = "cbc-3des-qce",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
	},
};

static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
				     struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct skcipher_alg *alg;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	alg = &tmpl->alg.skcipher;

	strscpy(alg->base.cra_name, def->name);
	strscpy(alg->base.cra_driver_name, def->drv_name);

	alg->base.cra_blocksize = def->blocksize;
	alg->chunksize = def->chunksize;
	alg->ivsize = def->ivsize;
	alg->min_keysize = def->min_keysize;
	alg->max_keysize = def->max_keysize;
	alg->setkey = IS_3DES(def->flags) ? qce_des3_setkey :
		      IS_DES(def->flags) ? qce_des_setkey :
		      qce_skcipher_setkey;
	alg->encrypt = qce_skcipher_encrypt;
	alg->decrypt = qce_skcipher_decrypt;

	alg->base.cra_priority = 275;
	alg->base.cra_flags = CRYPTO_ALG_ASYNC |
			      CRYPTO_ALG_ALLOCATES_MEMORY |
			      CRYPTO_ALG_KERN_DRIVER_ONLY;
	alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx);
	alg->base.cra_alignmask = 0;
	alg->base.cra_module = THIS_MODULE;

	if (IS_AES(def->flags)) {
		alg->base.cra_flags |= CRYPTO_ALG_NEED_FALLBACK;
		alg->init = qce_skcipher_init_fallback;
		alg->exit = qce_skcipher_exit;
	} else {
		alg->init = qce_skcipher_init;
	}

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_skcipher(alg);
	if (ret) {
		dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
		kfree(tmpl);
		return ret;
	}

	list_add_tail(&tmpl->entry, &skcipher_algs);
	dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name);
	return 0;
}

static void qce_skcipher_unregister(struct qce_device *qce)
{
	struct qce_alg_template *tmpl, *n;

	list_for_each_entry_safe(tmpl, n, &skcipher_algs, entry) {
		crypto_unregister_skcipher(&tmpl->alg.skcipher);
		list_del(&tmpl->entry);
		kfree(tmpl);
	}
}

static int qce_skcipher_register(struct qce_device *qce)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(skcipher_def); i++) {
		ret = qce_skcipher_register_one(&skcipher_def[i], qce);
		if (ret)
			goto err;
	}

	return 0;
err:
	qce_skcipher_unregister(qce);
	return ret;
}

const struct qce_algo_ops skcipher_ops = {
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.register_algs = qce_skcipher_register,
	.unregister_algs = qce_skcipher_unregister,
	.async_req_handle = qce_skcipher_async_req_handle,
};