// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linear symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers.
 *
 * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>
#include "skcipher.h"

static inline struct crypto_lskcipher *__crypto_lskcipher_cast(
        struct crypto_tfm *tfm)
{
        return container_of(tfm, struct crypto_lskcipher, base);
}

static inline struct lskcipher_alg *__crypto_lskcipher_alg(
        struct crypto_alg *alg)
{
        return container_of(alg, struct lskcipher_alg, co.base);
}

static inline struct crypto_istat_cipher *lskcipher_get_stat(
        struct lskcipher_alg *alg)
{
        return skcipher_get_stat_common(&alg->co);
}

static inline int crypto_lskcipher_errstat(struct lskcipher_alg *alg, int err)
{
        struct crypto_istat_cipher *istat = lskcipher_get_stat(alg);

        if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
                return err;

        if (err)
                atomic64_inc(&istat->err_cnt);

        return err;
}

static int lskcipher_setkey_unaligned(struct crypto_lskcipher *tfm,
                                      const u8 *key, unsigned int keylen)
{
        unsigned long alignmask = crypto_lskcipher_alignmask(tfm);
        struct lskcipher_alg *cipher = crypto_lskcipher_alg(tfm);
        u8 *buffer, *alignbuffer;
        unsigned long absize;
        int ret;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_ATOMIC);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = cipher->setkey(tfm, alignbuffer, keylen);
        kfree_sensitive(buffer);
        return ret;
}

int crypto_lskcipher_setkey(struct crypto_lskcipher *tfm, const u8 *key,
                            unsigned int keylen)
{
        unsigned long alignmask = crypto_lskcipher_alignmask(tfm);
        struct lskcipher_alg *cipher = crypto_lskcipher_alg(tfm);

        if (keylen < cipher->co.min_keysize || keylen > cipher->co.max_keysize)
                return -EINVAL;

        if ((unsigned long)key & alignmask)
                return lskcipher_setkey_unaligned(tfm, key, keylen);
        else
                return cipher->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_lskcipher_setkey);

static int crypto_lskcipher_crypt_unaligned(
        struct crypto_lskcipher *tfm, const u8 *src, u8 *dst, unsigned len,
        u8 *iv, int (*crypt)(struct crypto_lskcipher *tfm, const u8 *src,
                             u8 *dst, unsigned len, u8 *iv, bool final))
{
        unsigned ivsize = crypto_lskcipher_ivsize(tfm);
        unsigned bs = crypto_lskcipher_blocksize(tfm);
        unsigned cs = crypto_lskcipher_chunksize(tfm);
        int err;
        u8 *tiv;
        u8 *p;

        BUILD_BUG_ON(MAX_CIPHER_BLOCKSIZE > PAGE_SIZE ||
                     MAX_CIPHER_ALIGNMASK >= PAGE_SIZE);

        tiv = kmalloc(PAGE_SIZE, GFP_ATOMIC);
        if (!tiv)
                return -ENOMEM;

        memcpy(tiv, iv, ivsize);

        p = kmalloc(PAGE_SIZE, GFP_ATOMIC);
        err = -ENOMEM;
        if (!p)
                goto out;

        while (len >= bs) {
                unsigned chunk = min((unsigned)PAGE_SIZE, len);

                if (chunk > cs)
                        chunk &= ~(cs - 1);

                memcpy(p, src, chunk);

                err = crypt(tfm, p, p, chunk, tiv, true);
                if (err)
                        goto out;

                memcpy(dst, p, chunk);
                src += chunk;
                dst += chunk;
                len -= chunk;
        }

        err = len ? -EINVAL : 0;

out:
        memcpy(iv, tiv, ivsize);
        kfree_sensitive(p);
        kfree_sensitive(tiv);
        return err;
}

static int crypto_lskcipher_crypt(struct crypto_lskcipher *tfm, const u8 *src,
                                  u8 *dst, unsigned len, u8 *iv,
                                  int (*crypt)(struct crypto_lskcipher *tfm,
                                               const u8 *src, u8 *dst,
                                               unsigned len, u8 *iv,
                                               bool final))
{
        unsigned long alignmask = crypto_lskcipher_alignmask(tfm);
        struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);
        int ret;

        if (((unsigned long)src | (unsigned long)dst | (unsigned long)iv) &
            alignmask) {
                ret = crypto_lskcipher_crypt_unaligned(tfm, src, dst, len, iv,
                                                       crypt);
                goto out;
        }

        ret = crypt(tfm, src, dst, len, iv, true);

out:
        return crypto_lskcipher_errstat(alg, ret);
}

int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
                             u8 *dst, unsigned len, u8 *iv)
{
        struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);

        if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
                struct crypto_istat_cipher *istat = lskcipher_get_stat(alg);

                atomic64_inc(&istat->encrypt_cnt);
                atomic64_add(len, &istat->encrypt_tlen);
        }

        return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->encrypt);
}
EXPORT_SYMBOL_GPL(crypto_lskcipher_encrypt);

int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
                             u8 *dst, unsigned len, u8 *iv)
{
        struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);

        if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
                struct crypto_istat_cipher *istat = lskcipher_get_stat(alg);

                atomic64_inc(&istat->decrypt_cnt);
                atomic64_add(len, &istat->decrypt_tlen);
        }

        return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->decrypt);
}
EXPORT_SYMBOL_GPL(crypto_lskcipher_decrypt);
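
/*
 * A minimal usage sketch for the API above (illustrative only, not part
 * of this file).  It allocates a transform, sets the key and encrypts a
 * virtually contiguous buffer whose length is a multiple of the block
 * size.  The "cbc(aes)" algorithm name, the 16-byte key/IV sizes and
 * the example_encrypt_one() wrapper are assumptions for the example;
 * only the crypto_*_lskcipher() calls are the real API.
 *
 *	static int example_encrypt_one(const u8 *key, const u8 *src,
 *				       u8 *dst, unsigned int len)
 *	{
 *		struct crypto_lskcipher *tfm;
 *		u8 iv[16] = {};
 *		int err;
 *
 *		tfm = crypto_alloc_lskcipher("cbc(aes)", 0, 0);
 *		if (IS_ERR(tfm))
 *			return PTR_ERR(tfm);
 *
 *		err = crypto_lskcipher_setkey(tfm, key, 16);
 *		if (!err)
 *			err = crypto_lskcipher_encrypt(tfm, src, dst,
 *						       len, iv);
 *
 *		crypto_free_lskcipher(tfm);
 *		return err;
 *	}
 */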

int crypto_lskcipher_setkey_sg(struct crypto_skcipher *tfm, const u8 *key,
                               unsigned int keylen)
{
        struct crypto_lskcipher **ctx = crypto_skcipher_ctx(tfm);

        return crypto_lskcipher_setkey(*ctx, key, keylen);
}

static int crypto_lskcipher_crypt_sg(struct skcipher_request *req,
                                     int (*crypt)(struct crypto_lskcipher *tfm,
                                                  const u8 *src, u8 *dst,
                                                  unsigned len, u8 *iv,
                                                  bool final))
{
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher);
        struct crypto_lskcipher *tfm = *ctx;
        struct skcipher_walk walk;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while (walk.nbytes) {
                err = crypt(tfm, walk.src.virt.addr, walk.dst.virt.addr,
                            walk.nbytes, walk.iv, walk.nbytes == walk.total);
                err = skcipher_walk_done(&walk, err);
        }

        return err;
}

int crypto_lskcipher_encrypt_sg(struct skcipher_request *req)
{
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher);
        struct lskcipher_alg *alg = crypto_lskcipher_alg(*ctx);

        return crypto_lskcipher_crypt_sg(req, alg->encrypt);
}

int crypto_lskcipher_decrypt_sg(struct skcipher_request *req)
{
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher);
        struct lskcipher_alg *alg = crypto_lskcipher_alg(*ctx);

        return crypto_lskcipher_crypt_sg(req, alg->decrypt);
}

static void crypto_lskcipher_exit_tfm(struct crypto_tfm *tfm)
{
        struct crypto_lskcipher *skcipher = __crypto_lskcipher_cast(tfm);
        struct lskcipher_alg *alg = crypto_lskcipher_alg(skcipher);

        alg->exit(skcipher);
}

static int crypto_lskcipher_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_lskcipher *skcipher = __crypto_lskcipher_cast(tfm);
        struct lskcipher_alg *alg = crypto_lskcipher_alg(skcipher);

        if (alg->exit)
                skcipher->base.exit = crypto_lskcipher_exit_tfm;

        if (alg->init)
                return alg->init(skcipher);

        return 0;
}

static void crypto_lskcipher_free_instance(struct crypto_instance *inst)
{
        struct lskcipher_instance *skcipher =
                container_of(inst, struct lskcipher_instance, s.base);

        skcipher->free(skcipher);
}

static void __maybe_unused crypto_lskcipher_show(
        struct seq_file *m, struct crypto_alg *alg)
{
        struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg);

        seq_printf(m, "type         : lskcipher\n");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", skcipher->co.min_keysize);
        seq_printf(m, "max keysize  : %u\n", skcipher->co.max_keysize);
        seq_printf(m, "ivsize       : %u\n", skcipher->co.ivsize);
        seq_printf(m, "chunksize    : %u\n", skcipher->co.chunksize);
}

static int __maybe_unused crypto_lskcipher_report(
        struct sk_buff *skb, struct crypto_alg *alg)
{
        struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg);
        struct crypto_report_blkcipher rblkcipher;

        memset(&rblkcipher, 0, sizeof(rblkcipher));

        strscpy(rblkcipher.type, "lskcipher", sizeof(rblkcipher.type));
        strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

        rblkcipher.blocksize = alg->cra_blocksize;
        rblkcipher.min_keysize = skcipher->co.min_keysize;
        rblkcipher.max_keysize = skcipher->co.max_keysize;
        rblkcipher.ivsize = skcipher->co.ivsize;

        return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
                       sizeof(rblkcipher), &rblkcipher);
}

static int __maybe_unused crypto_lskcipher_report_stat(
        struct sk_buff *skb, struct crypto_alg *alg)
{
        struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg);
        struct crypto_istat_cipher *istat;
        struct crypto_stat_cipher rcipher;

        istat = lskcipher_get_stat(skcipher);

        memset(&rcipher, 0, sizeof(rcipher));

        strscpy(rcipher.type, "cipher", sizeof(rcipher.type));

        rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
        rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
        rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
        rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
        rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);

        return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
}

static const struct crypto_type crypto_lskcipher_type = {
        .extsize = crypto_alg_extsize,
        .init_tfm = crypto_lskcipher_init_tfm,
        .free = crypto_lskcipher_free_instance,
#ifdef CONFIG_PROC_FS
        .show = crypto_lskcipher_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
        .report = crypto_lskcipher_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
        .report_stat = crypto_lskcipher_report_stat,
#endif
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_MASK,
        .type = CRYPTO_ALG_TYPE_LSKCIPHER,
        .tfmsize = offsetof(struct crypto_lskcipher, base),
};

static void crypto_lskcipher_exit_tfm_sg(struct crypto_tfm *tfm)
{
        struct crypto_lskcipher **ctx = crypto_tfm_ctx(tfm);

        crypto_free_lskcipher(*ctx);
}

int crypto_init_lskcipher_ops_sg(struct crypto_tfm *tfm)
{
        struct crypto_lskcipher **ctx = crypto_tfm_ctx(tfm);
        struct crypto_alg *calg = tfm->__crt_alg;
        struct crypto_lskcipher *skcipher;

        if (!crypto_mod_get(calg))
                return -EAGAIN;

        skcipher = crypto_create_tfm(calg, &crypto_lskcipher_type);
        if (IS_ERR(skcipher)) {
                crypto_mod_put(calg);
                return PTR_ERR(skcipher);
        }

        *ctx = skcipher;
        tfm->exit = crypto_lskcipher_exit_tfm_sg;

        return 0;
}

int crypto_grab_lskcipher(struct crypto_lskcipher_spawn *spawn,
                          struct crypto_instance *inst,
                          const char *name, u32 type, u32 mask)
{
        spawn->base.frontend = &crypto_lskcipher_type;
        return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_lskcipher);

struct crypto_lskcipher *crypto_alloc_lskcipher(const char *alg_name,
                                                u32 type, u32 mask)
{
        return crypto_alloc_tfm(alg_name, &crypto_lskcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_lskcipher);

static int lskcipher_prepare_alg(struct lskcipher_alg *alg)
{
        struct crypto_alg *base = &alg->co.base;
        int err;

        err = skcipher_prepare_alg_common(&alg->co);
        if (err)
                return err;

        if (alg->co.chunksize & (alg->co.chunksize - 1))
                return -EINVAL;

        base->cra_type = &crypto_lskcipher_type;
        base->cra_flags |= CRYPTO_ALG_TYPE_LSKCIPHER;

        return 0;
}

int crypto_register_lskcipher(struct lskcipher_alg *alg)
{
        struct crypto_alg *base = &alg->co.base;
        int err;

        err = lskcipher_prepare_alg(alg);
        if (err)
                return err;

        return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_lskcipher);

void crypto_unregister_lskcipher(struct lskcipher_alg *alg)
{
        crypto_unregister_alg(&alg->co.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_lskcipher);
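
/*
 * A minimal registration sketch (illustrative, not part of this file):
 * a driver fills in struct lskcipher_alg and registers it.  All of the
 * example_* names, the 16-byte sizes and the elided handler bodies are
 * assumptions for the example; the handlers must use the setkey and
 * crypt signatures seen throughout this file.
 *
 *	static int example_setkey(struct crypto_lskcipher *tfm,
 *				  const u8 *key, unsigned int keylen)
 *	{ ... }
 *
 *	static int example_crypt(struct crypto_lskcipher *tfm, const u8 *src,
 *				 u8 *dst, unsigned len, u8 *iv, bool final)
 *	{ ... }
 *
 *	static struct lskcipher_alg example_alg = {
 *		.co.base.cra_name	 = "ecb(example)",
 *		.co.base.cra_driver_name = "ecb-example-generic",
 *		.co.base.cra_priority	 = 100,
 *		.co.base.cra_blocksize	 = 16,
 *		.co.base.cra_module	 = THIS_MODULE,
 *		.co.min_keysize		 = 16,
 *		.co.max_keysize		 = 16,
 *		.setkey			 = example_setkey,
 *		.encrypt		 = example_crypt,
 *		.decrypt		 = example_crypt,
 *	};
 *
 *	err = crypto_register_lskcipher(&example_alg);
 */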

int crypto_register_lskciphers(struct lskcipher_alg *algs, int count)
{
        int i, ret;

        for (i = 0; i < count; i++) {
                ret = crypto_register_lskcipher(&algs[i]);
                if (ret)
                        goto err;
        }

        return 0;

err:
        for (--i; i >= 0; --i)
                crypto_unregister_lskcipher(&algs[i]);

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_lskciphers);

void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count)
{
        int i;

        for (i = count - 1; i >= 0; --i)
                crypto_unregister_lskcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_lskciphers);

int lskcipher_register_instance(struct crypto_template *tmpl,
                                struct lskcipher_instance *inst)
{
        int err;

        if (WARN_ON(!inst->free))
                return -EINVAL;

        err = lskcipher_prepare_alg(&inst->alg);
        if (err)
                return err;

        return crypto_register_instance(tmpl, lskcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(lskcipher_register_instance);

static int lskcipher_setkey_simple(struct crypto_lskcipher *tfm, const u8 *key,
                                   unsigned int keylen)
{
        struct crypto_lskcipher *cipher = lskcipher_cipher_simple(tfm);

        crypto_lskcipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
        crypto_lskcipher_set_flags(cipher, crypto_lskcipher_get_flags(tfm) &
                                   CRYPTO_TFM_REQ_MASK);
        return crypto_lskcipher_setkey(cipher, key, keylen);
}

static int lskcipher_init_tfm_simple(struct crypto_lskcipher *tfm)
{
        struct lskcipher_instance *inst = lskcipher_alg_instance(tfm);
        struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm);
        struct crypto_lskcipher_spawn *spawn;
        struct crypto_lskcipher *cipher;

        spawn = lskcipher_instance_ctx(inst);
        cipher = crypto_spawn_lskcipher(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        *ctx = cipher;
        return 0;
}

static void lskcipher_exit_tfm_simple(struct crypto_lskcipher *tfm)
{
        struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm);

        crypto_free_lskcipher(*ctx);
}

static void lskcipher_free_instance_simple(struct lskcipher_instance *inst)
{
        crypto_drop_lskcipher(lskcipher_instance_ctx(inst));
        kfree(inst);
}

/**
 * lskcipher_alloc_instance_simple - allocate instance of simple block cipher
 *
 * Allocate an lskcipher_instance for a simple block cipher mode of operation,
 * e.g. cbc or ecb.  The instance context will have just a single crypto_spawn,
 * that for the underlying cipher.  The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed.  The tfm context defaults to
 * struct crypto_lskcipher *, and default ->setkey(), ->init(), and
 * ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 *
 * Return: a pointer to the new instance, or an ERR_PTR().  The caller still
 *	   needs to register the instance.
 */
struct lskcipher_instance *lskcipher_alloc_instance_simple(
        struct crypto_template *tmpl, struct rtattr **tb)
{
        u32 mask;
        struct lskcipher_instance *inst;
        struct crypto_lskcipher_spawn *spawn;
        char ecb_name[CRYPTO_MAX_ALG_NAME];
        struct lskcipher_alg *cipher_alg;
        const char *cipher_name;
        int err;

        err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_LSKCIPHER, &mask);
        if (err)
                return ERR_PTR(err);

        cipher_name = crypto_attr_alg_name(tb[1]);
        if (IS_ERR(cipher_name))
                return ERR_CAST(cipher_name);

        inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
        if (!inst)
                return ERR_PTR(-ENOMEM);

        spawn = lskcipher_instance_ctx(inst);
        err = crypto_grab_lskcipher(spawn,
                                    lskcipher_crypto_instance(inst),
                                    cipher_name, 0, mask);

        ecb_name[0] = 0;
        if (err == -ENOENT && memcmp(tmpl->name, "ecb", 4)) {
                err = -ENAMETOOLONG;
                if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
                             cipher_name) >= CRYPTO_MAX_ALG_NAME)
                        goto err_free_inst;

                err = crypto_grab_lskcipher(spawn,
                                            lskcipher_crypto_instance(inst),
                                            ecb_name, 0, mask);
        }

        if (err)
                goto err_free_inst;

        cipher_alg = crypto_lskcipher_spawn_alg(spawn);

        err = crypto_inst_setname(lskcipher_crypto_instance(inst), tmpl->name,
                                  &cipher_alg->co.base);
        if (err)
                goto err_free_inst;

        if (ecb_name[0]) {
                int len;

                /* A malformed underlying name must yield an error, not 0. */
                err = -EINVAL;
                len = strscpy(ecb_name, &cipher_alg->co.base.cra_name[4],
                              sizeof(ecb_name));
                if (len < 2)
                        goto err_free_inst;

                if (ecb_name[len - 1] != ')')
                        goto err_free_inst;

                ecb_name[len - 1] = 0;

                err = -ENAMETOOLONG;
                if (snprintf(inst->alg.co.base.cra_name, CRYPTO_MAX_ALG_NAME,
                             "%s(%s)", tmpl->name, ecb_name) >=
                    CRYPTO_MAX_ALG_NAME)
                        goto err_free_inst;

                if (strcmp(ecb_name, cipher_name) &&
                    snprintf(inst->alg.co.base.cra_driver_name,
                             CRYPTO_MAX_ALG_NAME,
                             "%s(%s)", tmpl->name, cipher_name) >=
                    CRYPTO_MAX_ALG_NAME)
                        goto err_free_inst;
        } else {
                /* Don't allow nesting. */
                err = -ELOOP;
                if ((cipher_alg->co.base.cra_flags & CRYPTO_ALG_INSTANCE))
                        goto err_free_inst;
        }

        err = -EINVAL;
        if (cipher_alg->co.ivsize)
                goto err_free_inst;

        inst->free = lskcipher_free_instance_simple;

        /* Default algorithm properties, can be overridden */
        inst->alg.co.base.cra_blocksize = cipher_alg->co.base.cra_blocksize;
        inst->alg.co.base.cra_alignmask = cipher_alg->co.base.cra_alignmask;
        inst->alg.co.base.cra_priority = cipher_alg->co.base.cra_priority;
        inst->alg.co.min_keysize = cipher_alg->co.min_keysize;
        inst->alg.co.max_keysize = cipher_alg->co.max_keysize;
        inst->alg.co.ivsize = cipher_alg->co.base.cra_blocksize;

        /* Use struct crypto_lskcipher * by default, can be overridden */
        inst->alg.co.base.cra_ctxsize = sizeof(struct crypto_lskcipher *);
        inst->alg.setkey = lskcipher_setkey_simple;
        inst->alg.init = lskcipher_init_tfm_simple;
        inst->alg.exit = lskcipher_exit_tfm_simple;

        return inst;

err_free_inst:
        lskcipher_free_instance_simple(inst);
        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(lskcipher_alloc_instance_simple);
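
/*
 * A minimal template sketch using the helper above (illustrative, not
 * part of this file), modelled on how a simple mode such as cbc or ecb
 * would wire itself up.  The example_* names and the encrypt/decrypt
 * handlers (which would use the lskcipher crypt signature) are
 * assumptions; lskcipher_alloc_instance_simple() and
 * lskcipher_register_instance() are the real APIs from this file.
 *
 *	static int example_create(struct crypto_template *tmpl,
 *				  struct rtattr **tb)
 *	{
 *		struct lskcipher_instance *inst;
 *		int err;
 *
 *		inst = lskcipher_alloc_instance_simple(tmpl, tb);
 *		if (IS_ERR(inst))
 *			return PTR_ERR(inst);
 *
 *		inst->alg.encrypt = example_encrypt;
 *		inst->alg.decrypt = example_decrypt;
 *
 *		err = lskcipher_register_instance(tmpl, inst);
 *		if (err)
 *			inst->free(inst);
 *
 *		return err;
 *	}
 */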