// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks. In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/string_choices.h>
#include <net/netlink.h>
#include "skcipher.h"

#define CRYPTO_ALG_TYPE_SKCIPHER_MASK 0x0000000e

/*
 * Per-step state flags for a struct skcipher_walk.  SLOW/COPY/DIFF are
 * mutually exclusive and describe how the current chunk was mapped; SLEEP
 * records whether the request allows sleeping allocations / cond_resched().
 */
enum {
	SKCIPHER_WALK_SLOW = 1 << 0,
	SKCIPHER_WALK_COPY = 1 << 1,
	SKCIPHER_WALK_DIFF = 1 << 2,
	SKCIPHER_WALK_SLEEP = 1 << 3,
};

static const struct crypto_type crypto_skcipher_type;

static int skcipher_walk_next(struct skcipher_walk *walk);

/* Map the current source scatterlist element; address lands in in.__addr. */
static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->in.__addr = scatterwalk_map(&walk->in);
}

/* Map the current destination scatterlist element into out.__addr. */
static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->out.__addr = scatterwalk_map(&walk->out);
}

/* Allocation flags for walk-internal buffers, honouring SKCIPHER_WALK_SLEEP. */
static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

static inline struct skcipher_alg *__crypto_skcipher_alg(
	struct crypto_alg *alg)
{
	return container_of(alg, struct skcipher_alg, base);
}

/**
 * skcipher_walk_done() - finish one step of a skcipher_walk
 * @walk: the skcipher_walk
 * @res: number of bytes *not* processed (>= 0) from walk->nbytes,
 *	 or a -errno value to terminate the walk due to an error
 *
 * This function cleans up after one step of walking through the source and
 * destination scatterlists, and advances to the next step if applicable.
 * walk->nbytes is set to the number of bytes available in the next step,
 * walk->total is set to the new total number of bytes remaining, and
 * walk->{src,dst}.virt.addr is set to the next pair of data pointers. If there
 * is no more data, or if an error occurred (i.e. -errno return), then
 * walk->nbytes and walk->total are set to 0 and all resources owned by the
 * skcipher_walk are freed.
 *
 * Return: 0 or a -errno value. If @res was a -errno value then it will be
 *	   returned, but other errors may occur too.
 */
int skcipher_walk_done(struct skcipher_walk *walk, int res)
{
	unsigned int n = walk->nbytes; /* num bytes processed this step */
	unsigned int total = 0; /* new total remaining */

	if (!n)
		goto finish;

	if (likely(res >= 0)) {
		n -= res; /* subtract num bytes *not* processed */
		total = walk->total - n;
	}

	/* Undo/commit the mapping strategy chosen by skcipher_walk_next(). */
	if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW |
				    SKCIPHER_WALK_COPY |
				    SKCIPHER_WALK_DIFF)))) {
		/* Fast path: in-place on one page, just advance the input. */
		scatterwalk_advance(&walk->in, n);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		scatterwalk_done_src(&walk->in, n);
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		/* Data was processed in walk->page; copy it back out. */
		scatterwalk_advance(&walk->in, n);
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
	} else { /* SKCIPHER_WALK_SLOW */
		if (res > 0) {
			/*
			 * Didn't process all bytes. Either the algorithm is
			 * broken, or this was the last step and it turned out
			 * the message wasn't evenly divisible into blocks but
			 * the algorithm requires it.
			 */
			res = -EINVAL;
			total = 0;
		} else
			memcpy_to_scatterwalk(&walk->out, walk->out.addr, n);
		goto dst_done;
	}

	scatterwalk_done_dst(&walk->out, n);
dst_done:

	if (res > 0)
		res = 0;

	walk->total = total;
	walk->nbytes = 0;

	if (total) {
		if (walk->flags & SKCIPHER_WALK_SLEEP)
			cond_resched();
		walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
				 SKCIPHER_WALK_DIFF);
		return skcipher_walk_next(walk);
	}

finish:
	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return res;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

/*
 * Slow path: bounce the next @bsize bytes of input through an aligned
 * temporary buffer so a block that straddles scatterlist entries (or is
 * misaligned) can be processed in one piece.  The result is copied back to
 * the destination in skcipher_walk_done().
 */
static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	unsigned alignmask = walk->alignmask;
	unsigned n;
	void *buffer;

	if (!walk->buffer)
		walk->buffer = walk->page;
	buffer = walk->buffer;
	if (!buffer) {
		/* Min size for a buffer of bsize bytes aligned to alignmask */
		n = bsize + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));

		buffer = kzalloc(n, skcipher_walk_gfp(walk));
		if (!buffer)
			return skcipher_walk_done(walk, -ENOMEM);
		walk->buffer = buffer;
	}

	buffer = PTR_ALIGN(buffer, alignmask + 1);
	memcpy_from_scatterwalk(buffer, &walk->in, bsize);
	walk->out.__addr = buffer;
	walk->in.__addr = walk->out.addr;

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}

/*
 * Copy path: stage the chunk in walk->page so the algorithm sees aligned
 * in-place data; skcipher_walk_done() copies the output back afterwards.
 */
static int skcipher_next_copy(struct skcipher_walk *walk)
{
	void *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	scatterwalk_unmap(walk->src.virt.addr);
	/*
	 * walk->in is advanced later when the number of bytes actually
	 * processed (which might be less than walk->nbytes) is known.
	 */

	walk->in.__addr = tmp;
	walk->out.__addr = tmp;
	return 0;
}

/*
 * Fast path: map source and destination directly.  If they are the same
 * page+offset, only one mapping is needed; otherwise mark the step DIFF and
 * map both.
 */
static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	/* diff == 0 iff src and dst refer to the same page and offset. */
	diff = offset_in_page(walk->in.offset) -
	       offset_in_page(walk->out.offset);
	diff |= (u8 *)(sg_page(walk->in.sg) + (walk->in.offset >> PAGE_SHIFT)) -
		(u8 *)(sg_page(walk->out.sg) + (walk->out.offset >> PAGE_SHIFT));

	skcipher_map_dst(walk);
	walk->in.__addr = walk->dst.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_src(walk);
	}

	return 0;
}

/*
 * Set up the next chunk of the walk, choosing between the fast, copy and
 * slow strategies based on how much contiguous data is available and how
 * it is aligned.
 */
static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;

	n = walk->total;
	bsize = min(walk->stride, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		/* Less than a full block left overall: the message length
		 * cannot satisfy the algorithm's block requirement. */
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		return skcipher_next_slow(walk, bsize);
	}
	walk->nbytes = n;

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}
		walk->flags |= SKCIPHER_WALK_COPY;
		return skcipher_next_copy(walk);
	}

	return skcipher_next_fast(walk);
}

/*
 * The caller-supplied IV is not sufficiently aligned for the algorithm:
 * allocate an aligned buffer (leaving room for one stride of scratch space
 * in front of it) and work on a copy.  The copy is written back to the
 * original IV in skcipher_walk_done().
 */
static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned aligned_stride = ALIGN(walk->stride, alignmask + 1);
	unsigned size;
	u8 *iv;

	/* Min size for a buffer of stride + ivsize, aligned to alignmask */
	size = aligned_stride + ivsize +
	       (alignmask & ~(crypto_tfm_ctx_alignment() - 1));

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1) + aligned_stride;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

/* Common entry for the first step of a walk: fix up the IV, then start. */
static int skcipher_walk_first(struct skcipher_walk *walk)
{
	/* Walking may sleep or map pages; it cannot run in hard IRQ context. */
	if (WARN_ON_ONCE(in_hardirq()))
		return -EDEADLK;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);
		if (err)
			return err;
	}

	walk->page = NULL;

	return skcipher_walk_next(walk);
}

/*
 * Initialize a walk over the virtual addresses of an skcipher request's
 * source and destination scatterlists.  @atomic forces non-sleeping
 * behaviour even if the request would allow sleeping.
 */
int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
		       struct skcipher_request *__restrict req, bool atomic)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg;

	might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);

	alg = crypto_skcipher_alg(tfm);

	walk->total = req->cryptlen;
	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;
	if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
		walk->flags = SKCIPHER_WALK_SLEEP;
	else
		walk->flags = 0;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	/* lskcipher-backed algorithms carry their stride in co.chunksize. */
	if (alg->co.base.cra_type != &crypto_skcipher_type)
		walk->stride = alg->co.chunksize;
	else
		walk->stride = alg->walksize;

	return skcipher_walk_first(walk);
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);

/*
 * Shared setup for AEAD walks: like skcipher_walk_virt() but the walk
 * starts after the associated data and walk->total is set by the caller.
 */
static int skcipher_walk_aead_common(struct skcipher_walk *__restrict walk,
				     struct aead_request *__restrict req,
				     bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;
	if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
		walk->flags = SKCIPHER_WALK_SLEEP;
	else
		walk->flags = 0;

	if (unlikely(!walk->total))
		return 0;

	/* Skip the associated data; only the ciphertext region is walked. */
	scatterwalk_start_at_pos(&walk->in, req->src, req->assoclen);
	scatterwalk_start_at_pos(&walk->out, req->dst, req->assoclen);

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->stride = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	return skcipher_walk_first(walk);
}

int skcipher_walk_aead_encrypt(struct skcipher_walk *__restrict walk,
			       struct aead_request *__restrict req,
			       bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *__restrict walk,
			       struct aead_request *__restrict req,
			       bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	/* On decrypt the trailing authentication tag is not walked. */
	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);

/* Require a key before use, unless the algorithm takes no key at all. */
static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
	if (crypto_skcipher_max_keysize(tfm) != 0)
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

/* Copy a misaligned key into an aligned scratch buffer before setkey. */
static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	/* Key material: scrub the temporary buffer on free. */
	kfree_sensitive(buffer);
	return ret;
}

int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	int err;

	/* lskcipher-backed tfm: forward to the wrapped lskcipher. */
	if (cipher->co.base.cra_type != &crypto_skcipher_type) {
		struct crypto_lskcipher **ctx = crypto_skcipher_ctx(tfm);

		crypto_lskcipher_clear_flags(*ctx, CRYPTO_TFM_REQ_MASK);
		crypto_lskcipher_set_flags(*ctx,
					   crypto_skcipher_get_flags(tfm) &
					   CRYPTO_TFM_REQ_MASK);
		err = crypto_lskcipher_setkey(*ctx, key, keylen);
		goto out;
	}

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize)
		return -EINVAL;

	if ((unsigned long)key & alignmask)
		err = skcipher_setkey_unaligned(tfm, key, keylen);
	else
		err = cipher->setkey(tfm, key, keylen);

out:
	if (unlikely(err)) {
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_setkey);

int crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;
	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_encrypt_sg(req);
	return alg->encrypt(req);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);

int crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;
	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_decrypt_sg(req);
	return alg->decrypt(req);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);

/*
 * For lskcipher-backed tfms, the per-request context holds an aligned IV
 * followed by the algorithm state; export copies the state portion out.
 */
static int crypto_lskcipher_export(struct skcipher_request *req, void *out)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	u8 *ivs = skcipher_request_ctx(req);

	ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);

	memcpy(out, ivs + crypto_skcipher_ivsize(tfm),
	       crypto_skcipher_statesize(tfm));

	return 0;
}

/* Counterpart of crypto_lskcipher_export(): restore the saved state. */
static int crypto_lskcipher_import(struct skcipher_request *req, const void *in)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	u8 *ivs = skcipher_request_ctx(req);

	ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);

	memcpy(ivs + crypto_skcipher_ivsize(tfm), in,
	       crypto_skcipher_statesize(tfm));

	return 0;
}

/* Default export for algorithms with no state (statesize == 0): no-op. */
static int skcipher_noexport(struct skcipher_request *req, void *out)
{
	return 0;
}

/* Default import for algorithms with no state (statesize == 0): no-op. */
static int skcipher_noimport(struct skcipher_request *req, const void *in)
{
	return 0;
}

int crypto_skcipher_export(struct skcipher_request *req, void *out)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_export(req, out);
	return alg->export(req, out);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_export);

int crypto_skcipher_import(struct skcipher_request *req, const void *in)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_import(req, in);
	return alg->import(req, in);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_import);

/* tfm destructor: forwards to the algorithm's ->exit() hook. */
static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

/*
 * tfm constructor.  lskcipher-backed algorithms get a request context sized
 * for an aligned IV plus state; native skcipher algorithms get their
 * ->init()/->exit() hooks wired up.
 */
static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	skcipher_set_needkey(skcipher);

	if (tfm->__crt_alg->cra_type != &crypto_skcipher_type) {
		unsigned am = crypto_skcipher_alignmask(skcipher);
		unsigned reqsize;

		/* Room to align the IV/state area, plus IV and state. */
		reqsize = am & ~(crypto_tfm_ctx_alignment() - 1);
		reqsize += crypto_skcipher_ivsize(skcipher);
		reqsize += crypto_skcipher_statesize(skcipher);
		crypto_skcipher_set_reqsize(skcipher, reqsize);

		return crypto_init_lskcipher_ops_sg(tfm);
	}

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

/*
 * Context size: lskcipher-backed tfms only store a pointer to the wrapped
 * lskcipher; native skcipher algorithms use the generic extsize.
 */
static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_skcipher_type)
		return sizeof(struct crypto_lskcipher *);

	return crypto_alg_extsize(alg);
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

/* /proc/crypto display callback (only referenced under CONFIG_PROC_FS). */
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);

	seq_printf(m, "type : skcipher\n");
	seq_printf(m, "async : %s\n",
		   str_yes_no(alg->cra_flags & CRYPTO_ALG_ASYNC));
	seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize : %u\n", skcipher->chunksize);
	seq_printf(m, "walksize : %u\n", skcipher->walksize);
	seq_printf(m, "statesize : %u\n", skcipher->statesize);
}

/* crypto_user netlink report callback (only under CONFIG_CRYPTO_USER). */
static int __maybe_unused crypto_skcipher_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
	struct crypto_report_blkcipher rblkcipher;

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}

static const struct crypto_type crypto_skcipher_type = {
	.extsize = crypto_skcipher_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_skcipher_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 struct crypto_instance *inst,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
	const char *alg_name, u32 type, u32 mask)
{
	struct crypto_skcipher *tfm;

	/* Only sync algorithms allowed. */
	mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE;
	type &= ~(CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE);

	tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);

	/*
	 * Make sure we do not allocate something that might get used with
	 * an on-stack request: check the request size.
	 */
	if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
				    MAX_SYNC_SKCIPHER_REQSIZE)) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	return (struct crypto_sync_skcipher *)tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);

int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher);

/* Sanity-check and fill in defaults shared by skcipher and lskcipher algs. */
int skcipher_prepare_alg_common(struct skcipher_alg_common *alg)
{
	struct crypto_alg *base = &alg->base;

	/* Cap sizes so walk/request buffers stay within sane page fractions. */
	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
	    alg->statesize > PAGE_SIZE / 2 ||
	    (alg->ivsize + alg->statesize) > PAGE_SIZE / 2)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;

	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;

	return 0;
}

/* Validate an skcipher_alg and install defaults before registration. */
static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg_common(&alg->co);
	if (err)
		return err;

	if (alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	if (!alg->statesize) {
		/* Stateless algorithm: install no-op import/export. */
		alg->import = skcipher_noimport;
		alg->export = skcipher_noexport;
	} else if (!(alg->import && alg->export))
		return -EINVAL;

	base->cra_type = &crypto_skcipher_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	/* Unwind the registrations that succeeded before the failure. */
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

/* Default ->setkey() for simple modes: forward to the underlying cipher. */
static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int keylen)
{
	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);

	crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	return crypto_cipher_setkey(cipher, key, keylen);
}

/* Default ->init() for simple modes: instantiate the underlying cipher. */
static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->cipher = cipher;
	return 0;
}

/* Default ->exit() for simple modes: release the underlying cipher. */
static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_cipher(ctx->cipher);
}

static void skcipher_free_instance_simple(struct skcipher_instance *inst)
{
	crypto_drop_cipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

/**
 * skcipher_alloc_instance_simple - allocate instance of simple block cipher mode
 *
 * Allocate an skcipher_instance for a simple block cipher mode of operation,
 * e.g. cbc or ecb. The instance context will have just a single crypto_spawn,
 * that for the underlying cipher. The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed. The tfm context defaults to skcipher_ctx_simple, and
 * default ->setkey(), ->init(), and ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 *
 * Return: a pointer to the new instance, or an ERR_PTR(). The caller still
 *	   needs to register the instance.
 */
struct skcipher_instance *skcipher_alloc_instance_simple(
	struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	struct skcipher_instance *inst;
	struct crypto_cipher_spawn *spawn;
	struct crypto_alg *cipher_alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return ERR_PTR(err);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);
	spawn = skcipher_instance_ctx(inst);

	err = crypto_grab_cipher(spawn, skcipher_crypto_instance(inst),
				 crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	cipher_alg = crypto_spawn_cipher_alg(spawn);

	err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
				  cipher_alg);
	if (err)
		goto err_free_inst;

	inst->free = skcipher_free_instance_simple;

	/* Default algorithm properties, can be overridden */
	inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
	inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
	inst->alg.base.cra_priority = cipher_alg->cra_priority;
	inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
	inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
	inst->alg.ivsize = cipher_alg->cra_blocksize;

	/* Use skcipher_ctx_simple by default, can be overridden */
	inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
	inst->alg.setkey = skcipher_setkey_simple;
	inst->alg.init = skcipher_init_tfm_simple;
	inst->alg.exit = skcipher_exit_tfm_simple;

	return inst;

err_free_inst:
	skcipher_free_instance_simple(inst);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");
MODULE_IMPORT_NS("CRYPTO_INTERNAL");