// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks. In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/string_choices.h>
#include <net/netlink.h>
#include "skcipher.h"

#define CRYPTO_ALG_TYPE_SKCIPHER_MASK	0x0000000e

enum {
	SKCIPHER_WALK_SLOW = 1 << 0,
	SKCIPHER_WALK_COPY = 1 << 1,
	SKCIPHER_WALK_DIFF = 1 << 2,
	SKCIPHER_WALK_SLEEP = 1 << 3,
};

static const struct crypto_type crypto_skcipher_type;

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

static inline struct skcipher_alg *__crypto_skcipher_alg(
	struct crypto_alg *alg)
{
	return container_of(alg, struct skcipher_alg, base);
}

/**
 * skcipher_walk_done() - finish one step of a skcipher_walk
 * @walk: the skcipher_walk
 * @res: number of bytes *not* processed (>= 0) from walk->nbytes,
 *	 or a -errno value to terminate the walk due to an error
 *
 * This function cleans up after one step of walking through the source and
 * destination scatterlists, and advances to the next step if applicable.
 * walk->nbytes is set to the number of bytes available in the next step,
 * walk->total is set to the new total number of bytes remaining, and
 * walk->{src,dst}.virt.addr is set to the next pair of data pointers. If there
 * is no more data, or if an error occurred (i.e. -errno return), then
 * walk->nbytes and walk->total are set to 0 and all resources owned by the
 * skcipher_walk are freed.
 *
 * Return: 0 or a -errno value. If @res was a -errno value then it will be
 *	   returned, but other errors may occur too.
 */
int skcipher_walk_done(struct skcipher_walk *walk, int res)
{
	unsigned int n = walk->nbytes; /* num bytes processed this step */
	unsigned int total = 0; /* new total remaining */

	if (!n)
		goto finish;

	if (likely(res >= 0)) {
		n -= res; /* subtract num bytes *not* processed */
		total = walk->total - n;
	}

	if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW |
				    SKCIPHER_WALK_COPY |
				    SKCIPHER_WALK_DIFF)))) {
		scatterwalk_advance(&walk->in, n);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		scatterwalk_done_src(&walk->in, n);
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		scatterwalk_advance(&walk->in, n);
		scatterwalk_map(&walk->out);
		memcpy(walk->out.addr, walk->page, n);
	} else { /* SKCIPHER_WALK_SLOW */
		if (res > 0) {
			/*
			 * Didn't process all bytes. Either the algorithm is
			 * broken, or this was the last step and it turned out
			 * the message wasn't evenly divisible into blocks but
			 * the algorithm requires it.
			 */
			res = -EINVAL;
			total = 0;
		} else
			memcpy_to_scatterwalk(&walk->out, walk->out.addr, n);
		goto dst_done;
	}

	scatterwalk_done_dst(&walk->out, n);
dst_done:

	if (res > 0)
		res = 0;

	walk->total = total;
	walk->nbytes = 0;

	if (total) {
		if (walk->flags & SKCIPHER_WALK_SLEEP)
			cond_resched();
		walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
				 SKCIPHER_WALK_DIFF);
		return skcipher_walk_next(walk);
	}

finish:
	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return res;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);
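
/*
 * Typical use of the walk API by a cipher implementation (an illustrative
 * sketch only; "bsize" is the algorithm's block size and encrypt_blocks()
 * is a hypothetical helper that processes whole blocks from
 * walk.src.virt.addr into walk.dst.virt.addr):
 *
 *	struct skcipher_walk walk;
 *	int err;
 *
 *	err = skcipher_walk_virt(&walk, req, false);
 *	while (walk.nbytes) {
 *		unsigned int n = round_down(walk.nbytes, bsize);
 *
 *		encrypt_blocks(ctx, walk.dst.virt.addr, walk.src.virt.addr, n);
 *		err = skcipher_walk_done(&walk, walk.nbytes - n);
 *	}
 *	return err;
 */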

static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	unsigned alignmask = walk->alignmask;
	unsigned n;
	void *buffer;

	if (!walk->buffer)
		walk->buffer = walk->page;
	buffer = walk->buffer;
	if (!buffer) {
		/* Min size for a buffer of bsize bytes aligned to alignmask */
		n = bsize + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));

		buffer = kzalloc(n, skcipher_walk_gfp(walk));
		if (!buffer)
			return skcipher_walk_done(walk, -ENOMEM);
		walk->buffer = buffer;
	}

	buffer = PTR_ALIGN(buffer, alignmask + 1);
	memcpy_from_scatterwalk(buffer, &walk->in, bsize);
	walk->out.__addr = buffer;
	walk->in.__addr = walk->out.addr;

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}

static int skcipher_next_copy(struct skcipher_walk *walk)
{
	void *tmp = walk->page;

	scatterwalk_map(&walk->in);
	memcpy(tmp, walk->in.addr, walk->nbytes);
	scatterwalk_unmap(&walk->in);
	/*
	 * walk->in is advanced later when the number of bytes actually
	 * processed (which might be less than walk->nbytes) is known.
	 */

	walk->in.__addr = tmp;
	walk->out.__addr = tmp;
	return 0;
}

static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	diff = offset_in_page(walk->in.offset) -
	       offset_in_page(walk->out.offset);
	diff |= (u8 *)(sg_page(walk->in.sg) + (walk->in.offset >> PAGE_SHIFT)) -
		(u8 *)(sg_page(walk->out.sg) + (walk->out.offset >> PAGE_SHIFT));

	scatterwalk_map(&walk->out);
	walk->in.__addr = walk->out.__addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		scatterwalk_map(&walk->in);
	}

	return 0;
}
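
/*
 * Pick the data path for the next step: map the source and destination
 * directly (fast path), bounce the data through walk->page when either
 * offset is misaligned (copy path), or fall back to a small bounce buffer
 * when less than a full block is contiguous (slow path).
 */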
static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;

	n = walk->total;
	bsize = min(walk->stride, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		return skcipher_next_slow(walk, bsize);
	}
	walk->nbytes = n;

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}
		walk->flags |= SKCIPHER_WALK_COPY;
		return skcipher_next_copy(walk);
	}

	return skcipher_next_fast(walk);
}

static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned aligned_stride = ALIGN(walk->stride, alignmask + 1);
	unsigned size;
	u8 *iv;

	/* Min size for a buffer of stride + ivsize, aligned to alignmask */
	size = aligned_stride + ivsize +
	       (alignmask & ~(crypto_tfm_ctx_alignment() - 1));

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1) + aligned_stride;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_hardirq()))
		return -EDEADLK;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);
		if (err)
			return err;
	}

	walk->page = NULL;

	return skcipher_walk_next(walk);
}

int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
		       struct skcipher_request *__restrict req, bool atomic)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg;

	might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);

	alg = crypto_skcipher_alg(tfm);

	walk->total = req->cryptlen;
	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;
	if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
		walk->flags = SKCIPHER_WALK_SLEEP;
	else
		walk->flags = 0;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		walk->stride = alg->co.chunksize;
	else
		walk->stride = alg->walksize;

	return skcipher_walk_first(walk);
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
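
/*
 * Common AEAD walk setup: the walk starts after the associated data
 * (req->assoclen bytes into src/dst) and covers only the region whose
 * length is set up by the encrypt/decrypt wrappers below.
 */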
static int skcipher_walk_aead_common(struct skcipher_walk *__restrict walk,
				     struct aead_request *__restrict req,
				     bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;
	if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
		walk->flags = SKCIPHER_WALK_SLEEP;
	else
		walk->flags = 0;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start_at_pos(&walk->in, req->src, req->assoclen);
	scatterwalk_start_at_pos(&walk->out, req->dst, req->assoclen);

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->stride = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	return skcipher_walk_first(walk);
}

int skcipher_walk_aead_encrypt(struct skcipher_walk *__restrict walk,
			       struct aead_request *__restrict req,
			       bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *__restrict walk,
			       struct aead_request *__restrict req,
			       bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);

static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
	if (crypto_skcipher_max_keysize(tfm) != 0)
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	int err;

	if (cipher->co.base.cra_type != &crypto_skcipher_type) {
		struct crypto_lskcipher **ctx = crypto_skcipher_ctx(tfm);

		crypto_lskcipher_clear_flags(*ctx, CRYPTO_TFM_REQ_MASK);
		crypto_lskcipher_set_flags(*ctx,
					   crypto_skcipher_get_flags(tfm) &
					   CRYPTO_TFM_REQ_MASK);
		err = crypto_lskcipher_setkey(*ctx, key, keylen);
		goto out;
	}

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize)
		return -EINVAL;

	if ((unsigned long)key & alignmask)
		err = skcipher_setkey_unaligned(tfm, key, keylen);
	else
		err = cipher->setkey(tfm, key, keylen);

out:
	if (unlikely(err)) {
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_setkey);

int crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;
	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_encrypt_sg(req);
	return alg->encrypt(req);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);

int crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;
	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_decrypt_sg(req);
	return alg->decrypt(req);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);
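
/*
 * Example of the user-facing API (an illustrative sketch only, with error
 * handling trimmed; "cbc(aes)" and the key/iv/scatterlist variables are
 * placeholders for whatever the caller actually uses):
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, keylen);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *				      CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */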

static int crypto_lskcipher_export(struct skcipher_request *req, void *out)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	u8 *ivs = skcipher_request_ctx(req);

	ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);

	memcpy(out, ivs + crypto_skcipher_ivsize(tfm),
	       crypto_skcipher_statesize(tfm));

	return 0;
}

static int crypto_lskcipher_import(struct skcipher_request *req, const void *in)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	u8 *ivs = skcipher_request_ctx(req);

	ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);

	memcpy(ivs + crypto_skcipher_ivsize(tfm), in,
	       crypto_skcipher_statesize(tfm));

	return 0;
}

static int skcipher_noexport(struct skcipher_request *req, void *out)
{
	return 0;
}

static int skcipher_noimport(struct skcipher_request *req, const void *in)
{
	return 0;
}
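
/*
 * crypto_skcipher_export()/import() save and restore any partial state of
 * an in-progress request. lskcipher-backed tfms keep that state in the
 * request context (see crypto_lskcipher_export/import above); native
 * skcipher algorithms provide their own ->export()/->import() or, if they
 * are stateless, get the no-op helpers above at registration time.
 */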
int crypto_skcipher_export(struct skcipher_request *req, void *out)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_export(req, out);
	return alg->export(req, out);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_export);

int crypto_skcipher_import(struct skcipher_request *req, const void *in)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_import(req, in);
	return alg->import(req, in);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_import);

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	skcipher_set_needkey(skcipher);

	if (tfm->__crt_alg->cra_type != &crypto_skcipher_type) {
		unsigned am = crypto_skcipher_alignmask(skcipher);
		unsigned reqsize;

		reqsize = am & ~(crypto_tfm_ctx_alignment() - 1);
		reqsize += crypto_skcipher_ivsize(skcipher);
		reqsize += crypto_skcipher_statesize(skcipher);
		crypto_skcipher_set_reqsize(skcipher, reqsize);

		return crypto_init_lskcipher_ops_sg(tfm);
	}

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_skcipher_type)
		return sizeof(struct crypto_lskcipher *);

	return crypto_alg_extsize(alg);
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   str_yes_no(alg->cra_flags & CRYPTO_ALG_ASYNC));
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
	seq_printf(m, "statesize    : %u\n", skcipher->statesize);
}

static int __maybe_unused crypto_skcipher_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
	struct crypto_report_blkcipher rblkcipher;

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}

static const struct crypto_type crypto_skcipher_type = {
	.extsize = crypto_skcipher_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_skcipher_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 struct crypto_instance *inst,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
				const char *alg_name, u32 type, u32 mask)
{
	struct crypto_skcipher *tfm;

	/* Only sync algorithms allowed. */
	mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE;
	type &= ~(CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE);

	tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);

	/*
	 * Make sure we do not allocate something that might get used with
	 * an on-stack request: check the request size.
	 */
	if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
				    MAX_SYNC_SKCIPHER_REQSIZE)) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	return (struct crypto_sync_skcipher *)tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);
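
/*
 * Illustrative use of a sync skcipher with an on-stack request (a sketch
 * only; key/iv/scatterlist setup and error handling are omitted):
 *
 *	struct crypto_sync_skcipher *tfm;
 *
 *	tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
 *	crypto_sync_skcipher_setkey(tfm, key, keylen);
 *	{
 *		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 *
 *		skcipher_request_set_sync_tfm(req, tfm);
 *		skcipher_request_set_callback(req, 0, NULL, NULL);
 *		skcipher_request_set_crypt(req, sg, sg, nbytes, iv);
 *		err = crypto_skcipher_encrypt(req);
 *		skcipher_request_zero(req);
 *	}
 *	crypto_free_sync_skcipher(tfm);
 */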

int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher);
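
/*
 * Sanity-check and fill in algorithm parameters before registration:
 * bound ivsize/chunksize/statesize, default chunksize to the block size,
 * and (for full skciphers, below) default walksize to chunksize and
 * require import/export to be either both present or both absent.
 */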
int skcipher_prepare_alg_common(struct skcipher_alg_common *alg)
{
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
	    alg->statesize > PAGE_SIZE / 2 ||
	    (alg->ivsize + alg->statesize) > PAGE_SIZE / 2)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;

	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;

	return 0;
}

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg_common(&alg->co);
	if (err)
		return err;

	if (alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	if (!alg->statesize) {
		alg->import = skcipher_noimport;
		alg->export = skcipher_noexport;
	} else if (!(alg->import && alg->export))
		return -EINVAL;

	base->cra_type = &crypto_skcipher_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);
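
/*
 * How a simple mode template typically combines skcipher_register_instance()
 * with skcipher_alloc_instance_simple() (defined below); a sketch only, with
 * my_encrypt/my_decrypt standing in for the template's real handlers:
 *
 *	static int my_create(struct crypto_template *tmpl, struct rtattr **tb)
 *	{
 *		struct skcipher_instance *inst;
 *		int err;
 *
 *		inst = skcipher_alloc_instance_simple(tmpl, tb);
 *		if (IS_ERR(inst))
 *			return PTR_ERR(inst);
 *
 *		inst->alg.encrypt = my_encrypt;
 *		inst->alg.decrypt = my_decrypt;
 *
 *		err = skcipher_register_instance(tmpl, inst);
 *		if (err)
 *			inst->free(inst);
 *		return err;
 *	}
 */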

static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int keylen)
{
	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);

	crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	return crypto_cipher_setkey(cipher, key, keylen);
}

static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->cipher = cipher;
	return 0;
}

static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_cipher(ctx->cipher);
}

static void skcipher_free_instance_simple(struct skcipher_instance *inst)
{
	crypto_drop_cipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

/**
 * skcipher_alloc_instance_simple - allocate instance of simple block cipher mode
 *
 * Allocate an skcipher_instance for a simple block cipher mode of operation,
 * e.g. cbc or ecb. The instance context will have just a single crypto_spawn,
 * that for the underlying cipher. The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed. The tfm context defaults to skcipher_ctx_simple, and
 * default ->setkey(), ->init(), and ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 *
 * Return: a pointer to the new instance, or an ERR_PTR(). The caller still
 *	   needs to register the instance.
 */
struct skcipher_instance *skcipher_alloc_instance_simple(
	struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	struct skcipher_instance *inst;
	struct crypto_cipher_spawn *spawn;
	struct crypto_alg *cipher_alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return ERR_PTR(err);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);
	spawn = skcipher_instance_ctx(inst);

	err = crypto_grab_cipher(spawn, skcipher_crypto_instance(inst),
				 crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	cipher_alg = crypto_spawn_cipher_alg(spawn);

	err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
				  cipher_alg);
	if (err)
		goto err_free_inst;

	inst->free = skcipher_free_instance_simple;

	/* Default algorithm properties, can be overridden */
	inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
	inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
	inst->alg.base.cra_priority = cipher_alg->cra_priority;
	inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
	inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
	inst->alg.ivsize = cipher_alg->cra_blocksize;

	/* Use skcipher_ctx_simple by default, can be overridden */
	inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
	inst->alg.setkey = skcipher_setkey_simple;
	inst->alg.init = skcipher_init_tfm_simple;
	inst->alg.exit = skcipher_exit_tfm_simple;

	return inst;

err_free_inst:
	skcipher_free_instance_simple(inst);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");
MODULE_IMPORT_NS("CRYPTO_INTERNAL");