/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2007 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>

#include "dm.h"

#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
	atomic_t pending;
};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct dm_target *target;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t pending;
	int error;
	sector_t sector;
};

struct dm_crypt_request {
	struct scatterlist sg_in;
	struct scatterlist sg_out;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	const char *(*status)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data, crypto requests and
	 * encryption requests/buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;
	/*
	 * crypto related data
	 */
	struct crypt_iv_operations *iv_gen_ops;
	char *iv_mode;
	union {
		struct crypto_cipher *essiv_tfm;
		int benbi_shift;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	/*
	 * Layout of each crypto request:
	 *
	 *   struct ablkcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;
	struct ablkcipher_request *req;

	char cipher[CRYPTO_MAX_ALG_NAME];
	char chainmode[CRYPTO_MAX_ALG_NAME];
	struct crypto_ablkcipher *tfm;
	unsigned long flags;
	unsigned int key_size;
	u8 key[0];
};

#define MIN_IOS        16
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */
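
/*
 * Example: with a 16-byte IV, the "plain" generator below turns sector 5
 * into the IV 05 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00.
 */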

static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);

	return 0;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm;
	struct crypto_hash *hash_tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	unsigned int saltsize;
	u8 *salt;
	int err;

	if (opts == NULL) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Hash the cipher key with the given hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		return PTR_ERR(hash_tfm);
	}

	saltsize = crypto_hash_digestsize(hash_tfm);
	salt = kmalloc(saltsize, GFP_KERNEL);
	if (salt == NULL) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		crypto_free_hash(hash_tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
	crypto_free_hash(hash_tfm);

	if (err) {
		ti->error = "Error calculating hash in ESSIV";
		kfree(salt);
		return err;
	}

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		kfree(salt);
		return PTR_ERR(essiv_tfm);
	}
	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(cc->tfm)) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return -EINVAL;
	}
	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return err;
	}
	kfree(salt);

	cc->iv_gen_private.essiv_tfm = essiv_tfm;
	return 0;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
	cc->iv_gen_private.essiv_tfm = NULL;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);
	crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
	return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_ablkcipher_blocksize(cc->tfm);
	int log = ilog2(bs);

	/* We need to calculate how far we must shift the sector count
	 * to get the cipher block count; we use this shift in _gen. */

	if (1 << log != bs) {
		ti->error = "cipher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cipher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi_shift = 9 - log;

	return 0;
}
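
/*
 * For example, a 16-byte wide cipher block gives log = 4 and a shift of
 * 9 - 4 = 5, so sector N maps to narrow block count (N << 5) + 1.
 */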

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr       = crypt_iv_benbi_ctr,
	.dtr       = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
	/*
	 * Crypto operation can be asynchronous,
	 * ctx->pending is increased after request submission.
	 * We need to ensure that we don't call the crypt finish
	 * operation before pending has been incremented
	 * (this depends on the crypt submission return code).
	 */
	atomic_set(&ctx->pending, 2);
}
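
/*
 * Encrypt/decrypt a single 512-byte sector of the bio, advancing the
 * input and output offsets in the conversion context as it goes.
 */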

static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r = 0;

	dmreq = (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
	iv = (u8 *)ALIGN((unsigned long)(dmreq + 1),
			 crypto_ablkcipher_alignmask(cc->tfm) + 1);

	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
		    bv_in->bv_offset + ctx->offset_in);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
		    bv_out->bv_offset + ctx->offset_out);

	ctx->offset_in += 1 << SECTOR_SHIFT;
	if (ctx->offset_in >= bv_in->bv_len) {
		ctx->offset_in = 0;
		ctx->idx_in++;
	}

	ctx->offset_out += 1 << SECTOR_SHIFT;
	if (ctx->offset_out >= bv_out->bv_len) {
		ctx->offset_out = 0;
		ctx->idx_out++;
	}

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, ctx->sector);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	return r;
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);
static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	if (!cc->req)
		cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
	ablkcipher_request_set_tfm(cc->req, cc->tfm);
	ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					CRYPTO_TFM_REQ_MAY_SLEEP,
					kcryptd_async_done, ctx);
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r = 0;

	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
	      ctx->idx_out < ctx->bio_out->bi_vcnt) {

		crypt_alloc_req(cc, ctx);

		r = crypt_convert_block(cc, ctx, cc->req);

		switch (r) {
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			INIT_COMPLETION(ctx->restart);
			/* fall through */
		case -EINPROGRESS:
			atomic_inc(&ctx->pending);
			cc->req = NULL;
			r = 0;
			/* fall through */
		case 0:
			ctx->sector++;
			continue;
		}

		break;
	}

	/*
	 * If there are pending crypto operations, run the async code path.
	 * Otherwise process the return code synchronously.
	 * The step of 2 ensures that the async finish doesn't
	 * call crypto finish too early.
	 */
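	/*
	 * For example: a fully synchronous conversion leaves pending at 2,
	 * so the subtraction below yields zero and the result is returned
	 * directly.  With N requests still in flight pending is 2 + N, so
	 * neither this path nor kcryptd_async_done() completes the io until
	 * the last of them drops the count to zero.
	 */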
	if (atomic_sub_return(2, &ctx->pending))
		return -EINPROGRESS;

	return r;
}

static void dm_crypt_bio_destructor(struct bio *bio)
{
	struct dm_crypt_io *io = bio->bi_private;
	struct crypt_config *cc = io->target->private;

	bio_free(bio, cc->bs);
}

/*
 * Generate a new unfragmented bio with the given size.
 * This should never violate the device limitations.
 * May return a smaller bio when running out of pages.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page)
			break;

		/*
		 * If additional pages cannot be allocated without waiting,
		 * return a partially allocated bio; the caller will then try
		 * to allocate additional bios while submitting this partial bio.
		 */
		if (i == (MIN_BIO_PAGES - 1))
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, cc->page_pool);
			break;
		}

		size -= len;
	}

	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	for (i = 0; i < clone->bi_vcnt; i++) {
		bv = bio_iovec_idx(clone, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	if (!atomic_dec_and_test(&io->pending))
		return;

	bio_endio(io->base_bio, io->error);
	mempool_free(io, cc->io_pool);
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 */
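
/*
 * bi_end_io for the cloned bios: frees the write buffer pages and hands
 * finished reads over to kcryptd for decryption.
 */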
static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->target->private;
	unsigned rw = bio_data_dir(clone);

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;

	clone->bi_private = io;
	clone->bi_end_io = crypt_endio;
	clone->bi_bdev = cc->dev->bdev;
	clone->bi_rw = io->base_bio->bi_rw;
	clone->bi_destructor = dm_crypt_bio_destructor;
}

static void kcryptd_io_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

	atomic_inc(&io->pending);

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
	if (unlikely(!clone)) {
		io->error = -ENOMEM;
		crypt_dec_pending(io);
		return;
	}

	clone_init(io, clone);
	clone->bi_idx = 0;
	clone->bi_vcnt = bio_segments(base_bio);
	clone->bi_size = base_bio->bi_size;
	clone->bi_sector = cc->start + io->sector;
	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
	       sizeof(struct bio_vec) * clone->bi_vcnt);

	generic_make_request(clone);
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;

	generic_make_request(clone);
}

static void kcryptd_io(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_io_read(io);
	else
		kcryptd_io_write(io);
}

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_io);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
					  int error, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->target->private;

	if (unlikely(error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		io->error = -EIO;
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);

	clone->bi_sector = cc->start + io->sector;
	io->sector += bio_sectors(clone);

	if (async)
		kcryptd_queue_io(io);
	else {
		atomic_inc(&io->pending);
		generic_make_request(clone);
	}
}

static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	unsigned remaining = io->base_bio->bi_size;
	int r;

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining);
		if (unlikely(!clone)) {
			io->error = -ENOMEM;
			return;
		}

		io->ctx.bio_out = clone;
		io->ctx.idx_out = 0;

		remaining -= clone->bi_size;

		r = crypt_convert(cc, &io->ctx);

		if (r != -EINPROGRESS) {
			kcryptd_crypt_write_io_submit(io, r, 0);
			if (unlikely(r < 0))
				return;
		} else
			atomic_inc(&io->pending);

		/* out of memory -> run queues */
		if (unlikely(remaining))
			congestion_wait(WRITE, HZ/100);
	}
}

static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	atomic_inc(&io->pending);

	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
	kcryptd_crypt_write_convert_loop(io);

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error)
{
	if (unlikely(error < 0))
		io->error = -EIO;

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	int r = 0;

	atomic_inc(&io->pending);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);

	if (r != -EINPROGRESS)
		kcryptd_crypt_read_done(io, r);

	crypt_dec_pending(io);
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct convert_context *ctx = async_req->data;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->target->private;

	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	mempool_free(ablkcipher_request_cast(async_req), cc->req_pool);

	if (!atomic_dec_and_test(&ctx->pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io, error);
	else
		kcryptd_crypt_write_io_submit(io, error, 1);
}

static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		if (endp != &buffer[2])
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	unsigned key_size = strlen(key) >> 1;

	if (cc->key_size && cc->key_size != key_size)
		return -EINVAL;

	cc->key_size = key_size; /* initial settings */

	if ((!key_size && strcmp(key, "-")) ||
	    (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
		return -EINVAL;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	return 0;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));
	return 0;
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
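/*
 * A typical dmsetup table line (start and length in 512-byte sectors,
 * key given as hex):
 *
 *	0 <length> crypt aes-cbc-essiv:sha256 <hex key> 0 /dev/xxx 0
 */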
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	struct crypto_ablkcipher *tfm;
	char *tmp;
	char *cipher;
	char *chainmode;
	char *ivmode;
	char *ivopts;
	unsigned int key_size;
	unsigned long long tmpll;

	if (argc != 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	tmp = argv[0];
	cipher = strsep(&tmp, "-");
	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Unexpected additional cipher options");

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (cc == NULL) {
		ti->error =
			"Cannot allocate transparent encryption context";
		return -ENOMEM;
	}

	if (crypt_set_key(cc, argv[1])) {
		ti->error = "Error decoding key";
		goto bad_cipher;
	}

	/* Compatibility mode for old dm-crypt cipher strings */
	if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "This chaining mode requires an IV mechanism";
		goto bad_cipher;
	}

	if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     chainmode, cipher) >= CRYPTO_MAX_ALG_NAME) {
		ti->error = "Chain mode + cipher name is too long";
		goto bad_cipher;
	}

	tfm = crypto_alloc_ablkcipher(cc->cipher, 0, 0);
	if (IS_ERR(tfm)) {
		ti->error = "Error allocating crypto tfm";
		goto bad_cipher;
	}

	strcpy(cc->cipher, cipher);
	strcpy(cc->chainmode, chainmode);
	cc->tfm = tfm;

	/*
	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi",
	 * "null". See the comments on the IV generators above.
	 */

	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else {
		ti->error = "Invalid IV mode";
		goto bad_ivmode;
	}

	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
		goto bad_ivmode;

	cc->iv_size = crypto_ablkcipher_ivsize(tfm);
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else {
		if (cc->iv_gen_ops) {
			DMWARN("Selected cipher does not support IVs");
			if (cc->iv_gen_ops->dtr)
				cc->iv_gen_ops->dtr(cc);
			cc->iv_gen_ops = NULL;
		}
	}

	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad_slab_pool;
	}

	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start += crypto_ablkcipher_reqsize(tfm);
	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
	cc->dmreq_start += crypto_ablkcipher_alignmask(tfm) &
			   ~(crypto_tfm_ctx_alignment() - 1);

	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
			sizeof(struct dm_crypt_request) + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad_req_pool;
	}
	cc->req = NULL;

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad_page_pool;
	}

	cc->bs = bioset_create(MIN_IOS, MIN_IOS);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad_bs;
	}

	if (crypto_ablkcipher_setkey(tfm, cc->key, key_size) < 0) {
		ti->error = "Error setting key";
		goto bad_device;
	}

	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad_device;
	}
	cc->iv_offset = tmpll;

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad_device;
	}
	cc->start = tmpll;

	if (dm_get_device(ti, argv[3], cc->start, ti->len,
			  dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad_device;
	}

	if (ivmode && cc->iv_gen_ops) {
		if (ivopts)
			*(ivopts - 1) = ':';
		cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
		if (!cc->iv_mode) {
			ti->error = "Error kmallocing iv_mode string";
			goto bad_ivmode_string;
		}
		strcpy(cc->iv_mode, ivmode);
	} else
		cc->iv_mode = NULL;

	cc->io_queue = create_singlethread_workqueue("kcryptd_io");
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad_io_queue;
	}

	cc->crypt_queue = create_singlethread_workqueue("kcryptd");
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad_crypt_queue;
	}

	ti->private = cc;
	return 0;

bad_crypt_queue:
	destroy_workqueue(cc->io_queue);
bad_io_queue:
	kfree(cc->iv_mode);
bad_ivmode_string:
	dm_put_device(ti, cc->dev);
bad_device:
	bioset_free(cc->bs);
bad_bs:
	mempool_destroy(cc->page_pool);
bad_page_pool:
	mempool_destroy(cc->req_pool);
bad_req_pool:
	mempool_destroy(cc->io_pool);
bad_slab_pool:
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
bad_ivmode:
	crypto_free_ablkcipher(tfm);
bad_cipher:
	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
	return -EINVAL;
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;

	destroy_workqueue(cc->io_queue);
	destroy_workqueue(cc->crypt_queue);

	if (cc->req)
		mempool_free(cc->req, cc->req_pool);

	bioset_free(cc->bs);
	mempool_destroy(cc->page_pool);
	mempool_destroy(cc->req_pool);
	mempool_destroy(cc->io_pool);

	kfree(cc->iv_mode);
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
	crypto_free_ablkcipher(cc->tfm);
	dm_put_device(ti, cc->dev);

	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
}

static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct crypt_config *cc = ti->private;
	struct dm_crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
	io->sector = bio->bi_sector - ti->begin;
	io->error = 0;
	atomic_set(&io->pending, 0);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_queue_io(io);
	else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		if (cc->iv_mode)
			DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode,
			       cc->iv_mode);
		else
			DMEMIT("%s-%s ", cc->cipher, cc->chainmode);

		if (cc->key_size > 0) {
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
		       cc->dev->name, (unsigned long long)cc->start);
		break;
	}
	return 0;
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 */
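/*
 * For example, while the device is suspended:
 *	dmsetup message <device_name> 0 key wipe
 *	dmsetup message <device_name> 0 key set <new key in hex>
 */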
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;

	if (argc < 2)
		goto error;

	if (!strnicmp(argv[0], MESG_STR("key"))) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set")))
			return crypt_set_key(cc, argv[2]);
		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe")))
			return crypt_wipe_key(cc);
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static struct target_type crypt_target = {
	.name        = "crypt",
	.version     = {1, 5, 0},
	.module      = THIS_MODULE,
	.ctr         = crypt_ctr,
	.dtr         = crypt_dtr,
	.map         = crypt_map,
	.status      = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume   = crypt_preresume,
	.resume      = crypt_resume,
	.message     = crypt_message,
};

static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_crypt_io_pool);
	}

	return r;
}

static void __exit dm_crypt_exit(void)
{
	int r = dm_unregister_target(&crypt_target);

	if (r < 0)
		DMERR("unregister failed %d", r);

	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");