/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>

#include "dm.h"

#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)

/*
 * per bio private data
 */
struct dm_crypt_io {
        struct dm_target *target;
        struct bio *base_bio;
        struct work_struct work;
        atomic_t pending;
        int error;
};

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
        struct bio *bio_in;
        struct bio *bio_out;
        unsigned int offset_in;
        unsigned int offset_out;
        unsigned int idx_in;
        unsigned int idx_out;
        sector_t sector;
        int write;
};

struct crypt_config;

struct crypt_iv_operations {
        int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
                   const char *opts);
        void (*dtr)(struct crypt_config *cc);
        const char *(*status)(struct crypt_config *cc);
        int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
struct crypt_config {
        struct dm_dev *dev;
        sector_t start;

        /*
         * pool for per bio private data and
         * for encryption buffer pages
         */
        mempool_t *io_pool;
        mempool_t *page_pool;
        struct bio_set *bs;

        struct workqueue_struct *io_queue;
        struct workqueue_struct *crypt_queue;

        /*
         * crypto related data
         */
        struct crypt_iv_operations *iv_gen_ops;
        char *iv_mode;
        union {
                struct crypto_cipher *essiv_tfm;
                int benbi_shift;
        } iv_gen_private;
        sector_t iv_offset;
        unsigned int iv_size;

        char cipher[CRYPTO_MAX_ALG_NAME];
        char chainmode[CRYPTO_MAX_ALG_NAME];
        struct crypto_blkcipher *tfm;
        unsigned long flags;
        unsigned int key_size;
        u8 key[0];
};

#define MIN_IOS        16
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8
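
/*
 * Note: MIN_IOS sizes the io mempool and the bioset, MIN_POOL_PAGES sizes
 * the page mempool; these reserves let bio processing make forward progress
 * under memory pressure.  MIN_BIO_PAGES is the number of pages
 * crypt_alloc_buffer() is willing to sleep for before it switches to
 * non-blocking page allocations.
 */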

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * plumb: unimplemented, see:
 *   http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */

static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
        memset(iv, 0, cc->iv_size);
        *(u32 *)iv = cpu_to_le32(sector & 0xffffffff);

        return 0;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
                              const char *opts)
{
        struct crypto_cipher *essiv_tfm;
        struct crypto_hash *hash_tfm;
        struct hash_desc desc;
        struct scatterlist sg;
        unsigned int saltsize;
        u8 *salt;
        int err;

        if (opts == NULL) {
                ti->error = "Digest algorithm missing for ESSIV mode";
                return -EINVAL;
        }

        /* Hash the cipher key with the given hash algorithm */
        hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(hash_tfm)) {
                ti->error = "Error initializing ESSIV hash";
                return PTR_ERR(hash_tfm);
        }

        saltsize = crypto_hash_digestsize(hash_tfm);
        salt = kmalloc(saltsize, GFP_KERNEL);
        if (salt == NULL) {
                ti->error = "Error kmallocing salt storage in ESSIV";
                crypto_free_hash(hash_tfm);
                return -ENOMEM;
        }

        sg_init_one(&sg, cc->key, cc->key_size);
        desc.tfm = hash_tfm;
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
        err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
        crypto_free_hash(hash_tfm);

        if (err) {
                ti->error = "Error calculating hash in ESSIV";
                kfree(salt);
                return err;
        }

        /* Setup the essiv_tfm with the given salt */
        essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(essiv_tfm)) {
                ti->error = "Error allocating crypto tfm for ESSIV";
                kfree(salt);
                return PTR_ERR(essiv_tfm);
        }
        if (crypto_cipher_blocksize(essiv_tfm) !=
            crypto_blkcipher_ivsize(cc->tfm)) {
                ti->error = "Block size of ESSIV cipher does "
                            "not match IV size of block cipher";
                crypto_free_cipher(essiv_tfm);
                kfree(salt);
                return -EINVAL;
        }
        err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
        if (err) {
                ti->error = "Failed to set key for ESSIV cipher";
                crypto_free_cipher(essiv_tfm);
                kfree(salt);
                return err;
        }
        kfree(salt);

        cc->iv_gen_private.essiv_tfm = essiv_tfm;
        return 0;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
        crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
        cc->iv_gen_private.essiv_tfm = NULL;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
        memset(iv, 0, cc->iv_size);
        *(u64 *)iv = cpu_to_le64(sector);
        crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
        return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
                              const char *opts)
{
        unsigned int bs = crypto_blkcipher_blocksize(cc->tfm);
        int log = ilog2(bs);

        /* we need to calculate how far we must shift the sector count
         * to get the cipher block count, we use this shift in _gen */

        if (1 << log != bs) {
                ti->error = "cipher blocksize is not a power of 2";
                return -EINVAL;
        }

        if (log > 9) {
                ti->error = "cipher blocksize is > 512";
                return -EINVAL;
        }

        cc->iv_gen_private.benbi_shift = 9 - log;

        return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}
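
/*
 * Example: for a 16-byte wide block cipher mode (e.g. LRW-32-AES),
 * log2(16) = 4, so benbi_shift = 9 - 4 = 5 and the IV for sector S is the
 * big-endian 64-bit value (S << 5) + 1, i.e. the 1-based index of the first
 * 16-byte block of that sector, stored in the last 8 bytes of the IV buffer.
 */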
static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
        __be64 val;

        memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is set below */

        val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
        put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

        return 0;
}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
        memset(iv, 0, cc->iv_size);

        return 0;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
        .generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
        .ctr       = crypt_iv_essiv_ctr,
        .dtr       = crypt_iv_essiv_dtr,
        .generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
        .ctr       = crypt_iv_benbi_ctr,
        .dtr       = crypt_iv_benbi_dtr,
        .generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
        .generator = crypt_iv_null_gen
};

static int
crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
                          struct scatterlist *in, unsigned int length,
                          int write, sector_t sector)
{
        u8 iv[cc->iv_size] __attribute__ ((aligned(__alignof__(u64))));
        struct blkcipher_desc desc = {
                .tfm = cc->tfm,
                .info = iv,
                .flags = CRYPTO_TFM_REQ_MAY_SLEEP,
        };
        int r;

        if (cc->iv_gen_ops) {
                r = cc->iv_gen_ops->generator(cc, iv, sector);
                if (r < 0)
                        return r;

                if (write)
                        r = crypto_blkcipher_encrypt_iv(&desc, out, in, length);
                else
                        r = crypto_blkcipher_decrypt_iv(&desc, out, in, length);
        } else {
                if (write)
                        r = crypto_blkcipher_encrypt(&desc, out, in, length);
                else
                        r = crypto_blkcipher_decrypt(&desc, out, in, length);
        }

        return r;
}

static void crypt_convert_init(struct crypt_config *cc,
                               struct convert_context *ctx,
                               struct bio *bio_out, struct bio *bio_in,
                               sector_t sector, int write)
{
        ctx->bio_in = bio_in;
        ctx->bio_out = bio_out;
        ctx->offset_in = 0;
        ctx->offset_out = 0;
        ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
        ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
        ctx->sector = sector + cc->iv_offset;
        ctx->write = write;
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
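/*
 * The conversion walks both bio_vec lists in 512-byte (1 << SECTOR_SHIFT)
 * steps; each sector is handed to crypt_convert_scatterlist() as an
 * independent request with its own IV derived from ctx->sector.
 */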
static int crypt_convert(struct crypt_config *cc,
                         struct convert_context *ctx)
{
        int r = 0;

        while (ctx->idx_in < ctx->bio_in->bi_vcnt &&
               ctx->idx_out < ctx->bio_out->bi_vcnt) {
                struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
                struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
                struct scatterlist sg_in, sg_out;

                sg_init_table(&sg_in, 1);
                sg_set_page(&sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
                            bv_in->bv_offset + ctx->offset_in);

                sg_init_table(&sg_out, 1);
                sg_set_page(&sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
                            bv_out->bv_offset + ctx->offset_out);

                ctx->offset_in += sg_in.length;
                if (ctx->offset_in >= bv_in->bv_len) {
                        ctx->offset_in = 0;
                        ctx->idx_in++;
                }

                ctx->offset_out += sg_out.length;
                if (ctx->offset_out >= bv_out->bv_len) {
                        ctx->offset_out = 0;
                        ctx->idx_out++;
                }

                r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length,
                                              ctx->write, ctx->sector);
                if (r < 0)
                        break;

                ctx->sector++;
        }

        return r;
}

static void dm_crypt_bio_destructor(struct bio *bio)
{
        struct dm_crypt_io *io = bio->bi_private;
        struct crypt_config *cc = io->target->private;

        bio_free(bio, cc->bs);
}

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
        struct crypt_config *cc = io->target->private;
        struct bio *clone;
        unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
        unsigned int i;

        clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
        if (!clone)
                return NULL;

        clone_init(io, clone);

        for (i = 0; i < nr_iovecs; i++) {
                struct bio_vec *bv = bio_iovec_idx(clone, i);

                bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
                if (!bv->bv_page)
                        break;

                /*
                 * if additional pages cannot be allocated without waiting,
                 * return a partially allocated bio, the caller will then try
                 * to allocate additional bios while submitting this partial bio
                 */
                if (i == (MIN_BIO_PAGES - 1))
                        gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

                bv->bv_offset = 0;
                if (size > PAGE_SIZE)
                        bv->bv_len = PAGE_SIZE;
                else
                        bv->bv_len = size;

                clone->bi_size += bv->bv_len;
                clone->bi_vcnt++;
                size -= bv->bv_len;
        }

        if (!clone->bi_size) {
                bio_put(clone);
                return NULL;
        }

        return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
        unsigned int i;
        struct bio_vec *bv;

        for (i = 0; i < clone->bi_vcnt; i++) {
                bv = bio_iovec_idx(clone, i);
                BUG_ON(!bv->bv_page);
                mempool_free(bv->bv_page, cc->page_pool);
                bv->bv_page = NULL;
        }
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void crypt_dec_pending(struct dm_crypt_io *io, int error)
{
        struct crypt_config *cc = (struct crypt_config *) io->target->private;

        if (error < 0)
                io->error = error;

        if (!atomic_dec_and_test(&io->pending))
                return;

        bio_endio(io->base_bio, io->error);

        mempool_free(io, cc->io_pool);
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 */
static void kcryptd_do_work(struct work_struct *work);
static void kcryptd_do_crypt(struct work_struct *work);

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->target->private;

        INIT_WORK(&io->work, kcryptd_do_work);
        queue_work(cc->io_queue, &io->work);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->target->private;

        INIT_WORK(&io->work, kcryptd_do_crypt);
        queue_work(cc->crypt_queue, &io->work);
}

static void crypt_endio(struct bio *clone, int error)
{
        struct dm_crypt_io *io = clone->bi_private;
        struct crypt_config *cc = io->target->private;
        unsigned read_io = bio_data_dir(clone) == READ;

        /*
         * free the processed pages
         */
        if (!read_io) {
                crypt_free_buffer_pages(cc, clone);
                goto out;
        }

        if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) {
                error = -EIO;
                goto out;
        }

        bio_put(clone);
        kcryptd_queue_crypt(io);
        return;

out:
        bio_put(clone);
        crypt_dec_pending(io, error);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
        struct crypt_config *cc = io->target->private;

        clone->bi_private = io;
        clone->bi_end_io = crypt_endio;
        clone->bi_bdev = cc->dev->bdev;
        clone->bi_rw = io->base_bio->bi_rw;
        clone->bi_destructor = dm_crypt_bio_destructor;
}
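
/*
 * READ path: submit a clone that references the original bvecs, so the
 * device reads the ciphertext straight into the caller's pages; when the
 * clone completes, crypt_endio() hands the io to the crypt workqueue and
 * process_read_endio() decrypts the data in place.
 */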
static void process_read(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->target->private;
        struct bio *base_bio = io->base_bio;
        struct bio *clone;
        sector_t sector = base_bio->bi_sector - io->target->begin;

        atomic_inc(&io->pending);

        /*
         * The block layer might modify the bvec array, so always
         * copy the required bvecs because we need the original
         * one in order to decrypt the whole bio data *afterwards*.
         */
        clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
        if (unlikely(!clone)) {
                crypt_dec_pending(io, -ENOMEM);
                return;
        }

        clone_init(io, clone);
        clone->bi_idx = 0;
        clone->bi_vcnt = bio_segments(base_bio);
        clone->bi_size = base_bio->bi_size;
        clone->bi_sector = cc->start + sector;
        memcpy(clone->bi_io_vec, bio_iovec(base_bio),
               sizeof(struct bio_vec) * clone->bi_vcnt);

        generic_make_request(clone);
}

static void process_write(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->target->private;
        struct bio *base_bio = io->base_bio;
        struct bio *clone;
        struct convert_context ctx;
        unsigned remaining = base_bio->bi_size;
        sector_t sector = base_bio->bi_sector - io->target->begin;

        atomic_inc(&io->pending);

        crypt_convert_init(cc, &ctx, NULL, base_bio, sector, 1);

        /*
         * The allocated buffers can be smaller than the whole bio,
         * so repeat the whole process until all the data can be handled.
         */
        while (remaining) {
                clone = crypt_alloc_buffer(io, remaining);
                if (unlikely(!clone)) {
                        crypt_dec_pending(io, -ENOMEM);
                        return;
                }

                ctx.bio_out = clone;
                ctx.idx_out = 0;

                if (unlikely(crypt_convert(cc, &ctx) < 0)) {
                        crypt_free_buffer_pages(cc, clone);
                        bio_put(clone);
                        crypt_dec_pending(io, -EIO);
                        return;
                }

                /* crypt_convert should have filled the clone bio */
                BUG_ON(ctx.idx_out < clone->bi_vcnt);

                clone->bi_sector = cc->start + sector;
                remaining -= clone->bi_size;
                sector += bio_sectors(clone);

                /* Grab another reference to the io struct
                 * before we kick off the request */
                if (remaining)
                        atomic_inc(&io->pending);

                generic_make_request(clone);

                /* Do not reference clone after this - it
                 * may be gone already. */

                /* out of memory -> run queues */
                if (remaining)
                        congestion_wait(WRITE, HZ/100);
        }
}
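
/*
 * Runs on the crypt workqueue after a READ clone has completed: the
 * ciphertext already sits in the original bio's pages, so decrypt it in
 * place (bio_in == bio_out == base_bio) and complete the request.
 */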
static void process_read_endio(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->target->private;
        struct convert_context ctx;

        crypt_convert_init(cc, &ctx, io->base_bio, io->base_bio,
                           io->base_bio->bi_sector - io->target->begin, 0);

        crypt_dec_pending(io, crypt_convert(cc, &ctx));
}

static void kcryptd_do_work(struct work_struct *work)
{
        struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

        if (bio_data_dir(io->base_bio) == READ)
                process_read(io);
}

static void kcryptd_do_crypt(struct work_struct *work)
{
        struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

        if (bio_data_dir(io->base_bio) == READ)
                process_read_endio(io);
        else
                process_write(io);
}

/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
        char buffer[3];
        char *endp;
        unsigned int i;

        buffer[2] = '\0';

        for (i = 0; i < size; i++) {
                buffer[0] = *hex++;
                buffer[1] = *hex++;

                key[i] = (u8)simple_strtoul(buffer, &endp, 16);

                if (endp != &buffer[2])
                        return -EINVAL;
        }

        if (*hex != '\0')
                return -EINVAL;

        return 0;
}

/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
        unsigned int i;

        for (i = 0; i < size; i++) {
                sprintf(hex, "%02x", *key);
                hex += 2;
                key++;
        }
}

static int crypt_set_key(struct crypt_config *cc, char *key)
{
        unsigned key_size = strlen(key) >> 1;

        if (cc->key_size && cc->key_size != key_size)
                return -EINVAL;

        cc->key_size = key_size; /* initial settings */

        if ((!key_size && strcmp(key, "-")) ||
            (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
                return -EINVAL;

        set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

        return 0;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
        clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
        memset(&cc->key, 0, cc->key_size * sizeof(u8));
        return 0;
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
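/*
 * The <cipher> argument has the form cipher-chainmode-ivmode[:ivopts].
 * Hypothetical table line for illustration only (device path and size
 * are examples):
 *
 *   0 2097152 crypt aes-cbc-essiv:sha256 <hex key> 0 /dev/sdb 0
 *
 * where "0 2097152 crypt" is the usual dm table prefix and the remaining
 * five fields are the parameters parsed below.
 */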
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct crypt_config *cc;
        struct crypto_blkcipher *tfm;
        char *tmp;
        char *cipher;
        char *chainmode;
        char *ivmode;
        char *ivopts;
        unsigned int key_size;
        unsigned long long tmpll;

        if (argc != 5) {
                ti->error = "Not enough arguments";
                return -EINVAL;
        }

        tmp = argv[0];
        cipher = strsep(&tmp, "-");
        chainmode = strsep(&tmp, "-");
        ivopts = strsep(&tmp, "-");
        ivmode = strsep(&ivopts, ":");

        if (tmp)
                DMWARN("Unexpected additional cipher options");

        key_size = strlen(argv[1]) >> 1;

        cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
        if (cc == NULL) {
                ti->error =
                        "Cannot allocate transparent encryption context";
                return -ENOMEM;
        }

        if (crypt_set_key(cc, argv[1])) {
                ti->error = "Error decoding key";
                goto bad_cipher;
        }

        /* Compatibility mode for old dm-crypt cipher strings */
        if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
                chainmode = "cbc";
                ivmode = "plain";
        }

        if (strcmp(chainmode, "ecb") && !ivmode) {
                ti->error = "This chaining mode requires an IV mechanism";
                goto bad_cipher;
        }

        if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)",
                     chainmode, cipher) >= CRYPTO_MAX_ALG_NAME) {
                ti->error = "Chain mode + cipher name is too long";
                goto bad_cipher;
        }

        tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm)) {
                ti->error = "Error allocating crypto tfm";
                goto bad_cipher;
        }

        strcpy(cc->cipher, cipher);
        strcpy(cc->chainmode, chainmode);
        cc->tfm = tfm;

        /*
         * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi",
         * "null". See the comments above the IV generator code.
         */

        if (ivmode == NULL)
                cc->iv_gen_ops = NULL;
        else if (strcmp(ivmode, "plain") == 0)
                cc->iv_gen_ops = &crypt_iv_plain_ops;
        else if (strcmp(ivmode, "essiv") == 0)
                cc->iv_gen_ops = &crypt_iv_essiv_ops;
        else if (strcmp(ivmode, "benbi") == 0)
                cc->iv_gen_ops = &crypt_iv_benbi_ops;
        else if (strcmp(ivmode, "null") == 0)
                cc->iv_gen_ops = &crypt_iv_null_ops;
        else {
                ti->error = "Invalid IV mode";
                goto bad_ivmode;
        }

        if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
            cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
                goto bad_ivmode;

        cc->iv_size = crypto_blkcipher_ivsize(tfm);
        if (cc->iv_size)
                /* at least a 64 bit sector number should fit in our buffer */
                cc->iv_size = max(cc->iv_size,
                                  (unsigned int)(sizeof(u64) / sizeof(u8)));
        else {
                if (cc->iv_gen_ops) {
                        DMWARN("Selected cipher does not support IVs");
                        if (cc->iv_gen_ops->dtr)
                                cc->iv_gen_ops->dtr(cc);
                        cc->iv_gen_ops = NULL;
                }
        }

        cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
        if (!cc->io_pool) {
                ti->error = "Cannot allocate crypt io mempool";
                goto bad_slab_pool;
        }

        cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
        if (!cc->page_pool) {
                ti->error = "Cannot allocate page mempool";
                goto bad_page_pool;
        }

        cc->bs = bioset_create(MIN_IOS, MIN_IOS);
        if (!cc->bs) {
                ti->error = "Cannot allocate crypt bioset";
                goto bad_bs;
        }

        if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
                ti->error = "Error setting key";
                goto bad_device;
        }

        if (sscanf(argv[2], "%llu", &tmpll) != 1) {
                ti->error = "Invalid iv_offset sector";
                goto bad_device;
        }
        cc->iv_offset = tmpll;

        if (sscanf(argv[4], "%llu", &tmpll) != 1) {
                ti->error = "Invalid device sector";
                goto bad_device;
        }
        cc->start = tmpll;

        if (dm_get_device(ti, argv[3], cc->start, ti->len,
                          dm_table_get_mode(ti->table), &cc->dev)) {
                ti->error = "Device lookup failed";
                goto bad_device;
        }

        if (ivmode && cc->iv_gen_ops) {
                if (ivopts)
                        *(ivopts - 1) = ':';
                cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
                if (!cc->iv_mode) {
                        ti->error = "Error kmallocing iv_mode string";
                        goto bad_ivmode_string;
                }
                strcpy(cc->iv_mode, ivmode);
        } else
                cc->iv_mode = NULL;

        cc->io_queue = create_singlethread_workqueue("kcryptd_io");
        if (!cc->io_queue) {
                ti->error = "Couldn't create kcryptd io queue";
                goto bad_io_queue;
        }

        cc->crypt_queue = create_singlethread_workqueue("kcryptd");
        if (!cc->crypt_queue) {
                ti->error = "Couldn't create kcryptd queue";
                goto bad_crypt_queue;
        }

        ti->private = cc;
        return 0;

bad_crypt_queue:
        destroy_workqueue(cc->io_queue);
bad_io_queue:
        kfree(cc->iv_mode);
bad_ivmode_string:
        dm_put_device(ti, cc->dev);
bad_device:
        bioset_free(cc->bs);
bad_bs:
        mempool_destroy(cc->page_pool);
bad_page_pool:
        mempool_destroy(cc->io_pool);
bad_slab_pool:
        if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
                cc->iv_gen_ops->dtr(cc);
bad_ivmode:
        crypto_free_blkcipher(tfm);
bad_cipher:
        /* Must zero key material before freeing */
        memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
        kfree(cc);
        return -EINVAL;
}

static void crypt_dtr(struct dm_target *ti)
{
        struct crypt_config *cc = (struct crypt_config *) ti->private;

        destroy_workqueue(cc->io_queue);
        destroy_workqueue(cc->crypt_queue);

        bioset_free(cc->bs);
        mempool_destroy(cc->page_pool);
        mempool_destroy(cc->io_pool);

        kfree(cc->iv_mode);
        if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
                cc->iv_gen_ops->dtr(cc);
        crypto_free_blkcipher(cc->tfm);
        dm_put_device(ti, cc->dev);

        /* Must zero key material before freeing */
        memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
        kfree(cc);
}

static int crypt_map(struct dm_target *ti, struct bio *bio,
                     union map_info *map_context)
{
        struct crypt_config *cc = ti->private;
        struct dm_crypt_io *io;

        io = mempool_alloc(cc->io_pool, GFP_NOIO);
        io->target = ti;
        io->base_bio = bio;
        io->error = 0;
        atomic_set(&io->pending, 0);

        if (bio_data_dir(io->base_bio) == READ)
                kcryptd_queue_io(io);
        else
                kcryptd_queue_crypt(io);

        return DM_MAPIO_SUBMITTED;
}

static int crypt_status(struct dm_target *ti, status_type_t type,
                        char *result, unsigned int maxlen)
{
        struct crypt_config *cc = (struct crypt_config *) ti->private;
        unsigned int sz = 0;

        switch (type) {
        case STATUSTYPE_INFO:
                result[0] = '\0';
                break;

        case STATUSTYPE_TABLE:
                if (cc->iv_mode)
                        DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode,
                               cc->iv_mode);
                else
                        DMEMIT("%s-%s ", cc->cipher, cc->chainmode);

                if (cc->key_size > 0) {
                        if ((maxlen - sz) < ((cc->key_size << 1) + 1))
                                return -ENOMEM;

                        crypt_encode_key(result + sz, cc->key, cc->key_size);
                        sz += cc->key_size << 1;
                } else {
                        if (sz >= maxlen)
                                return -ENOMEM;
                        result[sz++] = '-';
                }

                DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
                       cc->dev->name, (unsigned long long)cc->start);
                break;
        }
        return 0;
}

static void crypt_postsuspend(struct dm_target *ti)
{
        struct crypt_config *cc = ti->private;

        set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
        struct crypt_config *cc = ti->private;

        if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
                DMERR("aborting resume - crypt key is not set.");
                return -EAGAIN;
        }

        return 0;
}

static void crypt_resume(struct dm_target *ti)
{
        struct crypt_config *cc = ti->private;

        clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 */
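/*
 * Example usage (hypothetical device name; the target must be suspended
 * first, see the DM_CRYPT_SUSPENDED check below):
 *
 *   dmsetup message cryptdev 0 key wipe
 *   dmsetup message cryptdev 0 key set <new hex key>
 */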
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
        struct crypt_config *cc = ti->private;

        if (argc < 2)
                goto error;

        if (!strnicmp(argv[0], MESG_STR("key"))) {
                if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
                        DMWARN("not suspended during key manipulation.");
                        return -EINVAL;
                }
                if (argc == 3 && !strnicmp(argv[1], MESG_STR("set")))
                        return crypt_set_key(cc, argv[2]);
                if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe")))
                        return crypt_wipe_key(cc);
        }

error:
        DMWARN("unrecognised message received.");
        return -EINVAL;
}

static struct target_type crypt_target = {
        .name    = "crypt",
        .version = {1, 5, 0},
        .module  = THIS_MODULE,
        .ctr     = crypt_ctr,
        .dtr     = crypt_dtr,
        .map     = crypt_map,
        .status  = crypt_status,
        .postsuspend = crypt_postsuspend,
        .preresume   = crypt_preresume,
        .resume      = crypt_resume,
        .message     = crypt_message,
};

static int __init dm_crypt_init(void)
{
        int r;

        _crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
        if (!_crypt_io_pool)
                return -ENOMEM;

        r = dm_register_target(&crypt_target);
        if (r < 0) {
                DMERR("register failed %d", r);
                kmem_cache_destroy(_crypt_io_pool);
        }

        return r;
}

static void __exit dm_crypt_exit(void)
{
        int r = dm_unregister_target(&crypt_target);

        if (r < 0)
                DMERR("unregister failed %d", r);

        kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");