1 /* 2 * Copyright (C) 2003 Jana Saout <jana@saout.de> 3 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org> 4 * Copyright (C) 2006-2015 Red Hat, Inc. All rights reserved. 5 * Copyright (C) 2013 Milan Broz <gmazyland@gmail.com> 6 * 7 * This file is released under the GPL. 8 */ 9 10 #include <linux/completion.h> 11 #include <linux/err.h> 12 #include <linux/module.h> 13 #include <linux/init.h> 14 #include <linux/kernel.h> 15 #include <linux/bio.h> 16 #include <linux/blkdev.h> 17 #include <linux/mempool.h> 18 #include <linux/slab.h> 19 #include <linux/crypto.h> 20 #include <linux/workqueue.h> 21 #include <linux/kthread.h> 22 #include <linux/backing-dev.h> 23 #include <linux/atomic.h> 24 #include <linux/scatterlist.h> 25 #include <linux/rbtree.h> 26 #include <asm/page.h> 27 #include <asm/unaligned.h> 28 #include <crypto/hash.h> 29 #include <crypto/md5.h> 30 #include <crypto/algapi.h> 31 32 #include <linux/device-mapper.h> 33 34 #define DM_MSG_PREFIX "crypt" 35 36 /* 37 * context holding the current state of a multi-part conversion 38 */ 39 struct convert_context { 40 struct completion restart; 41 struct bio *bio_in; 42 struct bio *bio_out; 43 struct bvec_iter iter_in; 44 struct bvec_iter iter_out; 45 sector_t cc_sector; 46 atomic_t cc_pending; 47 struct ablkcipher_request *req; 48 }; 49 50 /* 51 * per bio private data 52 */ 53 struct dm_crypt_io { 54 struct crypt_config *cc; 55 struct bio *base_bio; 56 struct work_struct work; 57 58 struct convert_context ctx; 59 60 atomic_t io_pending; 61 int error; 62 sector_t sector; 63 64 struct rb_node rb_node; 65 } CRYPTO_MINALIGN_ATTR; 66 67 struct dm_crypt_request { 68 struct convert_context *ctx; 69 struct scatterlist sg_in; 70 struct scatterlist sg_out; 71 sector_t iv_sector; 72 }; 73 74 struct crypt_config; 75 76 struct crypt_iv_operations { 77 int (*ctr)(struct crypt_config *cc, struct dm_target *ti, 78 const char *opts); 79 void (*dtr)(struct crypt_config *cc); 80 int (*init)(struct crypt_config *cc); 81 int (*wipe)(struct crypt_config *cc); 82 int (*generator)(struct crypt_config *cc, u8 *iv, 83 struct dm_crypt_request *dmreq); 84 int (*post)(struct crypt_config *cc, u8 *iv, 85 struct dm_crypt_request *dmreq); 86 }; 87 88 struct iv_essiv_private { 89 struct crypto_hash *hash_tfm; 90 u8 *salt; 91 }; 92 93 struct iv_benbi_private { 94 int shift; 95 }; 96 97 #define LMK_SEED_SIZE 64 /* hash + 0 */ 98 struct iv_lmk_private { 99 struct crypto_shash *hash_tfm; 100 u8 *seed; 101 }; 102 103 #define TCW_WHITENING_SIZE 16 104 struct iv_tcw_private { 105 struct crypto_shash *crc32_tfm; 106 u8 *iv_seed; 107 u8 *whitening; 108 }; 109 110 /* 111 * Crypt: maps a linear range of a block device 112 * and encrypts / decrypts at the same time. 113 */ 114 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, 115 DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD }; 116 117 /* 118 * The fields in here must be read only after initialization. 
 */
struct crypt_config {
        struct dm_dev *dev;
        sector_t start;

        /*
         * pool for per bio private data, crypto requests and
         * encryption requests/buffer pages
         */
        mempool_t *req_pool;
        mempool_t *page_pool;
        struct bio_set *bs;
        struct mutex bio_alloc_lock;

        struct workqueue_struct *io_queue;
        struct workqueue_struct *crypt_queue;

        struct task_struct *write_thread;
        wait_queue_head_t write_thread_wait;
        struct rb_root write_tree;

        char *cipher;
        char *cipher_string;

        struct crypt_iv_operations *iv_gen_ops;
        union {
                struct iv_essiv_private essiv;
                struct iv_benbi_private benbi;
                struct iv_lmk_private lmk;
                struct iv_tcw_private tcw;
        } iv_gen_private;
        sector_t iv_offset;
        unsigned int iv_size;

        /* ESSIV: struct crypto_cipher *essiv_tfm */
        void *iv_private;
        struct crypto_ablkcipher **tfms;
        unsigned tfms_count;

        /*
         * Layout of each crypto request:
         *
         *   struct ablkcipher_request
         *      context
         *      padding
         *   struct dm_crypt_request
         *      padding
         *   IV
         *
         * The padding is added so that dm_crypt_request and the IV are
         * correctly aligned.
         */
        unsigned int dmreq_start;

        unsigned int per_bio_data_size;

        unsigned long flags;
        unsigned int key_size;
        unsigned int key_parts;      /* independent parts in key buffer */
        unsigned int key_extra_size; /* additional keys length */
        u8 key[0];
};

#define MIN_IOS        16

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);

/*
 * Use this to access cipher attributes that are the same for each CPU.
 */
static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
{
        return cc->tfms[0];
}

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * lmk:  Compatible implementation of the block chaining mode used
 *       by the Loop-AES block device encryption system
 *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from the sector number, the data and
 *       optionally extra IV seed.
 *       This means that after decryption the first block
 *       of sector must be tweaked according to decrypted data.
223 * Loop-AES can use three encryption schemes: 224 * version 1: is plain aes-cbc mode 225 * version 2: uses 64 multikey scheme with lmk IV generator 226 * version 3: the same as version 2 with additional IV seed 227 * (it uses 65 keys, last key is used as IV seed) 228 * 229 * tcw: Compatible implementation of the block chaining mode used 230 * by the TrueCrypt device encryption system (prior to version 4.1). 231 * For more info see: https://gitlab.com/cryptsetup/cryptsetup/wikis/TrueCryptOnDiskFormat 232 * It operates on full 512 byte sectors and uses CBC 233 * with an IV derived from initial key and the sector number. 234 * In addition, whitening value is applied on every sector, whitening 235 * is calculated from initial key, sector number and mixed using CRC32. 236 * Note that this encryption scheme is vulnerable to watermarking attacks 237 * and should be used for old compatible containers access only. 238 * 239 * plumb: unimplemented, see: 240 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454 241 */ 242 243 static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, 244 struct dm_crypt_request *dmreq) 245 { 246 memset(iv, 0, cc->iv_size); 247 *(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff); 248 249 return 0; 250 } 251 252 static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv, 253 struct dm_crypt_request *dmreq) 254 { 255 memset(iv, 0, cc->iv_size); 256 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector); 257 258 return 0; 259 } 260 261 /* Initialise ESSIV - compute salt but no local memory allocations */ 262 static int crypt_iv_essiv_init(struct crypt_config *cc) 263 { 264 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; 265 struct hash_desc desc; 266 struct scatterlist sg; 267 struct crypto_cipher *essiv_tfm; 268 int err; 269 270 sg_init_one(&sg, cc->key, cc->key_size); 271 desc.tfm = essiv->hash_tfm; 272 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; 273 274 err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt); 275 if (err) 276 return err; 277 278 essiv_tfm = cc->iv_private; 279 280 err = crypto_cipher_setkey(essiv_tfm, essiv->salt, 281 crypto_hash_digestsize(essiv->hash_tfm)); 282 if (err) 283 return err; 284 285 return 0; 286 } 287 288 /* Wipe salt and reset key derived from volume key */ 289 static int crypt_iv_essiv_wipe(struct crypt_config *cc) 290 { 291 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; 292 unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm); 293 struct crypto_cipher *essiv_tfm; 294 int r, err = 0; 295 296 memset(essiv->salt, 0, salt_size); 297 298 essiv_tfm = cc->iv_private; 299 r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size); 300 if (r) 301 err = r; 302 303 return err; 304 } 305 306 /* Set up per cpu cipher state */ 307 static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc, 308 struct dm_target *ti, 309 u8 *salt, unsigned saltsize) 310 { 311 struct crypto_cipher *essiv_tfm; 312 int err; 313 314 /* Setup the essiv_tfm with the given salt */ 315 essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC); 316 if (IS_ERR(essiv_tfm)) { 317 ti->error = "Error allocating crypto tfm for ESSIV"; 318 return essiv_tfm; 319 } 320 321 if (crypto_cipher_blocksize(essiv_tfm) != 322 crypto_ablkcipher_ivsize(any_tfm(cc))) { 323 ti->error = "Block size of ESSIV cipher does " 324 "not match IV size of block cipher"; 325 crypto_free_cipher(essiv_tfm); 326 return ERR_PTR(-EINVAL); 327 } 328 329 err = crypto_cipher_setkey(essiv_tfm, salt, saltsize); 330 if (err) { 331 
ti->error = "Failed to set key for ESSIV cipher"; 332 crypto_free_cipher(essiv_tfm); 333 return ERR_PTR(err); 334 } 335 336 return essiv_tfm; 337 } 338 339 static void crypt_iv_essiv_dtr(struct crypt_config *cc) 340 { 341 struct crypto_cipher *essiv_tfm; 342 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; 343 344 crypto_free_hash(essiv->hash_tfm); 345 essiv->hash_tfm = NULL; 346 347 kzfree(essiv->salt); 348 essiv->salt = NULL; 349 350 essiv_tfm = cc->iv_private; 351 352 if (essiv_tfm) 353 crypto_free_cipher(essiv_tfm); 354 355 cc->iv_private = NULL; 356 } 357 358 static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, 359 const char *opts) 360 { 361 struct crypto_cipher *essiv_tfm = NULL; 362 struct crypto_hash *hash_tfm = NULL; 363 u8 *salt = NULL; 364 int err; 365 366 if (!opts) { 367 ti->error = "Digest algorithm missing for ESSIV mode"; 368 return -EINVAL; 369 } 370 371 /* Allocate hash algorithm */ 372 hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC); 373 if (IS_ERR(hash_tfm)) { 374 ti->error = "Error initializing ESSIV hash"; 375 err = PTR_ERR(hash_tfm); 376 goto bad; 377 } 378 379 salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL); 380 if (!salt) { 381 ti->error = "Error kmallocing salt storage in ESSIV"; 382 err = -ENOMEM; 383 goto bad; 384 } 385 386 cc->iv_gen_private.essiv.salt = salt; 387 cc->iv_gen_private.essiv.hash_tfm = hash_tfm; 388 389 essiv_tfm = setup_essiv_cpu(cc, ti, salt, 390 crypto_hash_digestsize(hash_tfm)); 391 if (IS_ERR(essiv_tfm)) { 392 crypt_iv_essiv_dtr(cc); 393 return PTR_ERR(essiv_tfm); 394 } 395 cc->iv_private = essiv_tfm; 396 397 return 0; 398 399 bad: 400 if (hash_tfm && !IS_ERR(hash_tfm)) 401 crypto_free_hash(hash_tfm); 402 kfree(salt); 403 return err; 404 } 405 406 static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, 407 struct dm_crypt_request *dmreq) 408 { 409 struct crypto_cipher *essiv_tfm = cc->iv_private; 410 411 memset(iv, 0, cc->iv_size); 412 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector); 413 crypto_cipher_encrypt_one(essiv_tfm, iv, iv); 414 415 return 0; 416 } 417 418 static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti, 419 const char *opts) 420 { 421 unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc)); 422 int log = ilog2(bs); 423 424 /* we need to calculate how far we must shift the sector count 425 * to get the cipher block count, we use this shift in _gen */ 426 427 if (1 << log != bs) { 428 ti->error = "cypher blocksize is not a power of 2"; 429 return -EINVAL; 430 } 431 432 if (log > 9) { 433 ti->error = "cypher blocksize is > 512"; 434 return -EINVAL; 435 } 436 437 cc->iv_gen_private.benbi.shift = 9 - log; 438 439 return 0; 440 } 441 442 static void crypt_iv_benbi_dtr(struct crypt_config *cc) 443 { 444 } 445 446 static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, 447 struct dm_crypt_request *dmreq) 448 { 449 __be64 val; 450 451 memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */ 452 453 val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1); 454 put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64))); 455 456 return 0; 457 } 458 459 static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, 460 struct dm_crypt_request *dmreq) 461 { 462 memset(iv, 0, cc->iv_size); 463 464 return 0; 465 } 466 467 static void crypt_iv_lmk_dtr(struct crypt_config *cc) 468 { 469 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; 470 471 if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm)) 472 
crypto_free_shash(lmk->hash_tfm); 473 lmk->hash_tfm = NULL; 474 475 kzfree(lmk->seed); 476 lmk->seed = NULL; 477 } 478 479 static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti, 480 const char *opts) 481 { 482 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; 483 484 lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0); 485 if (IS_ERR(lmk->hash_tfm)) { 486 ti->error = "Error initializing LMK hash"; 487 return PTR_ERR(lmk->hash_tfm); 488 } 489 490 /* No seed in LMK version 2 */ 491 if (cc->key_parts == cc->tfms_count) { 492 lmk->seed = NULL; 493 return 0; 494 } 495 496 lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL); 497 if (!lmk->seed) { 498 crypt_iv_lmk_dtr(cc); 499 ti->error = "Error kmallocing seed storage in LMK"; 500 return -ENOMEM; 501 } 502 503 return 0; 504 } 505 506 static int crypt_iv_lmk_init(struct crypt_config *cc) 507 { 508 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; 509 int subkey_size = cc->key_size / cc->key_parts; 510 511 /* LMK seed is on the position of LMK_KEYS + 1 key */ 512 if (lmk->seed) 513 memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size), 514 crypto_shash_digestsize(lmk->hash_tfm)); 515 516 return 0; 517 } 518 519 static int crypt_iv_lmk_wipe(struct crypt_config *cc) 520 { 521 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; 522 523 if (lmk->seed) 524 memset(lmk->seed, 0, LMK_SEED_SIZE); 525 526 return 0; 527 } 528 529 static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv, 530 struct dm_crypt_request *dmreq, 531 u8 *data) 532 { 533 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; 534 SHASH_DESC_ON_STACK(desc, lmk->hash_tfm); 535 struct md5_state md5state; 536 __le32 buf[4]; 537 int i, r; 538 539 desc->tfm = lmk->hash_tfm; 540 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; 541 542 r = crypto_shash_init(desc); 543 if (r) 544 return r; 545 546 if (lmk->seed) { 547 r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE); 548 if (r) 549 return r; 550 } 551 552 /* Sector is always 512B, block size 16, add data of blocks 1-31 */ 553 r = crypto_shash_update(desc, data + 16, 16 * 31); 554 if (r) 555 return r; 556 557 /* Sector is cropped to 56 bits here */ 558 buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF); 559 buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000); 560 buf[2] = cpu_to_le32(4024); 561 buf[3] = 0; 562 r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf)); 563 if (r) 564 return r; 565 566 /* No MD5 padding here */ 567 r = crypto_shash_export(desc, &md5state); 568 if (r) 569 return r; 570 571 for (i = 0; i < MD5_HASH_WORDS; i++) 572 __cpu_to_le32s(&md5state.hash[i]); 573 memcpy(iv, &md5state.hash, cc->iv_size); 574 575 return 0; 576 } 577 578 static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv, 579 struct dm_crypt_request *dmreq) 580 { 581 u8 *src; 582 int r = 0; 583 584 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { 585 src = kmap_atomic(sg_page(&dmreq->sg_in)); 586 r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset); 587 kunmap_atomic(src); 588 } else 589 memset(iv, 0, cc->iv_size); 590 591 return r; 592 } 593 594 static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv, 595 struct dm_crypt_request *dmreq) 596 { 597 u8 *dst; 598 int r; 599 600 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) 601 return 0; 602 603 dst = kmap_atomic(sg_page(&dmreq->sg_out)); 604 r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset); 605 606 /* Tweak the first block of plaintext sector */ 607 if (!r) 608 crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size); 609 610 
        kunmap_atomic(dst);
        return r;
}

static void crypt_iv_tcw_dtr(struct crypt_config *cc)
{
        struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

        kzfree(tcw->iv_seed);
        tcw->iv_seed = NULL;
        kzfree(tcw->whitening);
        tcw->whitening = NULL;

        if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
                crypto_free_shash(tcw->crc32_tfm);
        tcw->crc32_tfm = NULL;
}

static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
                            const char *opts)
{
        struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

        if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
                ti->error = "Wrong key size for TCW";
                return -EINVAL;
        }

        tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
        if (IS_ERR(tcw->crc32_tfm)) {
                ti->error = "Error initializing CRC32 in TCW";
                return PTR_ERR(tcw->crc32_tfm);
        }

        tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
        tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
        if (!tcw->iv_seed || !tcw->whitening) {
                crypt_iv_tcw_dtr(cc);
                ti->error = "Error allocating seed storage in TCW";
                return -ENOMEM;
        }

        return 0;
}

static int crypt_iv_tcw_init(struct crypt_config *cc)
{
        struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
        int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;

        memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
        memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
               TCW_WHITENING_SIZE);

        return 0;
}

static int crypt_iv_tcw_wipe(struct crypt_config *cc)
{
        struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

        memset(tcw->iv_seed, 0, cc->iv_size);
        memset(tcw->whitening, 0, TCW_WHITENING_SIZE);

        return 0;
}

static int crypt_iv_tcw_whitening(struct crypt_config *cc,
                                  struct dm_crypt_request *dmreq,
                                  u8 *data)
{
        struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
        u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
        u8 buf[TCW_WHITENING_SIZE];
        SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
        int i, r;

        /* xor whitening with sector number */
        memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
        crypto_xor(buf, (u8 *)&sector, 8);
        crypto_xor(&buf[8], (u8 *)&sector, 8);

        /* calculate crc32 for every 32bit part and xor it */
        desc->tfm = tcw->crc32_tfm;
        desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
        for (i = 0; i < 4; i++) {
                r = crypto_shash_init(desc);
                if (r)
                        goto out;
                r = crypto_shash_update(desc, &buf[i * 4], 4);
                if (r)
                        goto out;
                r = crypto_shash_final(desc, &buf[i * 4]);
                if (r)
                        goto out;
        }
        crypto_xor(&buf[0], &buf[12], 4);
        crypto_xor(&buf[4], &buf[8], 4);

        /* apply whitening (8 bytes) to whole sector */
        for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
                crypto_xor(data + i * 8, buf, 8);
out:
        memzero_explicit(buf, sizeof(buf));
        return r;
}

static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
                            struct dm_crypt_request *dmreq)
{
        struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
        u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
        u8 *src;
        int r = 0;

        /* Remove whitening from ciphertext */
        if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
                src = kmap_atomic(sg_page(&dmreq->sg_in));
                r = crypt_iv_tcw_whitening(cc, dmreq, src + dmreq->sg_in.offset);
                kunmap_atomic(src);
        }

        /* Calculate IV */
        memcpy(iv, tcw->iv_seed, cc->iv_size);
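        /*
         * The IV is the per-volume seed with the 64-bit little-endian sector
         * number XORed into its first 8 bytes and, for IVs larger than 8
         * bytes, into the remaining bytes as well.
         */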
        crypto_xor(iv, (u8 *)&sector, 8);
        if (cc->iv_size > 8)
                crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8);

        return r;
}

static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
                             struct dm_crypt_request *dmreq)
{
        u8 *dst;
        int r;

        if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
                return 0;

        /* Apply whitening on ciphertext */
        dst = kmap_atomic(sg_page(&dmreq->sg_out));
        r = crypt_iv_tcw_whitening(cc, dmreq, dst + dmreq->sg_out.offset);
        kunmap_atomic(dst);

        return r;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
        .generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_plain64_ops = {
        .generator = crypt_iv_plain64_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
        .ctr       = crypt_iv_essiv_ctr,
        .dtr       = crypt_iv_essiv_dtr,
        .init      = crypt_iv_essiv_init,
        .wipe      = crypt_iv_essiv_wipe,
        .generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
        .ctr       = crypt_iv_benbi_ctr,
        .dtr       = crypt_iv_benbi_dtr,
        .generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
        .generator = crypt_iv_null_gen
};

static struct crypt_iv_operations crypt_iv_lmk_ops = {
        .ctr       = crypt_iv_lmk_ctr,
        .dtr       = crypt_iv_lmk_dtr,
        .init      = crypt_iv_lmk_init,
        .wipe      = crypt_iv_lmk_wipe,
        .generator = crypt_iv_lmk_gen,
        .post      = crypt_iv_lmk_post
};

static struct crypt_iv_operations crypt_iv_tcw_ops = {
        .ctr       = crypt_iv_tcw_ctr,
        .dtr       = crypt_iv_tcw_dtr,
        .init      = crypt_iv_tcw_init,
        .wipe      = crypt_iv_tcw_wipe,
        .generator = crypt_iv_tcw_gen,
        .post      = crypt_iv_tcw_post
};

static void crypt_convert_init(struct crypt_config *cc,
                               struct convert_context *ctx,
                               struct bio *bio_out, struct bio *bio_in,
                               sector_t sector)
{
        ctx->bio_in = bio_in;
        ctx->bio_out = bio_out;
        if (bio_in)
                ctx->iter_in = bio_in->bi_iter;
        if (bio_out)
                ctx->iter_out = bio_out->bi_iter;
        ctx->cc_sector = sector + cc->iv_offset;
        init_completion(&ctx->restart);
}

static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
                                             struct ablkcipher_request *req)
{
        return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
                                               struct dm_crypt_request *dmreq)
{
        return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
}

static u8 *iv_of_dmreq(struct crypt_config *cc,
                       struct dm_crypt_request *dmreq)
{
        return (u8 *)ALIGN((unsigned long)(dmreq + 1),
                crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
}

static int crypt_convert_block(struct crypt_config *cc,
                               struct convert_context *ctx,
                               struct ablkcipher_request *req)
{
        struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
        struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
        struct dm_crypt_request *dmreq;
        u8 *iv;
        int r;

        dmreq = dmreq_of_req(cc, req);
        iv = iv_of_dmreq(cc, dmreq);

        dmreq->iv_sector = ctx->cc_sector;
        dmreq->ctx = ctx;
        sg_init_table(&dmreq->sg_in, 1);
        sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
                    bv_in.bv_offset);

        sg_init_table(&dmreq->sg_out, 1);
        sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
                    bv_out.bv_offset);

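        /*
         * dm-crypt converts data one 512-byte sector at a time; advance both
         * bio iterators by exactly one sector per crypto request.
         */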
bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT); 860 bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT); 861 862 if (cc->iv_gen_ops) { 863 r = cc->iv_gen_ops->generator(cc, iv, dmreq); 864 if (r < 0) 865 return r; 866 } 867 868 ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out, 869 1 << SECTOR_SHIFT, iv); 870 871 if (bio_data_dir(ctx->bio_in) == WRITE) 872 r = crypto_ablkcipher_encrypt(req); 873 else 874 r = crypto_ablkcipher_decrypt(req); 875 876 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post) 877 r = cc->iv_gen_ops->post(cc, iv, dmreq); 878 879 return r; 880 } 881 882 static void kcryptd_async_done(struct crypto_async_request *async_req, 883 int error); 884 885 static void crypt_alloc_req(struct crypt_config *cc, 886 struct convert_context *ctx) 887 { 888 unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1); 889 890 if (!ctx->req) 891 ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO); 892 893 ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]); 894 895 /* 896 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs 897 * requests if driver request queue is full. 898 */ 899 ablkcipher_request_set_callback(ctx->req, 900 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, 901 kcryptd_async_done, dmreq_of_req(cc, ctx->req)); 902 } 903 904 static void crypt_free_req(struct crypt_config *cc, 905 struct ablkcipher_request *req, struct bio *base_bio) 906 { 907 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size); 908 909 if ((struct ablkcipher_request *)(io + 1) != req) 910 mempool_free(req, cc->req_pool); 911 } 912 913 /* 914 * Encrypt / decrypt data from one bio to another one (can be the same one) 915 */ 916 static int crypt_convert(struct crypt_config *cc, 917 struct convert_context *ctx) 918 { 919 int r; 920 921 atomic_set(&ctx->cc_pending, 1); 922 923 while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) { 924 925 crypt_alloc_req(cc, ctx); 926 927 atomic_inc(&ctx->cc_pending); 928 929 r = crypt_convert_block(cc, ctx, ctx->req); 930 931 switch (r) { 932 /* 933 * The request was queued by a crypto driver 934 * but the driver request queue is full, let's wait. 935 */ 936 case -EBUSY: 937 wait_for_completion(&ctx->restart); 938 reinit_completion(&ctx->restart); 939 /* fall through */ 940 /* 941 * The request is queued and processed asynchronously, 942 * completion function kcryptd_async_done() will be called. 943 */ 944 case -EINPROGRESS: 945 ctx->req = NULL; 946 ctx->cc_sector++; 947 continue; 948 /* 949 * The request was already processed (synchronously). 950 */ 951 case 0: 952 atomic_dec(&ctx->cc_pending); 953 ctx->cc_sector++; 954 cond_resched(); 955 continue; 956 957 /* There was an error while processing the request. */ 958 default: 959 atomic_dec(&ctx->cc_pending); 960 return r; 961 } 962 } 963 964 return 0; 965 } 966 967 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone); 968 969 /* 970 * Generate a new unfragmented bio with the given size 971 * This should never violate the device limitations 972 * 973 * This function may be called concurrently. If we allocate from the mempool 974 * concurrently, there is a possibility of deadlock. For example, if we have 975 * mempool of 256 pages, two processes, each wanting 256, pages allocate from 976 * the mempool concurrently, it may deadlock in a situation where both processes 977 * have allocated 128 pages and the mempool is exhausted. 978 * 979 * In order to avoid this scenario we allocate the pages under a mutex. 
980 * 981 * In order to not degrade performance with excessive locking, we try 982 * non-blocking allocations without a mutex first but on failure we fallback 983 * to blocking allocations with a mutex. 984 */ 985 static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size) 986 { 987 struct crypt_config *cc = io->cc; 988 struct bio *clone; 989 unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 990 gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM; 991 unsigned i, len, remaining_size; 992 struct page *page; 993 struct bio_vec *bvec; 994 995 retry: 996 if (unlikely(gfp_mask & __GFP_WAIT)) 997 mutex_lock(&cc->bio_alloc_lock); 998 999 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs); 1000 if (!clone) 1001 goto return_clone; 1002 1003 clone_init(io, clone); 1004 1005 remaining_size = size; 1006 1007 for (i = 0; i < nr_iovecs; i++) { 1008 page = mempool_alloc(cc->page_pool, gfp_mask); 1009 if (!page) { 1010 crypt_free_buffer_pages(cc, clone); 1011 bio_put(clone); 1012 gfp_mask |= __GFP_WAIT; 1013 goto retry; 1014 } 1015 1016 len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size; 1017 1018 bvec = &clone->bi_io_vec[clone->bi_vcnt++]; 1019 bvec->bv_page = page; 1020 bvec->bv_len = len; 1021 bvec->bv_offset = 0; 1022 1023 clone->bi_iter.bi_size += len; 1024 1025 remaining_size -= len; 1026 } 1027 1028 return_clone: 1029 if (unlikely(gfp_mask & __GFP_WAIT)) 1030 mutex_unlock(&cc->bio_alloc_lock); 1031 1032 return clone; 1033 } 1034 1035 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone) 1036 { 1037 unsigned int i; 1038 struct bio_vec *bv; 1039 1040 bio_for_each_segment_all(bv, clone, i) { 1041 BUG_ON(!bv->bv_page); 1042 mempool_free(bv->bv_page, cc->page_pool); 1043 bv->bv_page = NULL; 1044 } 1045 } 1046 1047 static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc, 1048 struct bio *bio, sector_t sector) 1049 { 1050 io->cc = cc; 1051 io->base_bio = bio; 1052 io->sector = sector; 1053 io->error = 0; 1054 io->ctx.req = NULL; 1055 atomic_set(&io->io_pending, 0); 1056 } 1057 1058 static void crypt_inc_pending(struct dm_crypt_io *io) 1059 { 1060 atomic_inc(&io->io_pending); 1061 } 1062 1063 /* 1064 * One of the bios was finished. Check for completion of 1065 * the whole request and correctly clean up the buffer. 1066 */ 1067 static void crypt_dec_pending(struct dm_crypt_io *io) 1068 { 1069 struct crypt_config *cc = io->cc; 1070 struct bio *base_bio = io->base_bio; 1071 int error = io->error; 1072 1073 if (!atomic_dec_and_test(&io->io_pending)) 1074 return; 1075 1076 if (io->ctx.req) 1077 crypt_free_req(cc, io->ctx.req, base_bio); 1078 1079 bio_endio(base_bio, error); 1080 } 1081 1082 /* 1083 * kcryptd/kcryptd_io: 1084 * 1085 * Needed because it would be very unwise to do decryption in an 1086 * interrupt context. 1087 * 1088 * kcryptd performs the actual encryption or decryption. 1089 * 1090 * kcryptd_io performs the IO submission. 1091 * 1092 * They must be separated as otherwise the final stages could be 1093 * starved by new requests which can block in the first stages due 1094 * to memory allocation. 1095 * 1096 * The work is done per CPU global for all dm-crypt instances. 1097 * They should not depend on each other and do not block. 
1098 */ 1099 static void crypt_endio(struct bio *clone, int error) 1100 { 1101 struct dm_crypt_io *io = clone->bi_private; 1102 struct crypt_config *cc = io->cc; 1103 unsigned rw = bio_data_dir(clone); 1104 1105 if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error)) 1106 error = -EIO; 1107 1108 /* 1109 * free the processed pages 1110 */ 1111 if (rw == WRITE) 1112 crypt_free_buffer_pages(cc, clone); 1113 1114 bio_put(clone); 1115 1116 if (rw == READ && !error) { 1117 kcryptd_queue_crypt(io); 1118 return; 1119 } 1120 1121 if (unlikely(error)) 1122 io->error = error; 1123 1124 crypt_dec_pending(io); 1125 } 1126 1127 static void clone_init(struct dm_crypt_io *io, struct bio *clone) 1128 { 1129 struct crypt_config *cc = io->cc; 1130 1131 clone->bi_private = io; 1132 clone->bi_end_io = crypt_endio; 1133 clone->bi_bdev = cc->dev->bdev; 1134 clone->bi_rw = io->base_bio->bi_rw; 1135 } 1136 1137 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) 1138 { 1139 struct crypt_config *cc = io->cc; 1140 struct bio *clone; 1141 1142 /* 1143 * We need the original biovec array in order to decrypt 1144 * the whole bio data *afterwards* -- thanks to immutable 1145 * biovecs we don't need to worry about the block layer 1146 * modifying the biovec array; so leverage bio_clone_fast(). 1147 */ 1148 clone = bio_clone_fast(io->base_bio, gfp, cc->bs); 1149 if (!clone) 1150 return 1; 1151 1152 crypt_inc_pending(io); 1153 1154 clone_init(io, clone); 1155 clone->bi_iter.bi_sector = cc->start + io->sector; 1156 1157 generic_make_request(clone); 1158 return 0; 1159 } 1160 1161 static void kcryptd_io_read_work(struct work_struct *work) 1162 { 1163 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); 1164 1165 crypt_inc_pending(io); 1166 if (kcryptd_io_read(io, GFP_NOIO)) 1167 io->error = -ENOMEM; 1168 crypt_dec_pending(io); 1169 } 1170 1171 static void kcryptd_queue_read(struct dm_crypt_io *io) 1172 { 1173 struct crypt_config *cc = io->cc; 1174 1175 INIT_WORK(&io->work, kcryptd_io_read_work); 1176 queue_work(cc->io_queue, &io->work); 1177 } 1178 1179 static void kcryptd_io_write(struct dm_crypt_io *io) 1180 { 1181 struct bio *clone = io->ctx.bio_out; 1182 1183 generic_make_request(clone); 1184 } 1185 1186 #define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node) 1187 1188 static int dmcrypt_write(void *data) 1189 { 1190 struct crypt_config *cc = data; 1191 struct dm_crypt_io *io; 1192 1193 while (1) { 1194 struct rb_root write_tree; 1195 struct blk_plug plug; 1196 1197 DECLARE_WAITQUEUE(wait, current); 1198 1199 spin_lock_irq(&cc->write_thread_wait.lock); 1200 continue_locked: 1201 1202 if (!RB_EMPTY_ROOT(&cc->write_tree)) 1203 goto pop_from_list; 1204 1205 __set_current_state(TASK_INTERRUPTIBLE); 1206 __add_wait_queue(&cc->write_thread_wait, &wait); 1207 1208 spin_unlock_irq(&cc->write_thread_wait.lock); 1209 1210 if (unlikely(kthread_should_stop())) { 1211 set_task_state(current, TASK_RUNNING); 1212 remove_wait_queue(&cc->write_thread_wait, &wait); 1213 break; 1214 } 1215 1216 schedule(); 1217 1218 set_task_state(current, TASK_RUNNING); 1219 spin_lock_irq(&cc->write_thread_wait.lock); 1220 __remove_wait_queue(&cc->write_thread_wait, &wait); 1221 goto continue_locked; 1222 1223 pop_from_list: 1224 write_tree = cc->write_tree; 1225 cc->write_tree = RB_ROOT; 1226 spin_unlock_irq(&cc->write_thread_wait.lock); 1227 1228 BUG_ON(rb_parent(write_tree.rb_node)); 1229 1230 /* 1231 * Note: we cannot walk the tree here with rb_next because 1232 * the structures may be freed when 
kcryptd_io_write is called. 1233 */ 1234 blk_start_plug(&plug); 1235 do { 1236 io = crypt_io_from_node(rb_first(&write_tree)); 1237 rb_erase(&io->rb_node, &write_tree); 1238 kcryptd_io_write(io); 1239 } while (!RB_EMPTY_ROOT(&write_tree)); 1240 blk_finish_plug(&plug); 1241 } 1242 return 0; 1243 } 1244 1245 static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async) 1246 { 1247 struct bio *clone = io->ctx.bio_out; 1248 struct crypt_config *cc = io->cc; 1249 unsigned long flags; 1250 sector_t sector; 1251 struct rb_node **rbp, *parent; 1252 1253 if (unlikely(io->error < 0)) { 1254 crypt_free_buffer_pages(cc, clone); 1255 bio_put(clone); 1256 crypt_dec_pending(io); 1257 return; 1258 } 1259 1260 /* crypt_convert should have filled the clone bio */ 1261 BUG_ON(io->ctx.iter_out.bi_size); 1262 1263 clone->bi_iter.bi_sector = cc->start + io->sector; 1264 1265 if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) { 1266 generic_make_request(clone); 1267 return; 1268 } 1269 1270 spin_lock_irqsave(&cc->write_thread_wait.lock, flags); 1271 rbp = &cc->write_tree.rb_node; 1272 parent = NULL; 1273 sector = io->sector; 1274 while (*rbp) { 1275 parent = *rbp; 1276 if (sector < crypt_io_from_node(parent)->sector) 1277 rbp = &(*rbp)->rb_left; 1278 else 1279 rbp = &(*rbp)->rb_right; 1280 } 1281 rb_link_node(&io->rb_node, parent, rbp); 1282 rb_insert_color(&io->rb_node, &cc->write_tree); 1283 1284 wake_up_locked(&cc->write_thread_wait); 1285 spin_unlock_irqrestore(&cc->write_thread_wait.lock, flags); 1286 } 1287 1288 static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) 1289 { 1290 struct crypt_config *cc = io->cc; 1291 struct bio *clone; 1292 int crypt_finished; 1293 sector_t sector = io->sector; 1294 int r; 1295 1296 /* 1297 * Prevent io from disappearing until this function completes. 
1298 */ 1299 crypt_inc_pending(io); 1300 crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector); 1301 1302 clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size); 1303 if (unlikely(!clone)) { 1304 io->error = -EIO; 1305 goto dec; 1306 } 1307 1308 io->ctx.bio_out = clone; 1309 io->ctx.iter_out = clone->bi_iter; 1310 1311 sector += bio_sectors(clone); 1312 1313 crypt_inc_pending(io); 1314 r = crypt_convert(cc, &io->ctx); 1315 if (r) 1316 io->error = -EIO; 1317 crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending); 1318 1319 /* Encryption was already finished, submit io now */ 1320 if (crypt_finished) { 1321 kcryptd_crypt_write_io_submit(io, 0); 1322 io->sector = sector; 1323 } 1324 1325 dec: 1326 crypt_dec_pending(io); 1327 } 1328 1329 static void kcryptd_crypt_read_done(struct dm_crypt_io *io) 1330 { 1331 crypt_dec_pending(io); 1332 } 1333 1334 static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) 1335 { 1336 struct crypt_config *cc = io->cc; 1337 int r = 0; 1338 1339 crypt_inc_pending(io); 1340 1341 crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio, 1342 io->sector); 1343 1344 r = crypt_convert(cc, &io->ctx); 1345 if (r < 0) 1346 io->error = -EIO; 1347 1348 if (atomic_dec_and_test(&io->ctx.cc_pending)) 1349 kcryptd_crypt_read_done(io); 1350 1351 crypt_dec_pending(io); 1352 } 1353 1354 static void kcryptd_async_done(struct crypto_async_request *async_req, 1355 int error) 1356 { 1357 struct dm_crypt_request *dmreq = async_req->data; 1358 struct convert_context *ctx = dmreq->ctx; 1359 struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); 1360 struct crypt_config *cc = io->cc; 1361 1362 /* 1363 * A request from crypto driver backlog is going to be processed now, 1364 * finish the completion and continue in crypt_convert(). 1365 * (Callback will be called for the second time for this request.) 
1366 */ 1367 if (error == -EINPROGRESS) { 1368 complete(&ctx->restart); 1369 return; 1370 } 1371 1372 if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post) 1373 error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq); 1374 1375 if (error < 0) 1376 io->error = -EIO; 1377 1378 crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio); 1379 1380 if (!atomic_dec_and_test(&ctx->cc_pending)) 1381 return; 1382 1383 if (bio_data_dir(io->base_bio) == READ) 1384 kcryptd_crypt_read_done(io); 1385 else 1386 kcryptd_crypt_write_io_submit(io, 1); 1387 } 1388 1389 static void kcryptd_crypt(struct work_struct *work) 1390 { 1391 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); 1392 1393 if (bio_data_dir(io->base_bio) == READ) 1394 kcryptd_crypt_read_convert(io); 1395 else 1396 kcryptd_crypt_write_convert(io); 1397 } 1398 1399 static void kcryptd_queue_crypt(struct dm_crypt_io *io) 1400 { 1401 struct crypt_config *cc = io->cc; 1402 1403 INIT_WORK(&io->work, kcryptd_crypt); 1404 queue_work(cc->crypt_queue, &io->work); 1405 } 1406 1407 /* 1408 * Decode key from its hex representation 1409 */ 1410 static int crypt_decode_key(u8 *key, char *hex, unsigned int size) 1411 { 1412 char buffer[3]; 1413 unsigned int i; 1414 1415 buffer[2] = '\0'; 1416 1417 for (i = 0; i < size; i++) { 1418 buffer[0] = *hex++; 1419 buffer[1] = *hex++; 1420 1421 if (kstrtou8(buffer, 16, &key[i])) 1422 return -EINVAL; 1423 } 1424 1425 if (*hex != '\0') 1426 return -EINVAL; 1427 1428 return 0; 1429 } 1430 1431 static void crypt_free_tfms(struct crypt_config *cc) 1432 { 1433 unsigned i; 1434 1435 if (!cc->tfms) 1436 return; 1437 1438 for (i = 0; i < cc->tfms_count; i++) 1439 if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) { 1440 crypto_free_ablkcipher(cc->tfms[i]); 1441 cc->tfms[i] = NULL; 1442 } 1443 1444 kfree(cc->tfms); 1445 cc->tfms = NULL; 1446 } 1447 1448 static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode) 1449 { 1450 unsigned i; 1451 int err; 1452 1453 cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *), 1454 GFP_KERNEL); 1455 if (!cc->tfms) 1456 return -ENOMEM; 1457 1458 for (i = 0; i < cc->tfms_count; i++) { 1459 cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0); 1460 if (IS_ERR(cc->tfms[i])) { 1461 err = PTR_ERR(cc->tfms[i]); 1462 crypt_free_tfms(cc); 1463 return err; 1464 } 1465 } 1466 1467 return 0; 1468 } 1469 1470 static int crypt_setkey_allcpus(struct crypt_config *cc) 1471 { 1472 unsigned subkey_size; 1473 int err = 0, i, r; 1474 1475 /* Ignore extra keys (which are used for IV etc) */ 1476 subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count); 1477 1478 for (i = 0; i < cc->tfms_count; i++) { 1479 r = crypto_ablkcipher_setkey(cc->tfms[i], 1480 cc->key + (i * subkey_size), 1481 subkey_size); 1482 if (r) 1483 err = r; 1484 } 1485 1486 return err; 1487 } 1488 1489 static int crypt_set_key(struct crypt_config *cc, char *key) 1490 { 1491 int r = -EINVAL; 1492 int key_string_len = strlen(key); 1493 1494 /* The key size may not be changed. */ 1495 if (cc->key_size != (key_string_len >> 1)) 1496 goto out; 1497 1498 /* Hyphen (which gives a key_size of zero) means there is no key. */ 1499 if (!cc->key_size && strcmp(key, "-")) 1500 goto out; 1501 1502 if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0) 1503 goto out; 1504 1505 set_bit(DM_CRYPT_KEY_VALID, &cc->flags); 1506 1507 r = crypt_setkey_allcpus(cc); 1508 1509 out: 1510 /* Hex key string not needed after here, so wipe it. 
*/ 1511 memset(key, '0', key_string_len); 1512 1513 return r; 1514 } 1515 1516 static int crypt_wipe_key(struct crypt_config *cc) 1517 { 1518 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); 1519 memset(&cc->key, 0, cc->key_size * sizeof(u8)); 1520 1521 return crypt_setkey_allcpus(cc); 1522 } 1523 1524 static void crypt_dtr(struct dm_target *ti) 1525 { 1526 struct crypt_config *cc = ti->private; 1527 1528 ti->private = NULL; 1529 1530 if (!cc) 1531 return; 1532 1533 if (cc->write_thread) 1534 kthread_stop(cc->write_thread); 1535 1536 if (cc->io_queue) 1537 destroy_workqueue(cc->io_queue); 1538 if (cc->crypt_queue) 1539 destroy_workqueue(cc->crypt_queue); 1540 1541 crypt_free_tfms(cc); 1542 1543 if (cc->bs) 1544 bioset_free(cc->bs); 1545 1546 if (cc->page_pool) 1547 mempool_destroy(cc->page_pool); 1548 if (cc->req_pool) 1549 mempool_destroy(cc->req_pool); 1550 1551 if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) 1552 cc->iv_gen_ops->dtr(cc); 1553 1554 if (cc->dev) 1555 dm_put_device(ti, cc->dev); 1556 1557 kzfree(cc->cipher); 1558 kzfree(cc->cipher_string); 1559 1560 /* Must zero key material before freeing */ 1561 kzfree(cc); 1562 } 1563 1564 static int crypt_ctr_cipher(struct dm_target *ti, 1565 char *cipher_in, char *key) 1566 { 1567 struct crypt_config *cc = ti->private; 1568 char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount; 1569 char *cipher_api = NULL; 1570 int ret = -EINVAL; 1571 char dummy; 1572 1573 /* Convert to crypto api definition? */ 1574 if (strchr(cipher_in, '(')) { 1575 ti->error = "Bad cipher specification"; 1576 return -EINVAL; 1577 } 1578 1579 cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL); 1580 if (!cc->cipher_string) 1581 goto bad_mem; 1582 1583 /* 1584 * Legacy dm-crypt cipher specification 1585 * cipher[:keycount]-mode-iv:ivopts 1586 */ 1587 tmp = cipher_in; 1588 keycount = strsep(&tmp, "-"); 1589 cipher = strsep(&keycount, ":"); 1590 1591 if (!keycount) 1592 cc->tfms_count = 1; 1593 else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 || 1594 !is_power_of_2(cc->tfms_count)) { 1595 ti->error = "Bad cipher key count specification"; 1596 return -EINVAL; 1597 } 1598 cc->key_parts = cc->tfms_count; 1599 cc->key_extra_size = 0; 1600 1601 cc->cipher = kstrdup(cipher, GFP_KERNEL); 1602 if (!cc->cipher) 1603 goto bad_mem; 1604 1605 chainmode = strsep(&tmp, "-"); 1606 ivopts = strsep(&tmp, "-"); 1607 ivmode = strsep(&ivopts, ":"); 1608 1609 if (tmp) 1610 DMWARN("Ignoring unexpected additional cipher options"); 1611 1612 /* 1613 * For compatibility with the original dm-crypt mapping format, if 1614 * only the cipher name is supplied, use cbc-plain. 
1615 */ 1616 if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) { 1617 chainmode = "cbc"; 1618 ivmode = "plain"; 1619 } 1620 1621 if (strcmp(chainmode, "ecb") && !ivmode) { 1622 ti->error = "IV mechanism required"; 1623 return -EINVAL; 1624 } 1625 1626 cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL); 1627 if (!cipher_api) 1628 goto bad_mem; 1629 1630 ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME, 1631 "%s(%s)", chainmode, cipher); 1632 if (ret < 0) { 1633 kfree(cipher_api); 1634 goto bad_mem; 1635 } 1636 1637 /* Allocate cipher */ 1638 ret = crypt_alloc_tfms(cc, cipher_api); 1639 if (ret < 0) { 1640 ti->error = "Error allocating crypto tfm"; 1641 goto bad; 1642 } 1643 1644 /* Initialize IV */ 1645 cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc)); 1646 if (cc->iv_size) 1647 /* at least a 64 bit sector number should fit in our buffer */ 1648 cc->iv_size = max(cc->iv_size, 1649 (unsigned int)(sizeof(u64) / sizeof(u8))); 1650 else if (ivmode) { 1651 DMWARN("Selected cipher does not support IVs"); 1652 ivmode = NULL; 1653 } 1654 1655 /* Choose ivmode, see comments at iv code. */ 1656 if (ivmode == NULL) 1657 cc->iv_gen_ops = NULL; 1658 else if (strcmp(ivmode, "plain") == 0) 1659 cc->iv_gen_ops = &crypt_iv_plain_ops; 1660 else if (strcmp(ivmode, "plain64") == 0) 1661 cc->iv_gen_ops = &crypt_iv_plain64_ops; 1662 else if (strcmp(ivmode, "essiv") == 0) 1663 cc->iv_gen_ops = &crypt_iv_essiv_ops; 1664 else if (strcmp(ivmode, "benbi") == 0) 1665 cc->iv_gen_ops = &crypt_iv_benbi_ops; 1666 else if (strcmp(ivmode, "null") == 0) 1667 cc->iv_gen_ops = &crypt_iv_null_ops; 1668 else if (strcmp(ivmode, "lmk") == 0) { 1669 cc->iv_gen_ops = &crypt_iv_lmk_ops; 1670 /* 1671 * Version 2 and 3 is recognised according 1672 * to length of provided multi-key string. 1673 * If present (version 3), last key is used as IV seed. 1674 * All keys (including IV seed) are always the same size. 
1675 */ 1676 if (cc->key_size % cc->key_parts) { 1677 cc->key_parts++; 1678 cc->key_extra_size = cc->key_size / cc->key_parts; 1679 } 1680 } else if (strcmp(ivmode, "tcw") == 0) { 1681 cc->iv_gen_ops = &crypt_iv_tcw_ops; 1682 cc->key_parts += 2; /* IV + whitening */ 1683 cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE; 1684 } else { 1685 ret = -EINVAL; 1686 ti->error = "Invalid IV mode"; 1687 goto bad; 1688 } 1689 1690 /* Initialize and set key */ 1691 ret = crypt_set_key(cc, key); 1692 if (ret < 0) { 1693 ti->error = "Error decoding and setting key"; 1694 goto bad; 1695 } 1696 1697 /* Allocate IV */ 1698 if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) { 1699 ret = cc->iv_gen_ops->ctr(cc, ti, ivopts); 1700 if (ret < 0) { 1701 ti->error = "Error creating IV"; 1702 goto bad; 1703 } 1704 } 1705 1706 /* Initialize IV (set keys for ESSIV etc) */ 1707 if (cc->iv_gen_ops && cc->iv_gen_ops->init) { 1708 ret = cc->iv_gen_ops->init(cc); 1709 if (ret < 0) { 1710 ti->error = "Error initialising IV"; 1711 goto bad; 1712 } 1713 } 1714 1715 ret = 0; 1716 bad: 1717 kfree(cipher_api); 1718 return ret; 1719 1720 bad_mem: 1721 ti->error = "Cannot allocate cipher strings"; 1722 return -ENOMEM; 1723 } 1724 1725 /* 1726 * Construct an encryption mapping: 1727 * <cipher> <key> <iv_offset> <dev_path> <start> 1728 */ 1729 static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) 1730 { 1731 struct crypt_config *cc; 1732 unsigned int key_size, opt_params; 1733 unsigned long long tmpll; 1734 int ret; 1735 size_t iv_size_padding; 1736 struct dm_arg_set as; 1737 const char *opt_string; 1738 char dummy; 1739 1740 static struct dm_arg _args[] = { 1741 {0, 3, "Invalid number of feature args"}, 1742 }; 1743 1744 if (argc < 5) { 1745 ti->error = "Not enough arguments"; 1746 return -EINVAL; 1747 } 1748 1749 key_size = strlen(argv[1]) >> 1; 1750 1751 cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL); 1752 if (!cc) { 1753 ti->error = "Cannot allocate encryption context"; 1754 return -ENOMEM; 1755 } 1756 cc->key_size = key_size; 1757 1758 ti->private = cc; 1759 ret = crypt_ctr_cipher(ti, argv[0], argv[1]); 1760 if (ret < 0) 1761 goto bad; 1762 1763 cc->dmreq_start = sizeof(struct ablkcipher_request); 1764 cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc)); 1765 cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request)); 1766 1767 if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) { 1768 /* Allocate the padding exactly */ 1769 iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request)) 1770 & crypto_ablkcipher_alignmask(any_tfm(cc)); 1771 } else { 1772 /* 1773 * If the cipher requires greater alignment than kmalloc 1774 * alignment, we don't know the exact position of the 1775 * initialization vector. We must assume worst case. 
1776 */ 1777 iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc)); 1778 } 1779 1780 ret = -ENOMEM; 1781 cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start + 1782 sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size); 1783 if (!cc->req_pool) { 1784 ti->error = "Cannot allocate crypt request mempool"; 1785 goto bad; 1786 } 1787 1788 cc->per_bio_data_size = ti->per_bio_data_size = 1789 ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + 1790 sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size, 1791 ARCH_KMALLOC_MINALIGN); 1792 1793 cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0); 1794 if (!cc->page_pool) { 1795 ti->error = "Cannot allocate page mempool"; 1796 goto bad; 1797 } 1798 1799 cc->bs = bioset_create(MIN_IOS, 0); 1800 if (!cc->bs) { 1801 ti->error = "Cannot allocate crypt bioset"; 1802 goto bad; 1803 } 1804 1805 mutex_init(&cc->bio_alloc_lock); 1806 1807 ret = -EINVAL; 1808 if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) { 1809 ti->error = "Invalid iv_offset sector"; 1810 goto bad; 1811 } 1812 cc->iv_offset = tmpll; 1813 1814 if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) { 1815 ti->error = "Device lookup failed"; 1816 goto bad; 1817 } 1818 1819 if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) { 1820 ti->error = "Invalid device sector"; 1821 goto bad; 1822 } 1823 cc->start = tmpll; 1824 1825 argv += 5; 1826 argc -= 5; 1827 1828 /* Optional parameters */ 1829 if (argc) { 1830 as.argc = argc; 1831 as.argv = argv; 1832 1833 ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error); 1834 if (ret) 1835 goto bad; 1836 1837 ret = -EINVAL; 1838 while (opt_params--) { 1839 opt_string = dm_shift_arg(&as); 1840 if (!opt_string) { 1841 ti->error = "Not enough feature arguments"; 1842 goto bad; 1843 } 1844 1845 if (!strcasecmp(opt_string, "allow_discards")) 1846 ti->num_discard_bios = 1; 1847 1848 else if (!strcasecmp(opt_string, "same_cpu_crypt")) 1849 set_bit(DM_CRYPT_SAME_CPU, &cc->flags); 1850 1851 else if (!strcasecmp(opt_string, "submit_from_crypt_cpus")) 1852 set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags); 1853 1854 else { 1855 ti->error = "Invalid feature arguments"; 1856 goto bad; 1857 } 1858 } 1859 } 1860 1861 ret = -ENOMEM; 1862 cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1); 1863 if (!cc->io_queue) { 1864 ti->error = "Couldn't create kcryptd io queue"; 1865 goto bad; 1866 } 1867 1868 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags)) 1869 cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1); 1870 else 1871 cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, 1872 num_online_cpus()); 1873 if (!cc->crypt_queue) { 1874 ti->error = "Couldn't create kcryptd queue"; 1875 goto bad; 1876 } 1877 1878 init_waitqueue_head(&cc->write_thread_wait); 1879 cc->write_tree = RB_ROOT; 1880 1881 cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write"); 1882 if (IS_ERR(cc->write_thread)) { 1883 ret = PTR_ERR(cc->write_thread); 1884 cc->write_thread = NULL; 1885 ti->error = "Couldn't spawn write thread"; 1886 goto bad; 1887 } 1888 wake_up_process(cc->write_thread); 1889 1890 ti->num_flush_bios = 1; 1891 ti->discard_zeroes_data_unsupported = true; 1892 1893 return 0; 1894 1895 bad: 1896 crypt_dtr(ti); 1897 return ret; 1898 } 1899 1900 static int crypt_map(struct dm_target *ti, struct bio *bio) 1901 { 1902 struct dm_crypt_io *io; 1903 struct crypt_config *cc = ti->private; 1904 1905 /* 1906 * If bio is REQ_FLUSH or REQ_DISCARD, 
just bypass crypt queues. 1907 * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight 1908 * - for REQ_DISCARD caller must use flush if IO ordering matters 1909 */ 1910 if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) { 1911 bio->bi_bdev = cc->dev->bdev; 1912 if (bio_sectors(bio)) 1913 bio->bi_iter.bi_sector = cc->start + 1914 dm_target_offset(ti, bio->bi_iter.bi_sector); 1915 return DM_MAPIO_REMAPPED; 1916 } 1917 1918 io = dm_per_bio_data(bio, cc->per_bio_data_size); 1919 crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); 1920 io->ctx.req = (struct ablkcipher_request *)(io + 1); 1921 1922 if (bio_data_dir(io->base_bio) == READ) { 1923 if (kcryptd_io_read(io, GFP_NOWAIT)) 1924 kcryptd_queue_read(io); 1925 } else 1926 kcryptd_queue_crypt(io); 1927 1928 return DM_MAPIO_SUBMITTED; 1929 } 1930 1931 static void crypt_status(struct dm_target *ti, status_type_t type, 1932 unsigned status_flags, char *result, unsigned maxlen) 1933 { 1934 struct crypt_config *cc = ti->private; 1935 unsigned i, sz = 0; 1936 int num_feature_args = 0; 1937 1938 switch (type) { 1939 case STATUSTYPE_INFO: 1940 result[0] = '\0'; 1941 break; 1942 1943 case STATUSTYPE_TABLE: 1944 DMEMIT("%s ", cc->cipher_string); 1945 1946 if (cc->key_size > 0) 1947 for (i = 0; i < cc->key_size; i++) 1948 DMEMIT("%02x", cc->key[i]); 1949 else 1950 DMEMIT("-"); 1951 1952 DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset, 1953 cc->dev->name, (unsigned long long)cc->start); 1954 1955 num_feature_args += !!ti->num_discard_bios; 1956 num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags); 1957 num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags); 1958 if (num_feature_args) { 1959 DMEMIT(" %d", num_feature_args); 1960 if (ti->num_discard_bios) 1961 DMEMIT(" allow_discards"); 1962 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags)) 1963 DMEMIT(" same_cpu_crypt"); 1964 if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) 1965 DMEMIT(" submit_from_crypt_cpus"); 1966 } 1967 1968 break; 1969 } 1970 } 1971 1972 static void crypt_postsuspend(struct dm_target *ti) 1973 { 1974 struct crypt_config *cc = ti->private; 1975 1976 set_bit(DM_CRYPT_SUSPENDED, &cc->flags); 1977 } 1978 1979 static int crypt_preresume(struct dm_target *ti) 1980 { 1981 struct crypt_config *cc = ti->private; 1982 1983 if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) { 1984 DMERR("aborting resume - crypt key is not set."); 1985 return -EAGAIN; 1986 } 1987 1988 return 0; 1989 } 1990 1991 static void crypt_resume(struct dm_target *ti) 1992 { 1993 struct crypt_config *cc = ti->private; 1994 1995 clear_bit(DM_CRYPT_SUSPENDED, &cc->flags); 1996 } 1997 1998 /* Message interface 1999 * key set <key> 2000 * key wipe 2001 */ 2002 static int crypt_message(struct dm_target *ti, unsigned argc, char **argv) 2003 { 2004 struct crypt_config *cc = ti->private; 2005 int ret = -EINVAL; 2006 2007 if (argc < 2) 2008 goto error; 2009 2010 if (!strcasecmp(argv[0], "key")) { 2011 if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) { 2012 DMWARN("not suspended during key manipulation."); 2013 return -EINVAL; 2014 } 2015 if (argc == 3 && !strcasecmp(argv[1], "set")) { 2016 ret = crypt_set_key(cc, argv[2]); 2017 if (ret) 2018 return ret; 2019 if (cc->iv_gen_ops && cc->iv_gen_ops->init) 2020 ret = cc->iv_gen_ops->init(cc); 2021 return ret; 2022 } 2023 if (argc == 2 && !strcasecmp(argv[1], "wipe")) { 2024 if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) { 2025 ret = cc->iv_gen_ops->wipe(cc); 2026 if (ret) 2027 return ret; 2028 } 2029 return crypt_wipe_key(cc); 2030 } 2031 
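                /* Any other "key" sub-command falls through to the error path below. */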
} 2032 2033 error: 2034 DMWARN("unrecognised message received."); 2035 return -EINVAL; 2036 } 2037 2038 static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm, 2039 struct bio_vec *biovec, int max_size) 2040 { 2041 struct crypt_config *cc = ti->private; 2042 struct request_queue *q = bdev_get_queue(cc->dev->bdev); 2043 2044 if (!q->merge_bvec_fn) 2045 return max_size; 2046 2047 bvm->bi_bdev = cc->dev->bdev; 2048 bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector); 2049 2050 return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); 2051 } 2052 2053 static int crypt_iterate_devices(struct dm_target *ti, 2054 iterate_devices_callout_fn fn, void *data) 2055 { 2056 struct crypt_config *cc = ti->private; 2057 2058 return fn(ti, cc->dev, cc->start, ti->len, data); 2059 } 2060 2061 static struct target_type crypt_target = { 2062 .name = "crypt", 2063 .version = {1, 14, 0}, 2064 .module = THIS_MODULE, 2065 .ctr = crypt_ctr, 2066 .dtr = crypt_dtr, 2067 .map = crypt_map, 2068 .status = crypt_status, 2069 .postsuspend = crypt_postsuspend, 2070 .preresume = crypt_preresume, 2071 .resume = crypt_resume, 2072 .message = crypt_message, 2073 .merge = crypt_merge, 2074 .iterate_devices = crypt_iterate_devices, 2075 }; 2076 2077 static int __init dm_crypt_init(void) 2078 { 2079 int r; 2080 2081 r = dm_register_target(&crypt_target); 2082 if (r < 0) 2083 DMERR("register failed %d", r); 2084 2085 return r; 2086 } 2087 2088 static void __exit dm_crypt_exit(void) 2089 { 2090 dm_unregister_target(&crypt_target); 2091 } 2092 2093 module_init(dm_crypt_init); 2094 module_exit(dm_crypt_exit); 2095 2096 MODULE_AUTHOR("Jana Saout <jana@saout.de>"); 2097 MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption"); 2098 MODULE_LICENSE("GPL"); 2099
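/*
 * Illustrative usage sketch (example values are arbitrary): a device-mapper
 * table line that crypt_ctr() would accept for a 200 MiB (409600-sector)
 * region of /dev/sdb, using AES-CBC with ESSIV(sha256), a 256-bit key given
 * as 64 hex digits in the hypothetical file key.hex, and discards enabled:
 *
 *   echo "0 409600 crypt aes-cbc-essiv:sha256 $(cat key.hex) 0 /dev/sdb 0 1 allow_discards" | \
 *       dmsetup create cryptdev
 *
 * The arguments after "crypt" map to crypt_ctr() as
 * <cipher> <key> <iv_offset> <device path> <start> [<#opt_params> <opt_params>],
 * where the optional parameters recognised by this version are allow_discards,
 * same_cpu_crypt and submit_from_crypt_cpus.
 */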