1 /* 2 * Copyright (C) 2003 Jana Saout <jana@saout.de> 3 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org> 4 * Copyright (C) 2006-2015 Red Hat, Inc. All rights reserved. 5 * Copyright (C) 2013 Milan Broz <gmazyland@gmail.com> 6 * 7 * This file is released under the GPL. 8 */ 9 10 #include <linux/completion.h> 11 #include <linux/err.h> 12 #include <linux/module.h> 13 #include <linux/init.h> 14 #include <linux/kernel.h> 15 #include <linux/bio.h> 16 #include <linux/blkdev.h> 17 #include <linux/mempool.h> 18 #include <linux/slab.h> 19 #include <linux/crypto.h> 20 #include <linux/workqueue.h> 21 #include <linux/kthread.h> 22 #include <linux/backing-dev.h> 23 #include <linux/atomic.h> 24 #include <linux/scatterlist.h> 25 #include <linux/rbtree.h> 26 #include <asm/page.h> 27 #include <asm/unaligned.h> 28 #include <crypto/hash.h> 29 #include <crypto/md5.h> 30 #include <crypto/algapi.h> 31 32 #include <linux/device-mapper.h> 33 34 #define DM_MSG_PREFIX "crypt" 35 36 /* 37 * context holding the current state of a multi-part conversion 38 */ 39 struct convert_context { 40 struct completion restart; 41 struct bio *bio_in; 42 struct bio *bio_out; 43 struct bvec_iter iter_in; 44 struct bvec_iter iter_out; 45 sector_t cc_sector; 46 atomic_t cc_pending; 47 struct ablkcipher_request *req; 48 }; 49 50 /* 51 * per bio private data 52 */ 53 struct dm_crypt_io { 54 struct crypt_config *cc; 55 struct bio *base_bio; 56 struct work_struct work; 57 58 struct convert_context ctx; 59 60 atomic_t io_pending; 61 int error; 62 sector_t sector; 63 64 struct rb_node rb_node; 65 } CRYPTO_MINALIGN_ATTR; 66 67 struct dm_crypt_request { 68 struct convert_context *ctx; 69 struct scatterlist sg_in; 70 struct scatterlist sg_out; 71 sector_t iv_sector; 72 }; 73 74 struct crypt_config; 75 76 struct crypt_iv_operations { 77 int (*ctr)(struct crypt_config *cc, struct dm_target *ti, 78 const char *opts); 79 void (*dtr)(struct crypt_config *cc); 80 int (*init)(struct crypt_config *cc); 81 int (*wipe)(struct crypt_config *cc); 82 int (*generator)(struct crypt_config *cc, u8 *iv, 83 struct dm_crypt_request *dmreq); 84 int (*post)(struct crypt_config *cc, u8 *iv, 85 struct dm_crypt_request *dmreq); 86 }; 87 88 struct iv_essiv_private { 89 struct crypto_hash *hash_tfm; 90 u8 *salt; 91 }; 92 93 struct iv_benbi_private { 94 int shift; 95 }; 96 97 #define LMK_SEED_SIZE 64 /* hash + 0 */ 98 struct iv_lmk_private { 99 struct crypto_shash *hash_tfm; 100 u8 *seed; 101 }; 102 103 #define TCW_WHITENING_SIZE 16 104 struct iv_tcw_private { 105 struct crypto_shash *crc32_tfm; 106 u8 *iv_seed; 107 u8 *whitening; 108 }; 109 110 /* 111 * Crypt: maps a linear range of a block device 112 * and encrypts / decrypts at the same time. 113 */ 114 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, 115 DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD, 116 DM_CRYPT_EXIT_THREAD}; 117 118 /* 119 * The fields in here must be read only after initialization. 
 */
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data, crypto requests and
	 * encryption requests/buffer pages
	 */
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;
	struct mutex bio_alloc_lock;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	struct task_struct *write_thread;
	wait_queue_head_t write_thread_wait;
	struct rb_root write_tree;

	char *cipher;
	char *cipher_string;

	struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_essiv_private essiv;
		struct iv_benbi_private benbi;
		struct iv_lmk_private lmk;
		struct iv_tcw_private tcw;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	/* ESSIV: struct crypto_cipher *essiv_tfm */
	void *iv_private;
	struct crypto_ablkcipher **tfms;
	unsigned tfms_count;

	/*
	 * Layout of each crypto request:
	 *
	 *   struct ablkcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;

	unsigned int per_bio_data_size;

	unsigned long flags;
	unsigned int key_size;
	unsigned int key_parts;      /* independent parts in key buffer */
	unsigned int key_extra_size; /* additional keys length */
	u8 key[0];
};

#define MIN_IOS 16

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);

/*
 * Use this to access cipher attributes that are the same for each CPU.
 */
static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
{
	return cc->tfms[0];
}

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes)
 *
 * null: the initial vector is always zero. Provides compatibility with
 *       obsolete loop_fish2 devices. Do not use for new devices.
 *
 * lmk:  Compatible implementation of the block chaining mode used
 *       by the Loop-AES block device encryption system
 *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from the sector number, the data and
 *       optionally extra IV seed.
 *       This means that after decryption the first block
 *       of sector must be tweaked according to decrypted data.
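 *       (The tweak is applied in crypt_iv_lmk_post() below, which XORs the
 *       just-computed IV into the first cc->iv_size bytes of the decrypted
 *       sector.)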
224 * Loop-AES can use three encryption schemes: 225 * version 1: is plain aes-cbc mode 226 * version 2: uses 64 multikey scheme with lmk IV generator 227 * version 3: the same as version 2 with additional IV seed 228 * (it uses 65 keys, last key is used as IV seed) 229 * 230 * tcw: Compatible implementation of the block chaining mode used 231 * by the TrueCrypt device encryption system (prior to version 4.1). 232 * For more info see: https://gitlab.com/cryptsetup/cryptsetup/wikis/TrueCryptOnDiskFormat 233 * It operates on full 512 byte sectors and uses CBC 234 * with an IV derived from initial key and the sector number. 235 * In addition, whitening value is applied on every sector, whitening 236 * is calculated from initial key, sector number and mixed using CRC32. 237 * Note that this encryption scheme is vulnerable to watermarking attacks 238 * and should be used for old compatible containers access only. 239 * 240 * plumb: unimplemented, see: 241 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454 242 */ 243 244 static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, 245 struct dm_crypt_request *dmreq) 246 { 247 memset(iv, 0, cc->iv_size); 248 *(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff); 249 250 return 0; 251 } 252 253 static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv, 254 struct dm_crypt_request *dmreq) 255 { 256 memset(iv, 0, cc->iv_size); 257 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector); 258 259 return 0; 260 } 261 262 /* Initialise ESSIV - compute salt but no local memory allocations */ 263 static int crypt_iv_essiv_init(struct crypt_config *cc) 264 { 265 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; 266 struct hash_desc desc; 267 struct scatterlist sg; 268 struct crypto_cipher *essiv_tfm; 269 int err; 270 271 sg_init_one(&sg, cc->key, cc->key_size); 272 desc.tfm = essiv->hash_tfm; 273 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; 274 275 err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt); 276 if (err) 277 return err; 278 279 essiv_tfm = cc->iv_private; 280 281 err = crypto_cipher_setkey(essiv_tfm, essiv->salt, 282 crypto_hash_digestsize(essiv->hash_tfm)); 283 if (err) 284 return err; 285 286 return 0; 287 } 288 289 /* Wipe salt and reset key derived from volume key */ 290 static int crypt_iv_essiv_wipe(struct crypt_config *cc) 291 { 292 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; 293 unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm); 294 struct crypto_cipher *essiv_tfm; 295 int r, err = 0; 296 297 memset(essiv->salt, 0, salt_size); 298 299 essiv_tfm = cc->iv_private; 300 r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size); 301 if (r) 302 err = r; 303 304 return err; 305 } 306 307 /* Set up per cpu cipher state */ 308 static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc, 309 struct dm_target *ti, 310 u8 *salt, unsigned saltsize) 311 { 312 struct crypto_cipher *essiv_tfm; 313 int err; 314 315 /* Setup the essiv_tfm with the given salt */ 316 essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC); 317 if (IS_ERR(essiv_tfm)) { 318 ti->error = "Error allocating crypto tfm for ESSIV"; 319 return essiv_tfm; 320 } 321 322 if (crypto_cipher_blocksize(essiv_tfm) != 323 crypto_ablkcipher_ivsize(any_tfm(cc))) { 324 ti->error = "Block size of ESSIV cipher does " 325 "not match IV size of block cipher"; 326 crypto_free_cipher(essiv_tfm); 327 return ERR_PTR(-EINVAL); 328 } 329 330 err = crypto_cipher_setkey(essiv_tfm, salt, saltsize); 331 if (err) { 332 
ti->error = "Failed to set key for ESSIV cipher"; 333 crypto_free_cipher(essiv_tfm); 334 return ERR_PTR(err); 335 } 336 337 return essiv_tfm; 338 } 339 340 static void crypt_iv_essiv_dtr(struct crypt_config *cc) 341 { 342 struct crypto_cipher *essiv_tfm; 343 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; 344 345 crypto_free_hash(essiv->hash_tfm); 346 essiv->hash_tfm = NULL; 347 348 kzfree(essiv->salt); 349 essiv->salt = NULL; 350 351 essiv_tfm = cc->iv_private; 352 353 if (essiv_tfm) 354 crypto_free_cipher(essiv_tfm); 355 356 cc->iv_private = NULL; 357 } 358 359 static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, 360 const char *opts) 361 { 362 struct crypto_cipher *essiv_tfm = NULL; 363 struct crypto_hash *hash_tfm = NULL; 364 u8 *salt = NULL; 365 int err; 366 367 if (!opts) { 368 ti->error = "Digest algorithm missing for ESSIV mode"; 369 return -EINVAL; 370 } 371 372 /* Allocate hash algorithm */ 373 hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC); 374 if (IS_ERR(hash_tfm)) { 375 ti->error = "Error initializing ESSIV hash"; 376 err = PTR_ERR(hash_tfm); 377 goto bad; 378 } 379 380 salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL); 381 if (!salt) { 382 ti->error = "Error kmallocing salt storage in ESSIV"; 383 err = -ENOMEM; 384 goto bad; 385 } 386 387 cc->iv_gen_private.essiv.salt = salt; 388 cc->iv_gen_private.essiv.hash_tfm = hash_tfm; 389 390 essiv_tfm = setup_essiv_cpu(cc, ti, salt, 391 crypto_hash_digestsize(hash_tfm)); 392 if (IS_ERR(essiv_tfm)) { 393 crypt_iv_essiv_dtr(cc); 394 return PTR_ERR(essiv_tfm); 395 } 396 cc->iv_private = essiv_tfm; 397 398 return 0; 399 400 bad: 401 if (hash_tfm && !IS_ERR(hash_tfm)) 402 crypto_free_hash(hash_tfm); 403 kfree(salt); 404 return err; 405 } 406 407 static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, 408 struct dm_crypt_request *dmreq) 409 { 410 struct crypto_cipher *essiv_tfm = cc->iv_private; 411 412 memset(iv, 0, cc->iv_size); 413 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector); 414 crypto_cipher_encrypt_one(essiv_tfm, iv, iv); 415 416 return 0; 417 } 418 419 static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti, 420 const char *opts) 421 { 422 unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc)); 423 int log = ilog2(bs); 424 425 /* we need to calculate how far we must shift the sector count 426 * to get the cipher block count, we use this shift in _gen */ 427 428 if (1 << log != bs) { 429 ti->error = "cypher blocksize is not a power of 2"; 430 return -EINVAL; 431 } 432 433 if (log > 9) { 434 ti->error = "cypher blocksize is > 512"; 435 return -EINVAL; 436 } 437 438 cc->iv_gen_private.benbi.shift = 9 - log; 439 440 return 0; 441 } 442 443 static void crypt_iv_benbi_dtr(struct crypt_config *cc) 444 { 445 } 446 447 static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, 448 struct dm_crypt_request *dmreq) 449 { 450 __be64 val; 451 452 memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */ 453 454 val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1); 455 put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64))); 456 457 return 0; 458 } 459 460 static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, 461 struct dm_crypt_request *dmreq) 462 { 463 memset(iv, 0, cc->iv_size); 464 465 return 0; 466 } 467 468 static void crypt_iv_lmk_dtr(struct crypt_config *cc) 469 { 470 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; 471 472 if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm)) 473 
crypto_free_shash(lmk->hash_tfm); 474 lmk->hash_tfm = NULL; 475 476 kzfree(lmk->seed); 477 lmk->seed = NULL; 478 } 479 480 static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti, 481 const char *opts) 482 { 483 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; 484 485 lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0); 486 if (IS_ERR(lmk->hash_tfm)) { 487 ti->error = "Error initializing LMK hash"; 488 return PTR_ERR(lmk->hash_tfm); 489 } 490 491 /* No seed in LMK version 2 */ 492 if (cc->key_parts == cc->tfms_count) { 493 lmk->seed = NULL; 494 return 0; 495 } 496 497 lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL); 498 if (!lmk->seed) { 499 crypt_iv_lmk_dtr(cc); 500 ti->error = "Error kmallocing seed storage in LMK"; 501 return -ENOMEM; 502 } 503 504 return 0; 505 } 506 507 static int crypt_iv_lmk_init(struct crypt_config *cc) 508 { 509 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; 510 int subkey_size = cc->key_size / cc->key_parts; 511 512 /* LMK seed is on the position of LMK_KEYS + 1 key */ 513 if (lmk->seed) 514 memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size), 515 crypto_shash_digestsize(lmk->hash_tfm)); 516 517 return 0; 518 } 519 520 static int crypt_iv_lmk_wipe(struct crypt_config *cc) 521 { 522 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; 523 524 if (lmk->seed) 525 memset(lmk->seed, 0, LMK_SEED_SIZE); 526 527 return 0; 528 } 529 530 static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv, 531 struct dm_crypt_request *dmreq, 532 u8 *data) 533 { 534 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; 535 SHASH_DESC_ON_STACK(desc, lmk->hash_tfm); 536 struct md5_state md5state; 537 __le32 buf[4]; 538 int i, r; 539 540 desc->tfm = lmk->hash_tfm; 541 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; 542 543 r = crypto_shash_init(desc); 544 if (r) 545 return r; 546 547 if (lmk->seed) { 548 r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE); 549 if (r) 550 return r; 551 } 552 553 /* Sector is always 512B, block size 16, add data of blocks 1-31 */ 554 r = crypto_shash_update(desc, data + 16, 16 * 31); 555 if (r) 556 return r; 557 558 /* Sector is cropped to 56 bits here */ 559 buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF); 560 buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000); 561 buf[2] = cpu_to_le32(4024); 562 buf[3] = 0; 563 r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf)); 564 if (r) 565 return r; 566 567 /* No MD5 padding here */ 568 r = crypto_shash_export(desc, &md5state); 569 if (r) 570 return r; 571 572 for (i = 0; i < MD5_HASH_WORDS; i++) 573 __cpu_to_le32s(&md5state.hash[i]); 574 memcpy(iv, &md5state.hash, cc->iv_size); 575 576 return 0; 577 } 578 579 static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv, 580 struct dm_crypt_request *dmreq) 581 { 582 u8 *src; 583 int r = 0; 584 585 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { 586 src = kmap_atomic(sg_page(&dmreq->sg_in)); 587 r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset); 588 kunmap_atomic(src); 589 } else 590 memset(iv, 0, cc->iv_size); 591 592 return r; 593 } 594 595 static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv, 596 struct dm_crypt_request *dmreq) 597 { 598 u8 *dst; 599 int r; 600 601 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) 602 return 0; 603 604 dst = kmap_atomic(sg_page(&dmreq->sg_out)); 605 r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset); 606 607 /* Tweak the first block of plaintext sector */ 608 if (!r) 609 crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size); 610 611 
	kunmap_atomic(dst);
	return r;
}

static void crypt_iv_tcw_dtr(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	kzfree(tcw->iv_seed);
	tcw->iv_seed = NULL;
	kzfree(tcw->whitening);
	tcw->whitening = NULL;

	if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
		crypto_free_shash(tcw->crc32_tfm);
	tcw->crc32_tfm = NULL;
}

static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
		ti->error = "Wrong key size for TCW";
		return -EINVAL;
	}

	tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tcw->crc32_tfm)) {
		ti->error = "Error initializing CRC32 in TCW";
		return PTR_ERR(tcw->crc32_tfm);
	}

	tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
	tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
	if (!tcw->iv_seed || !tcw->whitening) {
		crypt_iv_tcw_dtr(cc);
		ti->error = "Error allocating seed storage in TCW";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_tcw_init(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;

	memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
	memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
	       TCW_WHITENING_SIZE);

	return 0;
}

static int crypt_iv_tcw_wipe(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	memset(tcw->iv_seed, 0, cc->iv_size);
	memset(tcw->whitening, 0, TCW_WHITENING_SIZE);

	return 0;
}

static int crypt_iv_tcw_whitening(struct crypt_config *cc,
				  struct dm_crypt_request *dmreq,
				  u8 *data)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
	u8 buf[TCW_WHITENING_SIZE];
	SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
	int i, r;

	/* xor whitening with sector number */
	memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
	crypto_xor(buf, (u8 *)&sector, 8);
	crypto_xor(&buf[8], (u8 *)&sector, 8);

	/* calculate crc32 for every 32bit part and xor it */
	desc->tfm = tcw->crc32_tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	for (i = 0; i < 4; i++) {
		r = crypto_shash_init(desc);
		if (r)
			goto out;
		r = crypto_shash_update(desc, &buf[i * 4], 4);
		if (r)
			goto out;
		r = crypto_shash_final(desc, &buf[i * 4]);
		if (r)
			goto out;
	}
	crypto_xor(&buf[0], &buf[12], 4);
	crypto_xor(&buf[4], &buf[8], 4);

	/* apply whitening (8 bytes) to whole sector */
	for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
		crypto_xor(data + i * 8, buf, 8);
out:
	memzero_explicit(buf, sizeof(buf));
	return r;
}

static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
	u8 *src;
	int r = 0;

	/* Remove whitening from ciphertext */
	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
		src = kmap_atomic(sg_page(&dmreq->sg_in));
		r = crypt_iv_tcw_whitening(cc, dmreq, src + dmreq->sg_in.offset);
		kunmap_atomic(src);
	}

	/* Calculate IV */
	memcpy(iv, tcw->iv_seed, cc->iv_size);
	crypto_xor(iv, (u8 *)&sector, 8);
	if (cc->iv_size > 8)
		crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8);

	return r;
}

static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
		return 0;

	/* Apply whitening on ciphertext */
	dst = kmap_atomic(sg_page(&dmreq->sg_out));
	r = crypt_iv_tcw_whitening(cc, dmreq, dst + dmreq->sg_out.offset);
	kunmap_atomic(dst);

	return r;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.init      = crypt_iv_essiv_init,
	.wipe      = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr       = crypt_iv_benbi_ctr,
	.dtr       = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static struct crypt_iv_operations crypt_iv_lmk_ops = {
	.ctr       = crypt_iv_lmk_ctr,
	.dtr       = crypt_iv_lmk_dtr,
	.init      = crypt_iv_lmk_init,
	.wipe      = crypt_iv_lmk_wipe,
	.generator = crypt_iv_lmk_gen,
	.post      = crypt_iv_lmk_post
};

static struct crypt_iv_operations crypt_iv_tcw_ops = {
	.ctr       = crypt_iv_tcw_ctr,
	.dtr       = crypt_iv_tcw_dtr,
	.init      = crypt_iv_tcw_init,
	.wipe      = crypt_iv_tcw_wipe,
	.generator = crypt_iv_tcw_gen,
	.post      = crypt_iv_tcw_post
};

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	if (bio_in)
		ctx->iter_in = bio_in->bi_iter;
	if (bio_out)
		ctx->iter_out = bio_out->bi_iter;
	ctx->cc_sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     struct ablkcipher_request *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
					       struct dm_crypt_request *dmreq)
{
	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
}

static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{
	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
		crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
}

static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r;

	dmreq = dmreq_of_req(cc, req);
	iv = iv_of_dmreq(cc, dmreq);

	dmreq->iv_sector = ctx->cc_sector;
	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
		    bv_in.bv_offset);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
		    bv_out.bv_offset);

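	/*
	 * Each conversion step handles exactly one 512-byte sector, so both
	 * iterators are advanced by one sector before the request is issued.
	 */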
bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT); 861 bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT); 862 863 if (cc->iv_gen_ops) { 864 r = cc->iv_gen_ops->generator(cc, iv, dmreq); 865 if (r < 0) 866 return r; 867 } 868 869 ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out, 870 1 << SECTOR_SHIFT, iv); 871 872 if (bio_data_dir(ctx->bio_in) == WRITE) 873 r = crypto_ablkcipher_encrypt(req); 874 else 875 r = crypto_ablkcipher_decrypt(req); 876 877 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post) 878 r = cc->iv_gen_ops->post(cc, iv, dmreq); 879 880 return r; 881 } 882 883 static void kcryptd_async_done(struct crypto_async_request *async_req, 884 int error); 885 886 static void crypt_alloc_req(struct crypt_config *cc, 887 struct convert_context *ctx) 888 { 889 unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1); 890 891 if (!ctx->req) 892 ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO); 893 894 ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]); 895 896 /* 897 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs 898 * requests if driver request queue is full. 899 */ 900 ablkcipher_request_set_callback(ctx->req, 901 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, 902 kcryptd_async_done, dmreq_of_req(cc, ctx->req)); 903 } 904 905 static void crypt_free_req(struct crypt_config *cc, 906 struct ablkcipher_request *req, struct bio *base_bio) 907 { 908 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size); 909 910 if ((struct ablkcipher_request *)(io + 1) != req) 911 mempool_free(req, cc->req_pool); 912 } 913 914 /* 915 * Encrypt / decrypt data from one bio to another one (can be the same one) 916 */ 917 static int crypt_convert(struct crypt_config *cc, 918 struct convert_context *ctx) 919 { 920 int r; 921 922 atomic_set(&ctx->cc_pending, 1); 923 924 while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) { 925 926 crypt_alloc_req(cc, ctx); 927 928 atomic_inc(&ctx->cc_pending); 929 930 r = crypt_convert_block(cc, ctx, ctx->req); 931 932 switch (r) { 933 /* 934 * The request was queued by a crypto driver 935 * but the driver request queue is full, let's wait. 936 */ 937 case -EBUSY: 938 wait_for_completion(&ctx->restart); 939 reinit_completion(&ctx->restart); 940 /* fall through */ 941 /* 942 * The request is queued and processed asynchronously, 943 * completion function kcryptd_async_done() will be called. 944 */ 945 case -EINPROGRESS: 946 ctx->req = NULL; 947 ctx->cc_sector++; 948 continue; 949 /* 950 * The request was already processed (synchronously). 951 */ 952 case 0: 953 atomic_dec(&ctx->cc_pending); 954 ctx->cc_sector++; 955 cond_resched(); 956 continue; 957 958 /* There was an error while processing the request. */ 959 default: 960 atomic_dec(&ctx->cc_pending); 961 return r; 962 } 963 } 964 965 return 0; 966 } 967 968 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone); 969 970 /* 971 * Generate a new unfragmented bio with the given size 972 * This should never violate the device limitations (but only because 973 * max_segment_size is being constrained to PAGE_SIZE). 974 * 975 * This function may be called concurrently. If we allocate from the mempool 976 * concurrently, there is a possibility of deadlock. For example, if we have 977 * mempool of 256 pages, two processes, each wanting 256, pages allocate from 978 * the mempool concurrently, it may deadlock in a situation where both processes 979 * have allocated 128 pages and the mempool is exhausted. 
980 * 981 * In order to avoid this scenario we allocate the pages under a mutex. 982 * 983 * In order to not degrade performance with excessive locking, we try 984 * non-blocking allocations without a mutex first but on failure we fallback 985 * to blocking allocations with a mutex. 986 */ 987 static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size) 988 { 989 struct crypt_config *cc = io->cc; 990 struct bio *clone; 991 unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 992 gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM; 993 unsigned i, len, remaining_size; 994 struct page *page; 995 struct bio_vec *bvec; 996 997 retry: 998 if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM)) 999 mutex_lock(&cc->bio_alloc_lock); 1000 1001 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs); 1002 if (!clone) 1003 goto return_clone; 1004 1005 clone_init(io, clone); 1006 1007 remaining_size = size; 1008 1009 for (i = 0; i < nr_iovecs; i++) { 1010 page = mempool_alloc(cc->page_pool, gfp_mask); 1011 if (!page) { 1012 crypt_free_buffer_pages(cc, clone); 1013 bio_put(clone); 1014 gfp_mask |= __GFP_DIRECT_RECLAIM; 1015 goto retry; 1016 } 1017 1018 len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size; 1019 1020 bvec = &clone->bi_io_vec[clone->bi_vcnt++]; 1021 bvec->bv_page = page; 1022 bvec->bv_len = len; 1023 bvec->bv_offset = 0; 1024 1025 clone->bi_iter.bi_size += len; 1026 1027 remaining_size -= len; 1028 } 1029 1030 return_clone: 1031 if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM)) 1032 mutex_unlock(&cc->bio_alloc_lock); 1033 1034 return clone; 1035 } 1036 1037 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone) 1038 { 1039 unsigned int i; 1040 struct bio_vec *bv; 1041 1042 bio_for_each_segment_all(bv, clone, i) { 1043 BUG_ON(!bv->bv_page); 1044 mempool_free(bv->bv_page, cc->page_pool); 1045 bv->bv_page = NULL; 1046 } 1047 } 1048 1049 static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc, 1050 struct bio *bio, sector_t sector) 1051 { 1052 io->cc = cc; 1053 io->base_bio = bio; 1054 io->sector = sector; 1055 io->error = 0; 1056 io->ctx.req = NULL; 1057 atomic_set(&io->io_pending, 0); 1058 } 1059 1060 static void crypt_inc_pending(struct dm_crypt_io *io) 1061 { 1062 atomic_inc(&io->io_pending); 1063 } 1064 1065 /* 1066 * One of the bios was finished. Check for completion of 1067 * the whole request and correctly clean up the buffer. 1068 */ 1069 static void crypt_dec_pending(struct dm_crypt_io *io) 1070 { 1071 struct crypt_config *cc = io->cc; 1072 struct bio *base_bio = io->base_bio; 1073 int error = io->error; 1074 1075 if (!atomic_dec_and_test(&io->io_pending)) 1076 return; 1077 1078 if (io->ctx.req) 1079 crypt_free_req(cc, io->ctx.req, base_bio); 1080 1081 base_bio->bi_error = error; 1082 bio_endio(base_bio); 1083 } 1084 1085 /* 1086 * kcryptd/kcryptd_io: 1087 * 1088 * Needed because it would be very unwise to do decryption in an 1089 * interrupt context. 1090 * 1091 * kcryptd performs the actual encryption or decryption. 1092 * 1093 * kcryptd_io performs the IO submission. 1094 * 1095 * They must be separated as otherwise the final stages could be 1096 * starved by new requests which can block in the first stages due 1097 * to memory allocation. 1098 * 1099 * The work is done per CPU global for all dm-crypt instances. 1100 * They should not depend on each other and do not block. 
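 *
 * Both queues are allocated in crypt_ctr() with WQ_MEM_RECLAIM, so they can
 * keep making forward progress while the system is reclaiming memory.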
1101 */ 1102 static void crypt_endio(struct bio *clone) 1103 { 1104 struct dm_crypt_io *io = clone->bi_private; 1105 struct crypt_config *cc = io->cc; 1106 unsigned rw = bio_data_dir(clone); 1107 int error; 1108 1109 /* 1110 * free the processed pages 1111 */ 1112 if (rw == WRITE) 1113 crypt_free_buffer_pages(cc, clone); 1114 1115 error = clone->bi_error; 1116 bio_put(clone); 1117 1118 if (rw == READ && !error) { 1119 kcryptd_queue_crypt(io); 1120 return; 1121 } 1122 1123 if (unlikely(error)) 1124 io->error = error; 1125 1126 crypt_dec_pending(io); 1127 } 1128 1129 static void clone_init(struct dm_crypt_io *io, struct bio *clone) 1130 { 1131 struct crypt_config *cc = io->cc; 1132 1133 clone->bi_private = io; 1134 clone->bi_end_io = crypt_endio; 1135 clone->bi_bdev = cc->dev->bdev; 1136 clone->bi_rw = io->base_bio->bi_rw; 1137 } 1138 1139 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) 1140 { 1141 struct crypt_config *cc = io->cc; 1142 struct bio *clone; 1143 1144 /* 1145 * We need the original biovec array in order to decrypt 1146 * the whole bio data *afterwards* -- thanks to immutable 1147 * biovecs we don't need to worry about the block layer 1148 * modifying the biovec array; so leverage bio_clone_fast(). 1149 */ 1150 clone = bio_clone_fast(io->base_bio, gfp, cc->bs); 1151 if (!clone) 1152 return 1; 1153 1154 crypt_inc_pending(io); 1155 1156 clone_init(io, clone); 1157 clone->bi_iter.bi_sector = cc->start + io->sector; 1158 1159 generic_make_request(clone); 1160 return 0; 1161 } 1162 1163 static void kcryptd_io_read_work(struct work_struct *work) 1164 { 1165 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); 1166 1167 crypt_inc_pending(io); 1168 if (kcryptd_io_read(io, GFP_NOIO)) 1169 io->error = -ENOMEM; 1170 crypt_dec_pending(io); 1171 } 1172 1173 static void kcryptd_queue_read(struct dm_crypt_io *io) 1174 { 1175 struct crypt_config *cc = io->cc; 1176 1177 INIT_WORK(&io->work, kcryptd_io_read_work); 1178 queue_work(cc->io_queue, &io->work); 1179 } 1180 1181 static void kcryptd_io_write(struct dm_crypt_io *io) 1182 { 1183 struct bio *clone = io->ctx.bio_out; 1184 1185 generic_make_request(clone); 1186 } 1187 1188 #define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node) 1189 1190 static int dmcrypt_write(void *data) 1191 { 1192 struct crypt_config *cc = data; 1193 struct dm_crypt_io *io; 1194 1195 while (1) { 1196 struct rb_root write_tree; 1197 struct blk_plug plug; 1198 1199 DECLARE_WAITQUEUE(wait, current); 1200 1201 spin_lock_irq(&cc->write_thread_wait.lock); 1202 continue_locked: 1203 1204 if (!RB_EMPTY_ROOT(&cc->write_tree)) 1205 goto pop_from_list; 1206 1207 if (unlikely(test_bit(DM_CRYPT_EXIT_THREAD, &cc->flags))) { 1208 spin_unlock_irq(&cc->write_thread_wait.lock); 1209 break; 1210 } 1211 1212 __set_current_state(TASK_INTERRUPTIBLE); 1213 __add_wait_queue(&cc->write_thread_wait, &wait); 1214 1215 spin_unlock_irq(&cc->write_thread_wait.lock); 1216 1217 schedule(); 1218 1219 spin_lock_irq(&cc->write_thread_wait.lock); 1220 __remove_wait_queue(&cc->write_thread_wait, &wait); 1221 goto continue_locked; 1222 1223 pop_from_list: 1224 write_tree = cc->write_tree; 1225 cc->write_tree = RB_ROOT; 1226 spin_unlock_irq(&cc->write_thread_wait.lock); 1227 1228 BUG_ON(rb_parent(write_tree.rb_node)); 1229 1230 /* 1231 * Note: we cannot walk the tree here with rb_next because 1232 * the structures may be freed when kcryptd_io_write is called. 
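		 * Instead, repeatedly take the leftmost node with rb_first()
		 * and erase it from the tree before passing the io to
		 * kcryptd_io_write().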
1233 */ 1234 blk_start_plug(&plug); 1235 do { 1236 io = crypt_io_from_node(rb_first(&write_tree)); 1237 rb_erase(&io->rb_node, &write_tree); 1238 kcryptd_io_write(io); 1239 } while (!RB_EMPTY_ROOT(&write_tree)); 1240 blk_finish_plug(&plug); 1241 } 1242 return 0; 1243 } 1244 1245 static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async) 1246 { 1247 struct bio *clone = io->ctx.bio_out; 1248 struct crypt_config *cc = io->cc; 1249 unsigned long flags; 1250 sector_t sector; 1251 struct rb_node **rbp, *parent; 1252 1253 if (unlikely(io->error < 0)) { 1254 crypt_free_buffer_pages(cc, clone); 1255 bio_put(clone); 1256 crypt_dec_pending(io); 1257 return; 1258 } 1259 1260 /* crypt_convert should have filled the clone bio */ 1261 BUG_ON(io->ctx.iter_out.bi_size); 1262 1263 clone->bi_iter.bi_sector = cc->start + io->sector; 1264 1265 if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) { 1266 generic_make_request(clone); 1267 return; 1268 } 1269 1270 spin_lock_irqsave(&cc->write_thread_wait.lock, flags); 1271 rbp = &cc->write_tree.rb_node; 1272 parent = NULL; 1273 sector = io->sector; 1274 while (*rbp) { 1275 parent = *rbp; 1276 if (sector < crypt_io_from_node(parent)->sector) 1277 rbp = &(*rbp)->rb_left; 1278 else 1279 rbp = &(*rbp)->rb_right; 1280 } 1281 rb_link_node(&io->rb_node, parent, rbp); 1282 rb_insert_color(&io->rb_node, &cc->write_tree); 1283 1284 wake_up_locked(&cc->write_thread_wait); 1285 spin_unlock_irqrestore(&cc->write_thread_wait.lock, flags); 1286 } 1287 1288 static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) 1289 { 1290 struct crypt_config *cc = io->cc; 1291 struct bio *clone; 1292 int crypt_finished; 1293 sector_t sector = io->sector; 1294 int r; 1295 1296 /* 1297 * Prevent io from disappearing until this function completes. 
1298 */ 1299 crypt_inc_pending(io); 1300 crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector); 1301 1302 clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size); 1303 if (unlikely(!clone)) { 1304 io->error = -EIO; 1305 goto dec; 1306 } 1307 1308 io->ctx.bio_out = clone; 1309 io->ctx.iter_out = clone->bi_iter; 1310 1311 sector += bio_sectors(clone); 1312 1313 crypt_inc_pending(io); 1314 r = crypt_convert(cc, &io->ctx); 1315 if (r) 1316 io->error = -EIO; 1317 crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending); 1318 1319 /* Encryption was already finished, submit io now */ 1320 if (crypt_finished) { 1321 kcryptd_crypt_write_io_submit(io, 0); 1322 io->sector = sector; 1323 } 1324 1325 dec: 1326 crypt_dec_pending(io); 1327 } 1328 1329 static void kcryptd_crypt_read_done(struct dm_crypt_io *io) 1330 { 1331 crypt_dec_pending(io); 1332 } 1333 1334 static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) 1335 { 1336 struct crypt_config *cc = io->cc; 1337 int r = 0; 1338 1339 crypt_inc_pending(io); 1340 1341 crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio, 1342 io->sector); 1343 1344 r = crypt_convert(cc, &io->ctx); 1345 if (r < 0) 1346 io->error = -EIO; 1347 1348 if (atomic_dec_and_test(&io->ctx.cc_pending)) 1349 kcryptd_crypt_read_done(io); 1350 1351 crypt_dec_pending(io); 1352 } 1353 1354 static void kcryptd_async_done(struct crypto_async_request *async_req, 1355 int error) 1356 { 1357 struct dm_crypt_request *dmreq = async_req->data; 1358 struct convert_context *ctx = dmreq->ctx; 1359 struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); 1360 struct crypt_config *cc = io->cc; 1361 1362 /* 1363 * A request from crypto driver backlog is going to be processed now, 1364 * finish the completion and continue in crypt_convert(). 1365 * (Callback will be called for the second time for this request.) 
1366 */ 1367 if (error == -EINPROGRESS) { 1368 complete(&ctx->restart); 1369 return; 1370 } 1371 1372 if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post) 1373 error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq); 1374 1375 if (error < 0) 1376 io->error = -EIO; 1377 1378 crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio); 1379 1380 if (!atomic_dec_and_test(&ctx->cc_pending)) 1381 return; 1382 1383 if (bio_data_dir(io->base_bio) == READ) 1384 kcryptd_crypt_read_done(io); 1385 else 1386 kcryptd_crypt_write_io_submit(io, 1); 1387 } 1388 1389 static void kcryptd_crypt(struct work_struct *work) 1390 { 1391 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); 1392 1393 if (bio_data_dir(io->base_bio) == READ) 1394 kcryptd_crypt_read_convert(io); 1395 else 1396 kcryptd_crypt_write_convert(io); 1397 } 1398 1399 static void kcryptd_queue_crypt(struct dm_crypt_io *io) 1400 { 1401 struct crypt_config *cc = io->cc; 1402 1403 INIT_WORK(&io->work, kcryptd_crypt); 1404 queue_work(cc->crypt_queue, &io->work); 1405 } 1406 1407 /* 1408 * Decode key from its hex representation 1409 */ 1410 static int crypt_decode_key(u8 *key, char *hex, unsigned int size) 1411 { 1412 char buffer[3]; 1413 unsigned int i; 1414 1415 buffer[2] = '\0'; 1416 1417 for (i = 0; i < size; i++) { 1418 buffer[0] = *hex++; 1419 buffer[1] = *hex++; 1420 1421 if (kstrtou8(buffer, 16, &key[i])) 1422 return -EINVAL; 1423 } 1424 1425 if (*hex != '\0') 1426 return -EINVAL; 1427 1428 return 0; 1429 } 1430 1431 static void crypt_free_tfms(struct crypt_config *cc) 1432 { 1433 unsigned i; 1434 1435 if (!cc->tfms) 1436 return; 1437 1438 for (i = 0; i < cc->tfms_count; i++) 1439 if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) { 1440 crypto_free_ablkcipher(cc->tfms[i]); 1441 cc->tfms[i] = NULL; 1442 } 1443 1444 kfree(cc->tfms); 1445 cc->tfms = NULL; 1446 } 1447 1448 static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode) 1449 { 1450 unsigned i; 1451 int err; 1452 1453 cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *), 1454 GFP_KERNEL); 1455 if (!cc->tfms) 1456 return -ENOMEM; 1457 1458 for (i = 0; i < cc->tfms_count; i++) { 1459 cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0); 1460 if (IS_ERR(cc->tfms[i])) { 1461 err = PTR_ERR(cc->tfms[i]); 1462 crypt_free_tfms(cc); 1463 return err; 1464 } 1465 } 1466 1467 return 0; 1468 } 1469 1470 static int crypt_setkey_allcpus(struct crypt_config *cc) 1471 { 1472 unsigned subkey_size; 1473 int err = 0, i, r; 1474 1475 /* Ignore extra keys (which are used for IV etc) */ 1476 subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count); 1477 1478 for (i = 0; i < cc->tfms_count; i++) { 1479 r = crypto_ablkcipher_setkey(cc->tfms[i], 1480 cc->key + (i * subkey_size), 1481 subkey_size); 1482 if (r) 1483 err = r; 1484 } 1485 1486 return err; 1487 } 1488 1489 static int crypt_set_key(struct crypt_config *cc, char *key) 1490 { 1491 int r = -EINVAL; 1492 int key_string_len = strlen(key); 1493 1494 /* The key size may not be changed. */ 1495 if (cc->key_size != (key_string_len >> 1)) 1496 goto out; 1497 1498 /* Hyphen (which gives a key_size of zero) means there is no key. */ 1499 if (!cc->key_size && strcmp(key, "-")) 1500 goto out; 1501 1502 if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0) 1503 goto out; 1504 1505 set_bit(DM_CRYPT_KEY_VALID, &cc->flags); 1506 1507 r = crypt_setkey_allcpus(cc); 1508 1509 out: 1510 /* Hex key string not needed after here, so wipe it. 
*/ 1511 memset(key, '0', key_string_len); 1512 1513 return r; 1514 } 1515 1516 static int crypt_wipe_key(struct crypt_config *cc) 1517 { 1518 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); 1519 memset(&cc->key, 0, cc->key_size * sizeof(u8)); 1520 1521 return crypt_setkey_allcpus(cc); 1522 } 1523 1524 static void crypt_dtr(struct dm_target *ti) 1525 { 1526 struct crypt_config *cc = ti->private; 1527 1528 ti->private = NULL; 1529 1530 if (!cc) 1531 return; 1532 1533 if (cc->write_thread) { 1534 spin_lock_irq(&cc->write_thread_wait.lock); 1535 set_bit(DM_CRYPT_EXIT_THREAD, &cc->flags); 1536 wake_up_locked(&cc->write_thread_wait); 1537 spin_unlock_irq(&cc->write_thread_wait.lock); 1538 kthread_stop(cc->write_thread); 1539 } 1540 1541 if (cc->io_queue) 1542 destroy_workqueue(cc->io_queue); 1543 if (cc->crypt_queue) 1544 destroy_workqueue(cc->crypt_queue); 1545 1546 crypt_free_tfms(cc); 1547 1548 if (cc->bs) 1549 bioset_free(cc->bs); 1550 1551 mempool_destroy(cc->page_pool); 1552 mempool_destroy(cc->req_pool); 1553 1554 if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) 1555 cc->iv_gen_ops->dtr(cc); 1556 1557 if (cc->dev) 1558 dm_put_device(ti, cc->dev); 1559 1560 kzfree(cc->cipher); 1561 kzfree(cc->cipher_string); 1562 1563 /* Must zero key material before freeing */ 1564 kzfree(cc); 1565 } 1566 1567 static int crypt_ctr_cipher(struct dm_target *ti, 1568 char *cipher_in, char *key) 1569 { 1570 struct crypt_config *cc = ti->private; 1571 char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount; 1572 char *cipher_api = NULL; 1573 int ret = -EINVAL; 1574 char dummy; 1575 1576 /* Convert to crypto api definition? */ 1577 if (strchr(cipher_in, '(')) { 1578 ti->error = "Bad cipher specification"; 1579 return -EINVAL; 1580 } 1581 1582 cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL); 1583 if (!cc->cipher_string) 1584 goto bad_mem; 1585 1586 /* 1587 * Legacy dm-crypt cipher specification 1588 * cipher[:keycount]-mode-iv:ivopts 1589 */ 1590 tmp = cipher_in; 1591 keycount = strsep(&tmp, "-"); 1592 cipher = strsep(&keycount, ":"); 1593 1594 if (!keycount) 1595 cc->tfms_count = 1; 1596 else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 || 1597 !is_power_of_2(cc->tfms_count)) { 1598 ti->error = "Bad cipher key count specification"; 1599 return -EINVAL; 1600 } 1601 cc->key_parts = cc->tfms_count; 1602 cc->key_extra_size = 0; 1603 1604 cc->cipher = kstrdup(cipher, GFP_KERNEL); 1605 if (!cc->cipher) 1606 goto bad_mem; 1607 1608 chainmode = strsep(&tmp, "-"); 1609 ivopts = strsep(&tmp, "-"); 1610 ivmode = strsep(&ivopts, ":"); 1611 1612 if (tmp) 1613 DMWARN("Ignoring unexpected additional cipher options"); 1614 1615 /* 1616 * For compatibility with the original dm-crypt mapping format, if 1617 * only the cipher name is supplied, use cbc-plain. 
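	 * (For example, a bare "aes" is treated as "aes-cbc-plain".)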
	 */
	if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
		       "%s(%s)", chainmode, cipher);
	if (ret < 0) {
		kfree(cipher_api);
		goto bad_mem;
	}

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		goto bad;
	}

	/* Initialize IV */
	cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (ivmode) {
		DMWARN("Selected cipher does not support IVs");
		ivmode = NULL;
	}

	/* Choose ivmode, see comments at iv code. */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else if (strcmp(ivmode, "lmk") == 0) {
		cc->iv_gen_ops = &crypt_iv_lmk_ops;
		/*
		 * Versions 2 and 3 are recognised according
		 * to length of provided multi-key string.
		 * If present (version 3), last key is used as IV seed.
		 * All keys (including IV seed) are always the same size.
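		 * (For example, aes:64 with a 65 x 32 byte key string ends up
		 * with key_parts = 65 and key_extra_size = 32.)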
1678 */ 1679 if (cc->key_size % cc->key_parts) { 1680 cc->key_parts++; 1681 cc->key_extra_size = cc->key_size / cc->key_parts; 1682 } 1683 } else if (strcmp(ivmode, "tcw") == 0) { 1684 cc->iv_gen_ops = &crypt_iv_tcw_ops; 1685 cc->key_parts += 2; /* IV + whitening */ 1686 cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE; 1687 } else { 1688 ret = -EINVAL; 1689 ti->error = "Invalid IV mode"; 1690 goto bad; 1691 } 1692 1693 /* Initialize and set key */ 1694 ret = crypt_set_key(cc, key); 1695 if (ret < 0) { 1696 ti->error = "Error decoding and setting key"; 1697 goto bad; 1698 } 1699 1700 /* Allocate IV */ 1701 if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) { 1702 ret = cc->iv_gen_ops->ctr(cc, ti, ivopts); 1703 if (ret < 0) { 1704 ti->error = "Error creating IV"; 1705 goto bad; 1706 } 1707 } 1708 1709 /* Initialize IV (set keys for ESSIV etc) */ 1710 if (cc->iv_gen_ops && cc->iv_gen_ops->init) { 1711 ret = cc->iv_gen_ops->init(cc); 1712 if (ret < 0) { 1713 ti->error = "Error initialising IV"; 1714 goto bad; 1715 } 1716 } 1717 1718 ret = 0; 1719 bad: 1720 kfree(cipher_api); 1721 return ret; 1722 1723 bad_mem: 1724 ti->error = "Cannot allocate cipher strings"; 1725 return -ENOMEM; 1726 } 1727 1728 /* 1729 * Construct an encryption mapping: 1730 * <cipher> <key> <iv_offset> <dev_path> <start> 1731 */ 1732 static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) 1733 { 1734 struct crypt_config *cc; 1735 unsigned int key_size, opt_params; 1736 unsigned long long tmpll; 1737 int ret; 1738 size_t iv_size_padding; 1739 struct dm_arg_set as; 1740 const char *opt_string; 1741 char dummy; 1742 1743 static struct dm_arg _args[] = { 1744 {0, 3, "Invalid number of feature args"}, 1745 }; 1746 1747 if (argc < 5) { 1748 ti->error = "Not enough arguments"; 1749 return -EINVAL; 1750 } 1751 1752 key_size = strlen(argv[1]) >> 1; 1753 1754 cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL); 1755 if (!cc) { 1756 ti->error = "Cannot allocate encryption context"; 1757 return -ENOMEM; 1758 } 1759 cc->key_size = key_size; 1760 1761 ti->private = cc; 1762 ret = crypt_ctr_cipher(ti, argv[0], argv[1]); 1763 if (ret < 0) 1764 goto bad; 1765 1766 cc->dmreq_start = sizeof(struct ablkcipher_request); 1767 cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc)); 1768 cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request)); 1769 1770 if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) { 1771 /* Allocate the padding exactly */ 1772 iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request)) 1773 & crypto_ablkcipher_alignmask(any_tfm(cc)); 1774 } else { 1775 /* 1776 * If the cipher requires greater alignment than kmalloc 1777 * alignment, we don't know the exact position of the 1778 * initialization vector. We must assume worst case. 
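		 * (Reserving a full alignmask worth of extra bytes is always
		 * enough for iv_of_dmreq() to align the IV within the mempool
		 * element.)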
1779 */ 1780 iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc)); 1781 } 1782 1783 ret = -ENOMEM; 1784 cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start + 1785 sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size); 1786 if (!cc->req_pool) { 1787 ti->error = "Cannot allocate crypt request mempool"; 1788 goto bad; 1789 } 1790 1791 cc->per_bio_data_size = ti->per_bio_data_size = 1792 ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + 1793 sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size, 1794 ARCH_KMALLOC_MINALIGN); 1795 1796 cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0); 1797 if (!cc->page_pool) { 1798 ti->error = "Cannot allocate page mempool"; 1799 goto bad; 1800 } 1801 1802 cc->bs = bioset_create(MIN_IOS, 0); 1803 if (!cc->bs) { 1804 ti->error = "Cannot allocate crypt bioset"; 1805 goto bad; 1806 } 1807 1808 mutex_init(&cc->bio_alloc_lock); 1809 1810 ret = -EINVAL; 1811 if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) { 1812 ti->error = "Invalid iv_offset sector"; 1813 goto bad; 1814 } 1815 cc->iv_offset = tmpll; 1816 1817 ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev); 1818 if (ret) { 1819 ti->error = "Device lookup failed"; 1820 goto bad; 1821 } 1822 1823 ret = -EINVAL; 1824 if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) { 1825 ti->error = "Invalid device sector"; 1826 goto bad; 1827 } 1828 cc->start = tmpll; 1829 1830 argv += 5; 1831 argc -= 5; 1832 1833 /* Optional parameters */ 1834 if (argc) { 1835 as.argc = argc; 1836 as.argv = argv; 1837 1838 ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error); 1839 if (ret) 1840 goto bad; 1841 1842 ret = -EINVAL; 1843 while (opt_params--) { 1844 opt_string = dm_shift_arg(&as); 1845 if (!opt_string) { 1846 ti->error = "Not enough feature arguments"; 1847 goto bad; 1848 } 1849 1850 if (!strcasecmp(opt_string, "allow_discards")) 1851 ti->num_discard_bios = 1; 1852 1853 else if (!strcasecmp(opt_string, "same_cpu_crypt")) 1854 set_bit(DM_CRYPT_SAME_CPU, &cc->flags); 1855 1856 else if (!strcasecmp(opt_string, "submit_from_crypt_cpus")) 1857 set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags); 1858 1859 else { 1860 ti->error = "Invalid feature arguments"; 1861 goto bad; 1862 } 1863 } 1864 } 1865 1866 ret = -ENOMEM; 1867 cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1); 1868 if (!cc->io_queue) { 1869 ti->error = "Couldn't create kcryptd io queue"; 1870 goto bad; 1871 } 1872 1873 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags)) 1874 cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1); 1875 else 1876 cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, 1877 num_online_cpus()); 1878 if (!cc->crypt_queue) { 1879 ti->error = "Couldn't create kcryptd queue"; 1880 goto bad; 1881 } 1882 1883 init_waitqueue_head(&cc->write_thread_wait); 1884 cc->write_tree = RB_ROOT; 1885 1886 cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write"); 1887 if (IS_ERR(cc->write_thread)) { 1888 ret = PTR_ERR(cc->write_thread); 1889 cc->write_thread = NULL; 1890 ti->error = "Couldn't spawn write thread"; 1891 goto bad; 1892 } 1893 wake_up_process(cc->write_thread); 1894 1895 ti->num_flush_bios = 1; 1896 ti->discard_zeroes_data_unsupported = true; 1897 1898 return 0; 1899 1900 bad: 1901 crypt_dtr(ti); 1902 return ret; 1903 } 1904 1905 static int crypt_map(struct dm_target *ti, struct bio *bio) 1906 { 1907 struct dm_crypt_io *io; 1908 struct crypt_config *cc = ti->private; 1909 1910 /* 1911 * If 
bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues. 1912 * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight 1913 * - for REQ_DISCARD caller must use flush if IO ordering matters 1914 */ 1915 if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) { 1916 bio->bi_bdev = cc->dev->bdev; 1917 if (bio_sectors(bio)) 1918 bio->bi_iter.bi_sector = cc->start + 1919 dm_target_offset(ti, bio->bi_iter.bi_sector); 1920 return DM_MAPIO_REMAPPED; 1921 } 1922 1923 io = dm_per_bio_data(bio, cc->per_bio_data_size); 1924 crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); 1925 io->ctx.req = (struct ablkcipher_request *)(io + 1); 1926 1927 if (bio_data_dir(io->base_bio) == READ) { 1928 if (kcryptd_io_read(io, GFP_NOWAIT)) 1929 kcryptd_queue_read(io); 1930 } else 1931 kcryptd_queue_crypt(io); 1932 1933 return DM_MAPIO_SUBMITTED; 1934 } 1935 1936 static void crypt_status(struct dm_target *ti, status_type_t type, 1937 unsigned status_flags, char *result, unsigned maxlen) 1938 { 1939 struct crypt_config *cc = ti->private; 1940 unsigned i, sz = 0; 1941 int num_feature_args = 0; 1942 1943 switch (type) { 1944 case STATUSTYPE_INFO: 1945 result[0] = '\0'; 1946 break; 1947 1948 case STATUSTYPE_TABLE: 1949 DMEMIT("%s ", cc->cipher_string); 1950 1951 if (cc->key_size > 0) 1952 for (i = 0; i < cc->key_size; i++) 1953 DMEMIT("%02x", cc->key[i]); 1954 else 1955 DMEMIT("-"); 1956 1957 DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset, 1958 cc->dev->name, (unsigned long long)cc->start); 1959 1960 num_feature_args += !!ti->num_discard_bios; 1961 num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags); 1962 num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags); 1963 if (num_feature_args) { 1964 DMEMIT(" %d", num_feature_args); 1965 if (ti->num_discard_bios) 1966 DMEMIT(" allow_discards"); 1967 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags)) 1968 DMEMIT(" same_cpu_crypt"); 1969 if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) 1970 DMEMIT(" submit_from_crypt_cpus"); 1971 } 1972 1973 break; 1974 } 1975 } 1976 1977 static void crypt_postsuspend(struct dm_target *ti) 1978 { 1979 struct crypt_config *cc = ti->private; 1980 1981 set_bit(DM_CRYPT_SUSPENDED, &cc->flags); 1982 } 1983 1984 static int crypt_preresume(struct dm_target *ti) 1985 { 1986 struct crypt_config *cc = ti->private; 1987 1988 if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) { 1989 DMERR("aborting resume - crypt key is not set."); 1990 return -EAGAIN; 1991 } 1992 1993 return 0; 1994 } 1995 1996 static void crypt_resume(struct dm_target *ti) 1997 { 1998 struct crypt_config *cc = ti->private; 1999 2000 clear_bit(DM_CRYPT_SUSPENDED, &cc->flags); 2001 } 2002 2003 /* Message interface 2004 * key set <key> 2005 * key wipe 2006 */ 2007 static int crypt_message(struct dm_target *ti, unsigned argc, char **argv) 2008 { 2009 struct crypt_config *cc = ti->private; 2010 int ret = -EINVAL; 2011 2012 if (argc < 2) 2013 goto error; 2014 2015 if (!strcasecmp(argv[0], "key")) { 2016 if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) { 2017 DMWARN("not suspended during key manipulation."); 2018 return -EINVAL; 2019 } 2020 if (argc == 3 && !strcasecmp(argv[1], "set")) { 2021 ret = crypt_set_key(cc, argv[2]); 2022 if (ret) 2023 return ret; 2024 if (cc->iv_gen_ops && cc->iv_gen_ops->init) 2025 ret = cc->iv_gen_ops->init(cc); 2026 return ret; 2027 } 2028 if (argc == 2 && !strcasecmp(argv[1], "wipe")) { 2029 if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) { 2030 ret = cc->iv_gen_ops->wipe(cc); 2031 if (ret) 2032 return ret; 2033 } 2034 return 
crypt_wipe_key(cc); 2035 } 2036 } 2037 2038 error: 2039 DMWARN("unrecognised message received."); 2040 return -EINVAL; 2041 } 2042 2043 static int crypt_iterate_devices(struct dm_target *ti, 2044 iterate_devices_callout_fn fn, void *data) 2045 { 2046 struct crypt_config *cc = ti->private; 2047 2048 return fn(ti, cc->dev, cc->start, ti->len, data); 2049 } 2050 2051 static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits) 2052 { 2053 /* 2054 * Unfortunate constraint that is required to avoid the potential 2055 * for exceeding underlying device's max_segments limits -- due to 2056 * crypt_alloc_buffer() possibly allocating pages for the encryption 2057 * bio that are not as physically contiguous as the original bio. 2058 */ 2059 limits->max_segment_size = PAGE_SIZE; 2060 } 2061 2062 static struct target_type crypt_target = { 2063 .name = "crypt", 2064 .version = {1, 14, 1}, 2065 .module = THIS_MODULE, 2066 .ctr = crypt_ctr, 2067 .dtr = crypt_dtr, 2068 .map = crypt_map, 2069 .status = crypt_status, 2070 .postsuspend = crypt_postsuspend, 2071 .preresume = crypt_preresume, 2072 .resume = crypt_resume, 2073 .message = crypt_message, 2074 .iterate_devices = crypt_iterate_devices, 2075 .io_hints = crypt_io_hints, 2076 }; 2077 2078 static int __init dm_crypt_init(void) 2079 { 2080 int r; 2081 2082 r = dm_register_target(&crypt_target); 2083 if (r < 0) 2084 DMERR("register failed %d", r); 2085 2086 return r; 2087 } 2088 2089 static void __exit dm_crypt_exit(void) 2090 { 2091 dm_unregister_target(&crypt_target); 2092 } 2093 2094 module_init(dm_crypt_init); 2095 module_exit(dm_crypt_exit); 2096 2097 MODULE_AUTHOR("Jana Saout <jana@saout.de>"); 2098 MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption"); 2099 MODULE_LICENSE("GPL"); 2100