/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
	atomic_t pending;
};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct dm_target *target;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t pending;
	int error;
	sector_t sector;
	struct dm_crypt_io *base_io;
};

struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in;
	struct scatterlist sg_out;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};

struct iv_essiv_private {
	struct crypto_cipher *tfm;
	struct crypto_hash *hash_tfm;
	u8 *salt;
};

struct iv_benbi_private {
	int shift;
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data, crypto requests and
	 * encryption requests/buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	char *cipher;
	char *cipher_mode;

	struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_essiv_private essiv;
		struct iv_benbi_private benbi;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	/*
	 * Layout of each crypto request:
	 *
	 *    struct ablkcipher_request
	 *       context
	 *       padding
	 *    struct dm_crypt_request
	 *       padding
	 *    IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
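	/*
	 * dmreq_of_req() and crypt_convert_block() below depend on this
	 * layout: the dm_crypt_request lives dmreq_start bytes past the
	 * ablkcipher_request, and the IV follows it, rounded up to the
	 * cipher's alignment mask.
	 */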
	unsigned int dmreq_start;
	struct ablkcipher_request *req;

	struct crypto_ablkcipher *tfm;
	unsigned long flags;
	unsigned int key_size;
	u8 key[0];
};

#define MIN_IOS        16
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes)
 *
 * null: the initial vector is always zero. Provides compatibility with
 *       obsolete loop_fish2 devices. Do not use for new devices.
 *
 * plumb: unimplemented, see:
 *   http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */

static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);

	return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);

	return 0;
}

/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	struct hash_desc desc;
	struct scatterlist sg;
	int err;

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = essiv->hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
	if (err)
		return err;

	return crypto_cipher_setkey(essiv->tfm, essiv->salt,
				    crypto_hash_digestsize(essiv->hash_tfm));
}

/* Wipe salt and reset key derived from volume key */
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);

	memset(essiv->salt, 0, salt_size);

	return crypto_cipher_setkey(essiv->tfm, essiv->salt, salt_size);
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

	crypto_free_cipher(essiv->tfm);
	essiv->tfm = NULL;

	crypto_free_hash(essiv->hash_tfm);
	essiv->hash_tfm = NULL;

	kzfree(essiv->salt);
	essiv->salt = NULL;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_hash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Allocate hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	/* Allocate essiv_tfm */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		err = PTR_ERR(essiv_tfm);
		goto bad;
	}
	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(cc->tfm)) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		err = -EINVAL;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.tfm = essiv_tfm;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	return 0;

bad:
	if (essiv_tfm && !IS_ERR(essiv_tfm))
		crypto_free_cipher(essiv_tfm);
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_hash(hash_tfm);
	kfree(salt);
	return err;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);
	crypto_cipher_encrypt_one(cc->iv_gen_private.essiv.tfm, iv, iv);
	return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_ablkcipher_blocksize(cc->tfm);
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cipher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cipher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is set below */

	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.init      = crypt_iv_essiv_init,
	.wipe      = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr       = crypt_iv_benbi_ctr,
	.dtr       = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     struct ablkcipher_request *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
					       struct dm_crypt_request *dmreq)
{
	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
}

static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r = 0;

	dmreq = dmreq_of_req(cc, req);
	iv = (u8 *)ALIGN((unsigned long)(dmreq + 1),
			 crypto_ablkcipher_alignmask(cc->tfm) + 1);

	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
		    bv_in->bv_offset + ctx->offset_in);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
		    bv_out->bv_offset + ctx->offset_out);

	ctx->offset_in += 1 << SECTOR_SHIFT;
	if (ctx->offset_in >= bv_in->bv_len) {
		ctx->offset_in = 0;
		ctx->idx_in++;
	}

	ctx->offset_out += 1 << SECTOR_SHIFT;
	if (ctx->offset_out >= bv_out->bv_len) {
		ctx->offset_out = 0;
		ctx->idx_out++;
	}

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, ctx->sector);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	return r;
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);
static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	if (!cc->req)
		cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
	ablkcipher_request_set_tfm(cc->req, cc->tfm);
	ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					CRYPTO_TFM_REQ_MAY_SLEEP,
					kcryptd_async_done,
					dmreq_of_req(cc, cc->req));
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r;

	atomic_set(&ctx->pending, 1);

	while (ctx->idx_in < ctx->bio_in->bi_vcnt &&
	       ctx->idx_out < ctx->bio_out->bi_vcnt) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->pending);

		r = crypt_convert_block(cc, ctx, cc->req);

		switch (r) {
		/* async */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			INIT_COMPLETION(ctx->restart);
			/* fall through */
		case -EINPROGRESS:
			cc->req = NULL;
			ctx->sector++;
			continue;

		/* sync */
		case 0:
			atomic_dec(&ctx->pending);
			ctx->sector++;
			cond_resched();
			continue;

		/* error */
		default:
			atomic_dec(&ctx->pending);
			return r;
		}
	}

	return 0;
}

static void dm_crypt_bio_destructor(struct bio *bio)
{
	struct dm_crypt_io *io = bio->bi_private;
	struct crypt_config *cc = io->target->private;

	bio_free(bio, cc->bs);
}

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages, indicated by
 * *out_of_pages set to 1.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
				      unsigned *out_of_pages)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);
	*out_of_pages = 0;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page) {
			*out_of_pages = 1;
			break;
		}

		/*
		 * if additional pages cannot be allocated without waiting,
		 * return a partially allocated bio, the caller will then try
		 * to allocate additional bios while submitting this partial bio
		 */
		if (i == (MIN_BIO_PAGES - 1))
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, cc->page_pool);
			break;
		}

		size -= len;
	}

	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	for (i = 0; i < clone->bi_vcnt; i++) {
		bv = bio_iovec_idx(clone, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
					  struct bio *bio, sector_t sector)
{
	struct crypt_config *cc = ti->private;
	struct dm_crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->base_io = NULL;
	atomic_set(&io->pending, 0);

	return io;
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->pending);
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 * If base_io is set, wait for the last fragment to complete.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct dm_crypt_io *base_io = io->base_io;
	int error = io->error;

	if (!atomic_dec_and_test(&io->pending))
		return;

	mempool_free(io, cc->io_pool);

	if (likely(!base_io))
		bio_endio(base_bio, error);
	else {
		if (error && !base_io->error)
			base_io->error = error;
		crypt_dec_pending(base_io);
	}
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 */
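/*
 * The resulting flow is: for a READ, kcryptd_io submits the clone bio,
 * crypt_endio hands the completed io to kcryptd, and kcryptd decrypts the
 * data in place.  For a WRITE, kcryptd first encrypts into a freshly
 * allocated clone and only then submits it (directly, or via kcryptd_io
 * when the encryption completed asynchronously).
 */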
static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->target->private;
	unsigned rw = bio_data_dir(clone);

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;

	clone->bi_private = io;
	clone->bi_end_io = crypt_endio;
	clone->bi_bdev = cc->dev->bdev;
	clone->bi_rw = io->base_bio->bi_rw;
	clone->bi_destructor = dm_crypt_bio_destructor;
}

static void kcryptd_io_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

	crypt_inc_pending(io);

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
	if (unlikely(!clone)) {
		io->error = -ENOMEM;
		crypt_dec_pending(io);
		return;
	}

	clone_init(io, clone);
	clone->bi_idx = 0;
	clone->bi_vcnt = bio_segments(base_bio);
	clone->bi_size = base_bio->bi_size;
	clone->bi_sector = cc->start + io->sector;
	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
	       sizeof(struct bio_vec) * clone->bi_vcnt);

	generic_make_request(clone);
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;
	generic_make_request(clone);
}

static void kcryptd_io(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_io_read(io);
	else
		kcryptd_io_write(io);
}

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_io);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
					  int error, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->target->private;

	if (unlikely(error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		io->error = -EIO;
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);

	clone->bi_sector = cc->start + io->sector;

	if (async)
		kcryptd_queue_io(io);
	else
		generic_make_request(clone);
}

static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	struct dm_crypt_io *new_io;
	int crypt_finished;
	unsigned out_of_pages = 0;
	unsigned remaining = io->base_bio->bi_size;
	sector_t sector = io->sector;
	int r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
		if (unlikely(!clone)) {
			io->error = -ENOMEM;
			break;
		}

		io->ctx.bio_out = clone;
		io->ctx.idx_out = 0;

		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

		crypt_inc_pending(io);
		r = crypt_convert(cc, &io->ctx);
		crypt_finished = atomic_dec_and_test(&io->ctx.pending);

		/* Encryption was already finished, submit io now */
		if (crypt_finished) {
			kcryptd_crypt_write_io_submit(io, r, 0);

			/*
			 * If there was an error, do not try next fragments.
			 * For async, error is processed in async handler.
			 */
			if (unlikely(r < 0))
				break;

			io->sector = sector;
		}

		/*
		 * Out of memory -> run queues
		 * But don't wait if split was due to the io size restriction
		 */
		if (unlikely(out_of_pages))
			congestion_wait(BLK_RW_ASYNC, HZ/100);

		/*
		 * With async crypto it is unsafe to share the crypto context
		 * between fragments, so switch to a new dm_crypt_io structure.
		 */
		if (unlikely(!crypt_finished && remaining)) {
			new_io = crypt_io_alloc(io->target, io->base_bio,
						sector);
			crypt_inc_pending(new_io);
			crypt_convert_init(cc, &new_io->ctx, NULL,
					   io->base_bio, sector);
			new_io->ctx.idx_in = io->ctx.idx_in;
			new_io->ctx.offset_in = io->ctx.offset_in;

			/*
			 * Fragments after the first use the base_io
			 * pending count.
			 */
			if (!io->base_io)
				new_io->base_io = io;
			else {
				new_io->base_io = io->base_io;
				crypt_inc_pending(io->base_io);
				crypt_dec_pending(io);
			}

			io = new_io;
		}
	}

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error)
{
	if (unlikely(error < 0))
		io->error = -EIO;

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	int r = 0;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);

	if (atomic_dec_and_test(&io->ctx.pending))
		kcryptd_crypt_read_done(io, r);

	crypt_dec_pending(io);
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->target->private;

	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);

	if (!atomic_dec_and_test(&ctx->pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io, error);
	else
		kcryptd_crypt_write_io_submit(io, error, 1);
}

static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		if (endp != &buffer[2])
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	unsigned key_size = strlen(key) >> 1;

	if (cc->key_size && cc->key_size != key_size)
		return -EINVAL;

	cc->key_size = key_size; /* initial settings */

	if ((!key_size && strcmp(key, "-")) ||
	    (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
		return -EINVAL;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size);
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));
	return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size);
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	ti->private = NULL;

	if (!cc)
		return;

	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	if (cc->bs)
		bioset_free(cc->bs);

	if (cc->page_pool)
		mempool_destroy(cc->page_pool);
	if (cc->req_pool)
		mempool_destroy(cc->req_pool);
	if (cc->io_pool)
		mempool_destroy(cc->io_pool);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->tfm && !IS_ERR(cc->tfm))
		crypto_free_ablkcipher(cc->tfm);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	kzfree(cc->cipher);
	kzfree(cc->cipher_mode);

	/* Must zero key material before freeing */
	kzfree(cc);
}

static int crypt_ctr_cipher(struct dm_target *ti,
			    char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *ivmode, *ivopts;
	char *cipher_api = NULL;
	int ret = -EINVAL;

	/* Convert to crypto api definition? */
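	/*
	 * Example (values are illustrative): a table cipher field of
	 * "aes-cbc-essiv:sha256" is split into cipher "aes", chainmode
	 * "cbc", ivmode "essiv" and ivopts "sha256", and the crypto API
	 * name handed to crypto_alloc_ablkcipher() becomes "cbc(aes)".
	 */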
	if (strchr(cipher_in, '(')) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	/*
	 * Legacy dm-crypt cipher specification
	 * cipher-mode-iv:ivopts
	 */
	tmp = cipher_in;
	cipher = strsep(&tmp, "-");

	cc->cipher = kstrdup(cipher, GFP_KERNEL);
	if (!cc->cipher)
		goto bad_mem;

	if (tmp) {
		cc->cipher_mode = kstrdup(tmp, GFP_KERNEL);
		if (!cc->cipher_mode)
			goto bad_mem;
	}

	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Ignoring unexpected additional cipher options");

	/* Compatibility mode for old dm-crypt mappings */
	if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
		kfree(cc->cipher_mode);
		cc->cipher_mode = kstrdup("cbc-plain", GFP_KERNEL);
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
		       "%s(%s)", chainmode, cipher);
	if (ret < 0) {
		kfree(cipher_api);
		goto bad_mem;
	}

	/* Allocate cipher */
	cc->tfm = crypto_alloc_ablkcipher(cipher_api, 0, 0);
	if (IS_ERR(cc->tfm)) {
		ret = PTR_ERR(cc->tfm);
		ti->error = "Error allocating crypto tfm";
		goto bad;
	}

	/* Initialize and set key */
	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		goto bad;
	}

	/* Initialize IV */
	cc->iv_size = crypto_ablkcipher_ivsize(cc->tfm);
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (ivmode) {
		DMWARN("Selected cipher does not support IVs");
		ivmode = NULL;
	}

	/* Choose ivmode, see comments at iv code. */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else {
		ret = -EINVAL;
		ti->error = "Invalid IV mode";
		goto bad;
	}

	/* Allocate IV */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			goto bad;
		}
	}

	/* Initialize IV (set keys for ESSIV etc) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			goto bad;
		}
	}

	ret = 0;
bad:
	kfree(cipher_api);
	return ret;

bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	unsigned int key_size;
	unsigned long long tmpll;
	int ret;

	if (argc != 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}

	ti->private = cc;
	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	ret = -ENOMEM;
	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad;
	}

	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start += crypto_ablkcipher_reqsize(cc->tfm);
	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
	cc->dmreq_start += crypto_ablkcipher_alignmask(cc->tfm) &
			   ~(crypto_tfm_ctx_alignment() - 1);

	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
			sizeof(struct dm_crypt_request) + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}
	cc->req = NULL;

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	cc->bs = bioset_create(MIN_IOS, 0);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	ret = -ENOMEM;
	cc->io_queue = create_singlethread_workqueue("kcryptd_io");
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	cc->crypt_queue = create_singlethread_workqueue("kcryptd");
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	ti->num_flush_requests = 1;
	return 0;

bad:
	crypt_dtr(ti);
	return ret;
}

static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc;

	if (unlikely(bio_empty_barrier(bio))) {
		cc = ti->private;
		bio->bi_bdev = cc->dev->bdev;
		return DM_MAPIO_REMAPPED;
	}

	io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector));

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_queue_io(io);
	else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		if (cc->cipher_mode)
			DMEMIT("%s-%s ", cc->cipher, cc->cipher_mode);
		else
			DMEMIT("%s ", cc->cipher);

		if (cc->key_size > 0) {
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
		       cc->dev->name, (unsigned long long)cc->start);
		break;
	}
	return 0;
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 */
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	int ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strnicmp(argv[0], MESG_STR("key"))) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) {
			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			return ret;
		}
		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) {
			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
				ret = cc->iv_gen_ops->wipe(cc);
				if (ret)
					return ret;
			}
			return crypt_wipe_key(cc);
		}
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		       struct bio_vec *biovec, int max_size)
{
	struct crypt_config *cc = ti->private;
	struct request_queue *q = bdev_get_queue(cc->dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = cc->dev->bdev;
	bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}

static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 7, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.merge  = crypt_merge,
	.iterate_devices = crypt_iterate_devices,
};

static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_crypt_io_pool);
	}

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");
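
/*
 * Example table line (illustrative only; key and sizes are placeholders):
 *
 *   0 409600 crypt aes-cbc-essiv:sha256 <64-hex-digit-key> 0 /dev/sdb 0
 *
 * This maps 409600 sectors onto /dev/sdb starting at sector 0, using AES
 * in CBC mode with ESSIV(sha256) IV generation; load it with
 * "dmsetup create <name>".
 */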