Lines matching refs:cc (references to the per-target struct crypt_config *cc in drivers/md/dm-crypt.c)
77 struct crypt_config *cc;
105 int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
107 void (*dtr)(struct crypt_config *cc);
108 int (*init)(struct crypt_config *cc);
109 int (*wipe)(struct crypt_config *cc);
110 int (*generator)(struct crypt_config *cc, u8 *iv,
112 int (*post)(struct crypt_config *cc, u8 *iv,
256 static unsigned get_max_request_size(struct crypt_config *cc, bool wrt)
262 if (wrt || cc->used_tag_size) {
266 sector_align = max(bdev_logical_block_size(cc->dev->bdev), (unsigned)cc->sector_size);
275 static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
278 static bool crypt_integrity_aead(struct crypt_config *cc);
283 static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
285 return cc->cipher_tfm.tfms[0];
288 static struct crypto_aead *any_tfm_aead(struct crypt_config *cc)
290 return cc->cipher_tfm.tfms_aead[0];
349 static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
352 memset(iv, 0, cc->iv_size);
358 static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
361 memset(iv, 0, cc->iv_size);
367 static int crypt_iv_plain64be_gen(struct crypt_config *cc, u8 *iv,
370 memset(iv, 0, cc->iv_size);
372 *(__be64 *)&iv[cc->iv_size - sizeof(u64)] = cpu_to_be64(dmreq->iv_sector);
377 static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
384 memset(iv, 0, cc->iv_size);
390 static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
396 if (crypt_integrity_aead(cc))
397 bs = crypto_aead_blocksize(any_tfm_aead(cc));
399 bs = crypto_skcipher_blocksize(any_tfm(cc));
416 cc->iv_gen_private.benbi.shift = 9 - log;
421 static void crypt_iv_benbi_dtr(struct crypt_config *cc)
425 static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
430 memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */
432 val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
433 put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
438 static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
441 memset(iv, 0, cc->iv_size);
446 static void crypt_iv_lmk_dtr(struct crypt_config *cc)
448 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
458 static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
461 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
463 if (cc->sector_size != (1 << SECTOR_SHIFT)) {
476 if (cc->key_parts == cc->tfms_count) {
483 crypt_iv_lmk_dtr(cc);
491 static int crypt_iv_lmk_init(struct crypt_config *cc)
493 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
494 int subkey_size = cc->key_size / cc->key_parts;
498 memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
504 static int crypt_iv_lmk_wipe(struct crypt_config *cc)
506 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
514 static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
518 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
557 memcpy(iv, &md5state.hash, cc->iv_size);
562 static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
570 sg = crypt_get_sg_data(cc, dmreq->sg_in);
572 r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
575 memset(iv, 0, cc->iv_size);
580 static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
590 sg = crypt_get_sg_data(cc, dmreq->sg_out);
592 r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);
596 crypto_xor(dst + sg->offset, iv, cc->iv_size);
602 static void crypt_iv_tcw_dtr(struct crypt_config *cc)
604 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
616 static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
619 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
621 if (cc->sector_size != (1 << SECTOR_SHIFT)) {
626 if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
638 tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
641 crypt_iv_tcw_dtr(cc);
649 static int crypt_iv_tcw_init(struct crypt_config *cc)
651 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
652 int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;
654 memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
655 memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
661 static int crypt_iv_tcw_wipe(struct crypt_config *cc)
663 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
665 memset(tcw->iv_seed, 0, cc->iv_size);
671 static int crypt_iv_tcw_whitening(struct crypt_config *cc,
675 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
703 static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
707 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
714 sg = crypt_get_sg_data(cc, dmreq->sg_in);
716 r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
722 if (cc->iv_size > 8)
724 cc->iv_size - 8);
729 static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
740 sg = crypt_get_sg_data(cc, dmreq->sg_out);
742 r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
748 static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,
752 get_random_bytes(iv, cc->iv_size);
756 static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
759 if (crypt_integrity_aead(cc)) {
764 if (crypto_skcipher_blocksize(any_tfm(cc)) != cc->iv_size) {
772 static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
775 struct crypto_skcipher *tfm = any_tfm(cc);
786 req = kmalloc(reqsize + cc->iv_size, GFP_NOIO);
793 memset(buf, 0, cc->iv_size);
794 *(__le64 *)buf = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
796 sg_init_one(&src, page_address(ZERO_PAGE(0)), cc->iv_size);
797 sg_init_one(&dst, iv, cc->iv_size);
798 skcipher_request_set_crypt(req, &src, &dst, cc->iv_size, buf);
806 static void crypt_iv_elephant_dtr(struct crypt_config *cc)
808 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
814 static int crypt_iv_elephant_ctr(struct crypt_config *cc, struct dm_target *ti,
817 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
828 r = crypt_iv_eboiv_ctr(cc, ti, NULL);
830 crypt_iv_elephant_dtr(cc);
978 static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *dmreq)
980 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
996 *(__le64 *)es = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
1014 sg = crypt_get_sg_data(cc, dmreq->sg_out);
1020 sg2 = crypt_get_sg_data(cc, dmreq->sg_in);
1022 memcpy(data_offset, data2 + sg2->offset, cc->sector_size);
1027 diffuser_disk_to_cpu((u32 *)data_offset, cc->sector_size / sizeof(u32));
1028 diffuser_b_decrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
1029 diffuser_a_decrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
1030 diffuser_cpu_to_disk((__le32 *)data_offset, cc->sector_size / sizeof(u32));
1033 for (i = 0; i < (cc->sector_size / 32); i++)
1037 diffuser_disk_to_cpu((u32 *)data_offset, cc->sector_size / sizeof(u32));
1038 diffuser_a_encrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
1039 diffuser_b_encrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
1040 diffuser_cpu_to_disk((__le32 *)data_offset, cc->sector_size / sizeof(u32));
1051 static int crypt_iv_elephant_gen(struct crypt_config *cc, u8 *iv,
1057 r = crypt_iv_elephant(cc, dmreq);
1062 return crypt_iv_eboiv_gen(cc, iv, dmreq);
1065 static int crypt_iv_elephant_post(struct crypt_config *cc, u8 *iv,
1069 return crypt_iv_elephant(cc, dmreq);
1074 static int crypt_iv_elephant_init(struct crypt_config *cc)
1076 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
1077 int key_offset = cc->key_size - cc->key_extra_size;
1079 return crypto_skcipher_setkey(elephant->tfm, &cc->key[key_offset], cc->key_extra_size);
1082 static int crypt_iv_elephant_wipe(struct crypt_config *cc)
1084 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
1087 memset(key, 0, cc->key_extra_size);
1088 return crypto_skcipher_setkey(elephant->tfm, key, cc->key_extra_size);
1156 static bool crypt_integrity_aead(struct crypt_config *cc)
1158 return test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
1161 static bool crypt_integrity_hmac(struct crypt_config *cc)
1163 return crypt_integrity_aead(cc) && cc->key_mac_size;
1167 static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
1170 if (unlikely(crypt_integrity_aead(cc)))
1182 if (!bio_sectors(bio) || !io->cc->tuple_size)
1189 tag_len = io->cc->tuple_size * (bio_sectors(bio) >> io->cc->sector_shift);
1201 static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
1204 struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
1213 if (bi->tuple_size < cc->used_tag_size) {
1217 cc->tuple_size = bi->tuple_size;
1218 if (1 << bi->interval_exp != cc->sector_size) {
1223 if (crypt_integrity_aead(cc)) {
1224 cc->integrity_tag_size = cc->used_tag_size - cc->integrity_iv_size;
1226 cc->integrity_tag_size, cc->integrity_iv_size);
1228 if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
1232 } else if (cc->integrity_iv_size)
1234 cc->integrity_iv_size);
1236 if ((cc->integrity_tag_size + cc->integrity_iv_size) > cc->tuple_size) {
1248 static void crypt_convert_init(struct crypt_config *cc,
1259 ctx->cc_sector = sector + cc->iv_offset;
1264 static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
1267 return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
1270 static void *req_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq)
1272 return (void *)((char *)dmreq - cc->dmreq_start);
1275 static u8 *iv_of_dmreq(struct crypt_config *cc,
1278 if (crypt_integrity_aead(cc))
1280 crypto_aead_alignmask(any_tfm_aead(cc)) + 1);
1283 crypto_skcipher_alignmask(any_tfm(cc)) + 1);
1286 static u8 *org_iv_of_dmreq(struct crypt_config *cc,
1289 return iv_of_dmreq(cc, dmreq) + cc->iv_size;
1292 static __le64 *org_sector_of_dmreq(struct crypt_config *cc,
1295 u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;
1300 static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
1303 u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size +
1304 cc->iv_size + sizeof(uint64_t);
1309 static void *tag_from_dmreq(struct crypt_config *cc,
1315 return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) *
1316 cc->tuple_size];
1319 static void *iv_tag_from_dmreq(struct crypt_config *cc,
1322 return tag_from_dmreq(cc, dmreq) + cc->integrity_tag_size;
1325 static int crypt_convert_block_aead(struct crypt_config *cc,
1337 BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);
1340 if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
1343 dmreq = dmreq_of_req(cc, req);
1345 if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
1346 dmreq->iv_sector >>= cc->sector_shift;
1349 *org_tag_of_dmreq(cc, dmreq) = tag_offset;
1351 sector = org_sector_of_dmreq(cc, dmreq);
1352 *sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);
1354 iv = iv_of_dmreq(cc, dmreq);
1355 org_iv = org_iv_of_dmreq(cc, dmreq);
1356 tag = tag_from_dmreq(cc, dmreq);
1357 tag_iv = iv_tag_from_dmreq(cc, dmreq);
1366 sg_set_buf(&dmreq->sg_in[1], org_iv, cc->iv_size);
1367 sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
1368 sg_set_buf(&dmreq->sg_in[3], tag, cc->integrity_tag_size);
1372 sg_set_buf(&dmreq->sg_out[1], org_iv, cc->iv_size);
1373 sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
1374 sg_set_buf(&dmreq->sg_out[3], tag, cc->integrity_tag_size);
1376 if (cc->iv_gen_ops) {
1378 if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
1379 memcpy(org_iv, tag_iv, cc->iv_size);
1381 r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
1385 if (cc->integrity_iv_size)
1386 memcpy(tag_iv, org_iv, cc->iv_size);
1389 memcpy(iv, org_iv, cc->iv_size);
1392 aead_request_set_ad(req, sizeof(uint64_t) + cc->iv_size);
1395 cc->sector_size, iv);
1397 if (cc->integrity_tag_size + cc->integrity_iv_size != cc->tuple_size)
1398 memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0,
1399 cc->tuple_size - (cc->integrity_tag_size + cc->integrity_iv_size));
1402 cc->sector_size + cc->integrity_tag_size, iv);
1418 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
1419 r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
1421 bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
1422 bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
1427 static int crypt_convert_block_skcipher(struct crypt_config *cc,
1441 if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
1444 dmreq = dmreq_of_req(cc, req);
1446 if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
1447 dmreq->iv_sector >>= cc->sector_shift;
1450 *org_tag_of_dmreq(cc, dmreq) = tag_offset;
1452 iv = iv_of_dmreq(cc, dmreq);
1453 org_iv = org_iv_of_dmreq(cc, dmreq);
1454 tag_iv = iv_tag_from_dmreq(cc, dmreq);
1456 sector = org_sector_of_dmreq(cc, dmreq);
1457 *sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);
1464 sg_set_page(sg_in, bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
1467 sg_set_page(sg_out, bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
1469 if (cc->iv_gen_ops) {
1471 if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
1472 memcpy(org_iv, tag_iv, cc->integrity_iv_size);
1474 r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
1478 if (test_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags))
1481 if (cc->integrity_iv_size)
1482 memcpy(tag_iv, org_iv, cc->integrity_iv_size);
1485 memcpy(iv, org_iv, cc->iv_size);
1488 skcipher_request_set_crypt(req, sg_in, sg_out, cc->sector_size, iv);
1495 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
1496 r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
1498 bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
1499 bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
1506 static int crypt_alloc_req_skcipher(struct crypt_config *cc,
1509 unsigned int key_index = ctx->cc_sector & (cc->tfms_count - 1);
1512 ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
1517 skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);
1525 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
1530 static int crypt_alloc_req_aead(struct crypt_config *cc,
1534 ctx->r.req_aead = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
1539 aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);
1547 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
1552 static int crypt_alloc_req(struct crypt_config *cc,
1555 if (crypt_integrity_aead(cc))
1556 return crypt_alloc_req_aead(cc, ctx);
1558 return crypt_alloc_req_skcipher(cc, ctx);
1561 static void crypt_free_req_skcipher(struct crypt_config *cc,
1564 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
1567 mempool_free(req, &cc->req_pool);
1570 static void crypt_free_req_aead(struct crypt_config *cc,
1573 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
1576 mempool_free(req, &cc->req_pool);
1579 static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
1581 if (crypt_integrity_aead(cc))
1582 crypt_free_req_aead(cc, req, base_bio);
1584 crypt_free_req_skcipher(cc, req, base_bio);
1590 static blk_status_t crypt_convert(struct crypt_config *cc,
1593 unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
1606 r = crypt_alloc_req(cc, ctx);
1614 if (crypt_integrity_aead(cc))
1615 r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, ctx->tag_offset);
1617 r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, ctx->tag_offset);
1683 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
1707 struct crypt_config *cc = io->cc;
1716 mutex_lock(&cc->bio_alloc_lock);
1718 clone = bio_alloc_bioset(cc->dev->bdev, nr_iovecs, io->base_bio->bi_opf,
1719 GFP_NOIO, &cc->bs);
1723 clone->bi_iter.bi_sector = cc->start + io->sector;
1734 if (unlikely(percpu_counter_read_positive(&cc->n_allocated_pages) +
1741 percpu_counter_add(&cc->n_allocated_pages, 1 << order);
1748 pages = mempool_alloc(&cc->page_pool, gfp_mask);
1750 crypt_free_buffer_pages(cc, clone);
1765 crypt_free_buffer_pages(cc, clone);
1771 mutex_unlock(&cc->bio_alloc_lock);
1776 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
1783 percpu_counter_sub(&cc->n_allocated_pages,
1787 mempool_free(&fi.folio->page, &cc->page_pool);
1793 static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
1796 io->cc = cc;
1821 struct crypt_config *cc = io->cc;
1829 cc->used_tag_size && bio_data_dir(base_bio) == READ) {
1838 crypt_free_req(cc, io->ctx.r.req, base_bio);
1841 mempool_free(io->integrity_metadata, &io->cc->tag_pool);
1870 struct crypt_config *cc = io->cc;
1883 crypt_free_buffer_pages(cc, clone);
1902 struct crypt_config *cc = io->cc;
1914 crypt_convert_init(cc, &io->ctx, clone, clone, io->sector);
1926 clone = bio_alloc_clone(cc->dev->bdev, io->base_bio, gfp, &cc->bs);
1930 clone->bi_iter.bi_sector = cc->start + io->sector;
1958 struct crypt_config *cc = io->cc;
1961 queue_work(cc->io_queue, &io->work);
1975 struct crypt_config *cc = data;
1982 spin_lock_irq(&cc->write_thread_lock);
1985 if (!RB_EMPTY_ROOT(&cc->write_tree))
1990 spin_unlock_irq(&cc->write_thread_lock);
1999 spin_lock_irq(&cc->write_thread_lock);
2003 write_tree = cc->write_tree;
2004 cc->write_tree = RB_ROOT;
2005 spin_unlock_irq(&cc->write_thread_lock);
2028 struct crypt_config *cc = io->cc;
2034 crypt_free_buffer_pages(cc, clone);
2043 if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) ||
2044 test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) {
2049 spin_lock_irqsave(&cc->write_thread_lock, flags);
2050 if (RB_EMPTY_ROOT(&cc->write_tree))
2051 wake_up_process(cc->write_thread);
2052 rbp = &cc->write_tree.rb_node;
2063 rb_insert_color(&io->rb_node, &cc->write_tree);
2064 spin_unlock_irqrestore(&cc->write_thread_lock, flags);
2067 static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
2071 if (!test_bit(DM_CRYPT_WRITE_INLINE, &cc->flags))
2091 struct crypt_config *cc = io->cc;
2099 r = crypt_convert(cc, &io->ctx, false, false);
2103 if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
2118 struct crypt_config *cc = io->cc;
2128 crypt_convert_init(cc, ctx, NULL, io->base_bio, io->sector);
2139 if (crypt_integrity_aead(cc)) {
2146 r = crypt_convert(cc, ctx,
2147 test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
2155 queue_work(cc->crypt_queue, &io->work);
2161 if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
2182 crypt_free_buffer_pages(io->cc, io->ctx.bio_in);
2191 struct crypt_config *cc = io->cc;
2197 r = crypt_convert(cc, &io->ctx, false, false);
2209 struct crypt_config *cc = io->cc;
2215 r = crypt_convert(cc, &io->ctx,
2216 test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
2218 crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
2221 r = crypt_convert(cc, &io->ctx,
2222 test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
2230 queue_work(cc->crypt_queue, &io->work);
2247 struct crypt_config *cc = io->cc;
2259 if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
2260 error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);
2263 sector_t s = le64_to_cpu(*org_sector_of_dmreq(cc, dmreq));
2276 crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
2290 if (kcryptd_crypt_write_inline(cc, ctx)) {
2310 struct crypt_config *cc = io->cc;
2312 if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) ||
2313 (bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) {
2330 queue_work(cc->crypt_queue, &io->work);
2333 static void crypt_free_tfms_aead(struct crypt_config *cc)
2335 if (!cc->cipher_tfm.tfms_aead)
2338 if (cc->cipher_tfm.tfms_aead[0] && !IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
2339 crypto_free_aead(cc->cipher_tfm.tfms_aead[0]);
2340 cc->cipher_tfm.tfms_aead[0] = NULL;
2343 kfree(cc->cipher_tfm.tfms_aead);
2344 cc->cipher_tfm.tfms_aead = NULL;
2347 static void crypt_free_tfms_skcipher(struct crypt_config *cc)
2351 if (!cc->cipher_tfm.tfms)
2354 for (i = 0; i < cc->tfms_count; i++)
2355 if (cc->cipher_tfm.tfms[i] && !IS_ERR(cc->cipher_tfm.tfms[i])) {
2356 crypto_free_skcipher(cc->cipher_tfm.tfms[i]);
2357 cc->cipher_tfm.tfms[i] = NULL;
2360 kfree(cc->cipher_tfm.tfms);
2361 cc->cipher_tfm.tfms = NULL;
2364 static void crypt_free_tfms(struct crypt_config *cc)
2366 if (crypt_integrity_aead(cc))
2367 crypt_free_tfms_aead(cc);
2369 crypt_free_tfms_skcipher(cc);
2372 static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
2377 cc->cipher_tfm.tfms = kcalloc(cc->tfms_count,
2380 if (!cc->cipher_tfm.tfms)
2383 for (i = 0; i < cc->tfms_count; i++) {
2384 cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0,
2386 if (IS_ERR(cc->cipher_tfm.tfms[i])) {
2387 err = PTR_ERR(cc->cipher_tfm.tfms[i]);
2388 crypt_free_tfms(cc);
2399 crypto_skcipher_alg(any_tfm(cc))->base.cra_driver_name);
2403 static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode)
2407 cc->cipher_tfm.tfms = kmalloc(sizeof(struct crypto_aead *), GFP_KERNEL);
2408 if (!cc->cipher_tfm.tfms)
2411 cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0,
2413 if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
2414 err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]);
2415 crypt_free_tfms(cc);
2420 crypto_aead_alg(any_tfm_aead(cc))->base.cra_driver_name);
2424 static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
2426 if (crypt_integrity_aead(cc))
2427 return crypt_alloc_tfms_aead(cc, ciphermode);
2429 return crypt_alloc_tfms_skcipher(cc, ciphermode);
2432 static unsigned int crypt_subkey_size(struct crypt_config *cc)
2434 return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
2437 static unsigned int crypt_authenckey_size(struct crypt_config *cc)
2439 return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param));
2445 * This function converts cc->key to this special format.
2464 static int crypt_setkey(struct crypt_config *cc)
2470 subkey_size = crypt_subkey_size(cc);
2472 if (crypt_integrity_hmac(cc)) {
2473 if (subkey_size < cc->key_mac_size)
2476 crypt_copy_authenckey(cc->authenc_key, cc->key,
2477 subkey_size - cc->key_mac_size,
2478 cc->key_mac_size);
2481 for (i = 0; i < cc->tfms_count; i++) {
2482 if (crypt_integrity_hmac(cc))
2483 r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
2484 cc->authenc_key, crypt_authenckey_size(cc));
2485 else if (crypt_integrity_aead(cc))
2486 r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
2487 cc->key + (i * subkey_size),
2490 r = crypto_skcipher_setkey(cc->cipher_tfm.tfms[i],
2491 cc->key + (i * subkey_size),
2497 if (crypt_integrity_hmac(cc))
2498 memzero_explicit(cc->authenc_key, crypt_authenckey_size(cc));
2513 static int set_key_user(struct crypt_config *cc, struct key *key)
2521 if (cc->key_size != ukp->datalen)
2524 memcpy(cc->key, ukp->data, cc->key_size);
2529 static int set_key_encrypted(struct crypt_config *cc, struct key *key)
2537 if (cc->key_size != ekp->decrypted_datalen)
2540 memcpy(cc->key, ekp->decrypted_data, cc->key_size);
2545 static int set_key_trusted(struct crypt_config *cc, struct key *key)
2553 if (cc->key_size != tkp->key_len)
2556 memcpy(cc->key, tkp->key, cc->key_size);
2561 static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
2567 int (*set_key)(struct crypt_config *cc, struct key *key);
2612 ret = set_key(cc, key);
2619 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2621 ret = crypt_setkey(cc);
2625 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2626 kfree_sensitive(cc->key_string);
2627 cc->key_string = new_key_string;
2660 static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
2672 static int crypt_set_key(struct crypt_config *cc, char *key)
2678 if (!cc->key_size && strcmp(key, "-"))
2683 r = crypt_set_keyring_key(cc, key + 1);
2688 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2691 kfree_sensitive(cc->key_string);
2692 cc->key_string = NULL;
2695 if (cc->key_size && hex2bin(cc->key, key, cc->key_size) < 0)
2698 r = crypt_setkey(cc);
2700 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2709 static int crypt_wipe_key(struct crypt_config *cc)
2713 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2714 get_random_bytes(&cc->key, cc->key_size);
2717 if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
2718 r = cc->iv_gen_ops->wipe(cc);
2723 kfree_sensitive(cc->key_string);
2724 cc->key_string = NULL;
2725 r = crypt_setkey(cc);
2726 memset(&cc->key, 0, cc->key_size * sizeof(u8));
2746 struct crypt_config *cc = pool_data;
2754 if (unlikely(percpu_counter_read_positive(&cc->n_allocated_pages) >= dm_crypt_pages_per_client) &&
2760 percpu_counter_add(&cc->n_allocated_pages, 1);
2767 struct crypt_config *cc = pool_data;
2770 percpu_counter_sub(&cc->n_allocated_pages, 1);
2775 struct crypt_config *cc = ti->private;
2779 if (!cc)
2782 if (cc->write_thread)
2783 kthread_stop(cc->write_thread);
2785 if (cc->io_queue)
2786 destroy_workqueue(cc->io_queue);
2787 if (cc->crypt_queue)
2788 destroy_workqueue(cc->crypt_queue);
2790 if (cc->workqueue_id)
2791 ida_free(&workqueue_ida, cc->workqueue_id);
2793 crypt_free_tfms(cc);
2795 bioset_exit(&cc->bs);
2797 mempool_exit(&cc->page_pool);
2798 mempool_exit(&cc->req_pool);
2799 mempool_exit(&cc->tag_pool);
2801 WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0);
2802 percpu_counter_destroy(&cc->n_allocated_pages);
2804 if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
2805 cc->iv_gen_ops->dtr(cc);
2807 if (cc->dev)
2808 dm_put_device(ti, cc->dev);
2810 kfree_sensitive(cc->cipher_string);
2811 kfree_sensitive(cc->key_string);
2812 kfree_sensitive(cc->cipher_auth);
2813 kfree_sensitive(cc->authenc_key);
2815 mutex_destroy(&cc->bio_alloc_lock);
2818 kfree_sensitive(cc);
2831 struct crypt_config *cc = ti->private;
2833 if (crypt_integrity_aead(cc))
2834 cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
2836 cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
2838 if (cc->iv_size)
2840 cc->iv_size = max(cc->iv_size,
2849 cc->iv_gen_ops = NULL;
2851 cc->iv_gen_ops = &crypt_iv_plain_ops;
2853 cc->iv_gen_ops = &crypt_iv_plain64_ops;
2855 cc->iv_gen_ops = &crypt_iv_plain64be_ops;
2857 cc->iv_gen_ops = &crypt_iv_essiv_ops;
2859 cc->iv_gen_ops = &crypt_iv_benbi_ops;
2861 cc->iv_gen_ops = &crypt_iv_null_ops;
2863 cc->iv_gen_ops = &crypt_iv_eboiv_ops;
2865 cc->iv_gen_ops = &crypt_iv_elephant_ops;
2866 cc->key_parts = 2;
2867 cc->key_extra_size = cc->key_size / 2;
2868 if (cc->key_extra_size > ELEPHANT_MAX_KEY_SIZE)
2870 set_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags);
2872 cc->iv_gen_ops = &crypt_iv_lmk_ops;
2879 if (cc->key_size % cc->key_parts) {
2880 cc->key_parts++;
2881 cc->key_extra_size = cc->key_size / cc->key_parts;
2884 cc->iv_gen_ops = &crypt_iv_tcw_ops;
2885 cc->key_parts += 2; /* IV + whitening */
2886 cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
2888 cc->iv_gen_ops = &crypt_iv_random_ops;
2890 cc->integrity_iv_size = cc->iv_size;
2904 static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api)
2927 if (!test_bit(CRYPT_KEY_MAC_SIZE_SET, &cc->cipher_flags))
2928 cc->key_mac_size = crypto_ahash_digestsize(mac);
2931 cc->authenc_key = kmalloc(crypt_authenckey_size(cc), GFP_KERNEL);
2932 if (!cc->authenc_key)
2941 struct crypt_config *cc = ti->private;
2945 cc->tfms_count = 1;
2969 if (crypt_integrity_aead(cc)) {
2970 ret = crypt_ctr_auth_cipher(cc, cipher_api);
2978 cc->tfms_count = 64;
2994 cc->key_parts = cc->tfms_count;
2997 ret = crypt_alloc_tfms(cc, cipher_api);
3003 if (crypt_integrity_aead(cc))
3004 cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
3006 cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
3014 struct crypt_config *cc = ti->private;
3020 if (strchr(cipher_in, '(') || crypt_integrity_aead(cc)) {
3034 cc->tfms_count = 1;
3035 else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
3036 !is_power_of_2(cc->tfms_count)) {
3040 cc->key_parts = cc->tfms_count;
3082 ret = crypt_alloc_tfms(cc, cipher_api);
3098 struct crypt_config *cc = ti->private;
3102 cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
3103 if (!cc->cipher_string) {
3121 ret = crypt_set_key(cc, key);
3128 if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
3129 ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
3137 if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
3138 ret = cc->iv_gen_ops->init(cc);
3146 if (cc->key_string)
3147 memset(cc->key, 0, cc->key_size * sizeof(u8));
3154 struct crypt_config *cc = ti->private;
3183 set_bit(DM_CRYPT_SAME_CPU, &cc->flags);
3185 set_bit(DM_CRYPT_HIGH_PRIORITY, &cc->flags);
3188 set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
3190 set_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
3192 set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
3198 cc->used_tag_size = val;
3201 set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
3207 cc->cipher_auth = kstrdup(sval, GFP_KERNEL);
3208 if (!cc->cipher_auth)
3215 cc->key_mac_size = val;
3216 set_bit(CRYPT_KEY_MAC_SIZE_SET, &cc->cipher_flags);
3217 } else if (sscanf(opt_string, "sector_size:%hu%c", &cc->sector_size, &dummy) == 1) {
3218 if (cc->sector_size < (1 << SECTOR_SHIFT) ||
3219 cc->sector_size > 4096 ||
3220 (cc->sector_size & (cc->sector_size - 1))) {
3224 if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) {
3228 cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
3230 set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
3244 struct crypt_config *cc = ti->private;
3246 return dm_report_zones(cc->dev->bdev, cc->start,
3247 cc->start + dm_target_offset(ti, args->next_sector),
3260 struct crypt_config *cc;
3281 cc = kzalloc(struct_size(cc, key, key_size), GFP_KERNEL);
3282 if (!cc) {
3286 cc->key_size = key_size;
3287 cc->sector_size = (1 << SECTOR_SHIFT);
3288 cc->sector_shift = 0;
3290 ti->private = cc;
3297 ret = percpu_counter_init(&cc->n_allocated_pages, 0, GFP_KERNEL);
3312 if (crypt_integrity_aead(cc)) {
3313 cc->dmreq_start = sizeof(struct aead_request);
3314 cc->dmreq_start += crypto_aead_reqsize(any_tfm_aead(cc));
3315 align_mask = crypto_aead_alignmask(any_tfm_aead(cc));
3317 cc->dmreq_start = sizeof(struct skcipher_request);
3318 cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
3319 align_mask = crypto_skcipher_alignmask(any_tfm(cc));
3321 cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
3325 iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
3338 iv_size_padding + cc->iv_size +
3339 cc->iv_size +
3343 ret = mempool_init_kmalloc_pool(&cc->req_pool, MIN_IOS, cc->dmreq_start + additional_req_size);
3349 cc->per_bio_data_size = ti->per_io_data_size =
3350 ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
3353 ret = mempool_init(&cc->page_pool, BIO_MAX_VECS, crypt_page_alloc, crypt_page_free, cc);
3359 ret = bioset_init(&cc->bs, MIN_IOS, 0, BIOSET_NEED_BVECS);
3365 mutex_init(&cc->bio_alloc_lock);
3369 (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) {
3373 cc->iv_offset = tmpll;
3375 ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
3386 cc->start = tmpll;
3388 if (bdev_is_zoned(cc->dev->bdev)) {
3394 set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
3395 set_bit(DM_CRYPT_WRITE_INLINE, &cc->flags);
3412 if (crypt_integrity_aead(cc) || cc->integrity_iv_size) {
3413 ret = crypt_integrity_ctr(cc, ti);
3417 cc->tag_pool_max_sectors = POOL_ENTRY_SIZE / cc->tuple_size;
3418 if (!cc->tag_pool_max_sectors)
3419 cc->tag_pool_max_sectors = 1;
3421 ret = mempool_init_kmalloc_pool(&cc->tag_pool, MIN_IOS,
3422 cc->tag_pool_max_sectors * cc->tuple_size);
3428 cc->tag_pool_max_sectors <<= cc->sector_shift;
3437 cc->workqueue_id = wq_id;
3441 if (test_bit(DM_CRYPT_HIGH_PRIORITY, &cc->flags))
3444 cc->io_queue = alloc_workqueue("kcryptd_io-%s-%d", common_wq_flags, 1, devname, wq_id);
3445 if (!cc->io_queue) {
3450 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags)) {
3451 cc->crypt_queue = alloc_workqueue("kcryptd-%s-%d",
3459 cc->crypt_queue = alloc_workqueue("kcryptd-%s-%d",
3463 if (!cc->crypt_queue) {
3468 spin_lock_init(&cc->write_thread_lock);
3469 cc->write_tree = RB_ROOT;
3471 cc->write_thread = kthread_run(dmcrypt_write, cc, "dmcrypt_write/%s", devname);
3472 if (IS_ERR(cc->write_thread)) {
3473 ret = PTR_ERR(cc->write_thread);
3474 cc->write_thread = NULL;
3478 if (test_bit(DM_CRYPT_HIGH_PRIORITY, &cc->flags))
3479 set_user_nice(cc->write_thread, MIN_NICE);
3497 struct crypt_config *cc = ti->private;
3507 bio_set_dev(bio, cc->dev->bdev);
3509 bio->bi_iter.bi_sector = cc->start +
3517 max_sectors = get_max_request_size(cc, bio_data_dir(bio) == WRITE);
3525 if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
3528 if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
3531 io = dm_per_bio_data(bio, cc->per_bio_data_size);
3532 crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
3534 if (cc->tuple_size) {
3535 unsigned int tag_len = cc->tuple_size * (bio_sectors(bio) >> cc->sector_shift);
3543 if (bio_sectors(bio) > cc->tag_pool_max_sectors)
3544 dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
3545 io->integrity_metadata = mempool_alloc(&cc->tag_pool, GFP_NOIO);
3550 if (crypt_integrity_aead(cc))
3572 struct crypt_config *cc = ti->private;
3582 DMEMIT("%s ", cc->cipher_string);
3584 if (cc->key_size > 0) {
3585 if (cc->key_string)
3586 DMEMIT(":%u:%s", cc->key_size, cc->key_string);
3588 for (i = 0; i < cc->key_size; i++) {
3589 DMEMIT("%c%c", hex2asc(cc->key[i] >> 4),
3590 hex2asc(cc->key[i] & 0xf));
3596 DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
3597 cc->dev->name, (unsigned long long)cc->start);
3600 num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
3601 num_feature_args += test_bit(DM_CRYPT_HIGH_PRIORITY, &cc->flags);
3602 num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
3603 num_feature_args += test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
3604 num_feature_args += test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
3605 num_feature_args += !!cc->used_tag_size;
3606 num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
3607 num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
3608 num_feature_args += test_bit(CRYPT_KEY_MAC_SIZE_SET, &cc->cipher_flags);
3613 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
3615 if (test_bit(DM_CRYPT_HIGH_PRIORITY, &cc->flags))
3617 if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
3619 if (test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags))
3621 if (test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))
3623 if (cc->used_tag_size)
3624 DMEMIT(" integrity:%u:%s", cc->used_tag_size, cc->cipher_auth);
3625 if (cc->sector_size != (1 << SECTOR_SHIFT))
3626 DMEMIT(" sector_size:%d", cc->sector_size);
3627 if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
3629 if (test_bit(CRYPT_KEY_MAC_SIZE_SET, &cc->cipher_flags))
3630 DMEMIT(" integrity_key_size:%u", cc->key_mac_size);
3637 DMEMIT(",same_cpu_crypt=%c", test_bit(DM_CRYPT_SAME_CPU, &cc->flags) ? 'y' : 'n');
3638 DMEMIT(",high_priority=%c", test_bit(DM_CRYPT_HIGH_PRIORITY, &cc->flags) ? 'y' : 'n');
3639 DMEMIT(",submit_from_crypt_cpus=%c", test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags) ?
3641 DMEMIT(",no_read_workqueue=%c", test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags) ?
3643 DMEMIT(",no_write_workqueue=%c", test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags) ?
3645 DMEMIT(",iv_large_sectors=%c", test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags) ?
3648 if (cc->used_tag_size)
3650 cc->used_tag_size, cc->cipher_auth);
3651 if (cc->sector_size != (1 << SECTOR_SHIFT))
3652 DMEMIT(",sector_size=%d", cc->sector_size);
3653 if (cc->cipher_string)
3654 DMEMIT(",cipher_string=%s", cc->cipher_string);
3656 DMEMIT(",key_size=%u", cc->key_size);
3657 DMEMIT(",key_parts=%u", cc->key_parts);
3658 DMEMIT(",key_extra_size=%u", cc->key_extra_size);
3659 DMEMIT(",key_mac_size=%u", cc->key_mac_size);
3667 struct crypt_config *cc = ti->private;
3669 set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
3674 struct crypt_config *cc = ti->private;
3676 if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
3686 struct crypt_config *cc = ti->private;
3688 clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
3698 struct crypt_config *cc = ti->private;
3705 if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
3712 if (key_size < 0 || cc->key_size != key_size) {
3717 ret = crypt_set_key(cc, argv[2]);
3720 if (cc->iv_gen_ops && cc->iv_gen_ops->init)
3721 ret = cc->iv_gen_ops->init(cc);
3723 if (cc->key_string)
3724 memset(cc->key, 0, cc->key_size * sizeof(u8));
3728 return crypt_wipe_key(cc);
3739 struct crypt_config *cc = ti->private;
3741 return fn(ti, cc->dev, cc->start, ti->len, data);
3746 struct crypt_config *cc = ti->private;
3749 max_t(unsigned int, limits->logical_block_size, cc->sector_size);
3751 max_t(unsigned int, limits->physical_block_size, cc->sector_size);
3752 limits->io_min = max_t(unsigned int, limits->io_min, cc->sector_size);